python_code | repo_name | file_path |
---|---|---|
from functools import partial
from typing import List
import time
import torch
import unittest
from apex.transformer._ucc_util import HAS_UCC
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.tensor_parallel import model_parallel_cuda_manual_seed
from apex.transformer.pipeline_parallel.utils import (
average_losses_across_data_parallel_group, unwrap_model, setup_microbatch_calculator,
get_ltor_masks_and_position_ids
)
from apex.transformer.pipeline_parallel.schedules.common import (
_get_params_for_weight_decay_optimization, build_model
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.testing.standalone_gpt import gpt_model_provider
from apex.transformer.testing import global_vars
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase, NcclDistributedTestBase
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_device_type_tests
class GptTestBase:
def _download_fancy_data(self):
text = """
An original sentence not subject to any license restrictions, copyright, or royalty payments. Nothing to see here. Commercial or non-commercial use. Research or non-research purposes. The quick brown fox jumps over the lazy dog. Lorem ipsum.
"""
text = text * 1024
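        # ASCII encoding keeps every byte value below 128, matching the toy vocabulary used by these tests (N_VOCAB / padded_vocab_size = 128).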
encoded = text.encode("ascii", "replace")
ints = [int(encoded[i]) for i in range(len(encoded))]
return torch.tensor(ints)
# build a batch given sequence_len and batch size
def _generate_fancy_data_labels(self, sequence_len, batch_size):
temps = list()
for i in range(batch_size):
if self.inds is None or self.data_idx >= len(self.inds):
                # hack: reseed explicitly, since RNG use would otherwise fall out of sync across the different pipeline stages
model_parallel_cuda_manual_seed(self.MANUAL_SEED)
self.inds = torch.randperm(effective_length, device="cuda")
self.MANUAL_SEED += 1
self.data_idx = 0
data_idx_ = self.data_idx
offset = self.inds[data_idx_]
self.data_idx += 1
curr = fancy_data[offset: offset +
sequence_len + 1].clone().detach()
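            # sequence_len + 1 tokens are sliced so _get_batch can shift by one to form (tokens, labels) pairs.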
temps.append(curr)
temp = torch.stack(temps, dim=0).cuda()
return temp
def _get_batch(self, int_tensors: List[torch.Tensor]):
data = int_tensors[0]
# Unpack.
tokens_ = data.long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
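        # Next-token prediction: labels are the inputs shifted left by one position.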
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
self.N_VOCAB, # tokenizer.eod,
False, # args.reset_position_ids,
False, # args.reset_attention_mask,
False, # args.eod_mask_loss,
)
return tokens, labels, loss_mask, attention_mask, position_ids
# Ref: https://github.com/NVIDIA/Megatron-LM/blob/b31e1296354e979722627a6c4dedafe19b51fa97/pretrain_gpt.py#L75
def _loss_func(self, loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {"lm loss": averaged_loss[0]}
# Ref: https://github.com/NVIDIA/Megatron-LM/blob/b31e1296354e979722627a6c4dedafe19b51fa97/pretrain_gpt.py#L86
def _fwd_step_func(self, batch, model):
"""Forward step."""
tokens, labels, loss_mask, attention_mask, position_ids = self._get_batch(
batch)
output_tensor = model(tokens, position_ids,
attention_mask, labels=labels)
return output_tensor, partial(self._loss_func, loss_mask)
def _train(self, model, optim, pipeline_model_parallel_size, async_comm):
args = global_vars.get_args()
fwd_bwd_func = forward_backward_pipelining_without_interleaving
tensor_shape = (args.seq_length, args.micro_batch_size,
args.hidden_size)
runtime = 0
# training loop
for i in range(3):
since = time.time()
if torch.distributed.get_rank() == 0:
print("begin iter", i)
batch = [
self._generate_fancy_data_labels(
args.seq_length, args.global_batch_size)
for _ in range(pipeline_model_parallel_size)
]
if torch.distributed.get_rank() == 0:
print("finished making batch...")
optim.zero_grad()
fwd_bwd_func(
self._fwd_step_func,
batch,
model,
forward_only=False,
tensor_shape=tensor_shape,
async_comm=async_comm,
sequence_parallel_enabled=args.sequence_parallel,
)
if torch.distributed.get_rank() == 0:
print("finished forward step")
# All-reduce layernorm parameters across model parallel nodes
# when sequence parallelism is used
if parallel_state.get_tensor_model_parallel_world_size() > 1 and global_vars.get_args().sequence_parallel:
for model_module in model:
unwrapped_model = unwrap_model(model_module)
for param in unwrapped_model.parameters():
if getattr(param, 'sequence_parallel_enabled', False):
grad = param.grad
torch.distributed.all_reduce(
grad, group=parallel_state.get_tensor_model_parallel_group())
optim.step()
if torch.distributed.get_rank() == 0:
print("finished iter", i)
runtime += time.time() - since
return runtime / 3.0
@unittest.skipUnless(torch.cuda.device_count() > 2, "requires at least 3 gpus")
def test_gpt(self):
self.MANUAL_SEED = 42
self.inds = None
self.data_idx = 0
self.N_VOCAB = 128
init = True
tensor_model_parallel_size = 2 if self.world_size % 2 == 0 and self.world_size >= 4 else 1
pipeline_model_parallel_size = self.world_size // tensor_model_parallel_size
override_args = {
"micro_batch_size": 2,
"num_layers": 16,
"hidden_size": 256,
"num_attention_heads": 8,
"max_position_embeddings": 512,
"seq_length": 512,
"global_batch_size": 128,
"pipeline_model_parallel_size": pipeline_model_parallel_size,
"tensor_model_parallel_size": tensor_model_parallel_size,
"world_size": self.world_size,
"rank": self.rank,
}
global_vars.set_global_variables(override_args=override_args, ignore_unknown_args=True)
args = global_vars.get_args()
for async_comm in (False,) if args.sequence_parallel else (False, True):
global fancy_data
global effective_length
if init:
init = False
fancy_data = self._download_fancy_data()
args = global_vars.get_args()
args.model_type = ModelType.encoder_or_decoder
effective_length = fancy_data.size(0) // args.seq_length
effective_length = fancy_data.size(0) - args.seq_length
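                # The second assignment supersedes the first; effective_length is the data length minus one sequence.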
args.padded_vocab_size = 128
setup_microbatch_calculator(
args.rank,
args.rampup_batch_size,
args.global_batch_size,
args.micro_batch_size,
args.data_parallel_size,
)
print(args.tensor_model_parallel_size, "MODEL PARALLEL SIZE")
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
default_backend="nccl",
p2p_backend=self.DISTRIBUTED_BACKEND,
)
model_parallel_cuda_manual_seed(0)
model = build_model(
gpt_model_provider,
wrap_with_ddp=parallel_state.get_data_parallel_world_size() > 1,
virtual_pipeline_model_parallel_size=None,
cpu_offload=args.cpu_offload,
)
assert isinstance(model, list), model
_param_groups = _get_params_for_weight_decay_optimization(model)
optim = torch.optim.Adam(_param_groups)
runtime = self._train(
model, optim, args.pipeline_model_parallel_size, async_comm)
parallel_state.destroy_model_parallel()
torch.cuda.synchronize()
class NcclGptTest(GptTestBase, NcclDistributedTestBase):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
@unittest.skipUnless(HAS_UCC, "requires pytorch to be built with native ucc")
class UccGptTest(GptTestBase, UccDistributedTestBase):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_gpt_minimal.py |
import torch
from torch.testing._internal import common_utils
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from apex.transformer.pipeline_parallel.utils import _split_batch_into_microbatch as split_batch_into_microbatch
class MyIterableDataset(Dataset):
def __init__(self, start, end):
super().__init__()
        assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
self.samples = list(range(self.start, self.end))
def __iter__(self):
return iter(range(self.start, self.end))
def __getitem__(self, index):
return self.samples[index]
class MegatronPretrainingRandomSampler:
def __init__(self, total_samples, consumed_samples, micro_batch_size,
data_parallel_rank, data_parallel_size):
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.last_batch_size = \
self.total_samples % self.micro_batch_times_data_parallel_size
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples
def __iter__(self):
active_total_samples = self.total_samples - self.last_batch_size
self.epoch = self.consumed_samples // active_total_samples
current_epoch_samples = self.consumed_samples % active_total_samples
assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0
# data sharding and random sampling
bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) * self.micro_batch_size
bucket_offset = current_epoch_samples // self.data_parallel_size
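        # Each data-parallel rank draws from its own contiguous bucket of indices; bucket_offset skips samples already consumed this epoch.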
start_idx = self.data_parallel_rank * bucket_size
g = torch.Generator()
g.manual_seed(self.epoch)
random_idx = torch.randperm(bucket_size, generator=g).tolist()
idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
batch = []
# Last batch if not complete will be dropped.
for idx in idx_range:
batch.append(idx)
if len(batch) == self.micro_batch_size:
self.consumed_samples += self.micro_batch_times_data_parallel_size
yield batch
batch = []
# Samples 8 tensors in total.
# First sample 4 tensors twice, then sample 2 tensors four times.
class TestBatchSamplerBehavior(common_utils.TestCase):
def tearDown(self) -> None:
torch.cuda.empty_cache()
super().tearDown()
def test_batch_sampler_behavior(self):
dataset = MyIterableDataset(0, 100)
for num_workers in (1, 2, 4):
torch.manual_seed(42)
loader = DataLoader(dataset, batch_sampler=MegatronPretrainingRandomSampler(100, 0, 4, 0, 1), num_workers=num_workers)
samples = []
for i, batch in enumerate(loader):
samples.append(batch)
if i == 2 - 1:
break
torch.manual_seed(42)
loader = DataLoader(dataset, batch_sampler=MegatronPretrainingRandomSampler(100, 0, 2, 0, 1), num_workers=num_workers)
samples2 = []
for i, batch in enumerate(loader):
samples2.append(batch)
if i == 4 - 1:
break
self.assertEqual(torch.cat(samples), torch.cat(samples2), msg=f"num_workers={num_workers}")
def test_split_batch(self):
class MyIterableDataset(Dataset):
def __init__(self, start, end):
super().__init__()
                assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
self.samples = list(range(self.start, self.end))
def __len__(self):
return self.end - self.start
def __iter__(self):
return iter(range(self.start, self.end))
def __getitem__(self, index):
return (torch.tensor([index, index]), torch.tensor([index // 2, index // 2]))
dataset = MyIterableDataset(0, 100)
torch.manual_seed(42)
global_batch_size = 16
loader = DataLoader(dataset, batch_sampler=MegatronPretrainingRandomSampler(100, 0, global_batch_size, 0, 1), num_workers=2)
batch = next(iter(loader))
for _micro_batch_size in (1, 2, 4, 8):
microbatches = list(split_batch_into_microbatch(
batch,
_micro_batch_size=_micro_batch_size,
_global_batch_size=global_batch_size,
))
self.assertEqual(len(microbatches), global_batch_size // _micro_batch_size)
self.assertEqual(len(microbatches[0][0]), _micro_batch_size)
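            # As the two checks above show, splitting happens along the leading (batch) dimension while the per-key structure of the batch is preserved.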
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_batch_sampler.py |
import logging
import unittest
import typing
import torch
import torch.nn as nn
from torch.testing._internal import common_utils
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import layers
from apex.transformer.testing.commons import set_random_seed
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("torch").setLevel(logging.WARNING)
logging.getLogger("apex").setLevel(logging.WARNING)
# N.B.(mkozuki): Disable TF32 matrix multiply.
# Matrices used in this test are small, and TF32 matmul can be imprecise
# enough to make `self.assertEqual` fail.
torch.backends.cuda.matmul.allow_tf32 = False
class TensorParallelLayerTestBase:
BATCH_SIZE: int = 8
SEQUENCE_LENGTH: int = 128
VOCAB_SIZE: int = 1024
HIDDEN_SIZE: int = 256
INPUT_SIZE_COEFF: int = 256
OUTPUT_SIZE_COEFF: int = 256
SEED: int = 123456
@property
def tensor_shape(self) -> typing.Sequence[int]:
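        # Shape convention throughout these tests: (sequence length, batch size, hidden size).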
return [self.SEQUENCE_LENGTH, self.BATCH_SIZE, self.HIDDEN_SIZE]
@torch.no_grad()
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires >=2 GPUs")
def test_all_gather_parity(self) -> None:
if self.DISTRIBUTED_BACKEND == "ucc":
self.skipTest("torch_ucc does NOT support `torch.distributed._all_gather_base` as of 2022/06/15")
from torch.distributed.distributed_c10d import all_gather, _all_gather_base # NOQA
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_parallel_world_size:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
)
tensor_model_parallel_rank = parallel_state.get_tensor_model_parallel_rank()
cur_tensor_model_device = torch.device(f"cuda:{tensor_model_parallel_rank}")
with torch.no_grad():
tensor = tensor_model_parallel_rank * torch.ones(
self.tensor_shape, dtype=torch.float32, device=cur_tensor_model_device)
numel = tensor.numel()
numel_gathered = tensor_model_parallel_world_size * numel
gathered = torch.empty(
torch.Size((numel_gathered,)),
device=cur_tensor_model_device,
dtype=torch.float32,
requires_grad=False,
)
chunks = [
gathered[i * numel : (i + 1) * numel]
for i in range(tensor_model_parallel_world_size)
]
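                # `chunks` are views into `gathered`, so the list-based all_gather fills one flat buffer comparable with the _all_gather_base result below.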
all_gather(chunks, tensor, group=parallel_state.get_tensor_model_parallel_group())
gathered_for_base = torch.empty(
torch.Size((numel_gathered,)),
device=cur_tensor_model_device,
dtype=torch.float32,
requires_grad=False,
)
_all_gather_base(
gathered_for_base,
tensor,
group=parallel_state.get_tensor_model_parallel_group(),
)
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
self.assertEqual(gathered, gathered_for_base, msg=msg)
parallel_state.destroy_model_parallel()
@torch.no_grad()
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires >=2 GPUs")
def test_reduce_scatter_parity(self) -> None:
if self.DISTRIBUTED_BACKEND == "ucc":
self.skipTest("torch_ucc does NOT support `torch.distributed._reduce_scatter_base` as of 2022/06/15")
from torch.distributed.distributed_c10d import reduce_scatter, _reduce_scatter_base # NOQA
for tensor_model_parallel_world_size in range(2, self.world_size + 1):
if self.world_size % tensor_model_parallel_world_size:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
)
tensor_model_parallel_rank = parallel_state.get_tensor_model_parallel_rank()
cur_tensor_model_device = torch.device(f"cuda:{tensor_model_parallel_rank}")
with torch.no_grad():
input = torch.cat([
i * torch.ones(self.tensor_shape, dtype=torch.float32, device=cur_tensor_model_device)
for i in range(tensor_model_parallel_world_size)
])
input_list = [t.clone() for t in input.chunk(tensor_model_parallel_world_size)]
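                # reduce_scatter consumes a list of per-rank inputs; _reduce_scatter_base takes the same data as one concatenated tensor.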
output = torch.empty(
self.tensor_shape,
device=cur_tensor_model_device,
dtype=torch.float32,
requires_grad=False,
)
reduce_scatter(
output, input_list,
group=parallel_state.get_tensor_model_parallel_group(),
)
output_for_base = torch.empty(
self.tensor_shape,
device=cur_tensor_model_device,
dtype=torch.float32,
requires_grad=False,
)
_reduce_scatter_base(
output_for_base,
input,
group=parallel_state.get_tensor_model_parallel_group(),
)
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
self.assertEqual(output, output_for_base, msg=msg)
self.assertEqual(input, torch.cat(input_list), msg=msg)
parallel_state.destroy_model_parallel()
def test_parallel_embedding(self) -> None:
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_parallel_world_size:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
)
set_random_seed(self.SEED + 1)
input_tensor = torch.randint(
0,
self.VOCAB_SIZE,
(
self.BATCH_SIZE,
self.SEQUENCE_LENGTH,
),
device="cuda",
)
loss_weight = torch.randn(
(
self.BATCH_SIZE,
self.SEQUENCE_LENGTH,
self.HIDDEN_SIZE,
),
device="cuda",
)
set_random_seed(self.SEED)
embedding_torch = nn.Embedding(
self.VOCAB_SIZE,
self.HIDDEN_SIZE,
).cuda()
output_torch = embedding_torch(input_tensor)
loss_torch = torch.mul(output_torch, loss_weight).sum()
loss_torch.backward()
# N.B.(mkozuki): With affine weight initialization on GPU,
# it's super difficult to keep the consistency with nn.Embedding.
# Thus, turning on `use_cpu_initialization`.
set_random_seed(self.SEED)
embedding_vocab_parallel = layers.VocabParallelEmbedding(
self.VOCAB_SIZE,
self.HIDDEN_SIZE,
init_method=nn.init.normal_,
use_cpu_initialization=True,
).cuda()
output_vocab_parallel = embedding_vocab_parallel(input_tensor)
loss_vocab_parallel = torch.mul(
output_vocab_parallel, loss_weight
).sum()
loss_vocab_parallel.backward()
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
self.assertEqual(output_torch, output_vocab_parallel, msg=msg)
self.assertEqual(loss_torch, loss_vocab_parallel, msg=msg)
splitted_weight_torch = torch.split(
embedding_torch.weight.grad,
self.VOCAB_SIZE
// tensor_model_parallel_world_size,
0,
)[parallel_state.get_tensor_model_parallel_rank()]
self.assertEqual(
splitted_weight_torch, embedding_vocab_parallel.weight.grad, msg=msg,
)
parallel_state.destroy_model_parallel()
def _affine_weight_init_test_impl(
self, init_device: str, is_column_parallel: bool
) -> None:
dim = int(not is_column_parallel)
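        # Weights are (output, input): column-parallel shards dim 0 (output features), row-parallel shards dim 1 (input features).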
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_parallel_world_size:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size
)
input_size: int = self.INPUT_SIZE_COEFF * tensor_model_parallel_world_size
output_size: int = self.OUTPUT_SIZE_COEFF * tensor_model_parallel_world_size
weight_shape = (
(self.OUTPUT_SIZE_COEFF, input_size)
if is_column_parallel
else (output_size, self.INPUT_SIZE_COEFF)
)
weight = torch.empty(weight_shape)
set_random_seed(self.SEED)
sharding_dim_size = (
self.OUTPUT_SIZE_COEFF
if is_column_parallel
else self.INPUT_SIZE_COEFF
)
if init_device == "cpu":
layers._initialize_affine_weight_cpu(
weight,
output_size,
input_size,
sharding_dim_size,
dim,
nn.init.normal_,
params_dtype=torch.float32,
)
else:
layers._initialize_affine_weight_gpu(
weight, torch.nn.init.normal_, dim
)
# Target
set_random_seed(self.SEED)
if init_device == "cpu":
main_weight = torch.empty(output_size, input_size)
nn.init.normal_(main_weight)
curr_weight = torch.split(main_weight, sharding_dim_size, dim=dim)[
parallel_state.get_tensor_model_parallel_rank()
]
else:
curr_weight = torch.empty(*weight_shape)
nn.init.normal_(curr_weight)
self.assertEqual(
curr_weight, weight, msg=f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}")
parallel_state.destroy_model_parallel()
def test_affine_weight_init_column_parallel_cpu(self) -> None:
self._affine_weight_init_test_impl(init_device="cpu", is_column_parallel=True)
def test_affine_weight_init_column_parallel_gpu(self) -> None:
self._affine_weight_init_test_impl(init_device="gpu", is_column_parallel=True)
def test_affine_weight_init_row_parallel_cpu(self) -> None:
self._affine_weight_init_test_impl(init_device="cpu", is_column_parallel=False)
def test_affine_weight_init_row_parallel_gpu(self) -> None:
self._affine_weight_init_test_impl(init_device="gpu", is_column_parallel=False)
def test_row_parallel_linear(self) -> None:
self._row_parallel_linear_test_impl(False, False, False)
def test_row_parallel_linear_gradient_accumulation_fusion(self) -> None:
self._row_parallel_linear_test_impl(True, False, False)
def test_row_parallel_linear_gradient_accumulation_fusion_in_fp16(self) -> None:
self._row_parallel_linear_test_impl(True, True, False)
# fails on native ucc and torch ucc: ucc does not support reduce scatter
@unittest.skipIf(torch.cuda.device_count() < 2, "Sequence Parallel requires >=2 GPUs")
def test_row_parallel_linear_sequence_parallel(self) -> None:
self._row_parallel_linear_test_impl(False, False, True)
# TODO(mkozuki): Merge this with `_column_parallel_linear_test_impl`
# Note that `input_is_parallel` is unique to `RowParallelLinear` which could make the merge complicated.
def _row_parallel_linear_test_impl(
self,
gradient_accumulation_fusion: bool,
accumulation_in_fp16: bool,
sequence_parallel_enabled: bool,
) -> None:
tensor_shape = (
self.SEQUENCE_LENGTH,
self.BATCH_SIZE,
self.HIDDEN_SIZE,
)
for tensor_model_parallel_world_size in range(
1 + int(sequence_parallel_enabled), self.world_size + 1
):
if self.world_size % tensor_model_parallel_world_size:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
)
set_random_seed(self.SEED)
linear = layers.RowParallelLinear(
self.HIDDEN_SIZE,
self.HIDDEN_SIZE,
keep_master_weight_for_test=True,
params_dtype=torch.float32,
use_cpu_initialization=True,
gradient_accumulation_fusion=gradient_accumulation_fusion,
accumulation_in_fp16=accumulation_in_fp16,
sequence_parallel_enabled=sequence_parallel_enabled,
# n.b.(mkozuki): RowParallelLinear is constructed with `input_is_parallel=True`
# by default, e.g. https://github.com/NVIDIA/NeMo/blob/782b4e1652aaa43c8be390d9\
# db0dc89544afa080/nemo/collections/nlp/modules/common/megatron/transformer.py#L204
input_is_parallel=True,
).cuda()
if accumulation_in_fp16:
linear = linear.half()
# Simulate the situation where fusion of weight grad calculation and gradient accumulation is enabled.
if gradient_accumulation_fusion:
with torch.no_grad():
linear.weight.main_grad = torch.zeros_like(linear.weight)
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
with torch.no_grad():
orig_input_tensor = torch.randn(tensor_shape, requires_grad=True, device="cuda")
orig_loss_weight = torch.randn(tensor_shape, device="cuda")
input_tensor = orig_input_tensor.chunk(
chunks=tensor_model_parallel_world_size,
dim=2,
)[parallel_state.get_tensor_model_parallel_rank()].contiguous()
if sequence_parallel_enabled:
loss_weight = orig_loss_weight.chunk(
chunks=tensor_model_parallel_world_size,
dim=0,
)[parallel_state.get_tensor_model_parallel_rank()]
else:
loss_weight = orig_loss_weight
if accumulation_in_fp16:
orig_input_tensor = orig_input_tensor.half()
input_tensor = input_tensor.half()
loss_weight = loss_weight.half()
input_tensor.requires_grad_()
output, _ = linear(input_tensor)
loss = torch.mul(output, loss_weight).sum()
loss.backward()
self.assertIsNotNone(input_tensor.grad, msg=msg)
ref_linear = nn.Linear(
in_features=self.HIDDEN_SIZE,
out_features=self.HIDDEN_SIZE,
bias=False,
device="cuda",
)
with torch.no_grad():
dldy = orig_loss_weight.clone()
x = orig_input_tensor.clone()
ref_linear.weight.copy_(linear.master_weight)
if accumulation_in_fp16:
ref_linear = ref_linear.half()
x.requires_grad_()
expected_output = ref_linear(x)
expected_loss = torch.mul(expected_output, dldy).sum()
expected_loss.backward()
if not accumulation_in_fp16:
if sequence_parallel_enabled:
self.assertEqual(
x=output,
y=expected_output.chunk(
chunks=tensor_model_parallel_world_size,
dim=0,
)[parallel_state.get_tensor_model_parallel_rank()],
msg=msg,
)
else:
self.assertEqual(
x=output,
y=expected_output,
msg=msg,
)
grad_attr_name = "main_grad" if gradient_accumulation_fusion else "grad"
# NOTE(mkozuki): Numerical errors seems to be enlarged by tensor model parallel.
if tensor_model_parallel_world_size == 1:
self.assertEqual(
x=getattr(linear.weight, grad_attr_name),
y=ref_linear.weight.grad.chunk(
chunks=tensor_model_parallel_world_size,
dim=0,
)[parallel_state.get_tensor_model_parallel_rank()],
msg=msg,
)
parallel_state.destroy_model_parallel()
def test_column_parallel_linear(self):
self._column_parallel_linear_test_impl(False, False, False, False)
def test_column_parallel_linear_async(self):
self._column_parallel_linear_test_impl(True, False, False, False)
def test_column_parallel_linear_gradient_accumulation_fusion(self):
self._column_parallel_linear_test_impl(False, True, False, False)
def test_column_parallel_linear_gradient_accumulation_fusion_in_fp16(self):
self._column_parallel_linear_test_impl(False, True, True, False)
def test_column_parallel_linear_sequence_parallel(self):
if self.DISTRIBUTED_BACKEND == "ucc":
self.skipTest("Backward's reduce_scatter fails. as of 2022/06/15")
self._column_parallel_linear_test_impl(False, False, False, True)
@unittest.skipIf(torch.cuda.device_count() < 2, "Sequence Parallel requires >= 2 GPUs")
def test_column_parallel_linear_exception(self):
with self.assertRaisesRegex(
RuntimeError,
"`async_tensor_model_parallel_allreduce` and `sequence_parallel_enabled` cannot be enabled at the same time.",
):
self._column_parallel_linear_test_impl(True, False, False, True)
def _column_parallel_linear_test_impl(
self,
async_tensor_model_parallel_allreduce: bool,
gradient_accumulation_fusion: bool,
accumulation_in_fp16: bool,
sequence_parallel_enabled: bool,
):
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
if async_tensor_model_parallel_allreduce and sequence_parallel_enabled:
if tensor_model_parallel_world_size == 1:
continue
if self.world_size % tensor_model_parallel_world_size:
continue
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
)
input_tensor_shape = self.tensor_shape
expected_output_shape = self.tensor_shape
# When sequence parallel, `gather_output` is disabled, i.e.,
# output of matmul isn't gathered in dimension of feature/hidden (last dim).
if sequence_parallel_enabled:
expected_output_shape[-1] //= tensor_model_parallel_world_size
# tensor's shape is [sequence length, batch size, hidden size]
set_random_seed(self.SEED)
linear = layers.ColumnParallelLinear(
self.HIDDEN_SIZE,
self.HIDDEN_SIZE,
bias=False,
keep_master_weight_for_test=True,
params_dtype=torch.float32,
use_cpu_initialization=True,
gather_output=not sequence_parallel_enabled,
no_async_tensor_model_parallel_allreduce=not async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
accumulation_in_fp16=accumulation_in_fp16,
sequence_parallel_enabled=sequence_parallel_enabled,
).cuda()
if accumulation_in_fp16:
linear = linear.half()
# Simulate the situation where fusion of weight grad calculation and gradient accumulation happens.
if gradient_accumulation_fusion:
with torch.no_grad():
linear.weight.main_grad = torch.zeros_like(linear.weight)
orig_input_tensor = torch.randn(input_tensor_shape, device="cuda", requires_grad=True)
if accumulation_in_fp16:
orig_input_tensor = orig_input_tensor.half()
if sequence_parallel_enabled:
input_tensor = list(
orig_input_tensor.chunk(tensor_model_parallel_world_size, dim=0)
)[parallel_state.get_tensor_model_parallel_rank()]
else:
input_tensor = orig_input_tensor
output, _ = linear(input_tensor)
# The order of dimension is expected to be (sequence, batch, hidden)
self.assertEqual(output.shape, expected_output_shape, msg=msg)
orig_loss_weight = torch.randn(input_tensor_shape, device="cuda")
if accumulation_in_fp16:
orig_loss_weight = orig_loss_weight.half()
if sequence_parallel_enabled:
loss_weight = orig_loss_weight.chunk(
tensor_model_parallel_world_size, dim=2,
)[parallel_state.get_tensor_model_parallel_rank()]
else:
loss_weight = orig_loss_weight
loss = torch.mul(output, loss_weight).sum()
loss.backward()
with torch.no_grad():
dldy = orig_loss_weight.clone()
x = orig_input_tensor.clone()
ref_linear = nn.Linear(
in_features=self.HIDDEN_SIZE,
out_features=self.HIDDEN_SIZE,
bias=False,
device="cuda",
)
if accumulation_in_fp16:
ref_linear = ref_linear.half()
# NOTE(mkozuki): `master_weight` is available because `keep_master_weight_for_test` is set.
ref_linear.weight.copy_(linear.master_weight)
x.requires_grad_()
expected_output = ref_linear(x)
if sequence_parallel_enabled:
chunk = expected_output.chunk(
tensor_model_parallel_world_size,
dim=2,
)[parallel_state.get_tensor_model_parallel_rank()]
self.assertEqual(
x=output,
y=chunk,
msg=msg,
)
else:
self.assertEqual(
x=output,
y=expected_output,
msg=msg,
)
expected_loss = torch.mul(expected_output, dldy).sum()
expected_loss.backward()
grad_attr_name = "main_grad" if gradient_accumulation_fusion else "grad"
# NOTE(mkozuki): Numerical errors seems to be enlarged by tensor model parallel.
if tensor_model_parallel_world_size == 1:
self.assertEqual(
x=getattr(linear.weight, grad_attr_name),
y=ref_linear.weight.grad.chunk(
chunks=tensor_model_parallel_world_size,
dim=0,
)[parallel_state.get_tensor_model_parallel_rank()],
msg=msg,
)
parallel_state.destroy_model_parallel()
class NcclTensorParallelLayerTest(TensorParallelLayerTestBase, NcclDistributedTestBase):
pass
class UccTensorParallelLayerTest(TensorParallelLayerTestBase, UccDistributedTestBase):
pass
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_layers.py |
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/__init__.py |
import logging
from typing import List, Optional
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_micro_batch_size,
get_num_microbatches,
get_current_global_batch_size,
update_num_microbatches,
)
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("apex").setLevel(logging.WARNING)
class MicrobatchCalculatorTestBase:
GLOBAL_BATCH_SIZE: int = 1024
MICRO_BATCH_SIZE: int = 1
def _test(self, rampup_batch_size: Optional[List[int]]) -> None:
for data_parallel_size in range(1, self.world_size + 1):
expected_global_batch_size = self.GLOBAL_BATCH_SIZE
expected_micro_batch_size = self.MICRO_BATCH_SIZE
if rampup_batch_size:
expected_global_batch_size = rampup_batch_size[0]
num_consumed_samples = 0
step_of_global_batch_size = rampup_batch_size[1]
threshold = rampup_batch_size[2]
if data_parallel_size > 1 and data_parallel_size % 2 != 0:
continue
if self.world_size % data_parallel_size != 0:
continue
msg = f"data_parallel_size: {data_parallel_size}"
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=self.world_size // data_parallel_size,
pipeline_model_parallel_size_=1,
)
self.assertEqual(data_parallel_size, parallel_state.get_data_parallel_world_size(), msg=msg)
_reconfigure_microbatch_calculator(
self.rank,
rampup_batch_size,
self.GLOBAL_BATCH_SIZE,
self.MICRO_BATCH_SIZE,
data_parallel_size,
)
self.assertEqual(get_micro_batch_size(), expected_micro_batch_size, msg=msg)
self.assertEqual(get_num_microbatches(), expected_global_batch_size / expected_micro_batch_size / data_parallel_size, msg=msg)
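            # i.e. num_microbatches == global_batch_size / (micro_batch_size * data_parallel_size)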
current_global_batch_size = get_current_global_batch_size()
self.assertEqual(current_global_batch_size, expected_global_batch_size, msg=msg)
# Make sure `global_batch_size` equals to the final global batch size after
# certain number of updates.
if rampup_batch_size:
update_num_microbatches(current_global_batch_size)
for i in range(100):
current_global_batch_size = get_current_global_batch_size()
update_num_microbatches(current_global_batch_size)
current_global_batch_size = get_current_global_batch_size()
self.assertEqual(get_current_global_batch_size(), self.GLOBAL_BATCH_SIZE, msg=msg)
parallel_state.destroy_model_parallel()
def test_constant_microbatch_calculator(self):
self._test(rampup_batch_size=None)
def test_dynamic_microbatch_calculator(self):
self._test(rampup_batch_size=[256, 128, 500])
class NcclMicrobatchCalculatorTest(MicrobatchCalculatorTestBase, NcclDistributedTestBase): pass
class UccMicrobatchCalculatorTest(MicrobatchCalculatorTestBase, UccDistributedTestBase): pass
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_microbatches.py |
import logging
import torch.testing
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import data as data_utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("torch").setLevel(logging.WARNING)
class BroadcastDataTestBase:
def test_broadcast_data(self):
tensor_model_parallel_world_size: int = self.world_size // (
            1 + int(self.world_size > 1)
)
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size
)
target_key_size = {
"key1": [7, 11],
"key2": [8, 2, 1],
"key3": [13],
"key4": [5, 1, 2],
"key5": [5, 12],
}
keys = [k for k in target_key_size]
data = {}
data_t = {}
with torch.no_grad():
for key in target_key_size:
data[key] = torch.randint(0, 1000, size=target_key_size[key])
data_t[key] = data[key].clone()
# "key_x" is supposed to be ignored.
data["key_x"] = torch.rand(5)
data_t["key_x"] = data["key_x"].clone()
if parallel_state.get_tensor_model_parallel_rank() != 0:
data = None
data_utils._check_data_types(keys, data_t, torch.int64)
key_size, _, _ = data_utils._build_key_size_numel_dictionaries(keys, data)
for key in keys:
self.assertEqual(target_key_size[key], key_size[key])
broadcasted_data = data_utils.broadcast_data(keys, data, torch.int64)
for key in keys:
self.assertEqual(broadcasted_data[key], data_t[key].cuda())
parallel_state.destroy_model_parallel()
class NcclBroadcastDataTest(BroadcastDataTestBase, NcclDistributedTestBase): pass
class UccBroadcastDataTest(BroadcastDataTestBase, UccDistributedTestBase): pass
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_data.py |
import torch
import unittest
from apex.transformer.testing import global_vars
from apex.transformer.testing.standalone_bert import bert_model_provider
from apex.transformer.pipeline_parallel.schedules.common import (
_get_params_for_weight_decay_optimization, build_model
)
from apex.transformer.pipeline_parallel.schedules import get_forward_backward_func
from apex.transformer.pipeline_parallel.utils import (
average_losses_across_data_parallel_group, unwrap_model, setup_microbatch_calculator
)
from apex.transformer.log_util import set_logging_level
from apex.transformer import tensor_parallel, parallel_state
from apex.transformer.enums import ModelType
from apex.transformer._ucc_util import HAS_UCC
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase, NcclDistributedTestBase
import logging
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
logging.getLogger("apex").setLevel(logging.WARNING)
set_logging_level("WARNING")
class BertTestBase:
def _download_fancy_data(self):
text = """
An original sentence not subject to any license restrictions, copyright, or royalty payments. Nothing to see here. Commercial or non-commercial use. Research or non-research purposes. The quick brown fox jumps over the lazy dog. Lorem ipsum.
"""
text = text * 1024
encoded = text.encode("ascii", "replace")
ints = [int(encoded[i]) for i in range(len(encoded))]
return torch.tensor(ints)
# build a batch given sequence_len and batch size
def _generate_fancy_data_labels(self, sequence_len, batch_size):
temps = []
for i in range(batch_size):
if self.inds is None or self.data_idx >= len(self.inds):
                # hack: reseed explicitly, since RNG use would otherwise fall out of sync across the different pipeline stages
torch.manual_seed(self.MANUAL_SEED)
self.inds = torch.randperm(
self.effective_length, device="cuda")
self.masks = (
torch.rand(
len(self.inds) // batch_size + 1, batch_size, sequence_len, device="cuda"
)
>= self.MASK_PROB
).long()
self.MANUAL_SEED += 1
self.data_idx = 0
if self.rank == 0:
print("new epoch", len(self.inds))
print("my start", self.inds[0:5])
print("masks_checksum:", torch.sum(self.masks))
if self.EASY_MODE:
data_idx_ = self.data_idx % self.EASY_MODE_SIZ
else:
data_idx_ = self.data_idx
offset = self.inds[data_idx_] # * SEQUENCE_LEN
self.data_idx += 1
curr = self.fancy_data[offset: offset +
sequence_len].clone().detach()
temps.append(curr)
temp = torch.stack(temps, dim=0).cuda()
mask = self.masks[self.data_idx // batch_size]
mask_not = torch.logical_not(mask).long()
data = mask * temp + mask_not * 124
label = temp
if parallel_state.get_tensor_model_parallel_rank() == 0:
data_dict = {"text": data, "label": label, "mask_not": mask_not}
else:
data_dict = None
keys = ["text", "label", "mask_not"]
broadcasted_data = tensor_parallel.broadcast_data(
keys, data_dict, torch.long)
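        # Only tensor-parallel rank 0 builds data_dict; broadcast_data replicates it to the remaining tensor-parallel ranks.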
return (
broadcasted_data["text"].long(),
broadcasted_data["label"].long(),
broadcasted_data["mask_not"],
)
def _fwd_step_func(self, batch, model):
data, label, loss_mask = batch
y = model(data, torch.ones_like(data), lm_labels=label)
def loss_func(output_tensor):
output_tensor, _ = output_tensor
lm_loss_ = output_tensor.float()
lm_loss = torch.sum(lm_loss_.view(-1) *
loss_mask.reshape(-1)) / loss_mask.sum()
averaged_loss = average_losses_across_data_parallel_group([
lm_loss])
if self.data_idx >= 1536:
# NOTE (patwang): Loss cutoff might be excessively high but roughly one in five
# unlucky random seeds do cause loss to spike to just under 8.0
self.assertLess(averaged_loss, 8.0)
return lm_loss, {"avg": averaged_loss}
return y, loss_func
def _train(
self, model, optim, virtual_pipeline_model_parallel_size, pipeline_model_parallel_size, async_comm
):
args = global_vars.get_args()
sequence_len = args.seq_length
micro_batch_size = args.micro_batch_size
hidden_size = args.hidden_size
global_batch_size = args.global_batch_size
forward_backward_func = get_forward_backward_func(
virtual_pipeline_model_parallel_size, pipeline_model_parallel_size
)
tensor_shape = (sequence_len, micro_batch_size, hidden_size)
for _ in range(16):
batch = self._generate_fancy_data_labels(
sequence_len, global_batch_size)
optim.zero_grad()
forward_backward_func(
self._fwd_step_func,
batch,
model,
forward_only=False,
tensor_shape=tensor_shape,
async_comm=async_comm,
sequence_parallel_enabled=args.sequence_parallel,
)
# All-reduce layernorm parameters across model parallel nodes
# when sequence parallelism is used
if parallel_state.get_tensor_model_parallel_world_size() > 1 and args.sequence_parallel:
for model_module in model:
unwrapped_model = unwrap_model(model_module)
for param in unwrapped_model.parameters():
if getattr(param, 'sequence_parallel_enabled', False):
grad = param.grad
torch.distributed.all_reduce(
grad, group=parallel_state.get_tensor_model_parallel_group())
optim.step()
@unittest.skipUnless(torch.cuda.device_count() > 2, "requires at least 3 gpus")
def test_bert_without_interleaving(self):
self._test_bert(virtual_pipeline_model_parallel_size=None)
@unittest.skipUnless(torch.cuda.device_count() > 2, "requires at least 3 gpus")
def test_bert_with_interleaving(self):
if self.DISTRIBUTED_BACKEND == 'ucc':
self.skipTest('skip interleaving with ucc')
self._test_bert(virtual_pipeline_model_parallel_size=2)
def _test_bert(self, virtual_pipeline_model_parallel_size):
self.MANUAL_SEED = 42
self.inds = None
self.masks = None
self.data_idx = 0
self.MASK_PROB = 0.1
self.EASY_MODE = False
self.EASY_MODE_SIZ = 32
tensor_model_parallel_size = 2 if self.world_size % 2 == 0 and self.world_size > 4 else 1
pipeline_model_parallel_size = self.world_size // tensor_model_parallel_size
override_args = {
"micro_batch_size": 2,
"num_layers": 16,
"hidden_size": 256,
"num_attention_heads": 8,
"max_position_embeddings": 512,
"seq_length": 512,
"global_batch_size": 128,
"pipeline_model_parallel_size": pipeline_model_parallel_size,
"tensor_model_parallel_size": tensor_model_parallel_size,
"bert_binary_head": False,
"world_size": self.world_size,
"rank": self.rank,
}
global_vars.set_global_variables(override_args=override_args, ignore_unknown_args=True)
args = global_vars.get_args()
self.fancy_data = self._download_fancy_data()
self.effective_length = self.fancy_data.size(0) // args.seq_length
self.effective_length = self.fancy_data.size(0) - args.seq_length
if self.rank == 0:
print(
f'testing backend: {self.DISTRIBUTED_BACKEND} with virtual_pipeline_model_parallel_size: {virtual_pipeline_model_parallel_size}')
async_comm = not args.sequence_parallel and virtual_pipeline_model_parallel_size is None
self.data_idx = 0
args.padded_vocab_size = 128 # needed in standalone gpt
args.model_type = ModelType.encoder_or_decoder
setup_microbatch_calculator(
args.rank,
args.rampup_batch_size,
args.global_batch_size,
args.micro_batch_size,
args.data_parallel_size,
)
parallel_state.initialize_model_parallel(
args.tensor_model_parallel_size,
args.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size,
default_backend="nccl",
p2p_backend=self.DISTRIBUTED_BACKEND,
)
tensor_parallel.random.model_parallel_cuda_manual_seed(0)
model = build_model(
bert_model_provider,
wrap_with_ddp=parallel_state.get_data_parallel_world_size() > 1,
virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
cpu_offload=args.cpu_offload,
)
assert isinstance(model, list)
assert len(model) == (
1
if virtual_pipeline_model_parallel_size is None
else virtual_pipeline_model_parallel_size
)
_param_groups = _get_params_for_weight_decay_optimization(model)
optim = torch.optim.Adam(_param_groups)
self._train(
model,
optim,
virtual_pipeline_model_parallel_size,
args.pipeline_model_parallel_size,
async_comm,
)
torch.cuda.synchronize()
class NcclBertTest(BertTestBase, NcclDistributedTestBase):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
@unittest.skipUnless(HAS_UCC, "requires pytorch to be built with native ucc")
class UccBertTest(BertTestBase, UccDistributedTestBase):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_bert_minimal.py |
import logging
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
logging.getLogger("apex").setLevel(logging.WARNING)
class TransformerUtilsTest(NcclDistributedTestBase):
def test_split_tensor_along_last_dim(self):
        for tensor_model_parallel_world_size in range(1, self.world_size + 1):
            if self.world_size % tensor_model_parallel_world_size > 0:
continue
parallel_state.initialize_model_parallel(
                tensor_model_parallel_size_=tensor_model_parallel_world_size
)
device = "cpu"
input_tensor = torch.randn((100, 100, 100), device=device)
splits = utils.split_tensor_along_last_dim(input_tensor, 10)
last_dim_shapes = torch.tensor(
[int(split.size()[-1]) for split in splits]
)
self.assertTrue(
torch.equal(last_dim_shapes, torch.full((10,), 10),),
msg=f"tensor_model_paralell_world_size: {tensor_model_paralell_world_size}",
)
parallel_state.destroy_model_parallel()
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_transformer_utils.py |
import logging
import os
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("apex").setLevel(logging.WARNING)
os.environ["BACKEND"] = "NCCL"
DATA_PARALLEL_WORLD_SIZE: int = 1
def calc_expected_tensor_model_parallel_rank(
rank: int, tensor_model_parallel_world_size: int,
) -> int:
return rank % tensor_model_parallel_world_size
class ParallelStateTestBase:
def test_initialize_model_parallel(self) -> None:
self.assertFalse(parallel_state.model_parallel_is_initialized())
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
msg = f"tensor_model_parallel_world_siz: {tensor_model_parallel_world_size}"
if self.world_size % tensor_model_parallel_world_size:
continue
pipeline_model_parallel_world_size = (
self.world_size // tensor_model_parallel_world_size
)
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
)
self.assertEqual(
tensor_model_parallel_world_size,
parallel_state.get_tensor_model_parallel_world_size(),
msg=msg,
)
            expected_tensor_model_parallel_rank = calc_expected_tensor_model_parallel_rank(
self.rank, tensor_model_parallel_world_size
)
self.assertEqual(
expected_tensor_model_parallel_rank,
parallel_state.get_tensor_model_parallel_rank(),
msg=msg,
)
expected_tensor_model_parallel_src_rank = (
self.rank // tensor_model_parallel_world_size
) * tensor_model_parallel_world_size
self.assertEqual(
expected_tensor_model_parallel_src_rank,
parallel_state.get_tensor_model_parallel_src_rank(),
msg=msg,
)
parallel_state.destroy_model_parallel()
self.assertFalse(parallel_state.model_parallel_is_initialized(), msg=msg)
def test_initialize_model_parallel_with_virtual_and_split(self) -> None:
if self.world_size < 4:
self.skipTest("requires >= 4 GPUs")
self.assertFalse(parallel_state.model_parallel_is_initialized())
tensor_model_parallel_world_size = 1 + int(self.world_size > 4)
pipeline_model_parallel_world_size = (
self.world_size // tensor_model_parallel_world_size
)
virtual_pipeline_model_parallel_world_size = 2
pipeline_model_parallel_split_rank = pipeline_model_parallel_world_size // 2
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_world_size,
pipeline_model_parallel_split_rank_=pipeline_model_parallel_split_rank,
)
self.assertEqual(
            calc_expected_tensor_model_parallel_rank(
self.rank, tensor_model_parallel_world_size
),
parallel_state.get_tensor_model_parallel_rank(),
)
self.assertEqual(
pipeline_model_parallel_world_size,
parallel_state.get_pipeline_model_parallel_world_size(),
)
self.assertEqual(
virtual_pipeline_model_parallel_world_size,
parallel_state.get_virtual_pipeline_model_parallel_world_size(),
)
expected_pipeline_rank = (
self.rank - (self.rank % tensor_model_parallel_world_size)
) % pipeline_model_parallel_world_size
self.assertEqual(
expected_pipeline_rank, parallel_state.get_pipeline_model_parallel_rank(),
)
# virtual pipeline model parallel rank is lazily set, i.e., right after the call of
# `initialize_model_parallel`, it's set to 0.
self.assertEqual(
0, parallel_state.get_virtual_pipeline_model_parallel_rank(),
)
self.assertEqual(
pipeline_model_parallel_split_rank,
parallel_state.get_pipeline_model_parallel_split_rank(),
)
fake_split_rank = 77
parallel_state.set_pipeline_model_parallel_split_rank(fake_split_rank)
self.assertEqual(
fake_split_rank, parallel_state.get_pipeline_model_parallel_split_rank()
)
# relative position embedding groups check
self.assertEqual(
expected_pipeline_rank < pipeline_model_parallel_split_rank,
parallel_state.is_rank_in_encoder_relative_position_embedding_group(),
)
self.assertEqual(
expected_pipeline_rank >= pipeline_model_parallel_split_rank,
parallel_state.is_rank_in_decoder_relative_position_embedding_group(),
)
parallel_state.destroy_model_parallel()
def test_initialize_model_parallel_decoder_only(self) -> None:
"""Initialize model parallelism for decoder-only Transformers like GPT-3"""
self.assertFalse(parallel_state.model_parallel_is_initialized())
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
if self.world_size % tensor_model_parallel_world_size:
continue
pipeline_model_parallel_world_size = (
self.world_size // tensor_model_parallel_world_size
)
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
pipeline_model_parallel_split_rank_=0,
)
self.assertEqual(
tensor_model_parallel_world_size,
parallel_state.get_tensor_model_parallel_world_size(),
msg=msg,
)
            expected_tensor_model_parallel_rank = calc_expected_tensor_model_parallel_rank(
self.rank, tensor_model_parallel_world_size
)
self.assertEqual(
expected_tensor_model_parallel_rank,
parallel_state.get_tensor_model_parallel_rank(),
msg=msg,
)
expected_tensor_model_parallel_src_rank = (
self.rank // tensor_model_parallel_world_size
) * tensor_model_parallel_world_size
self.assertEqual(
expected_tensor_model_parallel_src_rank,
parallel_state.get_tensor_model_parallel_src_rank(),
msg=msg,
)
parallel_state.destroy_model_parallel()
self.assertFalse(parallel_state.model_parallel_is_initialized(), msg=msg)
class NcclParallelStateTest(ParallelStateTestBase, NcclDistributedTestBase): pass
class UccParallelStateTest(ParallelStateTestBase, UccDistributedTestBase): pass
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_parallel_state.py |
import contextlib
import logging
import itertools
import os
from datetime import datetime
from packaging.version import parse, Version
import re
from typing import Optional, Tuple, List
import unittest
import torch
from torch.testing._internal import common_utils
from apex._autocast_utils import _get_autocast_dtypes
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel import utils as pp_utils
from apex.transformer.pipeline_parallel.schedules.common import (
FwdStepFunc,
build_model,
_get_params_for_weight_decay_optimization,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
forward_backward_no_pipelining,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
_forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
from apex.transformer.testing.distributed_test_base import HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER
from apex.transformer.testing import commons as testing_utils
from apex.transformer._ucc_util import HAS_UCC
logging.getLogger("torch").setLevel(logging.WARNING)
logging.getLogger("apex").setLevel(logging.WARNING)
weight_coeff = 1024
# Guard for https://github.com/pytorch/pytorch/pull/82450
def get_nvidia_pytorch_version():
ver = os.getenv("NVIDIA_PYTORCH_VERSION", "22.08")
if "master" in ver:
ver = datetime.today().strftime("%y.%m")
elif "update_for_" in ver:
ver = ver.replace("update_for_", "")
return ver
CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV = False
ngc_container_2209, pytorch_113 = Version("22.09"), Version("1.13")
if parse(torch.__version__) >= pytorch_113:
CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV = True
elif parse(get_nvidia_pytorch_version()) >= ngc_container_2209:
CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV = True
else:
CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV = False
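# get_init_weights_func fills each pipeline stage's Linear weights with (rank + offset + 1) / weight_coeff
# and its biases with 1.0, so get_target_loss_and_model can rebuild identical parameters on CPU for comparison.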
def get_init_weights_func(offset: int = 0):
@torch.no_grad()
def init_weights(m):
rank = parallel_state.get_pipeline_model_parallel_rank()
if isinstance(m, torch.nn.Linear):
m.weight.fill_((rank + offset + 1.0) / weight_coeff)
m.bias.fill_(1.0)
return init_weights
def get_dtype_for_comparison():
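    # Prefer float64 on Ampere (compute capability >= 8.0), where TF32 would otherwise reduce fp32 matmul precision.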
if(torch.cuda.get_device_capability() >= (8, 0)):
return torch.float64
return torch.float32
def get_target_loss_and_model(global_batch_shape: tuple, hidden_size: int, total_layers: int) -> Tuple[torch.Tensor, List[torch.Tensor]]:
model = []
dtype = get_dtype_for_comparison()
data = torch.ones(global_batch_shape, dtype=dtype)
for i in range(total_layers):
w = torch.ones((hidden_size, hidden_size), dtype=dtype) * (i + 1.0) / weight_coeff
b = torch.ones(hidden_size, dtype=dtype)
w.requires_grad_()
b.requires_grad_()
# don't need to care about transpose semantics as all values are the same
data = torch.matmul(w, data) + b
model.append([w, b])
loss = data.sum() / global_batch_shape[0]
loss.backward()
return loss, model
def _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size: Optional[int] = None
) -> Tuple[int, int, int]:
# TODO: revisit if we can fold this into the class for skip logic / avoid duplication
# of world size computation
world_size = torch.cuda.device_count()
tensor_model_parallel_world_size = 1
data_parallel_size = 1 + (world_size >= 8 and world_size % 2 == 0)
if pipeline_model_parallel_world_size is None:
pipeline_model_parallel_world_size = world_size // (tensor_model_parallel_world_size * data_parallel_size)
else:
data_parallel_size = world_size // (tensor_model_parallel_world_size * pipeline_model_parallel_world_size)
return tensor_model_parallel_world_size, data_parallel_size, pipeline_model_parallel_world_size
class PipelineParallelForwardBackwardTestBase:
GLOBAL_BATCH_SIZE = 16
MICRO_BATCH_SIZE = 2
HIDDEN_SIZE = 32
deallocate_options = (True, False)
# If :obj:`None`, (torch.float32, torch.float16, torch.bfloat16) are dtype options on Ampere.
# You can limit the options by overriding the following `dtypes`.
dtypes = None
def _forward_backward_test_impl(
self,
forward_only: bool,
fwd_bwd_func: FwdStepFunc,
pipeline_model_parallel_world_size: Optional[int],
virtual_pipeline_model_parallel_size: Optional[int],
async_comm: bool = False,
*,
default_backend: Optional[str] = None,
p2p_backend: Optional[str] = None,
sync_batch_comm: bool = True,
) -> None:
if fwd_bwd_func == _forward_backward_pipelining_with_interleaving:
self.assertIsNotNone(virtual_pipeline_model_parallel_size)
self.assertGreater(virtual_pipeline_model_parallel_size, 1)
dtype_options = self.dtypes or [torch.float32, torch.double] + _get_autocast_dtypes()
for dtype, deallocate_pipeline_outputs in itertools.product(
dtype_options, self.deallocate_options,
):
grad_scaler = (
torch.cuda.amp.GradScaler(init_scale=4.0)
if dtype == torch.half
else None
)
(tensor_model_parallel_world_size,
data_parallel_size,
pipeline_model_parallel_world_size) = _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size)
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
default_backend=default_backend,
p2p_backend=p2p_backend,
)
pp_utils._reconfigure_microbatch_calculator(
rank=parallel_state.get_tensor_model_parallel_rank(),
rampup_batch_size=None,
global_batch_size=self.GLOBAL_BATCH_SIZE,
micro_batch_size=self.MICRO_BATCH_SIZE,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
global_batch_shape = (
self.GLOBAL_BATCH_SIZE
// parallel_state.get_data_parallel_world_size(),
self.HIDDEN_SIZE,
self.HIDDEN_SIZE,
)
batch = None
if parallel_state.is_pipeline_first_stage():
batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )
model = build_model(
testing_utils.model_provider_func,
# Use DDP only when it's better to have
wrap_with_ddp=data_parallel_size > 1,
virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
hidden_size=self.HIDDEN_SIZE,
)
offset = pipeline_model_parallel_world_size if virtual_pipeline_model_parallel_size is not None else 0
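            # With interleaving, `model` is a list of virtual chunks; chunk `idx` is initialized
            # below with a layer-index offset of `idx * pipeline_model_parallel_world_size`.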
for idx, model_module in enumerate(model):
model_module = model_module.to(dtype)
model_module.apply(get_init_weights_func(idx*offset))
_param_groups = _get_params_for_weight_decay_optimization(model)
optimizer = torch.optim.Adam(_param_groups, lr=1e-3)
pp_utils.update_num_microbatches(0)
loss = fwd_bwd_func(
testing_utils.fwd_step_func,
batch,
model,
forward_only=forward_only,
# `tensor_shape` is the shape of micro batch.
tensor_shape=(
self.MICRO_BATCH_SIZE,
self.HIDDEN_SIZE,
self.HIDDEN_SIZE,
),
dtype=dtype,
async_comm=async_comm,
grad_scaler=grad_scaler,
deallocate_pipeline_output=deallocate_pipeline_outputs,
sync_batch_comm=sync_batch_comm,
)
if dtype == get_dtype_for_comparison():
torch.cuda.synchronize()
hidden_size = self.HIDDEN_SIZE
microbatch_size = self.MICRO_BATCH_SIZE
total_layers = pipeline_model_parallel_world_size
if virtual_pipeline_model_parallel_size is not None:
total_layers *= virtual_pipeline_model_parallel_size
target_loss, target_model = get_target_loss_and_model(global_batch_shape, hidden_size, total_layers)
for loss_item in loss:
x = loss_item['avg']
self.assertEqual(x.item() / microbatch_size, target_loss.item())
if not forward_only:
for vm_id, model_module in enumerate(model):
params = list(model_module.parameters())
rank = params[0].get_device()
offset = pipeline_model_parallel_world_size
param_id = rank // data_parallel_size + vm_id * offset
target_params = target_model[param_id]
self.assertEqual(params[0].cpu(), target_params[0])
self.assertEqual(params[1].cpu(), target_params[1])
self.assertEqual(params[0].grad.cpu() / microbatch_size, target_params[0].grad)
self.assertEqual(params[1].grad.cpu() / microbatch_size, target_params[1].grad)
if not forward_only:
for m in model:
for p in m.parameters():
self.assertIsNotNone(p.grad)
optimizer.step()
optimizer.zero_grad(set_to_none=True)
parallel_state.destroy_model_parallel()
def test_learning_no_pipelining(self):
self._forward_backward_test_impl(False, forward_backward_no_pipelining, 1, None)
def test_inference_no_pipelining(self):
self._forward_backward_test_impl(True, forward_backward_no_pipelining, 1, None)
def test_learning_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
False, forward_backward_pipelining_without_interleaving, None, None, sync_batch_comm=sync_batch_comm,
)
def test_inference_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
True, forward_backward_pipelining_without_interleaving, None, None, sync_batch_comm=sync_batch_comm,
)
def test_learning_async_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
False, forward_backward_pipelining_without_interleaving, None, None, async_comm=True,
sync_batch_comm=sync_batch_comm,
)
def test_inference_async_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
True, forward_backward_pipelining_without_interleaving, None, None, async_comm=True,
sync_batch_comm=sync_batch_comm,
)
# fails on native ucc: times out
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_learning_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2,
sync_batch_comm=sync_batch_comm,
)
# fails on native ucc: times out
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_inference_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2,
sync_batch_comm=sync_batch_comm,
)
# fails on native ucc: times out
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_learning_async_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True,
sync_batch_comm=sync_batch_comm,
)
# fails on native ucc: times out
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_inference_async_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
self._forward_backward_test_impl(
True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True,
sync_batch_comm=sync_batch_comm,
)
class NcclPipelineParallelForwardBackwardTest(NcclDistributedTestBase, PipelineParallelForwardBackwardTestBase):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
def _run_hybrid_distributed_backend(self, forward_only: bool) -> None:
self._forward_backward_test_impl(
forward_only, forward_backward_pipelining_without_interleaving, None, None,
default_backend="nccl", p2p_backend="ucc",
)
@unittest.skipUnless(HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER, "Needs driver >= 470.42.01")
def _test_hybrid_backends(self, forward_only: bool) -> None:
if HAS_UCC:
self._run_hybrid_distributed_backend(forward_only)
else:
with self.assertRaisesRegex(
ImportError,
re.escape("UCC backend requires pytorch source build with UCC installed and enabled"),
):
self._run_hybrid_distributed_backend(forward_only)
def test_learning_pipelining_without_interleaving_ucc_for_p2p(self):
self._test_hybrid_backends(False)
def test_inference_pipelining_without_interleaving_ucc_for_p2p(self):
self._test_hybrid_backends(True)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_learning_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_learning_pipelining_without_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_inference_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_inference_pipelining_without_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_learning_async_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_learning_async_pipelining_without_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_inference_async_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_inference_async_pipelining_without_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_learning_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_learning_pipelining_with_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_inference_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_inference_pipelining_with_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_learning_async_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_learning_async_pipelining_with_interleaving(sync_batch_comm=False)
@unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
def test_inference_async_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
self.test_inference_async_pipelining_with_interleaving(sync_batch_comm=False)
# n.b.(mkozuki): pipeline parallel w/o interleaving with UCX_TLS=tcp,sm fails.
class UccPipelineParallelForwardBackwardTest(UccDistributedTestBase, PipelineParallelForwardBackwardTestBase):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
deallocate_options = (False,)
dtypes = (torch.float32,)
# Sanity checking the functionality of `forward_backward_pipelining_without_interleaving` with
# `model_type=ModelType.encoder_and_decoder` which is used for pipeline training of transformer
# models such as T5.
@unittest.skipIf(torch.cuda.device_count() < 4, "Requires >= 4 GPUs")
class NcclPipelineParallelWithToyParallelMLP(NcclDistributedTestBase):
GLOBAL_BATCH_SIZE: int = 16
MICRO_BATCH_SIZE: int = 2
HIDDEN_SIZE: int = 64
# TODO(mkozuki): Change `DECODER_SEQUENCE_LENGTH` to a value different from `ENCODER_SEQUENCE_LENGTH`.
# To test forward_backward_pipelining_without_interleaving with `model_type=ModelType.encoder_and_decoder`,
# `decoder_seq_length` is necessary and ideally should be different from `encoder_sequence_length`
    # but the same value is used here for simplicity.
    # Note that you may have to either update the `MyModel` definition or define another `MyModel`
    # to support a different `DECODER_SEQUENCE_LENGTH`.
ENCODER_SEQUENCE_LENGTH: int = 32
DECODER_SEQUENCE_LENGTH: int = 32
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
    # TODO(mkozuki): Set `tensor_model_parallel_size > 1` for encoder_and_decoder as well if there are enough GPUs
# in order to let `sequence_parallel_enabled` have an effect on tensor shape logic.
def _forward_backward_test_impl(
self,
*,
forward_only: bool,
sequence_parallel_enabled: bool,
model_type: ModelType,
dtype: torch.dtype = torch.float32,
) -> None:
# N.B.(mkozuki): It might be better to set `tensor_model_parallel_size` to >1
# if `self.world_size > 5`. Otherwise, `pipeline_model_parallel_split_rank`
        # can be 1, which is too far from a real use case.
tensor_model_parallel_size = 1 + int(self.world_size >= 4)
pipeline_model_parallel_world_size = self.world_size // tensor_model_parallel_size
if model_type == ModelType.encoder_and_decoder:
pipeline_model_parallel_split_rank = pipeline_model_parallel_world_size // 2
else:
pipeline_model_parallel_split_rank = None
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
virtual_pipeline_model_parallel_size_=None,
pipeline_model_parallel_split_rank_=pipeline_model_parallel_split_rank,
)
testing_utils.set_random_seed(567)
pp_utils._reconfigure_microbatch_calculator(
rank=parallel_state.get_tensor_model_parallel_rank(),
rampup_batch_size=None,
global_batch_size=self.GLOBAL_BATCH_SIZE,
micro_batch_size=self.MICRO_BATCH_SIZE,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# TODO(mkozuki): Call `build_model` with `model_type`.
model = build_model(
testing_utils.mlp_provider_func,
wrap_with_ddp=False,
virtual_pipeline_model_parallel_size=None,
hidden_size=self.HIDDEN_SIZE,
sequence_parallel_enabled=sequence_parallel_enabled,
)
model = [m.to(dtype=dtype) for m in model]
if parallel_state.is_pipeline_first_stage():
batch: Tuple[torch.Tensor] = (
torch.ones(
(self.GLOBAL_BATCH_SIZE, self.ENCODER_SEQUENCE_LENGTH, self.HIDDEN_SIZE),
dtype=dtype,
device="cuda",
),
)
else:
batch = None
forward_backward_pipelining_without_interleaving(
forward_step_func=testing_utils.ToyParallelMLPFwdBwdStepFunc(
sequence_parallel_enabled=sequence_parallel_enabled,
),
batch=batch,
model=model,
forward_only=forward_only,
tensor_shape=(
self.ENCODER_SEQUENCE_LENGTH,
self.MICRO_BATCH_SIZE,
self.HIDDEN_SIZE,
),
model_type=model_type,
decoder_sequence_length=self.DECODER_SEQUENCE_LENGTH,
async_comm=False,
grad_scaler=None,
deallocate_pipeline_outputs=False,
dtype=dtype,
sequence_parallel_enabled=sequence_parallel_enabled,
)
def test_pipelining_without_interleaving_encoder_and_decoder(self) -> None:
self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=False, model_type=ModelType.encoder_and_decoder)
    def test_pipelining_without_interleaving_inference_encoder_and_decoder(self) -> None:
self._forward_backward_test_impl(forward_only=True, sequence_parallel_enabled=False, model_type=ModelType.encoder_and_decoder)
    def test_pipelining_without_interleaving_sequence_parallel_encoder_and_decoder(self) -> None:
self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=True, model_type=ModelType.encoder_and_decoder)
    def test_pipelining_without_interleaving_inference_sequence_parallel_encoder_and_decoder(self) -> None:
self._forward_backward_test_impl(forward_only=True, sequence_parallel_enabled=True, model_type=ModelType.encoder_and_decoder)
def test_pipelining_without_interleaving_encoder_or_decoder(self) -> None:
self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=False, model_type=ModelType.encoder_or_decoder)
def test_pipelining_without_interleaving_sequence_parallel_encoder_or_decoder(self) -> None:
self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=True, model_type=ModelType.encoder_or_decoder)
def test_pipelining_without_interleaving_sequence_parallel_encoder_or_decoder_half(self) -> None:
self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=True, model_type=ModelType.encoder_or_decoder, dtype=torch.half)
class NcclPipelineParallelWithCustomSyncContextHandler(NcclDistributedTestBase):
GLOBAL_BATCH_SIZE = 32
MICRO_BATCH_SIZE = 1
HIDDEN_SIZE = 1
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
    @unittest.skipIf(torch.cuda.device_count() < 2 or torch.cuda.device_count() % 2 != 0, "Requires an even number of GPUs (>= 2)")
def test_pipelining_without_interleaving_with_custom_sync_context_handler(self) -> None:
# Parallel configuration
world_size = torch.cuda.device_count()
tensor_model_parallel_world_size = 1
data_parallel_size = 2 if world_size > 2 else 1
pipeline_model_parallel_world_size = world_size // data_parallel_size
# Initialize pipeline parallelism
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
)
pp_utils._reconfigure_microbatch_calculator(
rank=parallel_state.get_tensor_model_parallel_rank(),
rampup_batch_size=None,
global_batch_size=self.GLOBAL_BATCH_SIZE,
micro_batch_size=self.MICRO_BATCH_SIZE,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
pp_utils.update_num_microbatches(0)
# Construct synthetic data
dtype = get_dtype_for_comparison()
hidden_size = self.HIDDEN_SIZE
microbatch_size = self.MICRO_BATCH_SIZE
global_batch_shape = (
self.GLOBAL_BATCH_SIZE
// parallel_state.get_data_parallel_world_size(),
hidden_size,
hidden_size,
)
batch = None
if parallel_state.is_pipeline_first_stage():
batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )
# Construct model
model = build_model(
testing_utils.model_provider_func,
wrap_with_ddp=True,
hidden_size=hidden_size,
)[0]
model = model.to(dtype)
model.module.apply(get_init_weights_func(0))
# Construct context that destroys all grads on exit
has_entered_grad_sync_context = False
has_exited_grad_sync_context = False
has_called_grad_sync_func = False
@contextlib.contextmanager
def custom_grad_sync_context():
try:
nonlocal has_entered_grad_sync_context
has_entered_grad_sync_context = True
yield
finally:
nonlocal has_exited_grad_sync_context
has_exited_grad_sync_context = True
for param in model.parameters():
param.grad = None
def custom_grad_sync_func():
nonlocal has_called_grad_sync_func
has_called_grad_sync_func = True
# Training step with pipeline parallelism
loss = forward_backward_pipelining_without_interleaving(
testing_utils.fwd_step_func,
batch,
model,
forward_only=False,
tensor_shape=(microbatch_size, hidden_size, hidden_size),
dtype=dtype,
async_comm=False,
grad_scaler=None,
deallocate_pipeline_outputs=False,
sequence_parallel_enabled=False,
custom_sync_context_handler=custom_grad_sync_context,
custom_grad_sync_func=custom_grad_sync_func,
)
torch.cuda.synchronize()
# Check if model has initialized gradients
has_any_grads = any(param.grad is not None for param in model.parameters())
has_all_grads = all(param.grad is not None for param in model.parameters())
# Check context behavior
self.assertTrue(has_entered_grad_sync_context, 'Has not entered custom sync context')
self.assertTrue(has_exited_grad_sync_context, 'Has not exited custom sync context')
self.assertEqual(
has_any_grads,
has_all_grads,
'Expected gradients to all be uninitialized or all be initialized',
)
self.assertEqual(
has_all_grads,
parallel_state.is_pipeline_first_stage(),
'Expected gradients to be initialized only in first pipeline stage',
)
# Clean up
parallel_state.destroy_model_parallel()
    @unittest.skipIf(torch.cuda.device_count() < 4 or torch.cuda.device_count() % 2 != 0, "Requires an even number of GPUs (>= 4)")
def test_pipelining_with_interleaving_with_custom_sync_context_handler(self) -> None:
# Parallel configuration
world_size = torch.cuda.device_count()
tensor_model_parallel_world_size = 1
data_parallel_size = 2 if world_size > 4 else 1
pipeline_model_parallel_world_size = world_size // data_parallel_size
virtual_pipeline_model_parallel_size = 2
# Initialize pipeline parallelism
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
)
pp_utils._reconfigure_microbatch_calculator(
rank=parallel_state.get_tensor_model_parallel_rank(),
rampup_batch_size=None,
global_batch_size=self.GLOBAL_BATCH_SIZE,
micro_batch_size=self.MICRO_BATCH_SIZE,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
pp_utils.update_num_microbatches(0)
# Construct synthetic data
dtype = get_dtype_for_comparison()
hidden_size = self.HIDDEN_SIZE
microbatch_size = self.MICRO_BATCH_SIZE
global_batch_shape = (
self.GLOBAL_BATCH_SIZE
// parallel_state.get_data_parallel_world_size(),
hidden_size,
hidden_size,
)
batch = None
if parallel_state.is_pipeline_first_stage():
batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )
# Construct model
model = build_model(
testing_utils.model_provider_func,
wrap_with_ddp=True,
virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
hidden_size=hidden_size,
)
for module in model:
module.to(dtype)
module.module.apply(get_init_weights_func(0))
# Construct context that keeps track whenever entered/exited
grad_sync_context_enter_count = 0
grad_sync_context_exit_count = 0
@contextlib.contextmanager
def custom_grad_sync_context():
try:
nonlocal grad_sync_context_enter_count
grad_sync_context_enter_count += 1
yield
finally:
nonlocal grad_sync_context_exit_count
grad_sync_context_exit_count += 1
for module in model:
for param in module.parameters():
param.grad = None
# Training step with pipeline parallelism
loss = _forward_backward_pipelining_with_interleaving(
testing_utils.fwd_step_func,
batch,
model,
forward_only=False,
tensor_shape=(microbatch_size, hidden_size, hidden_size),
dtype=dtype,
async_comm=False,
grad_scaler=None,
deallocate_pipeline_outputs=False,
sequence_parallel_enabled=False,
custom_sync_context_handler=custom_grad_sync_context,
)
torch.cuda.synchronize()
# Check context behavior
self.assertTrue(
grad_sync_context_enter_count > 0,
'Has not entered custom sync context',
)
self.assertEqual(
grad_sync_context_enter_count,
grad_sync_context_exit_count,
'Has not entered and exited custom sync context '
'the same number of times',
)
self.assertEqual(
grad_sync_context_exit_count,
virtual_pipeline_model_parallel_size + 1,
'Expected to exit custom sync context once per model chunk '
'and once at the function end',
)
# Clean up
parallel_state.destroy_model_parallel()
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_pipeline_parallel_fwd_bwd.py |
import logging
import torch
from torch.testing._internal import common_utils
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import mappings
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("torch").setLevel(logging.WARNING)
logging.getLogger("apex").setLevel(logging.WARNING)
class MappingTestBase:
def test_reduce(self):
for tensor_model_paralell_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_paralell_world_size > 0:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_paralell_world_size
)
t = torch.full((10, 10, 10, 10), 50, device=f"cuda:{self.rank}")
expected = torch.full(
(10, 10, 10, 10),
50 * tensor_model_paralell_world_size,
device=f"cuda:{self.rank}",
)
self.assertTrue(
torch.equal(mappings._reduce(t), expected),
msg=f"tensor_model_paralell_world_size: {tensor_model_paralell_world_size}",
)
parallel_state.destroy_model_parallel()
def test_split(self):
for tensor_model_paralell_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_paralell_world_size > 0:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_paralell_world_size
)
tensors = [
torch.randn(10, 1)
for _ in range(tensor_model_paralell_world_size)
]
x = torch.cat(tensors, 1)
out = mappings._split_along_last_dim(x)
self.assertTrue(
torch.equal(
out, tensors[parallel_state.get_tensor_model_parallel_rank()]
),
msg=f"tensor_model_paralell_world_size: {tensor_model_paralell_world_size}"
)
parallel_state.destroy_model_parallel()
def test_gather(self):
for tensor_model_paralell_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_paralell_world_size > 0:
continue
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_paralell_world_size
)
device = f"cuda:{self.rank}"
gathered = mappings._gather_along_last_dim(
torch.tensor(
[parallel_state.get_tensor_model_parallel_rank()], device=device
)
)
expected = torch.tensor(
[rank for rank in range(tensor_model_paralell_world_size)],
device=device,
)
self.assertTrue(
torch.equal(gathered, expected),
msg=f"tensor_model_paralell_world_size: {tensor_model_paralell_world_size}",
)
parallel_state.destroy_model_parallel()
class NcclMappingTest(MappingTestBase, NcclDistributedTestBase): pass
class UccMappingTest(MappingTestBase, UccDistributedTestBase): pass
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_mapping.py |
import logging
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("apex").setLevel(logging.WARNING)
class TransformerRandomTestBase:
def test_set_cuda_rng_state(self):
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_parallel_world_size:
continue
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size
)
size, seed = 123, 1234
torch.cuda.manual_seed(seed)
tensor = torch.cuda.FloatTensor(size)
rng_state = torch.cuda.get_rng_state()
rng_state_clone = rng_state.clone()
for _ in range(5):
torch.randn(size, out=tensor)
result_1 = tensor.clone()
self.assertEqual(rng_state.sub(rng_state_clone).max(), 0, msg=msg)
self.assertGreater(
torch.cuda.get_rng_state().sub(rng_state_clone).max(), 0,
msg=msg,
)
new_rng_state = torch.cuda.get_rng_state()
self.assertGreater(new_rng_state.sub(rng_state).max(), 0, msg=msg)
tensor_parallel.random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
tensor_parallel.random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
result_2 = tensor.clone()
self.assertEqual(result_2, result_1, msg=msg)
self.assertEqual(rng_state.sub(rng_state_clone).max(), 0, msg=msg)
parallel_state.destroy_model_parallel()
def test_cuda_rng_tracker(self):
for tensor_model_parallel_world_size in range(1, self.world_size + 1):
if self.world_size % tensor_model_parallel_world_size:
continue
msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size
)
seed_1, seed_2, size = 1234, 4321, [12, 21]
tensor = torch.cuda.FloatTensor(size)
torch.cuda.manual_seed(seed_1)
torch.randn(size, out=tensor)
target_11 = tensor.clone()
torch.randn(size, out=tensor)
target_12 = tensor.clone()
torch.cuda.manual_seed(seed_2)
torch.randn(size, out=tensor)
            target_21 = tensor.clone()
torch.randn(size, out=tensor)
target_22 = tensor.clone()
torch.cuda.manual_seed(seed_1)
tensor_parallel.random.get_cuda_rng_tracker().add("test", seed_2)
torch.randn(size, out=tensor)
result_11 = tensor.clone()
with tensor_parallel.random.get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_21 = tensor.clone()
torch.randn(size, out=tensor)
result_12 = tensor.clone()
with tensor_parallel.random.get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_22 = tensor.clone()
self.assertEqual(target_11, result_11, msg=msg)
self.assertEqual(target_12, result_12, msg=msg)
            self.assertEqual(target_21, result_21, msg=msg)
self.assertEqual(target_22, result_22, msg=msg)
self.assertNotEqual(result_11, result_21, msg=msg)
self.assertNotEqual(result_21, result_22, msg=msg)
tensor_parallel.random.get_cuda_rng_tracker().reset()
parallel_state.destroy_model_parallel()
class NcclTransformerRandomTest(TransformerRandomTestBase, NcclDistributedTestBase): pass
class UccTransformerRandomTest(TransformerRandomTestBase, UccDistributedTestBase): pass
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_random.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
class MyModel(torch.nn.Module):
def __init__(self, unique):
super(MyModel, self).__init__()
self.weight0 = Parameter(unique +
torch.arange(2, device='cuda', dtype=torch.float32))
self.weight1 = Parameter(1. + unique + torch.arange(2, device='cuda', dtype=torch.float16))
@staticmethod
def ops(input, weight0, weight1):
return ((input*(weight0.float()))*(weight1.float())).sum()
def forward(self, input):
return self.ops(input, self.weight0, self.weight1)
# Abandon all hope, ye who enter here.
# This is hands down the ugliest code I have ever written, but it succeeds in testing
# multiple models/optimizers/losses fairly thoroughly. Many of the different test cases
# require slightly divergent code in a way that seems near-impossible to genericize into a simple
# cross product or nested loops.
class TestMultipleModelsOptimizersLosses(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
def test_2models2losses1optimizer(self):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.125)
reference_grads = []
for i in range(2):
optimizer.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
reference_grads.append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
else:
iters = 2
model0 = MyModel(1)
model1 = MyModel(2)
models = [model0, model1]
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.125)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], optimizer = amp.initialize(
[model0, model1],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
with amp.scale_loss(loss0, optimizer, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if inject_inf_loc == "fp32":
model0.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model0.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if inject_inf_loc == "fp32":
model1.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model1.weight1.grad[0] = float('inf')
if i != inject_inf:
for param, reference_grad in zip(amp.master_params(optimizer),
reference_grads[unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer.step()
model_params = [p for p in model0.parameters()] + [p for p in model1.parameters()]
for model, master, reference in zip(
model_params,
amp.master_params(optimizer),
final_params):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
def test_3models2losses1optimizer(self):
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5},
{'params' : model2.parameters(), 'lr' : 0.125}],
momentum=0.125)
reference_grads = []
for i in range(2):
optimizer.zero_grad()
loss0 = model0(self.x) + model2(self.x)
loss1 = model1(self.x) + model2(self.x)
loss0.backward()
loss1.backward()
reference_grads.append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()] +
[param.grad.data.clone() for param in model2.parameters()])
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
if which_backward == 0:
which_models = (0, 2)
elif which_backward == 1:
which_models = (1, 2)
else:
iters = 2
which_models = (None,)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
models = [model0, model1, model2]
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5},
{'params' : model2.parameters(), 'lr' : 0.125}],
momentum=0.125)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1, model2], optimizer = amp.initialize(
[model0, model1, model2],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer.zero_grad()
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} which_model {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, which_model, use_multiple_loss_scalers))
loss0 = model0(self.x) + model2(self.x)
loss1 = model1(self.x) + model2(self.x)
with amp.scale_loss(loss0, optimizer, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if which_model == 0:
inj_model = model0
elif which_model == 2:
inj_model = model2
else:
raise RuntimeError(which_model + " invalid for loss 0")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if which_model == 1:
inj_model = model1
elif which_model == 2:
inj_model = model2
else:
raise RuntimeError(which_model + " invalid for loss 1 ")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
if i != inject_inf:
for param, reference_grad in zip(amp.master_params(optimizer),
reference_grads[unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer.step()
model_params = [p for p in model0.parameters()] + \
[p for p in model1.parameters()] + \
[p for p in model2.parameters()]
for model, master, reference in zip(
model_params,
amp.master_params(optimizer),
final_params):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
def test_2models2losses2optimizers(self):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
# Don't do it like this: reference_grads = [[]]*5
# because then it creates a list of 5 references to the same "[]" and appending
# to any of them effectively makes you append to all of them, which multiplies
# the resulting size of reference_grads by 5x and needless to say makes the test fail.
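        # For illustration: l = [[]] * 2; l[0].append(1)  =>  l == [[1], [1]] (both slots alias one list).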
reference_grads = [[], [], [], [], []]
final_params = [None, None, None, None, None]
for i in range(2):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
reference_grads[0].append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer0.step()
optimizer1.step()
final_params[0] = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
def what_got_skipped(which_iter, which_backward):
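            # Maps which (iteration, backward pass) got skipped to an index into
            # reference_grads/final_params; 0 means nothing was skipped.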
if which_iter == 0 and which_backward == 0:
return 1
if which_iter == 0 and which_backward == 1:
return 2
if which_iter == 1 and which_backward == 0:
return 3
if which_iter == 1 and which_backward == 1:
return 4
return 0
for which_iter in (0,1):
for which_backward in (0,1):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
for i in range(3):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
if i != which_iter:
reference_grads[what_got_skipped(which_iter, which_backward)].append(
[param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
if i == which_iter:
if which_backward == 0:
optimizer1.step()
else:
optimizer0.step()
else:
optimizer0.step()
optimizer1.step()
final_params[what_got_skipped(which_iter, which_backward)] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
else:
iters = 2
model0 = MyModel(1)
model1 = MyModel(2)
models = [model0, model1]
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], [optimizer0, optimizer1] = amp.initialize(
[model0, model1],
[optimizer0, optimizer1],
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
with amp.scale_loss(loss0, optimizer0, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if inject_inf_loc == "fp32":
model0.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model0.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer1, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if inject_inf_loc == "fp32":
model1.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model1.weight1.grad[0] = float('inf')
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers))
if i != inject_inf:
master_params = list(amp.master_params(optimizer0)) + \
list(amp.master_params(optimizer1))
for param, reference_grad in zip(master_params,
reference_grads[what_got_skipped(inject_inf, which_backward)][unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer0.step()
optimizer1.step()
model_params = [p for p in model0.parameters()] + [p for p in model1.parameters()]
master_params = [p for p in amp.master_params(optimizer0)] + \
[p for p in amp.master_params(optimizer1)]
for model, master, reference in zip(
model_params,
master_params,
final_params[what_got_skipped(inject_inf, which_backward)]):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
def test_3models2losses2optimizers(self):
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
# Again, can't do this: reference_grads = [[]]*9
reference_grads = [[], [], [], [], [], [], [], [], []]
final_params = [None, None, None, None, None, None, None, None, None]
for i in range(2):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
loss0.backward()
loss1.backward()
reference_grads[0].append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer0.step()
optimizer1.step()
final_params[0] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
def what_got_skipped(which_iter, which_backward, which_model):
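            # Maps which (iteration, backward pass, model receiving the inf) got skipped to an
            # index into reference_grads/final_params; 0 means nothing was skipped.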
if which_iter == 0:
if which_backward == 0:
if which_model == 0:
return 1
if which_model == 1:
return 2
if which_backward == 1:
if which_model == 2:
return 3
if which_model == 1:
return 4
if which_iter == 1:
if which_backward == 0:
if which_model == 0:
return 5
if which_model == 1:
return 6
if which_backward == 1:
if which_model == 2:
return 7
if which_model == 1:
return 8
return 0
for which_iter in (0,1):
for which_backward in (0,1):
if which_backward == 0:
which_models = (0,1)
if which_backward == 1:
which_models = (2,1)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
for i in range(3):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
loss0.backward()
loss1.backward()
if i != which_iter:
reference_grads[what_got_skipped(which_iter,
which_backward, which_model)].append(
[param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
if i == which_iter:
if which_backward == 0:
# if which_model == 0:
optimizer1.step()
# if which_model == 1:
# optimizer1.step()
if which_backward == 1:
# if which_model == 2:
# optimizer0.step()
# if which_model == 1:
continue
else:
optimizer0.step()
optimizer1.step()
final_params[what_got_skipped(which_iter, which_backward, which_model)] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
if which_backward == 0:
which_models = (0, 1)
elif which_backward == 1:
which_models = (2, 1)
else:
iters = 2
which_models = (None,)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
models = [model0, model1, model2]
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1, model2], [optimizer0, optimizer1] = amp.initialize(
[model0, model1, model2],
[optimizer0, optimizer1],
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
with amp.scale_loss(loss0, optimizer0, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if which_model == 0:
inj_model = model0
elif which_model == 1:
inj_model = model1
else:
raise RuntimeError(which_model + " invalid for loss 0")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, [optimizer0, optimizer1], loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if which_model == 2:
inj_model = model2
elif which_model == 1:
inj_model = model1
else:
raise RuntimeError(which_model + " invalid for loss 1 ")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
if i != inject_inf:
master_params = list(amp.master_params(optimizer0)) + \
list(amp.master_params(optimizer1))
for param, reference_grad in zip(master_params,
reference_grads[what_got_skipped(inject_inf,
which_backward, which_model)][unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer0.step()
optimizer1.step()
model_params = [p for p in model0.parameters()] + \
[p for p in model1.parameters()] + \
[p for p in model2.parameters()]
master_params = [p for p in amp.master_params(optimizer0)] + \
[p for p in amp.master_params(optimizer1)]
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {} which_model {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers, which_model))
for model, master, reference in zip(
model_params,
master_params,
final_params[what_got_skipped(inject_inf, which_backward, which_model)]):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_multiple_models_optimizers_losses.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_l2norm
from apex.multi_tensor_apply import MultiTensorApply
disabled = False
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
class TestMultiTensorL2Norm(unittest.TestCase):
def setUp(self):
common_init(self)
self.val = 4.0
self.overflow_buf = torch.cuda.IntTensor(1).zero_()
def tearDown(self):
pass
# The tensor creation here is written for convenience, not speed.
def l2norm(self, sizea, sizeb, applier, repeat_tensors, in_type, per_tensor):
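        # The fused kernel's overall L2 norm should match the norm of a single tensor holding
        # all (sizea + sizeb) * repeat_tensors copies of `self.val`; with `per_tensor=True`,
        # each per-tensor norm should match the corresponding input's own norm.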
self.overflow_buf.zero_()
a = torch.cuda.FloatTensor(sizea).fill_(self.val)
b = torch.cuda.FloatTensor(sizeb).fill_(self.val)
in_list = []
for i in range(repeat_tensors):
in_list += [a.clone().to(in_type), b.clone().to(in_type)]
if per_tensor:
norm, norm_per_tensor = applier(multi_tensor_l2norm, self.overflow_buf, [in_list], True)
normab = torch.cat((a.norm().view(1), b.norm().view(1)))
norm_per_tensor = norm_per_tensor.view(-1, 2)
else:
norm, _ = applier(multi_tensor_l2norm, self.overflow_buf, [in_list], True)
reference = torch.cuda.FloatTensor((sizea + sizeb)*repeat_tensors).fill_(self.val).norm()
self.assertTrue(torch.allclose(norm, reference))
if per_tensor:
self.assertTrue(torch.allclose(norm_per_tensor, normab))
self.assertTrue(self.overflow_buf.item() == 0)
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_fuzz(self):
input_size_pairs = (
(7777*77, 555*555),
(777, 555),
(555, 2048*32+1),
(2048*32+1, 555),
(555, 2048*32),
(2048*32, 555),
(33333, 555),
(555, 33333))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for in_type in (torch.float32, torch.float16):
for per_tensor in (False, True):
self.l2norm(sizea, sizeb, applier, repeat, in_type, per_tensor)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_multi_tensor_l2norm.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
disabled = False
from apex.optimizers import FusedSGD as FusedSGD
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
class MyModel(torch.nn.Module):
def __init__(self, unique):
super(MyModel, self).__init__()
self.weight0 = Parameter(unique +
torch.arange(2, device='cuda', dtype=torch.float32))
self.weight1 = Parameter(1. + unique + torch.arange(2, device='cuda', dtype=torch.float16))
@staticmethod
def ops(input, weight0, weight1):
return ((input*(weight0.float()))*(weight1.float())).sum()
def forward(self, input):
return self.ops(input, self.weight0, self.weight1)
# Abandon all hope, ye who enter here.
# This is hands down the ugliest code I have ever written, but it succeeds in testing
# multiple models/optimizers/losses fairly thoroughly. Many of the different test cases
# require slightly divergent code in a way that seems near-impossible to genericize into a simple
# cross product or nested loops.
class TestMultipleModelsOptimizersLosses(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_2models2losses1optimizer(self):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.125)
reference_grads = []
for i in range(2):
optimizer.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
reference_grads.append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for materialize_master_grads in (False, True):
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (False, True):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
else:
iters = 2
model0 = MyModel(1)
model1 = MyModel(2)
models = [model0, model1]
optimizer = FusedSGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.125,
materialize_master_grads=materialize_master_grads)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], optimizer = amp.initialize(
[model0, model1],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
with amp.scale_loss(loss0, optimizer, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if inject_inf_loc == "fp32":
model0.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model0.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if inject_inf_loc == "fp32":
model1.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model1.weight1.grad[0] = float('inf')
if i != inject_inf:
master_params = amp.master_params(optimizer)
for param, reference_grad in zip(master_params, reference_grads[unskipped]):
if opt_level == "O2" and not materialize_master_grads:
continue
else:
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()),
"opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers))
unskipped += 1
optimizer.step()
model_params = [p for p in model0.parameters()] + [p for p in model1.parameters()]
for model, master, reference in zip(
model_params,
amp.master_params(optimizer),
final_params):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_3models2losses1optimizer(self):
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5},
{'params' : model2.parameters(), 'lr' : 0.125}],
momentum=0.125)
reference_grads = []
for i in range(2):
optimizer.zero_grad()
loss0 = model0(self.x) + model2(self.x)
loss1 = model1(self.x) + model2(self.x)
loss0.backward()
loss1.backward()
reference_grads.append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()] +
[param.grad.data.clone() for param in model2.parameters()])
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
for materialize_master_grads in (False, True):
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (False, True):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
if which_backward == 0:
which_models = (0, 2)
elif which_backward == 1:
which_models = (1, 2)
else:
iters = 2
which_models = (None,)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
models = [model0, model1, model2]
optimizer = FusedSGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5},
{'params' : model2.parameters(), 'lr' : 0.125}],
momentum=0.125,
materialize_master_grads=materialize_master_grads)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1, model2], optimizer = amp.initialize(
[model0, model1, model2],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer.zero_grad()
loss0 = model0(self.x) + model2(self.x)
loss1 = model1(self.x) + model2(self.x)
with amp.scale_loss(loss0, optimizer, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if which_model == 0:
inj_model = model0
elif which_model == 2:
inj_model = model2
else:
raise RuntimeError(str(which_model) + " invalid for loss 0")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if which_model == 1:
inj_model = model1
elif which_model == 2:
inj_model = model2
else:
raise RuntimeError(str(which_model) + " invalid for loss 1")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
if i != inject_inf:
master_params = amp.master_params(optimizer)
for param, reference_grad in zip(master_params, reference_grads[unskipped]):
if opt_level == "O2" and not materialize_master_grads:
continue
else:
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()),
"opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} which_model {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, which_model, use_multiple_loss_scalers))
unskipped += 1
optimizer.step()
model_params = [p for p in model0.parameters()] + \
[p for p in model1.parameters()] + \
[p for p in model2.parameters()]
for model, master, reference in zip(
model_params,
amp.master_params(optimizer),
final_params):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_2models2losses2optimizers(self):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
# Don't do it like this: reference_grads = [[]]*5
# because then it creates a list of 5 references to the same "[]" and appending
# to any of them effectively makes you append to all of them, which multiplies
# the resulting size of reference_grads by 5x and needless to say makes the test fail.
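# A minimal, illustrative sketch of that aliasing pitfall (plain Python semantics,
# not executed as part of this test):
#     shared = [[]] * 5                  # five references to one underlying list
#     shared[0].append(1)                # shared is now [[1], [1], [1], [1], [1]]
#     separate = [[] for _ in range(5)]
#     separate[0].append(1)              # separate is [[1], [], [], [], []]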
reference_grads = [[], [], [], [], []]
final_params = [None, None, None, None, None]
for i in range(2):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
reference_grads[0].append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer0.step()
optimizer1.step()
final_params[0] = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
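# Maps (which_iter, which_backward) to an index into reference_grads/final_params;
# index 0 holds the baseline run in which nothing is skipped.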
def what_got_skipped(which_iter, which_backward):
if which_iter == 0 and which_backward == 0:
return 1
if which_iter == 0 and which_backward == 1:
return 2
if which_iter == 1 and which_backward == 0:
return 3
if which_iter == 1 and which_backward == 1:
return 4
return 0
for which_iter in (0,1):
for which_backward in (0,1):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
for i in range(3):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
if i != which_iter:
reference_grads[what_got_skipped(which_iter, which_backward)].append(
[param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
if i == which_iter:
if which_backward == 0:
optimizer1.step()
else:
optimizer0.step()
else:
optimizer0.step()
optimizer1.step()
final_params[what_got_skipped(which_iter, which_backward)] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for materialize_master_grads in (False, True):
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (False, True):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
else:
iters = 2
model0 = MyModel(1)
model1 = MyModel(2)
models = [model0, model1]
optimizer0 = FusedSGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125, materialize_master_grads=materialize_master_grads)
optimizer1 = FusedSGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25, materialize_master_grads=materialize_master_grads)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], [optimizer0, optimizer1] = amp.initialize(
[model0, model1],
[optimizer0, optimizer1],
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
with amp.scale_loss(loss0, optimizer0, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if inject_inf_loc == "fp32":
model0.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model0.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer1, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if inject_inf_loc == "fp32":
model1.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model1.weight1.grad[0] = float('inf')
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers))
if i != inject_inf:
master_params = list(amp.master_params(optimizer0)) + \
list(amp.master_params(optimizer1))
for param, reference_grad in zip(master_params,
reference_grads[what_got_skipped(inject_inf, which_backward)][unskipped]):
if opt_level == "O2" and not materialize_master_grads:
continue
else:
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer0.step()
optimizer1.step()
model_params = [p for p in model0.parameters()] + [p for p in model1.parameters()]
master_params = [p for p in amp.master_params(optimizer0)] + \
[p for p in amp.master_params(optimizer1)]
for model, master, reference in zip(
model_params,
master_params,
final_params[what_got_skipped(inject_inf, which_backward)]):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_3models2losses2optimizers(self):
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
# Again, can't do this: reference_grads = [[]]*9
reference_grads = [[], [], [], [], [], [], [], [], []]
final_params = [None, None, None, None, None, None, None, None, None]
for i in range(2):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
loss0.backward()
loss1.backward()
reference_grads[0].append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer0.step()
optimizer1.step()
final_params[0] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
def what_got_skipped(which_iter, which_backward, which_model):
if which_iter == 0:
if which_backward == 0:
if which_model == 0:
return 1
if which_model == 1:
return 2
if which_backward == 1:
if which_model == 2:
return 3
if which_model == 1:
return 4
if which_iter == 1:
if which_backward == 0:
if which_model == 0:
return 5
if which_model == 1:
return 6
if which_backward == 1:
if which_model == 2:
return 7
if which_model == 1:
return 8
return 0
for which_iter in (0,1):
for which_backward in (0,1):
if which_backward == 0:
which_models = (0,1)
if which_backward == 1:
which_models = (2,1)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
for i in range(3):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
loss0.backward()
loss1.backward()
if i != which_iter:
reference_grads[what_got_skipped(which_iter,
which_backward, which_model)].append(
[param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
if i == which_iter:
if which_backward == 0:
# if which_model == 0:
optimizer1.step()
# if which_model == 1:
# optimizer1.step()
if which_backward == 1:
# if which_model == 2:
# optimizer0.step()
# if which_model == 1:
continue
else:
optimizer0.step()
optimizer1.step()
final_params[what_got_skipped(which_iter, which_backward, which_model)] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
for materialize_master_grads in (False, True):
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (False, True):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
if which_backward == 0:
which_models = (0, 1)
elif which_backward == 1:
which_models = (2, 1)
else:
iters = 2
which_models = (None,)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
models = [model0, model1, model2]
optimizer0 = FusedSGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5, materialize_master_grads=materialize_master_grads)
optimizer1 = FusedSGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25, materialize_master_grads=materialize_master_grads)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1, model2], [optimizer0, optimizer1] = amp.initialize(
[model0, model1, model2],
[optimizer0, optimizer1],
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
with amp.scale_loss(loss0, optimizer0, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if which_model == 0:
inj_model = model0
elif which_model == 1:
inj_model = model1
else:
raise RuntimeError(str(which_model) + " invalid for loss 0")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, [optimizer0, optimizer1], loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if which_model == 2:
inj_model = model2
elif which_model == 1:
inj_model = model1
else:
raise RuntimeError(str(which_model) + " invalid for loss 1")
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
if i != inject_inf:
master_params = list(amp.master_params(optimizer0)) + \
list(amp.master_params(optimizer1))
for param, reference_grad in zip(master_params,
reference_grads[what_got_skipped(inject_inf,
which_backward, which_model)][unskipped]):
if opt_level == "O2" and not materialize_master_grads:
continue
else:
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer0.step()
optimizer1.step()
model_params = [p for p in model0.parameters()] + \
[p for p in model1.parameters()] + \
[p for p in model2.parameters()]
master_params = [p for p in amp.master_params(optimizer0)] + \
[p for p in amp.master_params(optimizer1)]
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {} which_model {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers, which_model))
for model, master, reference in zip(
model_params,
master_params,
final_params[what_got_skipped(inject_inf, which_backward, which_model)]):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_fused_sgd.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
class MyModel(torch.nn.Module):
def __init__(self, unique):
super(MyModel, self).__init__()
self.weight0 = Parameter(unique +
torch.arange(2, device='cuda', dtype=torch.float32))
self.weight1 = Parameter(1. + unique + torch.arange(2, device='cuda', dtype=torch.float16))
@staticmethod
def ops(input, weight0, weight1):
return ((input*(weight0.float()))*(weight1.float())).sum()
def forward(self, input):
return self.ops(input, self.weight0, self.weight1)
# Abandon all hope, ye who enter here.
class TestAddParamGroup(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
def zero_grad(self, models, optimizer, how_to_zero):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
elif how_to_zero == "optimizer":
optimizer.zero_grad()
def test_add_param_group(self):
for opt_level in ("O0", "O1", "O2", "O3"):
for zero_before_add in (True, False):
for try_accumulation in (True, False):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer.zero_grad()
loss = model0(self.x)
loss.backward()
optimizer.step()
if zero_before_add:
optimizer.zero_grad()
optimizer.add_param_group({'params' : model1.parameters(), 'lr' : 0.5})
if not zero_before_add:
optimizer.zero_grad()
loss = model0(self.x) + model1(self.x)
loss.backward(retain_graph=try_accumulation)
if try_accumulation:
loss.backward()
optimizer.step()
# Once more to make sure the new params pick up momentums properly
optimizer.zero_grad()
loss = model0(self.x) + model1(self.x)
loss.backward(retain_graph=try_accumulation)
if try_accumulation:
loss.backward()
optimizer.step()
reference_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for how_to_zero in "none", "model", "optimizer":
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], optimizer = amp.initialize([model0, model1],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
self.zero_grad([model0, model1], optimizer, how_to_zero)
loss = model0(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if zero_before_add:
self.zero_grad([model0, model1], optimizer, how_to_zero)
optimizer.add_param_group({'params' : model1.parameters(), 'lr' : 0.5})
if not zero_before_add:
self.zero_grad([model0, model1], optimizer, how_to_zero)
loss = model0(self.x) + model1(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(retain_graph=try_accumulation)
if try_accumulation:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
# Once more to make sure the new params pick up momentums properly
self.zero_grad([model0, model1], optimizer, how_to_zero)
loss = model0(self.x) + model1(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(retain_graph=try_accumulation)
if try_accumulation:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for reference, final in zip(reference_params, final_params):
self.assertTrue(torch.allclose(reference.to(final.dtype), final),
"opt_level = {}, how_to_zero = {}, zero_before_add = {}".format(
opt_level, how_to_zero, zero_before_add))
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_add_param_group.py |
GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/__init__.py |
|
import unittest
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT, DTYPES
class TestPromotion(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def run_binary_promote_test(self, fns, input_shape, x_inplace=False):
type_pairs = it.product(DTYPES, DTYPES)
for fn, (xtype, ytype) in it.product(fns, type_pairs):
x = torch.randn(input_shape, dtype=xtype).requires_grad_()
x_leaf = x
if x_inplace:
# We need a non-leaf to call in place on
x = x.clone()
y = torch.randn(input_shape, dtype=ytype)
out = fn(x, y)
if x_inplace:
# In place: always match xtype
self.assertEqual(out.type(), x.type())
else:
# Out of place: match widest type
if xtype == torch.float or ytype == torch.float:
self.assertEqual(out.type(), FLOAT)
else:
self.assertEqual(out.type(), HALF)
out.float().sum().backward()
self.assertEqual(x_leaf.grad.dtype, xtype)
def test_atan2_matches_widest(self):
fns = [lambda x, y : torch.atan2(x, y),
lambda x, y : x.atan2(y)]
self.run_binary_promote_test(fns, (self.b,))
def test_mul_matches_widest(self):
fns = [lambda x, y : torch.mul(x, y),
lambda x, y: x.mul(y)]
self.run_binary_promote_test(fns, (self.b,))
def test_cat_matches_widest(self):
shape = self.b
ys = [torch.randn(shape, dtype=torch.half) for _ in range(5)]
x_float = torch.randn(shape)
out = torch.cat(ys + [x_float])
self.assertEqual(out.type(), FLOAT)
x_half = torch.randn(shape, dtype=torch.half)
out = torch.cat(ys + [x_half])
self.assertEqual(out.type(), HALF)
def test_inplace_exp_is_error_for_half(self):
xs = torch.randn(self.b)
xs.exp_()
self.assertEqual(xs.type(), FLOAT)
xs = torch.randn(self.b, dtype=torch.half)
with self.assertRaises(NotImplementedError):
xs.exp_()
def test_inplace_add_matches_self(self):
fn = lambda x, y: x.add_(y)
self.run_binary_promote_test([fn], (self.b,), x_inplace=True)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_promotion.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from math import floor
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_axpby
from apex.multi_tensor_apply import MultiTensorApply
disabled = False
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
try_nhwc = (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4)
class TestMultiTensorAxpby(unittest.TestCase):
def setUp(self):
common_init(self)
self.a = 2.0
self.b = 8.0
self.xval = 4.0
self.yval = 16.0
self.overflow_buf = torch.cuda.IntTensor(1).zero_()
self.ref = torch.full((1,), 136.0, device="cuda", dtype=torch.float32)
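# self.ref is the expected elementwise result of a*x + b*y = 2.0*4.0 + 8.0*16.0 = 136.0,
# using the constants defined above.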
def tearDown(self):
pass
# The tensor creation here is written for convenience, not speed.
def axpby(self, sizea, sizeb, applier, repeat_tensors,
x_type, y_type, out_type, inplace=False, nhwc=False):
self.overflow_buf.zero_()
sizea = sizea if isinstance(sizea, tuple) else (sizea,)
sizeb = sizeb if isinstance(sizeb, tuple) else (sizeb,)
t1 = torch.full(sizea, 1.0, device="cuda", dtype=torch.float32)
t2 = torch.full(sizeb, 1.0, device="cuda", dtype=torch.float32)
def to_fmt(t, tp):
if nhwc:
return t.clone().to(tp, memory_format=torch.channels_last)
else:
return t.clone().to(tp)
y_list = []
for i in range(repeat_tensors):
y_list += [to_fmt(t1, y_type)*self.yval, to_fmt(t2, y_type)*self.yval]
x_list = [to_fmt(x, x_type)*(self.xval/self.yval) for x in y_list]
if inplace:
out_list = y_list
else:
out_list = [to_fmt(out, out_type)*3.0 for out in y_list]
applier(multi_tensor_axpby, self.overflow_buf, [x_list, y_list, out_list], self.a, self.b, -1)
self.assertTrue(all([torch.allclose(out, self.ref.to(out_type)) for out in out_list]),
msg="{} {} {} {} {} {} {}".format(sizea, sizeb, repeat_tensors,
x_type, y_type, out_type, inplace))
self.assertTrue(self.overflow_buf.item() == 0,
msg="{} {} {} {} {} {} {}".format(sizea, sizeb, repeat_tensors,
x_type, y_type, out_type, inplace))
# def find_inf(self, sizea, sizeb, applier, repeat_tensors, in_type, out_type, t, ind, val, inplace=False):
# self.overflow_buf.zero_()
# a = torch.cuda.FloatTensor(sizea).fill_(self.scale)
# b = torch.cuda.FloatTensor(sizeb).fill_(self.scale)
# out_list = []
# for i in range(repeat_tensors):
# out_list += [a.clone().to(out_type), b.clone().to(out_type)]
# if inplace:
# in_list = out_list
# else:
# in_list = [out.clone().to(in_type) for out in out_list]
# applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
# self.overflow_buf.zero_()
# in_list[t][ind] = val
# applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
# self.assertTrue(self.overflow_buf.item())
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_fuzz(self):
input_size_pairs = (
(7777*77, 555*555),
(777, 555),
(555, 2048*32+1),
(2048*32+1, 555),
(555, 2048*32),
(2048*32, 555),
(33333, 555),
(555, 33333))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for x_type in (torch.float32, torch.float16):
for y_type in (torch.float32, torch.float16):
for out_type in (torch.float32, torch.float16):
for inplace in (True, False):
if inplace is True and (y_type is not out_type):
continue
else:
self.axpby(sizea, sizeb, applier, repeat,
x_type, y_type, out_type, inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 0, 0, float('nan'), inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 2*repeat-1, sizeb-1, float('inf'), inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 2*(repeat//2), sizea//2, float('inf'), inplace=inplace)
@unittest.skipIf(disabled, "amp_C is unavailable")
@unittest.skipIf(not try_nhwc, "torch version is 1.4 or earlier, may not support nhwc")
def test_fuzz_nhwc(self):
input_size_pairs = (
((7, 77, 7, 77), (5, 55, 5, 55)),
((1, 1, 777, 1), (1, 1, 555, 1)),
((5, 47, 5, 55), (1, 1, 1, 2048*32 + 1)),
((1, 1, 1, 2048*32 + 1), (55, 47, 5, 55)),
((555, 1, 1, 1), (32, 8, 32, 8)),
((32, 8, 32, 8), (55, 47, 5, 55)),
((1, 1, 33333, 1), (55, 47, 55, 5)),
((55, 47, 55, 5), (1, 1, 33333, 1)))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for x_type in (torch.float32, torch.float16):
for y_type in (torch.float32, torch.float16):
for out_type in (torch.float32, torch.float16):
for inplace in (True, False):
if inplace is True and (y_type is not out_type):
continue
else:
self.axpby(sizea, sizeb, applier, repeat,
x_type, y_type, out_type, inplace=inplace, nhwc=True)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 0, 0, float('nan'), inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 2*repeat-1, sizeb-1, float('inf'), inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 2*(repeat//2), sizea//2, float('inf'), inplace=inplace)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_multi_tensor_axpby.py |
import torch
HALF = 'torch.cuda.HalfTensor'
FLOAT = 'torch.cuda.FloatTensor'
DTYPES = [torch.half, torch.float]
ALWAYS_HALF = {torch.float: HALF,
torch.half: HALF}
ALWAYS_FLOAT = {torch.float: FLOAT,
torch.half: FLOAT}
MATCH_INPUT = {torch.float: FLOAT,
torch.half: HALF}
def common_init(test_case):
test_case.h = 64
test_case.b = 16
test_case.c = 16
test_case.k = 3
test_case.t = 10
torch.set_default_tensor_type(torch.cuda.FloatTensor)
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/utils.py |
import unittest
from apex import amp
import random
import torch
from torch import nn
from utils import common_init, HALF
class TestRnnCells(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def run_cell_test(self, cell, state_tuple=False):
shape = (self.b, self.h)
for typ in [torch.float, torch.half]:
xs = [torch.randn(shape, dtype=typ).requires_grad_()
for _ in range(self.t)]
hidden_fn = lambda: torch.zeros(shape, dtype=typ)
if state_tuple:
hidden = (hidden_fn(), hidden_fn())
else:
hidden = hidden_fn()
outputs = []
for i in range(self.t):
hidden = cell(xs[i], hidden)
if state_tuple:
output = hidden[0]
else:
output = hidden
outputs.append(output)
for y in outputs:
self.assertEqual(y.type(), HALF)
outputs[-1].float().sum().backward()
for i, x in enumerate(xs):
self.assertEqual(x.grad.dtype, x.dtype)
def test_rnn_cell_is_half(self):
cell = nn.RNNCell(self.h, self.h)
self.run_cell_test(cell)
def test_gru_cell_is_half(self):
cell = nn.GRUCell(self.h, self.h)
self.run_cell_test(cell)
def test_lstm_cell_is_half(self):
cell = nn.LSTMCell(self.h, self.h)
self.run_cell_test(cell, state_tuple=True)
class TestRnns(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def run_rnn_test(self, rnn, layers, bidir, state_tuple=False):
for typ in [torch.float, torch.half]:
x = torch.randn((self.t, self.b, self.h), dtype=typ).requires_grad_()
hidden_fn = lambda: torch.zeros((layers + (layers * bidir),
self.b, self.h), dtype=typ)
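# First dim of the hidden state is num_layers * num_directions
# (layers + layers*bidir), as nn.RNN/GRU/LSTM expect.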
if state_tuple:
hidden = (hidden_fn(), hidden_fn())
else:
hidden = hidden_fn()
output, _ = rnn(x, hidden)
self.assertEqual(output.type(), HALF)
output[-1, :, :].float().sum().backward()
self.assertEqual(x.grad.dtype, x.dtype)
def test_rnn_is_half(self):
configs = [(1, False), (2, False), (2, True)]
for layers, bidir in configs:
rnn = nn.RNN(input_size=self.h, hidden_size=self.h, num_layers=layers,
nonlinearity='relu', bidirectional=bidir)
self.run_rnn_test(rnn, layers, bidir)
def test_gru_is_half(self):
configs = [(1, False), (2, False), (2, True)]
for layers, bidir in configs:
rnn = nn.GRU(input_size=self.h, hidden_size=self.h, num_layers=layers,
bidirectional=bidir)
self.run_rnn_test(rnn, layers, bidir)
def test_lstm_is_half(self):
configs = [(1, False), (2, False), (2, True)]
for layers, bidir in configs:
rnn = nn.LSTM(input_size=self.h, hidden_size=self.h, num_layers=layers,
bidirectional=bidir)
self.run_rnn_test(rnn, layers, bidir, state_tuple=True)
def test_rnn_packed_sequence(self):
num_layers = 2
rnn = nn.RNN(input_size=self.h, hidden_size=self.h, num_layers=num_layers)
for typ in [torch.float, torch.half]:
x = torch.randn((self.t, self.b, self.h), dtype=typ).requires_grad_()
lens = sorted([random.randint(self.t // 2, self.t) for _ in range(self.b)],
reverse=True)
# `pack_padded_sequence` breaks if default tensor type is non-CPU
torch.set_default_tensor_type(torch.FloatTensor)
lens = torch.tensor(lens, dtype=torch.int64, device=torch.device('cpu'))
packed_seq = nn.utils.rnn.pack_padded_sequence(x, lens)
torch.set_default_tensor_type(torch.cuda.FloatTensor)
hidden = torch.zeros((num_layers, self.b, self.h), dtype=typ)
output, _ = rnn(packed_seq, hidden)
self.assertEqual(output.data.type(), HALF)
output.data.float().sum().backward()
self.assertEqual(x.grad.dtype, x.dtype)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_rnn.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_scale
from apex.multi_tensor_apply import MultiTensorApply
disabled = False
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
class TestMultiTensorScale(unittest.TestCase):
def setUp(self):
common_init(self)
self.scale = 4.0
self.overflow_buf = torch.cuda.IntTensor(1).zero_()
self.ref = torch.cuda.FloatTensor([1.0])
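# Inputs are filled with self.scale and then multiplied by 1./self.scale,
# so every output element should compare equal to self.ref (1.0).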
def tearDown(self):
pass
# The tensor creation here is written for convenience, not speed.
def downscale(self, sizea, sizeb, applier, repeat_tensors, in_type, out_type, inplace=False):
self.overflow_buf.zero_()
a = torch.cuda.FloatTensor(sizea).fill_(self.scale)
b = torch.cuda.FloatTensor(sizeb).fill_(self.scale)
out_list = []
for i in range(repeat_tensors):
out_list += [a.clone().to(out_type), b.clone().to(out_type)]
if inplace:
in_list = out_list
else:
in_list = [out.clone().to(in_type) for out in out_list]
applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
self.assertTrue(all([torch.allclose(out, self.ref.to(out_type)) for out in out_list]))
self.assertTrue(self.overflow_buf.item() == 0)
def find_inf(self, sizea, sizeb, applier, repeat_tensors, in_type, out_type, t, ind, val, inplace=False):
self.overflow_buf.zero_()
a = torch.cuda.FloatTensor(sizea).fill_(self.scale)
b = torch.cuda.FloatTensor(sizeb).fill_(self.scale)
out_list = []
for i in range(repeat_tensors):
out_list += [a.clone().to(out_type), b.clone().to(out_type)]
if inplace:
in_list = out_list
else:
in_list = [out.clone().to(in_type) for out in out_list]
applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
self.overflow_buf.zero_()
in_list[t][ind] = val
applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
self.assertTrue(self.overflow_buf.item())
# Currently, the fused kernel gives a hard error if you attempt to downscale
# into fp16 output, which imo is the desired behavior. Maybe someday we
# will learn otherwise.
# @unittest.skipIf(disabled, "amp_C is unavailable")
# def test_fp16_to_fp16(self):
# self.downscale(self.fp16, self.fp16, self.fp16_ref)
#
# @unittest.skipIf(disabled, "amp_C is unavailable")
# def test_fp32_to_fp16(self):
# self.downscale(self.fp32, self.fp16, self.fp16_ref)
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_fuzz(self):
input_size_pairs = (
(7777*77, 555*555),
(777, 555),
(555, 2048*32+1),
(2048*32+1, 555),
(555, 2048*32),
(2048*32, 555),
(33333, 555),
(555, 33333))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for in_type in (torch.float32, torch.float16):
for out_type in (torch.float32, torch.float16):
for inplace in (True, False):
if inplace is True and (out_type is not in_type):
continue
else:
self.downscale(sizea, sizeb, applier, repeat, in_type, out_type, inplace=inplace)
self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
0, 0, float('nan'), inplace=inplace)
self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
2*repeat-1, sizeb-1, float('inf'), inplace=inplace)
self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
2*(repeat//2), sizea//2, float('inf'), inplace=inplace)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_multi_tensor_scale.py |
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from apex import amp
from utils import common_init, FLOAT
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3, 1, 1)
self.bn1 = nn.BatchNorm2d(6)
self.param = nn.Parameter(torch.randn(1))
def forward(self, x):
x = x * self.param
x = F.relu(self.conv1(x))
x = self.bn1(x)
return x
class TestCheckpointing(unittest.TestCase):
def setUp(self):
self.initial_lr = 1e-3
self.test_opt_levels = ("O0", "O1", "O2", "O3")
def seed(self):
torch.manual_seed(2809)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def check_state_dict_fp32(self, state_dict):
for key in state_dict:
if 'num_batches_tracked' in key:
continue
param = state_dict[key]
self.assertEqual(param.type(), FLOAT,
'Parameter in state_dict not FLOAT')
def train_step(self, model, optimizer, data, loss_ids):
optimizer.zero_grad()
output = model(data)
# Call backward for num_losses-1
for idx in loss_ids:
loss = output.mean()
with amp.scale_loss(loss, optimizer, loss_id=idx) as scaled_loss:
scaled_loss.backward(retain_graph=True)
optimizer.step()
return output
def compare_models(self, modelA, modelB, test_setup=''):
state_dictA = modelA.state_dict()
state_dictB = modelB.state_dict()
self.assertEqual(len(state_dictA), len(state_dictB),
'state_dicts have different lengths' + test_setup)
for key in state_dictA:
paramA = state_dictA[key]
paramB = state_dictB[key]
self.assertTrue((paramA==paramB).all(),
msg='Parameters in state_dicts not equal. ' +
'key: {}\nparam: {}\nrestored: {}\ndiff: {} for {}'.format(
key, paramA, paramB, paramA - paramB, test_setup))
def test_restoring(self):
nb_epochs = 10
nb_epochs_restore = nb_epochs // 2
for opt_level in self.test_opt_levels:
for res_opt_level in self.test_opt_levels:
for amp_before_load in [True, False]:
for num_losses in range(1, 3):
test_setup = ('#' * 75 + '\n' + \
f'opt_level {opt_level}\n' + \
f'restore_opt_level {res_opt_level}\n' + \
f'amp_before_load {amp_before_load}\n' + \
f'num_losses {num_losses}\n')
self.seed()
# Create reference model
model = MyModel().to('cuda')
optimizer = optim.SGD(model.parameters(),
lr=self.initial_lr)
# Initialize with num_losses*2 for the original model and the restored one
model, optimizer = amp.initialize(
model, optimizer, opt_level=opt_level,
num_losses=num_losses*2, verbosity=0)
# Compare training behavior for same restore option
# We cannot really generalize it, since a saved model in O0
# would introduce a skipped step in O1, which will raise an error
if opt_level == res_opt_level:
# train for nb_epochs and restore after nb_epochs_restore
for epoch in range(nb_epochs):
x = torch.randn(16, 3, 24, 24, device='cuda')
output = self.train_step(
model, optimizer, x, range(num_losses))
# Initialize model one step before comparing.
# Otherwise the batchnorm layers will be updated
# additionally in restore_model
if epoch == (nb_epochs_restore - 1):
# Load model and optimizer
checkpoint = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'amp': amp.state_dict()
}
# Check state_dict for FP32 tensors
self.check_state_dict_fp32(checkpoint['model'])
# Restore model
restore_model = MyModel().to('cuda')
restore_optimizer = optim.SGD(
restore_model.parameters(),
lr=self.initial_lr)
if amp_before_load:
restore_model, restore_optimizer = amp.initialize(
restore_model,
restore_optimizer,
opt_level=res_opt_level,
num_losses=num_losses*2,
verbosity=0)
restore_model.load_state_dict(checkpoint['model'])
restore_optimizer.load_state_dict(checkpoint['optimizer'])
# FIXME: We cannot test the amp.state_dict in the same script
# amp.load_state_dict(checkpoint['amp'])
if not amp_before_load:
restore_model, restore_optimizer = amp.initialize(
restore_model,
restore_optimizer,
opt_level=res_opt_level,
num_losses=num_losses*2,
verbosity=0)
elif epoch >= nb_epochs_restore:
restore_output = self.train_step(
restore_model,
restore_optimizer,
x,
range(num_losses, num_losses*2))
self.assertTrue(
torch.allclose(output.float(), restore_output.float()),
'Output of reference and restored models differ for ' + test_setup)
self.compare_models(model, restore_model, test_setup)
# if opt_level != res_opt_level
else:
# skip tests for different opt_levels
continue
def test_loss_scale_decrease(self):
num_losses = 3
nb_decrease_loss_scales = [0, 1, 2]
for opt_level in self.test_opt_levels:
#print('#' * 75 + f'\n opt_level {opt_level}\n')
# Create new tmp copy for this run
nb_decrease_loss_scales_tmp = list(nb_decrease_loss_scales)
model = MyModel().to('cuda')
optimizer = optim.SGD(model.parameters(),
lr=self.initial_lr)
model, optimizer = amp.initialize(
model, optimizer, opt_level=opt_level, num_losses=num_losses,
verbosity=0)
if amp._amp_state.opt_properties.loss_scale != 'dynamic':
#print('Static loss scale set. Skipping opt_level.')
continue
# force to skip some updates to decrease the loss_scale
initial_loss_scales = []
for idx in range(num_losses):
initial_loss_scales.append(
amp._amp_state.loss_scalers[idx].loss_scale())
for _ in range(len(nb_decrease_loss_scales)):
x = torch.randn(16, 3, 24, 24, device='cuda')
for idx in range(num_losses):
while nb_decrease_loss_scales_tmp[idx] > 0:
optimizer.zero_grad()
output = model(x * 2**17)
loss = output.mean()
with amp.scale_loss(loss, optimizer, loss_id=idx) as scaled_loss:
scaled_loss.backward(retain_graph=True)
optimizer.step()
nb_decrease_loss_scales_tmp[idx] -= 1
# Check loss scales afterwards
updated_loss_scales = []
for idx in range(num_losses):
updated_loss_scales.append(
amp._amp_state.loss_scalers[idx].loss_scale())
for factor, update_ls, init_ls in zip(nb_decrease_loss_scales,
updated_loss_scales,
initial_loss_scales):
self.assertEqual(update_ls, init_ls / 2**factor)
# Check state dict
amp_state_dict = amp.state_dict()
for scaler_idx, factor, init_ls in zip(amp_state_dict,
nb_decrease_loss_scales,
initial_loss_scales):
scaler = amp_state_dict[scaler_idx]
self.assertEqual(scaler['loss_scale'], init_ls / 2**factor)
unskipped_target = 0
self.assertEqual(scaler['unskipped'], unskipped_target)
def test_state_dict(self):
for opt_level in self.test_opt_levels:
# Skip O3
if opt_level == 'O3':
continue
model = MyModel().to('cuda')
optimizer = optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = amp.initialize(
model, optimizer, opt_level=opt_level, verbosity=0)
# Export state_dict and check for Half
state_dict = model.state_dict()
for key in state_dict:
self.assertFalse('Half' in state_dict[key].type())
# Check, if model is still trainable
# Create dummy data
data = torch.randn(10, 3, 4, 4, device='cuda')
target = torch.randn(10, 6, 4, 4, device='cuda')
# Get initial loss
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, target)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
last_loss = loss.item()
# train for some epochs
for epoch in range(10):
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, target)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
self.assertTrue(loss.item() < last_loss)
last_loss = loss.item()
if __name__=='__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_checkpointing.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
def get_reference_grad(i, w, ops):
# Creating new tensors ensures, among other things, that the new tensors are not in the cache.
# In fact, they are guaranteed not to use the cache because they are not torch.nn.Parameters.
fp32_i = i.detach().clone().float()
fp32_w = w.detach().clone().float().requires_grad_()
loss = ops(fp32_i, fp32_w)
loss.backward()
return fp32_w.grad
class WhitelistModule(torch.nn.Module):
def __init__(self, dtype):
super(WhitelistModule, self).__init__()
self.weight = torch.nn.Parameter(torch.arange(8*8, device='cuda', dtype=dtype).view(8,8))
@staticmethod
def ops(input, weight):
return (input.mm(weight)).mm(weight).sum()
def forward(self, input):
return self.ops(input, self.weight)
class BlacklistModule(torch.nn.Module):
def __init__(self, dtype):
super(BlacklistModule, self).__init__()
self.weight = torch.nn.Parameter(torch.arange(2*8, device='cuda', dtype=dtype).view(2,8))
@staticmethod
def ops(input, weight):
return (input + torch.pow(weight, 2) + torch.pow(weight, 2)).sum()
def forward(self, input):
return self.ops(input, self.weight)
class PromoteModule(torch.nn.Module):
def __init__(self, dtype):
super(PromoteModule, self).__init__()
self.weight = torch.nn.Parameter(torch.arange(2*8, device='cuda', dtype=dtype).view(2,8))
@staticmethod
def ops(input, weight):
return ((input*weight)*weight).sum()
def forward(self, input):
return self.ops(input, self.weight)
class TestCache(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2, 8), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
def train_eval_train_test(self, module, t):
model = module(t).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
_amp_state.allow_incoming_model_not_fp32 = True
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
_amp_state.allow_incoming_model_not_fp32 = False
def training_step():
for param in model.parameters():
param.grad = None
loss = model(self.x).sum()
_amp_state.loss_scalers[0]._loss_scale = 4.0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
self.assertEqual(len([p.grad for p in model.parameters() if p.grad is not None]), 1)
self.assertEqual(model.weight.grad.type(), model.weight.type())
reference_grad = get_reference_grad(self.x, model.weight, model.ops)
# Currently there's no difference in the allclose calls, so no need for branching,
# but I'm keeping this in case we want different tolerances for fp16 and fp32 checks.
if model.weight.grad.type() == "torch.cuda.HalfTensor":
self.assertTrue(torch.allclose(model.weight.grad.float(), reference_grad))
elif model.weight.grad.type() == "torch.cuda.FloatTensor":
self.assertTrue(torch.allclose(model.weight.grad.float(), reference_grad))
else:
raise RuntimeError("model.weight.grad.type = {}".format(model.weight.grad.type()))
model.weight.data -= 1.
# Simulates first epoch
training_step()
# Simulates eval
with torch.no_grad():
loss = model(self.x).sum()
# Simulates resuming training after eval
training_step()
_amp_state.handle._deactivate()
# I could easily have these as a set of for loops in a single test,
# instead of going for granularity.
def test_whitelist_module_fp16_weight(self):
self.train_eval_train_test(WhitelistModule, torch.float16)
def test_whitelist_module_fp32_weight(self):
self.train_eval_train_test(WhitelistModule, torch.float32)
def test_blacklist_module_fp16_weight(self):
self.train_eval_train_test(BlacklistModule, torch.float16)
def test_blacklist_module_fp32_weight(self):
self.train_eval_train_test(BlacklistModule, torch.float32)
def test_promote_module_fp16_weight(self):
self.train_eval_train_test(PromoteModule, torch.float16)
def test_promote_module_fp32_weight(self):
self.train_eval_train_test(PromoteModule, torch.float32)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_cache.py |
import unittest
import torch
from torch import nn
from torch.nn import Parameter
from apex import amp
from apex.parallel.LARC import LARC
from utils import common_init
class MyModel(torch.nn.Module):
def __init__(self, unique):
super(MyModel, self).__init__()
self.weight0 = Parameter(
unique + torch.arange(2, device="cuda", dtype=torch.float32)
)
def forward(self, input):
return (input * self.weight0).sum()
class TestLARC(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2), device="cuda", dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
def test_larc_mixed_precision(self):
for opt_level in ["O0", "O1", "O2", "O3"]:
model = MyModel(1)
optimizer = LARC(
torch.optim.SGD(
[{"params": model.parameters(), "lr": 0.25}], momentum=0.125
)
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=opt_level, verbosity=0
)
optimizer.zero_grad()
loss = model(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_larc.py |
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
def run_layer_test(test_case, fns, expected, input_shape, test_backward=True):
for fn, typ in it.product(fns, expected.keys()):
x = torch.randn(input_shape, dtype=typ).requires_grad_()
y = fn(x)
test_case.assertEqual(y.type(), expected[typ])
if test_backward:
y.float().sum().backward()
test_case.assertEqual(x.grad.type(), MATCH_INPUT[typ])
class TestBasicCasts(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def test_linear_is_half(self):
m = nn.Linear(self.h, self.h)
f = ft.partial(F.linear, weight=m.weight, bias=m.bias)
run_layer_test(self, [m, f], ALWAYS_HALF, (self.b, self.h))
def test_conv2d_is_half(self):
m = nn.Conv2d(self.c, self.c, self.k)
f = ft.partial(F.conv2d, weight=m.weight, bias=m.bias)
run_layer_test(self, [m, f], ALWAYS_HALF, (self.b, self.c, self.h, self.h))
def test_softmax_is_float(self):
m = nn.Softmax(dim=1)
f = ft.partial(F.softmax, dim=1)
run_layer_test(self, [m, f], ALWAYS_FLOAT, (self.b, self.h))
def test_group_norm_is_float(self):
m = nn.GroupNorm(num_groups=4, num_channels=self.c)
run_layer_test(self, [m], ALWAYS_FLOAT, (self.b, self.c, self.h, self.h))
def test_mse_loss_is_float(self):
shape = (self.b, self.h)
target = torch.randn(shape)
mod = nn.MSELoss()
m = lambda x: mod(x, target)
f = ft.partial(F.mse_loss, target=target)
run_layer_test(self, [m], ALWAYS_FLOAT, shape)
def test_relu_is_match(self):
run_layer_test(self, [nn.ReLU(), F.relu], MATCH_INPUT, (self.b, self.h))
def test_batch_norm_is_match(self):
m = nn.BatchNorm2d(num_features=self.c)
f = ft.partial(F.batch_norm, running_mean=m.running_mean, running_var=m.running_var,
weight=m.weight, bias=m.bias, training=True)
run_layer_test(self, [m], MATCH_INPUT, (self.b, self.c, self.h, self.h))
# Test forward-only for BN inference
m.eval()
f = ft.partial(F.batch_norm, running_mean=m.running_mean, running_var=m.running_var,
weight=m.weight, bias=m.bias, training=False)
run_layer_test(self, [m, f], MATCH_INPUT, (self.b, self.c, self.h, self.h),
test_backward=False)
class TestBannedMethods(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def bce_common(self, assertion):
shape = (self.b, self.h)
target = torch.rand(shape)
mod = nn.BCELoss()
m = lambda x: mod(x, target)
f = ft.partial(F.binary_cross_entropy, target=target)
for fn in [m, f]:
x = torch.rand(shape, dtype=torch.half)
assertion(fn, x)
def test_bce_raises_by_default(self):
assertion = lambda fn, x: self.assertRaises(NotImplementedError, fn, x)
self.bce_common(assertion)
def test_bce_is_float_with_allow_banned(self):
self.handle._deactivate()
self.handle = amp.init(enabled=True, allow_banned=True)
assertion = lambda fn, x: self.assertEqual(fn(x).type(), FLOAT)
self.bce_common(assertion)
class TestTensorCasts(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def test_matmul_method_is_half(self):
other = torch.randn(self.h, self.h)
lhs = lambda x: x.matmul(other)
rhs = lambda x: other.matmul(x)
run_layer_test(self, [lhs, rhs], ALWAYS_HALF, (self.h, self.h))
def test_matmul_op_is_half(self):
other = torch.randn(self.h, self.h)
lhs = lambda x: x @ other
rhs = lambda x: other @ x
run_layer_test(self, [lhs, rhs], ALWAYS_HALF, (self.h, self.h))
def test_pow_method_is_float(self):
fn = lambda x: x.pow(2.)
run_layer_test(self, [fn], ALWAYS_FLOAT, (self.b, self.h))
def test_pow_op_is_float(self):
fn = lambda x: x ** 2.
run_layer_test(self, [fn], ALWAYS_FLOAT, (self.b, self.h))
def test_cpu_is_float(self):
fn = lambda x: x.cpu()
always_cpu_float = {torch.float: 'torch.FloatTensor',
torch.half: 'torch.FloatTensor'}
run_layer_test(self, [fn], always_cpu_float, (self.b, self.h))
def test_sum_is_float(self):
fn = lambda x: x.sum()
run_layer_test(self, [fn], ALWAYS_FLOAT, (self.b, self.h))
# TODO: maybe more tests on disabled casting?
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_amp/test_basic_casts.py |
import unittest
import torch
import torch.nn as nn
from apex.fp16_utils import FP16Model
class DummyBlock(nn.Module):
def __init__(self):
super(DummyBlock, self).__init__()
self.conv = nn.Conv2d(10, 10, 2)
self.bn = nn.BatchNorm2d(10, affine=True)
def forward(self, x):
return self.conv(self.bn(x))
class DummyNet(nn.Module):
def __init__(self):
super(DummyNet, self).__init__()
self.conv1 = nn.Conv2d(3, 10, 2)
self.bn1 = nn.BatchNorm2d(10, affine=False)
self.db1 = DummyBlock()
self.db2 = DummyBlock()
def forward(self, x):
out = x
out = self.conv1(out)
out = self.bn1(out)
out = self.db1(out)
out = self.db2(out)
return out
class DummyNetWrapper(nn.Module):
def __init__(self):
super(DummyNetWrapper, self).__init__()
self.bn = nn.BatchNorm2d(3, affine=True)
self.dn = DummyNet()
def forward(self, x):
return self.dn(self.bn(x))
class TestFP16Model(unittest.TestCase):
def setUp(self):
self.N = 64
self.C_in = 3
self.H_in = 16
self.W_in = 32
self.in_tensor = torch.randn((self.N, self.C_in, self.H_in, self.W_in)).cuda()
self.orig_model = DummyNetWrapper().cuda()
self.fp16_model = FP16Model(self.orig_model)
def test_params_and_buffers(self):
exempted_modules = [
self.fp16_model.network.bn,
self.fp16_model.network.dn.db1.bn,
self.fp16_model.network.dn.db2.bn,
]
for m in self.fp16_model.modules():
expected_dtype = torch.float if (m in exempted_modules) else torch.half
for p in m.parameters(recurse=False):
assert p.dtype == expected_dtype
for b in m.buffers(recurse=False):
assert b.dtype in (expected_dtype, torch.int64)
def test_output_is_half(self):
out_tensor = self.fp16_model(self.in_tensor)
assert out_tensor.dtype == torch.half
| GeneSplice-main | GeneSplice/apex/tests/L0/run_fp16util/test_fp16util.py |
GeneSplice-main | GeneSplice/apex/tests/L0/run_fp16util/__init__.py |
|
import unittest
import torch
import apex
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
def init_model_and_optimizer():
model = torch.nn.Linear(1, 1, bias=False).cuda()
optimizer = torch.optim.SGD(model.parameters(), 1.0)
return model, optimizer
@unittest.skipUnless(torch.cuda.is_available(), "")
class TestDeprecatedWarning(unittest.TestCase):
def test_amp(self):
model, optimizer = init_model_and_optimizer()
with self.assertWarns(apex.DeprecatedFeatureWarning):
_ = apex.amp.initialize(model, optimizer)
def test_fp16_model(self):
model, _ = init_model_and_optimizer()
with self.assertWarns(apex.DeprecatedFeatureWarning):
_ = apex.fp16_utils.FP16Model(model)
def test_fp16_optimizer(self):
_, optimizer = init_model_and_optimizer()
with self.assertWarns(apex.DeprecatedFeatureWarning):
_ = apex.fp16_utils.FP16_Optimizer(optimizer)
def test_fp16_loss_scaler(self):
with self.assertWarns(apex.DeprecatedFeatureWarning):
apex.fp16_utils.LossScaler()
class TestParallel(NcclDistributedTestBase):
@property
def world_size(self):
return min(torch.cuda.device_count(), 2)
def test_distributed_data_parallel(self):
model, _ = init_model_and_optimizer()
with self.assertWarns(apex.DeprecatedFeatureWarning):
_ = apex.parallel.DistributedDataParallel(model)
def test_convert_syncbn_model(self):
model, _ = init_model_and_optimizer()
with self.assertWarns(apex.DeprecatedFeatureWarning):
_ = apex.parallel.convert_syncbn_model(model)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_deprecated/test_deprecated_warning.py |
"""Tests for c++ MLP"""
from itertools import product
from time import time
import torch
from torch import nn
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import onlyCUDA
from apex.mlp import MLP
batch_size = 1024
mlp_sizes = [480, 1024, 1024, 512, 256, 1]
num_iters = 10
# note(crcrpar): On Ampere, this test should be run without TF32 enabled.
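# A minimal sketch of one way to do that with PyTorch's standard TF32 switches
# (illustrative only; not executed by this test):
#     torch.backends.cuda.matmul.allow_tf32 = False
#     torch.backends.cudnn.allow_tf32 = False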
class TestMLP(common_utils.TestCase):
def test_creation(self):
MLP(mlp_sizes)
def test_numeric(self):
mlp = MLP(mlp_sizes).cuda()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
with torch.no_grad():
mlp.weights[i].copy_(linear.weight)
mlp.biases[i].copy_(linear.bias)
mlp_layers.append(linear)
mlp_layers.append(nn.ReLU())
ref_mlp = nn.Sequential(*mlp_layers).cuda()
test_input = (
torch.empty(batch_size, mlp_sizes[0], device="cuda")
.uniform_(-1.0, 1.0)
.requires_grad_()
)
ref_input = test_input.clone().detach().requires_grad_()
mlp_out = mlp(test_input)
ref_out = ref_mlp(ref_input)
self.assertEqual(mlp_out, ref_out)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.0).backward()
ref_out.mean().mul(10.0).backward()
self.assertEqual(test_input.grad, ref_input.grad)
self.assertEqual(mlp.biases[0].grad, ref_mlp[0].bias.grad)
def _test_mlp_impl(self, use_activation: str, bias: bool, enable_autocast: bool):
mlp = MLP(mlp_sizes, bias=bias, activation=use_activation).cuda()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=bias)
with torch.no_grad():
mlp.weights[i].copy_(linear.weight)
if bias:
mlp.biases[i].copy_(linear.bias)
mlp_layers.append(linear)
if use_activation == "relu":
mlp_layers.append(nn.ReLU())
if use_activation == "sigmoid":
mlp_layers.append(nn.Sigmoid())
ref_mlp = nn.Sequential(*mlp_layers).cuda()
test_input = (
torch.empty(batch_size, mlp_sizes[0], device="cuda")
.uniform_(-1.0, 1.0)
.requires_grad_()
)
ref_input = test_input.clone().detach().requires_grad_()
with torch.cuda.amp.autocast_mode.autocast(enabled=enable_autocast):
mlp_out = mlp(test_input)
mlp_loss = mlp_out.mean().mul(10.0)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean().mul(10.0)
mlp_loss.backward()
ref_loss.backward()
if enable_autocast:
self.assertEqual(mlp_out.dtype, torch.float16)
self.assertEqual(ref_out.dtype, torch.float16)
else:
self.assertEqual(mlp_out, ref_out)
self.assertEqual(test_input.grad, ref_input.grad)
self.assertEqual(mlp.weights[0].grad, ref_mlp[0].weight.grad)
@common_utils.parametrize(
"use_activation,bias",
list(product(("none", "relu", "sigmoid"), (True, False))),
)
def test_mlp(self, use_activation: str, bias: bool):
self._test_mlp_impl(use_activation, bias, enable_autocast=False)
@common_utils.parametrize(
"use_activation,bias",
list(product(("none", "relu", "sigmoid"), (True, False))),
)
def test_mlp_autocast_fp16(self, use_activation: str, bias: bool):
self._test_mlp_impl(use_activation, bias, enable_autocast=True)
def test_no_grad(self):
mlp = MLP(mlp_sizes).cuda()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
with torch.no_grad():
mlp.weights[i].copy_(linear.weight)
mlp.biases[i].copy_(linear.bias)
mlp_layers.append(linear)
mlp_layers.append(nn.ReLU(inplace=True))
ref_mlp = nn.Sequential(*mlp_layers).cuda()
test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1.0, 1.0)
ref_input = test_input.clone().detach()
mlp_out = mlp(test_input)
ref_out = ref_mlp(ref_input)
self.assertEqual(mlp_out, ref_out)
# Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
mlp_out.mean().mul(10.0).backward()
ref_out.mean().mul(10.0).backward()
self.assertEqual(mlp.weights[0].grad, ref_mlp[0].weight.grad)
def test_performance_half(self):
mlp = MLP(mlp_sizes).cuda().half()
mlp_layers = []
for i in range(mlp.num_layers):
linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
mlp.weights[i].data.copy_(linear.weight)
mlp.biases[i].data.copy_(linear.bias)
mlp_layers.append(linear)
mlp_layers.append(nn.ReLU(inplace=True))
ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
test_input = (
torch.empty(batch_size, mlp_sizes[0], device="cuda", dtype=torch.half)
.fill_(10.0)
.requires_grad_()
)
ref_input = (
torch.empty(batch_size, mlp_sizes[0], device="cuda", dtype=torch.half)
.fill_(10.0)
.requires_grad_()
)
# Warm up GPU
for _ in range(100):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
mlp_out = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_mlp(ref_input)
ref_loss = ref_out.mean()
ref_mlp.zero_grad()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
ref_time = (stop_time - start_time) * 1000.0 / num_iters
print(f"\nPytorch MLP time {ref_time:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
mlp_out = mlp(test_input)
test_loss = mlp_out.mean()
mlp.zero_grad()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
actual_time = (stop_time - start_time) * 1000.0 / num_iters
print(f"C++ MLP time {actual_time:.4f} ms")
torch.cuda.profiler.stop()
self.assertLessEqual(
actual_time,
ref_time,
msg=f"Custom extension took {actual_time:.4f} while PyTorch took {ref_time:.4f}",
)
instantiate_device_type_tests(TestMLP, globals(), only_for=("cuda",))
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L0/run_mlp/test_mlp.py |
import os
import logging
import itertools
from typing import Optional, Tuple, List
import unittest
import torch
from torch.testing._internal import common_utils
from torch.testing._internal import common_cuda
from torch.testing._internal import common_distributed
from apex._autocast_utils import _get_autocast_dtypes
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import utils as pp_utils
from apex.transformer.pipeline_parallel.schedules.common import (
FwdStepFunc,
build_model,
_get_params_for_weight_decay_optimization,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
forward_backward_no_pipelining,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
_forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
from apex.transformer.testing import commons as testing_utils
logging.getLogger("torch").setLevel(logging.WARNING)
logging.getLogger("apex").setLevel(logging.WARNING)
def _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size: Optional[int] = None
) -> Tuple[int, int, int]:
# TODO: revisit if we can fold this into the class for skip logic / avoid duplication
# of world size computation
world_size = torch.cuda.device_count()
tensor_model_parallel_world_size = 1
    data_parallel_size = 2 if (world_size >= 8 and world_size % 2 == 0) else 1
if pipeline_model_parallel_world_size is None:
pipeline_model_parallel_world_size = world_size // (tensor_model_parallel_world_size * data_parallel_size)
else:
data_parallel_size = world_size // (tensor_model_parallel_world_size * pipeline_model_parallel_world_size)
return tensor_model_parallel_world_size, data_parallel_size, pipeline_model_parallel_world_size
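# Worked example: with 8 visible GPUs and no explicit pipeline size, this returns
# tensor_model_parallel_world_size = 1, data_parallel_size = 2 (world_size >= 8 and even),
# and pipeline_model_parallel_world_size = 8 // (1 * 2) = 4.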
class UccPipelineParallelForwardBackwardProf(UccDistributedTestBase):
# The purpose of this class is to test and confirm asynchronous communication via profiling.
    # With that in mind, it is safe to skip all the numerical checks.
# For unit testing with numerical checks please refer to `tests/L0/run_transformer/test_pipeline_parallel_fwd_bwd.py`.
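    # An assumption, not prescribed by this file: the communication overlap can be inspected by
    # running this test under an external profiler such as NVIDIA Nsight Systems (`nsys profile`).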
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.GLOBAL_BATCH_SIZE = 1024
self.MICRO_BATCH_SIZE = 64
self.HIDDEN_SIZE = 256
self.NUM_FWD_BWD_ITERATIONS = 4
self.deallocate_options = (False,)
self.dtypes = (torch.float32,)
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 8)
def _forward_backward_test_impl(
self,
forward_only: bool,
fwd_bwd_func: FwdStepFunc,
pipeline_model_parallel_world_size: Optional[int],
virtual_pipeline_model_parallel_size: Optional[int],
async_comm: bool = False,
*,
default_backend: Optional[str] = None,
p2p_backend: Optional[str] = None,
) -> None:
if fwd_bwd_func == _forward_backward_pipelining_with_interleaving:
self.assertIsNotNone(virtual_pipeline_model_parallel_size)
self.assertGreater(virtual_pipeline_model_parallel_size, 1)
dtype_options = self.dtypes or [torch.float32, torch.double] + _get_autocast_dtypes()
for dtype, deallocate_pipeline_outputs in itertools.product(
dtype_options, self.deallocate_options,
):
grad_scaler = (
torch.cuda.amp.GradScaler(init_scale=4.0)
if dtype == torch.half
else None
)
(tensor_model_parallel_world_size,
data_parallel_size,
pipeline_model_parallel_world_size) = _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size)
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=tensor_model_parallel_world_size,
pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
default_backend=default_backend,
p2p_backend=p2p_backend,
)
pp_utils._reconfigure_microbatch_calculator(
rank=parallel_state.get_tensor_model_parallel_rank(),
rampup_batch_size=None,
global_batch_size=self.GLOBAL_BATCH_SIZE,
micro_batch_size=self.MICRO_BATCH_SIZE,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
global_batch_shape = (
self.GLOBAL_BATCH_SIZE
// parallel_state.get_data_parallel_world_size(),
self.HIDDEN_SIZE,
self.HIDDEN_SIZE,
)
batch = None
if parallel_state.is_pipeline_first_stage():
batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )
model = build_model(
testing_utils.model_provider_func,
                # Wrap the model with DDP only when data parallelism is actually used (data_parallel_size > 1).
wrap_with_ddp=data_parallel_size > 1,
virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
hidden_size=self.HIDDEN_SIZE,
)
offset = pipeline_model_parallel_world_size if virtual_pipeline_model_parallel_size is not None else 0
for idx, model_module in enumerate(model):
model_module = model_module.to(dtype)
_param_groups = _get_params_for_weight_decay_optimization(model)
optimizer = torch.optim.Adam(_param_groups, lr=1e-3)
pp_utils.update_num_microbatches(0)
for _ in range(self.NUM_FWD_BWD_ITERATIONS):
loss = fwd_bwd_func(
testing_utils.fwd_step_func,
batch,
model,
forward_only=forward_only,
# `tensor_shape` is the shape of micro batch.
tensor_shape=(
self.MICRO_BATCH_SIZE,
self.HIDDEN_SIZE,
self.HIDDEN_SIZE,
),
dtype=dtype,
async_comm=async_comm,
grad_scaler=grad_scaler,
deallocate_pipeline_output=deallocate_pipeline_outputs,
)
parallel_state.destroy_model_parallel()
def test_learning_no_pipelining(self):
self._forward_backward_test_impl(False, forward_backward_no_pipelining, 1, None)
def test_inference_no_pipelining(self):
self._forward_backward_test_impl(True, forward_backward_no_pipelining, 1, None)
def test_learning_pipelining_without_interleaving(self):
self._forward_backward_test_impl(
False, forward_backward_pipelining_without_interleaving, None, None
)
def test_inference_pipelining_without_interleaving(self):
self._forward_backward_test_impl(
True, forward_backward_pipelining_without_interleaving, None, None
)
def test_learning_async_pipelining_without_interleaving(self):
self._forward_backward_test_impl(
False, forward_backward_pipelining_without_interleaving, None, None, async_comm=True
)
def test_inference_async_pipelining_without_interleaving(self):
self._forward_backward_test_impl(
True, forward_backward_pipelining_without_interleaving, None, None, async_comm=True
)
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_learning_pipelining_with_interleaving(self):
self._forward_backward_test_impl(
False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2
)
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_inference_pipelining_with_interleaving(self):
self._forward_backward_test_impl(
True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2
)
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_learning_async_pipelining_with_interleaving(self):
self._forward_backward_test_impl(
False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True
)
@unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
def test_inference_async_pipelining_with_interleaving(self):
self._forward_backward_test_impl(
True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True
)
if __name__ == "__main__":
os.environ["UCC_TLS"] = "ucp,cuda"
common_distributed.TIMEOUT_DEFAULT = 500
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/tests/L1/transformer/pipeline_parallel_fwd_bwd_ucc_async.py |
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate. Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--prof', dest='prof', action='store_true',
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--has-ext', action='store_true')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--fused-adam', action='store_true')
parser.add_argument('--prints-to-process', type=int, default=10)
cudnn.benchmark = True
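# fast_collate assembles a uint8 NCHW batch directly from PIL images; float conversion and
# mean/std normalization are deferred to the GPU (see data_prefetcher below).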
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
best_prec1 = 0
args = parser.parse_args()
# Let multi_tensor_applier be the canary in the coalmine
# that verifies if the backend is what we think it is
assert multi_tensor_applier.available == args.has_ext
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
print("\nCUDNN VERSION: {}\n".format(torch.backends.cudnn.version()))
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
def main():
global best_prec1, args
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda()
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
if args.fused_adam:
optimizer = optimizers.FusedAdam(model.parameters())
else:
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
model, optimizer = amp.initialize(
model, optimizer,
# enabled=False,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# model = DDP(model)
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Optionally resume from a checkpoint
if args.resume:
# Use a local scope to avoid dangling references
def resume():
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
resume()
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
if(args.arch == "inception_v3"):
crop_size = 299
val_size = 320 # I chose this value arbitrarily, we can adjust.
else:
crop_size = 224
val_size = 256
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
# transforms.ToTensor(), Too slow
# normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(val_size),
transforms.CenterCrop(crop_size),
]))
train_sampler = None
val_sampler = None
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler,
collate_fn=fast_collate)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
if args.prof:
break
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
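# data_prefetcher overlaps the host-to-device copy of the next batch with computation on the
# current one using a dedicated CUDA stream, and performs float conversion plus mean/std
# normalization on the GPU.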
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
run_info_dict = {"Iteration" : [],
"Loss" : [],
"Speed" : []}
prefetcher = data_prefetcher(train_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
# No learning rate warmup for this test, to expose bitwise inaccuracies more quickly
# adjust_learning_rate(optimizer, epoch, i, len(train_loader))
if args.prof:
if i > 10:
break
# measure data loading time
data_time.update(time.time() - end)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# for param in model.parameters():
# print(param.data.double().sum().item(), param.grad.data.double().sum().item())
# torch.cuda.synchronize()
torch.cuda.nvtx.range_push("step")
optimizer.step()
torch.cuda.nvtx.range_pop()
torch.cuda.synchronize()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# If you decide to refactor this test, like examples/imagenet, to sample the loss every
# print_freq iterations, make sure to move this prefetching below the accuracy calculation.
input, target = prefetcher.next()
if i % args.print_freq == 0 and i > 1:
if args.local_rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
run_info_dict["Iteration"].append(i)
run_info_dict["Loss"].append(losses.val)
run_info_dict["Speed"].append(args.world_size * args.batch_size / batch_time.val)
if len(run_info_dict["Loss"]) == args.prints_to_process:
if args.local_rank == 0:
torch.save(run_info_dict,
str(args.has_ext) + "_" + str(args.opt_level) + "_" +
str(args.loss_scale) + "_" + str(args.keep_batchnorm_fp32) + "_" +
str(args.fused_adam))
quit()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
prefetcher = data_prefetcher(val_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
# compute output
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
input, target = prefetcher.next()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr*(0.1**factor)
"""Warmup"""
if epoch < 5:
lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)
# if(args.local_rank == 0):
# print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
| GeneSplice-main | GeneSplice/apex/tests/L1/common/main_amp.py |
import argparse
import torch
parser = argparse.ArgumentParser(description='Compare')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--fused-adam', action='store_true')
parser.add_argument('--use_baseline', action='store_true')
args = parser.parse_args()
base_file = str(args.opt_level) + "_" +\
str(args.loss_scale) + "_" +\
str(args.keep_batchnorm_fp32) + "_" +\
str(args.fused_adam)
file_e = "True_" + base_file
file_p = "False_" + base_file
if args.use_baseline:
file_b = "baselines/True_" + base_file
dict_e = torch.load(file_e)
dict_p = torch.load(file_p)
if args.use_baseline:
dict_b = torch.load(file_b)
torch.set_printoptions(precision=10)
print(file_e)
print(file_p)
if args.use_baseline:
print(file_b)
# ugly duplication here...
if not args.use_baseline:
for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])):
assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p)
loss_e = dict_e["Loss"][n]
loss_p = dict_p["Loss"][n]
assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p)
print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format(
i_e,
loss_e,
loss_p,
dict_e["Speed"][n],
dict_p["Speed"][n]))
else:
for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])):
assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p)
loss_e = dict_e["Loss"][n]
loss_p = dict_p["Loss"][n]
loss_b = dict_b["Loss"][n]
assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p)
assert loss_e == loss_b, "Iteration {}, loss_e = {}, loss_b = {}".format(i_e, loss_e, loss_b)
print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format(
i_e,
loss_b,
loss_e,
loss_p,
dict_b["Speed"][n],
dict_e["Speed"][n],
dict_p["Speed"][n]))
| GeneSplice-main | GeneSplice/apex/tests/L1/common/compare.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('../../apex/parallel/'))
import apex
# import multiproc
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
]
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Apex'
copyright = '2018'
author = 'Christian Sarofeen, Natalia Gimelshein, Michael Carilli, Raul Puri'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
# version = 'master (' + torch.__version__ + ' )'
version = '0.1'
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
}
# html_logo = '_static/img/nv-pytorch2.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_style_path = 'css/pytorch_theme.css'
html_context = {
'css_files': [
'https://fonts.googleapis.com/css?family=Lato',
'_static/css/pytorch_theme.css'
],
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'apex.tex', 'Apex Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Apex', 'Apex Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Apex', 'Apex Documentation',
author, 'Apex', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# type: (List, unicode, Tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
typename = typename.replace('int', 'python:int')
typename = typename.replace('long', 'python:long')
typename = typename.replace('float', 'python:float')
typename = typename.replace('type', 'python:type')
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
| GeneSplice-main | GeneSplice/apex/docs/source/conf.py |
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cifar10', help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake')
parser.add_argument('--dataroot', default='./', help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--classes', default='bedroom', help='comma separated list of classes for the lsun data set')
parser.add_argument('--opt_level', default='O1', help='amp opt_level, default="O1"')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = 2809
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if opt.dataset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=opt.dataroot,
transform=transforms.Compose([
transforms.Resize(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
elif opt.dataset == 'lsun':
classes = [ c + '_train' for c in opt.classes.split(',')]
dataset = dset.LSUN(root=opt.dataroot, classes=classes,
transform=transforms.Compose([
transforms.Resize(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Resize(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
elif opt.dataset == 'mnist':
dataset = dset.MNIST(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Resize(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
]))
nc=1
elif opt.dataset == 'fake':
dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
transform=transforms.ToTensor())
nc=3
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
device = torch.device("cuda:0")
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class Generator(nn.Module):
def __init__(self, ngpu):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
netG = Generator(ngpu).to(device)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
netD = Discriminator(ngpu).to(device)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCEWithLogitsLoss()
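# The Discriminator above emits raw logits (its last layer is a Conv2d with no Sigmoid),
# so BCEWithLogitsLoss is used rather than BCELoss.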
fixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)
real_label = 1
fake_label = 0
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
[netD, netG], [optimizerD, optimizerG] = amp.initialize(
[netD, netG], [optimizerD, optimizerG], opt_level=opt.opt_level, num_losses=3)
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu = data[0].to(device)
batch_size = real_cpu.size(0)
        label = torch.full((batch_size,), real_label, dtype=torch.float, device=device)  # float target for BCEWithLogitsLoss
output = netD(real_cpu)
errD_real = criterion(output, label)
with amp.scale_loss(errD_real, optimizerD, loss_id=0) as errD_real_scaled:
errD_real_scaled.backward()
D_x = output.mean().item()
# train with fake
noise = torch.randn(batch_size, nz, 1, 1, device=device)
fake = netG(noise)
label.fill_(fake_label)
output = netD(fake.detach())
errD_fake = criterion(output, label)
with amp.scale_loss(errD_fake, optimizerD, loss_id=1) as errD_fake_scaled:
errD_fake_scaled.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
output = netD(fake)
errG = criterion(output, label)
with amp.scale_loss(errG, optimizerG, loss_id=2) as errG_scaled:
errG_scaled.backward()
D_G_z2 = output.mean().item()
optimizerG.step()
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
if i % 100 == 0:
vutils.save_image(real_cpu,
'%s/real_samples.png' % opt.outf,
normalize=True)
fake = netG(fixed_noise)
vutils.save_image(fake.detach(),
'%s/amp_fake_samples_epoch_%03d.png' % (opt.outf, epoch),
normalize=True)
# do checkpointing
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
| GeneSplice-main | GeneSplice/apex/examples/dcgan/main_amp.py |
import torch
import argparse
import os
from apex import amp
# FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead)
from apex.parallel import DistributedDataParallel
parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatically by torch.distributed.launch.
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()
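# Example launch command (an illustration, not part of this script) for one node with 2 GPUs:
#   python -m torch.distributed.launch --nproc_per_node=2 distributed_data_parallel.py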
# FOR DISTRIBUTED: If we are running under torch.distributed.launch,
# the 'WORLD_SIZE' environment variable will also be set automatically.
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
# FOR DISTRIBUTED: Set the device according to local_rank.
torch.cuda.set_device(args.local_rank)
# FOR DISTRIBUTED: Initialize the backend. torch.distributed.launch will provide
# environment variables, and requires that you use init_method=`env://`.
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
torch.backends.cudnn.benchmark = True
N, D_in, D_out = 64, 1024, 16
# Each process receives its own batch of "fake input data" and "fake target data."
# The "training loop" in each process just uses this fake batch over and over.
# https://github.com/NVIDIA/apex/tree/master/examples/imagenet provides a more realistic
# example of distributed data sampling for both training and validation.
x = torch.randn(N, D_in, device='cuda')
y = torch.randn(N, D_out, device='cuda')
model = torch.nn.Linear(D_in, D_out).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
if args.distributed:
# FOR DISTRIBUTED: After amp.initialize, wrap the model with
# apex.parallel.DistributedDataParallel.
model = DistributedDataParallel(model)
# torch.nn.parallel.DistributedDataParallel is also fine, with some added args:
# model = torch.nn.parallel.DistributedDataParallel(model,
# device_ids=[args.local_rank],
# output_device=args.local_rank)
loss_fn = torch.nn.MSELoss()
for t in range(500):
optimizer.zero_grad()
y_pred = model(x)
loss = loss_fn(y_pred, y)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if args.local_rank == 0:
print("final loss = ", loss)
| GeneSplice-main | GeneSplice/apex/examples/simple/distributed/distributed_data_parallel.py |
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
def fast_collate(batch, memory_format):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
def parse():
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate. Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--prof', default=-1, type=int,
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true')
parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 0), type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--channels-last', type=bool, default=False)
args = parser.parse_args()
return args
def main():
global best_prec1, args
args = parse()
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
print("\nCUDNN VERSION: {}\n".format(torch.backends.cudnn.version()))
cudnn.benchmark = True
best_prec1 = 0
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
if args.channels_last:
memory_format = torch.channels_last
else:
memory_format = torch.contiguous_format
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda().to(memory_format=memory_format)
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Initialize Amp. Amp accepts either values or strings for the optional override arguments,
# for convenient interoperation with argparse.
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
# For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
# This must be done AFTER the call to amp.initialize. If model = DDP(model) is called
# before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter
# the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks.
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# model = DDP(model)
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Optionally resume from a checkpoint
if args.resume:
# Use a local scope to avoid dangling references
def resume():
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
global best_prec1
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
resume()
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
if(args.arch == "inception_v3"):
raise RuntimeError("Currently, inception_v3 is not supported by this example.")
# crop_size = 299
# val_size = 320 # I chose this value arbitrarily, we can adjust.
else:
crop_size = 224
val_size = 256
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
# transforms.ToTensor(), Too slow
# normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(val_size),
transforms.CenterCrop(crop_size),
]))
train_sampler = None
val_sampler = None
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
collate_fn = lambda b: fast_collate(b, memory_format)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=collate_fn)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler,
collate_fn=collate_fn)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
# if record_stream() doesn't work, another option is to make sure device inputs are created
# on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
# Need to make sure the memory allocated for next_* is not still in use by the main stream
# at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
if input is not None:
input.record_stream(torch.cuda.current_stream())
if target is not None:
target.record_stream(torch.cuda.current_stream())
self.preload()
return input, target
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
prefetcher = data_prefetcher(train_loader)
input, target = prefetcher.next()
i = 0
while input is not None:
i += 1
if args.prof >= 0 and i == args.prof:
print("Profiling begun at iteration {}".format(i))
torch.cuda.cudart().cudaProfilerStart()
if args.prof >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
adjust_learning_rate(optimizer, epoch, i, len(train_loader))
# compute output
if args.prof >= 0: torch.cuda.nvtx.range_push("forward")
output = model(input)
if args.prof >= 0: torch.cuda.nvtx.range_pop()
loss = criterion(output, target)
# compute gradient and do SGD step
optimizer.zero_grad()
if args.prof >= 0: torch.cuda.nvtx.range_push("backward")
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
# for param in model.parameters():
# print(param.data.double().sum().item(), param.grad.data.double().sum().item())
if args.prof >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
optimizer.step()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
if i%args.print_freq == 0:
# Every print_freq iterations, check the loss, accuracy, and speed.
# For best performance, it doesn't make sense to print these metrics every
# iteration, since they incur an allreduce and some host<->device syncs.
# Measure accuracy
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
# Average loss and accuracy across processes for logging
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
# to_python_float incurs a host<->device sync
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
torch.cuda.synchronize()
batch_time.update((time.time() - end)/args.print_freq)
end = time.time()
if args.local_rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader),
args.world_size*args.batch_size/batch_time.val,
args.world_size*args.batch_size/batch_time.avg,
batch_time=batch_time,
loss=losses, top1=top1, top5=top5))
if args.prof >= 0: torch.cuda.nvtx.range_push("prefetcher.next()")
input, target = prefetcher.next()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
# Pop range "Body of iteration {}".format(i)
if args.prof >= 0: torch.cuda.nvtx.range_pop()
if args.prof >= 0 and i == args.prof + 10:
print("Profiling ended at iteration {}".format(i))
torch.cuda.cudart().cudaProfilerStop()
quit()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
prefetcher = data_prefetcher(val_loader)
input, target = prefetcher.next()
i = 0
while input is not None:
i += 1
# compute output
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# TODO: Change timings to mirror train().
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
input, target = prefetcher.next()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr*(0.1**factor)
"""Warmup"""
if epoch < 5:
lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)
# if(args.local_rank == 0):
# print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
| GeneSplice-main | GeneSplice/apex/examples/imagenet/main_amp.py |
import logging
import math
from typing import Callable, Optional, Tuple
import torch
from torch.optim.optimizer import Optimizer
log = logging.getLogger(__name__)
class DecoupledLionW(Optimizer):
"""
DecoupledLionW is an optimizer designed to improve training performance and convergence for deep learning models.
It is an extension of the Lion optimizer, incorporating decoupled weight decay and a momentum-based update rule.
The optimizer utilizes the Adam-like update rule, where the weight decay is applied separately from the gradient update.
The update rule consists of three steps: weight decay, momentum update, and momentum decay.
Weight decay reduces the magnitude of the model's weights, preventing overfitting and improving generalization.
The momentum update is an interpolation between the current gradient and the previous momentum state, allowing for faster convergence and smoother optimization.
Momentum decay gradually reduces the momentum term over time, preventing it from becoming too large and destabilizing the optimization process.
The optimizer supports both single-node and multi-node distributed training, enabling efficient training on parallel computing environments.
It provides various metric functions to track the optimization process, such as L2 norm of moments, parameters, updates, and gradients, as well as cosine similarity between updates and gradients.
The optimizer allows reporting per-parameter metrics to analyze the behavior of individual model parameters during training.
"""
metric_functions = {
'l2_norm/moment': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(optim_state['exp_avg']),
'l2_norm/param': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.data),
'l2_norm/update': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(step_tensor),
'l2_norm/grad': lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.grad),
'cosine/update_grad': lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad': lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0),
}
def __init__(
self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
):
        if lr <= 0.:
            raise ValueError(f'Invalid LR: {lr}. LR must be > 0')
        if not all([0. <= beta <= 1. for beta in betas]):
            raise ValueError(f'Invalid beta values: {betas}. All betas must be between 0 and 1.')
if weight_decay >= 1e-3:
log.warning(f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledLionW` optimizer. Are you sure you want to do this? Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!')
defaults = {'lr': lr, 'betas': betas, 'weight_decay': weight_decay}
super().__init__(params, defaults)
for group in self.param_groups:
group['initial_lr'] = group['lr']
@staticmethod
def lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2) -> None:
        # (1) decoupled weight decay, scaled by the ratio of the current to the initial learning rate
        if wd != 0:
            decay_factor = (lr / initial_lr) if initial_lr else 1.0
            p.data.mul_(1 - decay_factor * wd)
        # (2) parameter update: sign of the interpolation between the momentum buffer and the current gradient
        update = exp_avg.lerp(grad, 1 - beta1).sign_()
        p.add_(update, alpha=-lr)
        # (3) momentum update: exponential moving average of the gradient
        exp_avg.lerp_(grad, 1 - beta2)
@torch.no_grad()
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: p.grad is not None and p.requires_grad, group['params']):
grad, lr, initial_lr, wd, beta1, beta2, state = p.grad, group['lr'], group['initial_lr'], group['weight_decay'], *group['betas'], self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
exp_avg = state['exp_avg']
self.lionw(p, grad, exp_avg, lr, initial_lr, wd, beta1, beta2)
return loss
def pre_reduce_metrics(self, optimizer_metrics):
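        # L2 norms are squared here so that per-rank values can be summed across ranks and
        # square-rooted afterwards; cosine entries are multiplied back by the two norms so they
        # become dot products that can likewise be summed and renormalized after reduction.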
metrics = optimizer_metrics.keys()
metrics = sorted(metrics, key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str, optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, _ = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
step_tensor = param_optim_state['exp_avg'].clone().lerp_(param.grad, 1 - beta1).sign_().mul_(lr)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[metric](param, param_optim_state, step_tensor)
return optimizer_metrics
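# Minimal usage sketch (illustrative only, not part of the original file); the toy model,
# data, and hyperparameters below are arbitrary choices made for the example.
if __name__ == "__main__":
    import torch.nn as nn

    toy_model = nn.Linear(16, 1)
    optimizer = DecoupledLionW(toy_model.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=1e-5)
    for _ in range(5):
        batch = torch.randn(8, 16)
        loss = toy_model(batch).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()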
| DecoupledLionW-main | lion.py |
from setuptools import setup, find_packages
setup(
name = 'decoupledLionW',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Lion Optimizer - Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/DecoupledLionW',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers'
],
install_requires=[
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | DecoupledLionW-main | setup.py |
OpenBioMed-main | open_biomed/__init__.py |
|
OpenBioMed-main | open_biomed/tasks/__init__.py |
|
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
import math
from tqdm import tqdm
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from utils import EarlyStopping, AverageMeter, MolCollator, ToDevice, recall_at_k
from utils.optimizers import BertAdam
from datasets.mtr_dataset import SUPPORTED_MTR_DATASETS
from models.multimodal import KVPLM, MolBERT, BioMedGPTCLIP, MoMu, MolFM, DrugFM
from models.task_model.mtr_model import MTRModel
SUPPORTED_MTR_MODEL = {
"scibert": MolBERT,
"kv-plm": KVPLM,
"kv-plm*": KVPLM,
"momu": MoMu,
"molfm": MolFM,
"drugfm": DrugFM,
"biomedgpt": BioMedGPTCLIP,
"combined": MTRModel
}
def similarity(logits1, logits2):
if len(logits1.shape) >= 2:
sim = logits1 @ logits2.transpose(0, 1)
sim, _ = torch.max(sim, dim=0)
else:
sim = torch.cosine_similarity(logits1, logits2)
return sim
def contrastive_loss(logits_structure, logits_text, margin, device):
if len(logits_structure.shape) <= 2:
scores = torch.cosine_similarity(
logits_structure.unsqueeze(1).expand(logits_structure.shape[0], logits_structure.shape[0], logits_structure.shape[1]),
logits_text.unsqueeze(0).expand(logits_text.shape[0], logits_text.shape[0], logits_text.shape[1]),
dim=-1
)
else:
scores = torch.matmul(logits_structure.unsqueeze(1), logits_text.unsqueeze(-1)).squeeze()
scores, _ = scores.max(dim=-1)
#print(scores)
    # scores[i][j] is the similarity between structure i and text j; the diagonal holds the positive pairs
    diagonal = scores.diag().view(logits_text.size(0), 1)
    d1 = diagonal.expand_as(scores)
    d2 = diagonal.t().expand_as(scores)
    # hinge costs for structure-to-text and text-to-structure retrieval
    cost_s2t = (margin + scores - d1).clamp(min=0)
    cost_t2s = (margin + scores - d2).clamp(min=0)
# clear diagonals
mask = torch.eye(scores.size(0)) > .5
I = Variable(mask)
if torch.cuda.is_available():
I = I.to(device)
cost_s2t = cost_s2t.masked_fill_(I, 0)
cost_t2s = cost_t2s.masked_fill_(I, 0)
# keep the maximum violating negative for each query
#if self.max_violation:
cost_s2t = cost_s2t.max(1)[0]
cost_t2s = cost_t2s.max(0)[0]
return cost_s2t.sum() + cost_t2s.sum()
def train_mtr(train_dataset, val_dataset, model, collator, args):
train_loader = DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collator)
loss_fn = contrastive_loss
params = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{
'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01
},{
'params': [p for n, p in params if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
#optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.lr)
optimizer = BertAdam(
optimizer_grouped_parameters,
weight_decay=0,
lr=args.lr,
warmup=args.warmup,
t_total=len(train_loader) * args.epochs,
)
stopper = EarlyStopping(mode="higher", patience=args.patience, filename=args.output_path)
running_loss = AverageMeter()
for epoch in range(args.epochs):
logger.info("========Epoch %d========" % (epoch + 1))
logger.info("Training...")
model.train()
running_loss.reset()
step = 0
for mol in tqdm(train_loader):
mol = ToDevice(mol, args.device)
mol_rep = model.encode_mol(mol["structure"])
if hasattr(model, "structure_proj_head"):
mol_rep = model.structure_proj_head(mol_rep)
text_rep = model.encode_text(mol["text"])
loss = loss_fn(mol_rep, text_rep, margin=args.margin, device=args.device)
if hasattr(model, "calculate_matching_loss"):
matching_loss = model.calculate_matching_loss(mol["structure"], mol["text"])
loss += matching_loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
running_loss.update(loss.detach().cpu().item())
step += 1
if step % args.log_every == 0:
logger.info("Steps=%d Training Loss=%.4lf" % (step, running_loss.get_average()))
running_loss.reset()
val_metrics = val_mtr(val_dataset, model, collator, False, args)
logger.info(", ".join(["val %s: %.4lf" % (k, val_metrics[k]) for k in val_metrics]))
if stopper.step((val_metrics["mrr_d2t"] + val_metrics["mrr_t2d"]), model):
break
model.load_state_dict(torch.load(args.output_path)["model_state_dict"])
return model
def rerank(dataset, model, index_structure, index_text, score, alpha, collator, device):
mini_batch = []
for i in index_structure:
for j in index_text:
mini_batch.append({
"structure": dataset[i]["structure"],
"text": dataset[j]["text"],
})
mini_batch = ToDevice(collator(mini_batch), device)
#print(index_structure, index_text, score, model.predict_similarity_score(mini_batch).squeeze())
score = score.to(device) * alpha + model.predict_similarity_score(mini_batch).squeeze() * (1 - alpha)
_, new_idx = torch.sort(score, descending=True)
if len(index_structure) > 1:
return torch.LongTensor([index_structure[i] for i in new_idx.detach().cpu().tolist()])
else:
return torch.LongTensor([index_text[i] for i in new_idx.detach().cpu().tolist()])
def val_mtr(val_dataset, model, collator, apply_rerank, args):
val_loader = DataLoader(val_dataset, batch_size=args.val_batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=collator)
model.eval()
mol_rep_total, text_rep_total = [], []
n_samples = 0
with torch.no_grad():
for mol in tqdm(val_loader):
mol = ToDevice(mol, args.device)
mol_rep = model.encode_mol(mol["structure"])
if hasattr(model, "structure_proj_head"):
mol_rep = model.structure_proj_head(mol_rep)
text_rep = model.encode_text(mol["text"])
mol_rep_total.append(mol_rep)
text_rep_total.append(text_rep)
n_samples += mol_rep.shape[0]
mol_rep = torch.cat(mol_rep_total, dim=0)
text_rep = torch.cat(text_rep_total, dim=0)
score = torch.zeros(n_samples, n_samples)
mrr_m2t, mrr_t2m = 0, 0
rec_m2t, rec_t2m = [0, 0, 0], [0, 0, 0]
logger.info("Calculating cosine similarity...")
for i in tqdm(range(n_samples)):
score[i] = similarity(mol_rep[i], text_rep)
if hasattr(model, "predict_similarity_score") and apply_rerank:
logger.info("Reranking...")
for i in tqdm(range(n_samples)):
_, idx = torch.sort(score[i, :], descending=True)
idx = idx.detach().cpu()
if hasattr(model, "predict_similarity_score") and apply_rerank:
idx = torch.cat((
rerank(val_dataset, model, [i], idx[:args.rerank_num].tolist(), score[i, idx[:args.rerank_num]], args.alpha_m2t, collator, args.device),
idx[args.rerank_num:]
), dim=0)
for j, k in enumerate([1, 5, 10]):
rec_m2t[j] += recall_at_k(idx, i, k)
mrr_m2t += 1.0 / ((idx == i).nonzero(as_tuple=True)[0].item() + 1)
_, idx = torch.sort(score[:, i], descending=True)
idx = idx.detach().cpu()
if hasattr(model, "predict_similarity_score") and apply_rerank:
idx = torch.cat((
rerank(val_dataset, model, idx[:args.rerank_num].tolist(), [i], score[idx[:args.rerank_num], i], args.alpha_t2m, collator, args.device),
idx[args.rerank_num:]
), dim=0)
for j, k in enumerate([1, 5, 10]):
rec_t2m[j] += recall_at_k(idx, i, k)
mrr_t2m += 1.0 / ((idx == i).nonzero(as_tuple=True)[0].item() + 1)
result = {
"mrr_d2t": mrr_m2t / n_samples,
"mrr_t2d": mrr_t2m / n_samples,
}
for idx, k in enumerate([1, 5, 10]):
result["rec@%d_d2t" % k] = rec_m2t[idx] / n_samples
result["rec@%d_t2d" % k] = rec_t2m[idx] / n_samples
return result
def main(args, config):
dataset = SUPPORTED_MTR_DATASETS[args.dataset](args.dataset_path, config["data"], args.dataset_mode, args.filter, args.filter_path)
train_dataset = dataset.index_select(dataset.train_index)
val_dataset = dataset.index_select(dataset.val_index)
test_dataset = dataset.index_select(dataset.test_index)
val_dataset.set_test()
test_dataset.set_test()
collator = MolCollator(config["data"]["mol"])
model = SUPPORTED_MTR_MODEL[config["model"]](config["network"])
if args.init_checkpoint != "None":
ckpt = torch.load(args.init_checkpoint, map_location="cpu")
if args.param_key != "None":
ckpt = ckpt[args.param_key]
to_add = []
to_remove = []
for name in ckpt:
if "graph_" in name:
key_orig = name.replace("graph_", "structure_")
to_add.append((key_orig, ckpt[name]))
to_remove.append(name)
for elem in to_add:
ckpt[elem[0]] = elem[1]
for key in to_remove:
del ckpt[key]
model.load_state_dict(ckpt)
model = model.to(args.device)
if args.mode == "zero_shot":
model.eval()
result = val_mtr(test_dataset, model, collator, args.rerank, args)
print(result)
elif args.mode == "train":
train_mtr(train_dataset, val_dataset, model, collator, args)
result = val_mtr(test_dataset, model, collator, args.rerank, args)
print(result)
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--mode", type=str, default="zero_shot")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='PCdes')
parser.add_argument("--dataset_path", type=str, default='../datasets/mtr/PCdes/')
parser.add_argument("--dataset_mode", type=str, default="paragraph")
parser.add_argument("--filter", action="store_true")
parser.add_argument("--filter_path", type=str, default="")
parser.add_argument("--init_checkpoint", type=str, default="None")
parser.add_argument("--output_path", type=str, default="../ckpts/finetune_ckpts/finetune.pth")
parser.add_argument("--param_key", type=str, default="None")
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--weight_decay", type=float, default=0)
parser.add_argument("--lr", type=float, default=5e-5)
parser.add_argument("--warmup", type=float, default=0.03)
parser.add_argument("--train_batch_size", type=int, default=32)
parser.add_argument("--val_batch_size", type=int, default=64)
parser.add_argument("--epochs", type=int, default=30)
parser.add_argument("--patience", type=int, default=10)
parser.add_argument("--log_every", type=int, default=50)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--margin", type=float, default=0.2)
rerank_group = parser.add_mutually_exclusive_group(required=False)
rerank_group.add_argument("--rerank", action="store_true")
rerank_group.add_argument("--no_rerank", action="store_false")
parser.set_defaults(rerank=False)
parser.add_argument("--rerank_num", type=int, default=32)
parser.add_argument("--alpha_m2t", type=float, default=0.8)
parser.add_argument("--alpha_t2m", type=float, default=0.8)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
config = json.load(open(args.config_path, "r"))
if args.dataset_mode == "sentence":
config["data"]["mol"]["featurizer"]["text"]["name"] = "TransformerSentenceTokenizer"
config["data"]["mol"]["featurizer"]["text"]["min_sentence_length"] = 5
main(args, config) | OpenBioMed-main | open_biomed/tasks/multi_modal_task/mtr.py |
import logging
logger = logging.getLogger(__name__)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
from rdkit import Chem, RDLogger, DataStructs
from rdkit.Chem import AllChem, MACCSkeys
RDLogger.DisableLog('rdApp.*')
from nltk.translate.bleu_score import corpus_bleu
from Levenshtein import distance as lev
from datasets.molcap_dataset import SUPPORTED_MOLCAP_DATASET
from models.multimodal.text2mol import Text2MolMLP
from models.task_model.text2smi_model import Text2SMILESModel
from utils import AverageMeter, ToDevice, MolCollator
def train_text2smi(train_loader, val_loader, test_loader, test_dataset, model, args, device):
requires_grad = []
for k, v in model.named_parameters():
if v.requires_grad:
requires_grad.append(k)
logger.debug("parameters requires grad: %s" % (" ".join(requires_grad)))
optimizer = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], lr=args.lr, weight_decay=args.weight_decay)
running_loss = AverageMeter()
step = 0
for epoch in range(args.epochs):
logger.info("========Epoch %d========" % (epoch + 1))
logger.info("Training...")
#model.train()
for mol in train_loader:
mol = ToDevice(mol, device)
loss = model(mol)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss.update(loss.detach().cpu().item())
step += 1
if step % args.logging_steps == 0:
logger.info("Steps=%d Training Loss=%.4lf" % (step, running_loss.get_average()))
running_loss.reset()
val_text2smi(val_loader, model, device)
if (epoch + 1) % 10 == 0:
torch.save({'model_state_dict': model.state_dict()}, os.path.join(args.output_path, "checkpoint_" + str(epoch) + ".pth"))
print(test_text2smi(test_dataset, test_loader, model, args, device))
return model
def val_text2smi(val_loader, model, device):
model.eval()
val_loss = 0
logger.info("Validating...")
with torch.no_grad():
for mol in val_loader:
mol = ToDevice(mol, device)
loss = model(mol)
val_loss += loss.detach().cpu().item()
logger.info("validation loss %.4lf" % (val_loss / len(val_loader)))
return val_loss / len(val_loader)
def test_text2smi(test_dataset, test_loader, model, args, device):
model.eval()
outputs = []
gts = test_dataset.smiles
logger.info("Testing...")
for i, mol in enumerate(tqdm(test_loader)):
mol = ToDevice(mol, device)
output = model.decode(mol, num_beams=5, max_length=512)
outputs += output
if i <= 3:
            # use j >= 1 so that outputs[-j] and gts[len(outputs) - j] refer to the same sample
            for j in range(1, 6):
logger.info("Generated: %s" % outputs[-j])
logger.info("Ground truth: %s" % gts[len(outputs) - j])
logger.info("------------------------------------------------------")
N = len(outputs)
output_tokens = []
gt_tokens = []
levs = []
maccs_sim, rdk_sim, morgan_sim = [], [], []
n_bad_mols = 0
n_exact = 0
with open(args.smi_save_path, "w") as f:
f.write("text\tground truth\toutput\n")
for i in range(N):
output_tokens.append([c for c in outputs[i]])
gt_tokens.append([[c for c in gts[i]]])
try:
mol_output = Chem.MolFromSmiles(outputs[i])
mol_gt = Chem.MolFromSmiles(gts[i])
if Chem.MolToInchi(mol_output) == Chem.MolToInchi(mol_gt):
n_exact += 1
maccs_sim.append(DataStructs.FingerprintSimilarity(MACCSkeys.GenMACCSKeys(mol_output), MACCSkeys.GenMACCSKeys(mol_gt), metric=DataStructs.TanimotoSimilarity))
rdk_sim.append(DataStructs.FingerprintSimilarity(Chem.RDKFingerprint(mol_output), Chem.RDKFingerprint(mol_gt), metric=DataStructs.TanimotoSimilarity))
morgan_sim.append(DataStructs.TanimotoSimilarity(AllChem.GetMorganFingerprint(mol_output, 2), AllChem.GetMorganFingerprint(mol_gt, 2)))
except:
n_bad_mols += 1
levs.append(lev(outputs[i], gts[i]))
f.write("%s\t%s\t%s\n" % (test_dataset.texts[i], gts[i], outputs[i]))
bleu = corpus_bleu(gt_tokens, output_tokens)
return {
"BLEU": bleu,
"Levenshtein": np.mean(levs),
"Valid": 1 - n_bad_mols * 1.0 / N,
"Exact": n_exact * 1.0 / N,
"MACCS FTS": np.mean(maccs_sim),
"RDKit FTS": np.mean(rdk_sim),
"Morgan FTS": np.mean(morgan_sim),
}
def test_text2mol(args):
text2mol = Text2MolMLP(
ninp=768,
nhid=600,
nout=300,
model_name_or_path=args.text2mol_bert_path,
cid2smiles_path=None,
cid2vec_path=None,
mol2vec_output_path=os.path.join(args.text2mol_data_path, "tmp.csv")
)
text2mol.load_state_dict(torch.load(args.text2mol_ckpt_path))
device = torch.device(args.device)
text2mol.to(device)
logger.info("Calculating Text2Mol Metric...")
text2mol_scores = []
bad_smiles = 0
with open(args.smi_save_path, "r") as f:
for i, line in enumerate(f.readlines()):
if i == 0:
continue
line = line.rstrip("\n").split("\t")
try:
smi = Chem.MolToSmiles(Chem.MolFromSmiles(line[2]))
if smi in text2mol.smiles2vec:
text2mol_scores.append(text2mol(smi, line[0], device).detach().cpu().item())
except:
bad_smiles += 1
logger.info("Bad SMILES: %d" % (bad_smiles))
return np.mean(text2mol_scores)
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='chebi-20')
parser.add_argument("--dataset_path", type=str, default='../datasets/molcap/chebi-20')
parser.add_argument("--output_path", type=str, default="../ckpts/finetune_ckpts/text2smi/")
parser.add_argument("--smi_save_path", type=str, default="../assets/outputs.txt")
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--patience", type=int, default=3)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--weight_decay", type=float, default=0)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--logging_steps", type=int, default=300)
parser.add_argument("--text2mol_bert_path", type=str, default="")
parser.add_argument("--text2mol_data_path", type=str, default="")
parser.add_argument("--text2mol_ckpt_path", type=str, default="")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
config = json.load(open(args.config_path))
device = torch.device(args.device)
if args.mode == "test_text2mol":
print("Text2Mol:", test_text2mol(args))
exit(0)
# load dataset
train_dataset = SUPPORTED_MOLCAP_DATASET[args.dataset](args.dataset_path, config["data"]["mol"], split="train")
val_dataset = SUPPORTED_MOLCAP_DATASET[args.dataset](args.dataset_path, config["data"]["mol"], split="validation")
test_dataset = SUPPORTED_MOLCAP_DATASET[args.dataset](args.dataset_path, config["data"]["mol"], split="test")
collator = MolCollator(config["data"]["mol"])
train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=True, collate_fn=collator, num_workers=args.num_workers)
val_dataloader = DataLoader(val_dataset, args.batch_size, shuffle=False, collate_fn=collator, num_workers=args.num_workers)
test_dataloader = DataLoader(test_dataset, args.batch_size, shuffle=False, collate_fn=collator, num_workers=args.num_workers)
# load model
model = Text2SMILESModel(config["network"])
model = model.to(device)
if args.mode == "train":
train_text2smi(train_dataloader, val_dataloader, test_dataloader, test_dataset, model, args, device)
elif args.mode == "test":
if os.path.exists(args.output_path):
state_dict = torch.load(args.output_path, map_location=device)["model_state_dict"]
model.load_state_dict(state_dict)
results = test_text2smi(test_dataset, test_dataloader, model, args, device)
print(results)
elif args.mode == "traintest":
train_text2smi(train_dataloader, val_dataloader, test_dataloader, test_dataset, model, args, device)
results = test_text2smi(test_dataset, test_dataloader, model, args, device)
print(results) | OpenBioMed-main | open_biomed/tasks/multi_modal_task/text2smigen.py |
import logging
logger = logging.getLogger(__name__)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizerFast
from datasets.molqa_dataset import SUPPORTED_MOLQA_DATASET
from models.task_model.molqa_model import SUPPORTED_MOLQA_MODELS
from utils import AverageMeter, ToDevice, MolQACollator
def normalize_text(s, rm_punc=True):
"""Removing articles and punctuation, and standardizing whitespace are all typical text processing steps."""
import string, re
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
if rm_punc:
s = remove_punc(lower(s))
else:
s = lower(s)
return white_space_fix(remove_articles(s))
def train_molqa(train_loader, test_loader, test_dataset, model, args, device):
optimizer_grouped_parameters = [p for n, p in list(model.named_parameters()) if not "mol_encoder" in n and not "mol_proj" in n]
optimizer = torch.optim.Adam([p for p in model.parameters()], lr=args.lr, weight_decay=args.weight_decay)
#optimizer1 = torch.optim.Adam(optimizer_grouped_parameters, lr=args.lr, weight_decay=args.weight_decay)
#optimizer2 = torch.optim.Adam([p for p in model.mol_encoder.parameters()] + [p for p in model.mol_proj.parameters()], lr=args.lr*10, weight_decay=args.weight_decay)
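    # NOTE: with the single Adam optimizer above, optimizer_grouped_parameters is only used by
    # the commented-out two-optimizer variant.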
running_loss = AverageMeter()
step = 0
for epoch in range(args.epochs):
logger.info("========Epoch %d========" % (epoch + 1))
logger.info("Training...")
model.train()
for mol, question, answer in train_loader:
mol = ToDevice(mol, device)
question = ToDevice(question, device)
answer = ToDevice(answer, device)
loss = model(mol, question, answer)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss.update(loss.detach().cpu().item())
step += 1
if step % args.logging_steps == 0:
logger.info("Steps=%d Training Loss=%.4lf" % (step, running_loss.get_average()))
running_loss.reset()
if (epoch + 1) % 10 == 0:
torch.save({'model_state_dict': model.state_dict()}, os.path.join(args.output_path, "checkpoint_" + str(epoch) + ".pth"))
print(test_molqa(test_loader, model, args, device))
return model
def test_molqa(test_loader, model, args, device):
model.eval()
exact, f1 = [], []
logger.info("Testing...")
with torch.no_grad():
for i, (mol, question, answer) in enumerate(tqdm(test_loader)):
mol = ToDevice(mol, device)
question = ToDevice(question, device)
output = model.generate(mol, question, num_beams=5, max_length=512)
if i <= 3:
logger.info("Outputs: %s" % output)
logger.info("Ground truth: %s" % answer)
logger.info("------------------------------------------------------")
for j in range(len(output)):
y_true = normalize_text(answer[j], rm_punc=True)
y_pred = normalize_text(output[j], rm_punc=True)
exact.append(int(y_true == y_pred))
y_true = y_true.split()
y_pred = y_pred.split()
common_tokens = set(y_true) & set(y_pred)
if len(common_tokens) == 0:
f1.append(0)
else:
precision = len(common_tokens) / len(y_pred)
recall = len(common_tokens) / len(y_true)
f1.append(2 * precision * recall / (precision + recall))
return {
"exact": np.mean(exact),
"f1": np.mean(f1),
}
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='chembl-qa')
parser.add_argument("--dataset_path", type=str, default='./datasets/molqa/ChEMBL')
parser.add_argument("--init_checkpoint", type=str, default='None')
parser.add_argument("--param_key", type=str, default="None")
parser.add_argument("--output_path", type=str, default='./ckpts/finetune_ckpts/molqa/molt5')
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--patience", type=int, default=3)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--weight_decay", type=float, default=0.0)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--logging_steps", type=int, default=300)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
config = json.load(open(args.config_path))
device = torch.device(args.device)
# load dataset
train_dataset = SUPPORTED_MOLQA_DATASET[args.dataset](args.dataset_path, config["data"], split="train")
test_dataset = SUPPORTED_MOLQA_DATASET[args.dataset](args.dataset_path, config["data"], split="test")
train_collator = MolQACollator(config["data"], collate_outputs=True)
test_collator = MolQACollator(config["data"], collate_outputs=False)
train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=True, collate_fn=train_collator, num_workers=args.num_workers)
test_dataloader = DataLoader(test_dataset, args.batch_size, shuffle=False, collate_fn=test_collator, num_workers=args.num_workers)
# load model
model = SUPPORTED_MOLQA_MODELS[config["network"]["type"]](config["network"])
if args.init_checkpoint != "None":
state_dict = torch.load(args.init_checkpoint)
if args.param_key != "None":
state_dict = state_dict[args.param_key]
model.load_state_dict(state_dict)
model = model.to(device)
if args.mode == "train":
train_molqa(train_dataloader, test_dataloader, test_dataset, model, args, device)
elif args.mode == "test":
if os.path.exists(args.output_path):
state_dict = torch.load(args.output_path, map_location=device)["model_state_dict"]
model.load_state_dict(state_dict)
results = test_molqa(test_dataloader, model, args, device)
print(results)
elif args.mode == "traintest":
train_molqa(train_dataloader, test_dataloader, test_dataset, model, args, device)
results = test_molqa(test_dataloader, model, args, device)
print(results)
| OpenBioMed-main | open_biomed/tasks/multi_modal_task/molqa.py |
"""
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
import math
import numpy as np
import pickle
from rdkit import Chem
from rdkit.Chem import Draw, Descriptors
import torch
import torch.nn.functional as F
from datasets.text2mol_dataset import SUPPORTED_TEXT2MOLGEN_DATASET
from feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER, MolGGNNFeaturizer
from models.multimodal import MoMu, MolFM
from models.molecule.moflow import MoFlow, construct_mol, check_validity
from utils import AverageMeter, ToDevice
atomic_num_list = [6, 7, 8, 9, 15, 16, 17, 35, 53, 0]
SUPPORTED_DRUG_ENCODER = {
"MoMu": MoMu,
"MolFM": MolFM
}
SUPPORTED_DRUG_DECODER = {
"MoFlow": MoFlow,
"MolT5": None
}
def generate_mol(z, decoder, featurizer, device):
adj, x = decoder.decode(z)
mol = construct_mol(x.detach().cpu().numpy(), adj.detach().cpu().numpy(), atomic_num_list)
if featurizer is not None:
mol = featurizer(mol).to(device)
atoms = torch.argmax(x, dim=1)
x = x[atoms != len(atomic_num_list) - 1]
x = x.softmax(dim=1)
return mol, x
def optimize_z(z, anchor, text_feat, encoder, decoder, structure_featurizer, args, device):
optimizer = torch.optim.Adam([z.requires_grad_()], lr=0.01)
schedular = torch.optim.lr_scheduler.StepLR(optimizer, args.optimize_steps, gamma=0.1)
running_loss_text = AverageMeter()
running_loss_anchor = AverageMeter()
for i in range(args.optimize_steps):
mol, x = generate_mol(z, decoder, structure_featurizer, device)
if (x.shape[0] < 1) or (x.shape[1] < 5) or (mol.x.shape[0] < 1) or (mol.x.shape[1] < 2):
logger.warn("x shape: ", x.shape, "mol.x shape: ", mol.x.shape[0], "Too small, exited")
break
mol_feat = encoder.encode_structure_with_prob(mol, x, atomic_num_list, device)
mol_feat = F.normalize(mol_feat, dim=1)
loss = - mol_feat @ text_feat.t() / 0.1
running_loss_text.update(loss.detach().cpu().item())
if torch.isnan(loss):
logger.warn("loss is nan, exited")
break
optimizer.zero_grad()
loss.backward()
optimizer.step()
schedular.step()
if i % args.logging_steps == 0:
logger.info("Steps=%d Loss1=%.4lf Loss2=%.4lf" % (i, running_loss_text.get_average(), running_loss_anchor.get_average()))
running_loss_text.reset()
running_loss_anchor.reset()
return z
def stop_gradient(model):
for key, params in model.named_parameters():
params.requires_grad = False
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--technique", type=str, default="z_optimize")
parser.add_argument("--encoder_config_path", type=str, default="")
parser.add_argument("--init_encoder_checkpoint", type=str, default="None")
parser.add_argument("--encoder_param_key", type=str, default="None")
parser.add_argument("--decoder_config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='AttrPrompt')
parser.add_argument("--dataset_path", type=str, default='../datasets/molgen/attr_prompt')
def add_z_optimize_arguments(parser):
parser.add_argument("--rounds_per_text", type=int, default=60)
parser.add_argument("--optimize_steps", type=int, default=500)
parser.add_argument("--logging_steps", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--lambd", type=float, default=1.0)
parser.add_argument("--evaluate", action="store_true")
parser.add_argument("--save_fig", action="store_true")
parser.add_argument("--save_path", type=str, default="../tmps/molgen/")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
add_arguments(parser)
args, _ = parser.parse_known_args()
if args.technique == "z_optimize":
add_z_optimize_arguments(parser)
args = parser.parse_args()
encoder_config = json.load(open(args.encoder_config_path, "r"))
decoder_config = json.load(open(args.decoder_config_path, "r"))
# load dataset
dataset = SUPPORTED_TEXT2MOLGEN_DATASET[args.dataset](args.dataset_path, encoder_config["data"]["drug"])
# load featurizer
feat_config = encoder_config["data"]["drug"]["featurizer"]["structure"]
structure_featurizer = SUPPORTED_MOL_FEATURIZER[feat_config["name"]](feat_config)
# load encoder
device = torch.device(args.device)
encoder = SUPPORTED_DRUG_ENCODER[encoder_config["model"]](encoder_config["network"])
if args.init_encoder_checkpoint != "None":
ckpt = torch.load(args.init_encoder_checkpoint, map_location="cpu")
if args.encoder_param_key != "None":
ckpt = ckpt[args.encoder_param_key]
encoder.load_state_dict(ckpt)
# load decoder
decoder = SUPPORTED_DRUG_DECODER[decoder_config["model"]](decoder_config["network"])
if args.technique == "z_optimize":
anchor = Chem.MolFromSmiles("COC(=O)C1=C(C)N(C)C(C)=C(C(=O)OC)C1c1ccc(Cl)cc1")
print("anchor: COC(=O)C1=C(C)N(C)C(C)=C(C(=O)OC)C1c1ccc(Cl)cc1 log_p:", Descriptors.MolLogP(anchor), "TPSA:", Descriptors.TPSA(anchor))
img = Draw.MolsToGridImage([anchor], legends=['COC(=O)C1=C(C)N(C)C(C)=C(C(=O)OC)C1c1ccc(Cl)cc1'],
molsPerRow=1, subImgSize=(300, 300))
img.save(os.path.join(args.save_path, 'anchor.png'))
#anchor = None
featurizer = MolGGNNFeaturizer({"max_n_atoms": 38, "atomic_num_list": atomic_num_list})
x, adj, normalized_adj = featurizer(anchor)
z_dim = decoder.a_size + decoder.b_size
mean = torch.zeros(1, z_dim)
std = torch.ones(1, z_dim) * math.sqrt(math.exp(decoder.ln_var.item())) * args.temperature
encoder.eval()
decoder.eval()
stop_gradient(encoder)
stop_gradient(decoder)
encoder.to(device)
decoder.to(device)
with torch.no_grad():
x = x.unsqueeze(0).to(device)
adj = adj.unsqueeze(0).to(device)
normalized_adj = normalized_adj.unsqueeze(0).to(device)
z_init, _ = decoder(adj, x, normalized_adj)
z_init = torch.cat((z_init[0].view(1, -1), z_init[1].view(1, -1)), dim=1)
mean = z_init.detach().cpu()
mols = {}
for i, text in enumerate(dataset.texts):
text = {k: torch.tensor([v]).to(device) for k, v in text.items()}
text_feat = F.normalize(encoder.encode_text(text), dim=-1)
all_adj, all_x = [], []
valid_ratio, unique_ratio = [], []
for j in range(args.rounds_per_text):
z = torch.normal(mean, std).to(device)
z.requires_grad_(True)
z = optimize_z(z, anchor, text_feat, encoder, decoder, structure_featurizer, args, device)
adj, x = decoder.decode(z)
all_adj.append(adj.detach().cpu().numpy())
all_x.append(x.detach().cpu().numpy())
result = check_validity(all_adj, all_x, atomic_num_list)
valid_ratio.append(result["valid_ratio"])
unique_ratio.append(result["unique_ratio"])
mols[i] = result["valid_smiles"]
if args.save_fig:
os.makedirs(os.path.join(args.save_path, "text-" + str(i)), exist_ok=True)
for j, mol in enumerate(result["valid_mols"]):
save_path = os.path.join(args.save_path, "text-" + str(i), "mol-" + str(j) + ".png")
img = Draw.MolsToGridImage([mol], legends=[result['valid_smiles'][j]],
molsPerRow=1, subImgSize=(300, 300))
img.save(save_path)
if args.evaluate:
for smi in result["valid_smiles"]:
mol = Chem.MolFromSmiles(smi)
logger.info("smi: %s, log_p: %.4lf, tPSA: %.4lf" % (smi, Descriptors.MolLogP(mol), Descriptors.TPSA(mol)))
pickle.dump(mols, open(os.path.join(args.save_path, "smiles.pkl"), "wb"))
logger.info("Valid ratio %.4lf±%.4lf" % (np.mean(valid_ratio), np.std(valid_ratio)))
logger.info("Unique ratio %.4lf±%.4lf" % (np.mean(unique_ratio), np.std(unique_ratio)))
elif args.technique == "adapt":
pass
""" | OpenBioMed-main | open_biomed/tasks/multi_modal_task/moledit.py |
import logging
logger = logging.getLogger(__name__)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from transformers import BertTokenizerFast
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.meteor_score import meteor_score
from rouge_score import rouge_scorer
from datasets.molcap_dataset import SUPPORTED_MOLCAP_DATASET
from models.multimodal.text2mol import Text2MolMLP
from models.task_model.molcap_model import MolCapModel, GraphEnhancedMolCapModel
from utils import AverageMeter, ToDevice, MolCollator
def train_molcap(train_loader, val_loader, test_loader, test_dataset, model, args, device):
requires_grad = []
for k, v in model.named_parameters():
if v.requires_grad:
requires_grad.append(k)
logger.debug("parameters requires grad: %s" % (" ".join(requires_grad)))
optimizer = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], lr=args.lr, weight_decay=args.weight_decay)
running_loss = AverageMeter()
step = 0
for epoch in range(args.epochs):
logger.info("========Epoch %d========" % (epoch + 1))
logger.info("Training...")
#model.train()
for mol in train_loader:
mol = ToDevice(mol, device)
loss = model(mol)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss.update(loss.detach().cpu().item())
step += 1
if step % args.logging_steps == 0:
logger.info("Steps=%d Training Loss=%.4lf" % (step, running_loss.get_average()))
running_loss.reset()
val_molcap(val_loader, model, device)
if (epoch + 1) % 10 == 0:
torch.save({'model_state_dict': model.state_dict()}, os.path.join(args.output_path, "checkpoint_" + str(epoch) + ".pth"))
print(test_molcap(test_dataset, test_loader, model, args, device))
return model
def val_molcap(val_loader, model, device):
model.eval()
val_loss = 0
logger.info("Validating...")
with torch.no_grad():
for mol in val_loader:
mol = ToDevice(mol, device)
loss = model(mol)
val_loss += loss.detach().cpu().item()
logger.info("validation loss %.4lf" % (val_loss / len(val_loader)))
return val_loss / len(val_loader)
def test_molcap(test_dataset, test_loader, model, args, device):
model.eval()
outputs = []
gts = test_dataset.texts
logger.info("Testing...")
with torch.no_grad():
for i, mol in enumerate(tqdm(test_loader)):
mol = ToDevice(mol, device)
output = model.decode(mol, num_beams=5, max_length=512)
outputs += output
if i <= 3:
                # use j >= 1 so that outputs[-j] and gts[len(outputs) - j] refer to the same sample
                for j in range(1, 6):
logger.info("Generated: %s" % outputs[-j])
logger.info("Ground truth: %s" % gts[len(outputs) - j])
logger.info("------------------------------------------------------")
tokenizer = BertTokenizerFast.from_pretrained(args.text2mol_bert_path)
output_tokens = []
gt_tokens = []
meteor_scores = []
rouge_scores = []
text2mol_scores = []
scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'])
text2mol = Text2MolMLP(
ninp=768,
nhid=600,
nout=300,
model_name_or_path=args.text2mol_bert_path,
cid2smiles_path=os.path.join(args.text2mol_data_path, "cid_to_smiles.pkl"),
cid2vec_path=os.path.join(args.text2mol_data_path, "test.txt")
)
text2mol.load_state_dict(torch.load(args.text2mol_ckpt_path), strict=False)
device = torch.device(args.device)
text2mol.to(device)
with open(args.caption_save_path, "w") as f:
f.write("SMILES\tground truth\toutput\n")
for i in range(len(outputs)):
output_tokens.append(tokenizer.tokenize(outputs[i], truncation=True, max_length=512, padding='max_length'))
output_tokens[i] = list(filter(('[PAD]').__ne__, output_tokens[i]))
output_tokens[i] = list(filter(('[CLS]').__ne__, output_tokens[i]))
output_tokens[i] = list(filter(('[SEP]').__ne__, output_tokens[i]))
gt_tokens.append(tokenizer.tokenize(gts[i], truncation=True, max_length=512, padding='max_length'))
gt_tokens[i] = list(filter(('[PAD]').__ne__, gt_tokens[i]))
gt_tokens[i] = list(filter(('[CLS]').__ne__, gt_tokens[i]))
gt_tokens[i] = [list(filter(('[SEP]').__ne__, gt_tokens[i]))]
meteor_scores.append(meteor_score(gt_tokens[i], output_tokens[i]))
rouge_scores.append(scorer.score(outputs[i], gts[i]))
text2mol_scores.append(text2mol(test_dataset.smiles[i], outputs[i], device).detach().cpu().item())
f.write(test_dataset.smiles[i] + '\t' + gts[i] + '\t' + outputs[i] + '\n')
bleu2 = corpus_bleu(gt_tokens, output_tokens, weights=(0.5, 0.5))
bleu4 = corpus_bleu(gt_tokens, output_tokens, weights=(0.25, 0.25, 0.25, 0.25))
return {
"BLEU-2": bleu2,
"BLEU-4": bleu4,
"Meteor": np.mean(meteor_scores),
"ROUGE-1": np.mean([rs['rouge1'].fmeasure for rs in rouge_scores]),
"ROUGE-2": np.mean([rs['rouge2'].fmeasure for rs in rouge_scores]),
"ROUGE-L": np.mean([rs['rougeL'].fmeasure for rs in rouge_scores]),
"Text2Mol": np.mean(text2mol_scores)
}
def test_molcap_from_file(file, args, device):
tokenizer = BertTokenizerFast.from_pretrained(args.text2mol_bert_path)
output_tokens = []
gt_tokens = []
meteor_scores = []
rouge_scores = []
text2mol_scores = []
scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'])
text2mol = Text2MolMLP(
ninp=768,
nhid=600,
nout=300,
model_name_or_path=args.text2mol_bert_path,
cid2smiles_path=os.path.join(args.text2mol_data_path, "cid_to_smiles.pkl"),
cid2vec_path=os.path.join(args.text2mol_data_path, "test.txt")
)
text2mol.load_state_dict(torch.load(args.text2mol_ckpt_path), strict=False)
device = torch.device(args.device)
text2mol.to(device)
with open(file, "r") as f:
f.readline()
for i, line in enumerate(f.readlines()):
line = line.rstrip("\n").split("\t")
print(i, line[0])
output_tokens.append(tokenizer.tokenize(line[1], truncation=True, max_length=512, padding='max_length'))
output_tokens[i] = list(filter(('[PAD]').__ne__, output_tokens[i]))
output_tokens[i] = list(filter(('[CLS]').__ne__, output_tokens[i]))
output_tokens[i] = list(filter(('[SEP]').__ne__, output_tokens[i]))
gt_tokens.append(tokenizer.tokenize(line[2], truncation=True, max_length=512, padding='max_length'))
gt_tokens[i] = list(filter(('[PAD]').__ne__, gt_tokens[i]))
gt_tokens[i] = list(filter(('[CLS]').__ne__, gt_tokens[i]))
gt_tokens[i] = [list(filter(('[SEP]').__ne__, gt_tokens[i]))]
meteor_scores.append(meteor_score(gt_tokens[i], output_tokens[i]))
rouge_scores.append(scorer.score(line[1], line[2]))
text2mol_scores.append(text2mol(line[0], line[1], device).detach().cpu().item())
bleu2 = corpus_bleu(gt_tokens, output_tokens, weights=(0.5, 0.5))
bleu4 = corpus_bleu(gt_tokens, output_tokens, weights=(0.25, 0.25, 0.25, 0.25))
return {
"BLEU-2": bleu2,
"BLEU-4": bleu4,
"Meteor": np.mean(meteor_scores),
"ROUGE-1": np.mean([rs['rouge1'].fmeasure for rs in rouge_scores]),
"ROUGE-2": np.mean([rs['rouge2'].fmeasure for rs in rouge_scores]),
"ROUGE-L": np.mean([rs['rougeL'].fmeasure for rs in rouge_scores]),
"Text2Mol": np.mean(text2mol_scores)
}
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='chebi-20')
parser.add_argument("--dataset_path", type=str, default='../datasets/molcap/chebi-20')
parser.add_argument("--output_path", type=str, default="../ckpts/finetune_ckpts/caption.pth")
parser.add_argument("--caption_save_path", type=str, default="../assets/outputs.txt")
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--patience", type=int, default=3)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--weight_decay", type=float, default=0)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--logging_steps", type=int, default=300)
parser.add_argument("--text2mol_bert_path", type=str, default="../ckpts/text_ckpts/scibert_scivocab_uncased/")
parser.add_argument("--text2mol_data_path", type=str, default="../assets/molcap/text2mol_data/")
parser.add_argument("--text2mol_ckpt_path", type=str, default="../ckpts/fusion_ckpts/text2mol/test_outputfinal_weights.320.pt")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
device = torch.device(args.device)
if args.mode == "test_from_file":
results = test_molcap_from_file(args.caption_save_path, args, device)
print(results)
exit(0)
config = json.load(open(args.config_path))
# load dataset
train_dataset = SUPPORTED_MOLCAP_DATASET[args.dataset](args.dataset_path, config["data"]["mol"], split="train")
val_dataset = SUPPORTED_MOLCAP_DATASET[args.dataset](args.dataset_path, config["data"]["mol"], split="validation")
test_dataset = SUPPORTED_MOLCAP_DATASET[args.dataset](args.dataset_path, config["data"]["mol"], split="test")
collator = MolCollator(config["data"]["mol"])
train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=True, collate_fn=collator, num_workers=args.num_workers)
val_dataloader = DataLoader(val_dataset, args.batch_size, shuffle=False, collate_fn=collator, num_workers=args.num_workers)
test_dataloader = DataLoader(test_dataset, args.batch_size, shuffle=False, collate_fn=collator, num_workers=args.num_workers)
# load model
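# Use the graph-enhanced captioning model when the MultiScale structure featurizer is configured; otherwise the plain molecule captioning model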
if config["data"]["mol"]["featurizer"]["structure"]["name"] == "MultiScale":
model = GraphEnhancedMolCapModel(config["network"])
else:
model = MolCapModel(config["network"])
model = model.to(device)
if args.mode == "train":
train_molcap(train_dataloader, val_dataloader, test_dataloader, test_dataset, model, args, device)
elif args.mode == "test":
if os.path.exists(args.output_path):
state_dict = torch.load(args.output_path, map_location=device)["model_state_dict"]
model.load_state_dict(state_dict)
results = test_molcap(test_dataset, test_dataloader, model, args, device)
print(results)
elif args.mode == "traintest":
train_molcap(train_dataloader, val_dataloader, test_dataloader, test_dataset, model, args, device)
results = test_molcap(test_dataset, test_dataloader, model, args, device)
print(results)
| OpenBioMed-main | open_biomed/tasks/multi_modal_task/molcap.py |
import logging
logger = logging.getLogger(__name__)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import random
import argparse
import json
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.ppi_dataset import SUPPORTED_PPI_DATASETS
from models.task_model.ppi_model import PPISeqModel, PPIGraphModel
from utils import PPICollator, AverageMeter, EarlyStopping, ToDevice
from utils.metrics import multilabel_f1
def train_ppi(train_loader, train_network, val_loader, val_network, model, args, device):
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
loss_fn = nn.BCEWithLogitsLoss()
running_loss = AverageMeter()
stopper = EarlyStopping(mode='higher', patience=args.patience, filename=args.output_path)
for epoch in range(args.epochs):
logger.info("Epoch %d" % (epoch))
logger.info("Training...")
model.train()
step = 0
for prot1, prot2, label in train_loader:
prot1 = ToDevice(prot1, device)
prot2 = ToDevice(prot2, device)
label = label.to(device)
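# Graph-based PPI models additionally receive the PPI network; sequence models only take the protein pair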
if train_network is not None:
pred = model(prot1, prot2, train_network)
else:
pred = model(prot1, prot2)
loss = loss_fn(pred, label)
loss.backward()
running_loss.update(loss.item(), label.size(0))
step += 1
optimizer.step()
optimizer.zero_grad()
if step % args.logging_steps == 0:
logger.info("Steps=%d Training Loss=%.4lf" % (step, running_loss.get_average()))
running_loss.reset()
if val_loader is not None:
results = eval_ppi(val_loader, val_network, model, args, device)
logger.info(", ".join(["%s: %.4lf" % (k, v) for k, v in results.items()]))
if stopper.step(results["F1"], model):
break
if val_loader is not None:
model.load_state_dict(torch.load(args.output_path)["model_state_dict"])
return model
def eval_ppi(val_loader, val_network, model, args, device):
model.eval()
logger.info("Validating...")
loss_fn = nn.BCEWithLogitsLoss()
all_loss = 0
all_preds = []
all_labels = []
with torch.no_grad():
for prot1, prot2, label in val_loader:
prot1 = ToDevice(prot1, device)
prot2 = ToDevice(prot2, device)
label = label.to(device)
if val_network is not None:
pred = model(prot1, prot2, val_network)
else:
pred = model(prot1, prot2)
all_loss += loss_fn(pred, label).item()
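# Binarize the multi-label predictions with a 0.5 threshold on the sigmoid outputs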
pred = torch.sigmoid(pred) > 0.5
all_preds.append(np.array(pred.detach().cpu(), dtype=float))
all_labels.append(np.array(label.detach().cpu()))
all_preds = np.concatenate(all_preds, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
f1, precision, recall = multilabel_f1(all_preds, all_labels)
results = {
"F1": f1,
"Precision": precision,
"Recall": recall,
}
return results
def main(args, config):
# configure seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
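# GNN-based PPI models consume the whole interaction network, so the dataset is asked to build it (make_network)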
args.graph_ppi = config["model"].startswith("gnn_ppi")
dataset = SUPPORTED_PPI_DATASETS[args.dataset](args.dataset_path, config["data"]["protein"], directed=False, make_network=args.graph_ppi, split=args.split_strategy)
if args.mode == "train":
train_dataset = dataset.index_select(dataset.train_indexes, split="train")
test_dataset = dataset.index_select(dataset.test_indexes, split="test")
logger.info("Num train: %d, Num test: %d" % (len(train_dataset), len(test_dataset)))
collator = PPICollator(config["data"]["protein"], args.graph_ppi)
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collator)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=collator)
device = torch.device(args.device)
if not args.graph_ppi:
model = PPISeqModel(config["network"], train_dataset.num_classes)
train_network = None
test_network = None
else:
model = PPIGraphModel(config["network"], train_dataset.num_classes)
train_network = train_dataset.network.to(device)
test_network = test_dataset.network.to(device)
if args.init_checkpoint != "None":
ckpt = torch.load(args.init_checkpoint)
if args.param_key != "None":
ckpt = ckpt[args.param_key]
model.load_state_dict(ckpt)
model = model.to(device)
model = train_ppi(train_loader, train_network, test_loader, test_network, model, args, device)
results = eval_ppi(test_loader, test_network, model, args, device)
logger.info(", ".join(["%s: %.4lf" % (k, v) for k, v in results.items()]))
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default="SHS27k")
parser.add_argument("--dataset_path", type=str, default="../datasets/ppi/SHS27k/")
parser.add_argument("--split_strategy", type=str, default="random")
parser.add_argument("--init_checkpoint", type=str, default="None")
parser.add_argument("--param_key", type=str, default="None")
parser.add_argument("--output_path", type=str, default="../ckpts/finetune_ckpts/ppi/finetune.pth")
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--weight_decay", type=float, default=1e-5)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--batch_size", type=int, default=512)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--patience", type=int, default=10)
parser.add_argument("--logging_steps", type=int, default=50)
parser.add_argument("--seed", type=int, default=42)
return parser
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
parser = add_arguments(parser)
args = parser.parse_args()
config = json.load(open(args.config_path, "r"))
main(args, config) | OpenBioMed-main | open_biomed/tasks/prot_task/ppi.py |
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from sklearn.metrics import accuracy_score, f1_score
from datasets.ctc_dataset import SUPPORTED_CTC_DATASETS
# from models.ctc_model import CTCModel
# from datasets.ctc_dataset import SUPPORTED_CTC_DATASETS
from models.task_model.ctc_model import CTCModel
from utils import EarlyStopping, AverageMeter, seed_all, ToDevice
from utils.distributed_utils import init_distributed_mode, get_rank, is_main_process, concat_reduce
from utils.schedulars import CosineAnnealingWarmupRestarts
def add_arguments(parser):
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='zheng68k')
parser.add_argument("--dataset_path", type=str, default='../datasets/ctc/zheng68k/')
parser.add_argument("--output_path", type=str, default="../ckpts/finetune_ckpts/ctc/finetune.pth")
parser.add_argument("--distributed", action="store_true")
parser.add_argument('--world_size', type=int, default=1, help='number of distributed processes')
parser.add_argument('--local_rank', type=int, default=0, help='local rank')
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--batch_size", type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--patience", type=int, default=10)
parser.add_argument("--logging_steps", type=int, default=1000)
parser.add_argument("--unassign_threshold", type=float, default=0.0)
parser.add_argument("--seed", type=int, default=42)
return parser
def train_ctc(train_loader, val_loader, model, device, args):
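# Inverse-frequency class weights: each class is weighted by (1 - frequency)^2 so rare cell types count more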
class_num = np.unique(train_loader.dataset.labels, return_counts=True)[1].tolist()
class_weight = torch.tensor([(1 - (x / sum(class_num))) ** 2 for x in class_num]).to(device)
loss_fn = nn.CrossEntropyLoss(weight = class_weight)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# schedular = CosineAnnealingWarmupRestarts(
# optimizer,
# first_cycle_steps=15,
# cycle_mult=2,
# max_lr=args.lr,
# min_lr=1e-6,
# warmup_steps=5,
# gamma=0.9
# )
stopper = EarlyStopping(mode="higher", patience=args.patience, filename=args.output_path)
running_loss = AverageMeter(distributed=args.distributed, local_rank=args.local_rank, dest_device=0, world_size=args.world_size)
running_acc = AverageMeter(distributed=args.distributed, local_rank=args.local_rank, dest_device=0, world_size=args.world_size)
for epoch in range(args.epochs):
logger.info("========Epoch %d========" % (epoch + 1))
model.train()
if args.distributed:
train_loader.sampler.set_epoch(epoch)
running_loss.reset()
running_acc.reset()
for step, (cell, label) in enumerate(tqdm(train_loader)):
cell, label = ToDevice(cell, device), label.to(device)
logits = model(cell)
loss = loss_fn(logits, label)
loss.backward()
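# Accumulate gradients and only update the parameters every gradient_accumulation_steps mini-batches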
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), int(1e6))
optimizer.step()
optimizer.zero_grad()
pred = nn.Softmax(dim=-1)(logits).argmax(dim=-1)
correct = torch.eq(pred, label).sum(dim=-1).item()
running_loss.update(loss.item())
running_acc.update(correct / args.batch_size)
if (step + 1) * args.world_size % args.logging_steps == 0:
if args.distributed:
dist.barrier()
logger.info("Steps=%d Training Loss=%.4lf, Acc=%.4lf" % (
step,
running_loss.get_average(),
running_acc.get_average()
))
running_loss.reset()
running_acc.reset()
if args.distributed:
dist.barrier()
# schedular.step()
results = val_ctc(val_loader, model, device, args)
logger.info(", ".join(["%s: %.4lf" % (k, v) for k, v in results.items()]))
if args.distributed:
model_without_ddp = model.module
else:
model_without_ddp = model
if stopper.step((results["Accuracy"]), model_without_ddp):
break
model.load_state_dict(torch.load(args.output_path)["model_state_dict"])
return model
def val_ctc(val_loader, model, device, args):
with torch.no_grad():
model.eval()
all_preds, all_y = [], []
for cell, label in tqdm(val_loader):
cell = ToDevice(cell, device)
logits = model(cell)
pred = nn.Softmax(dim=-1)(logits).argmax(dim=-1).detach().cpu()
# pred[np.amax(np.array(pred), axis=-1) < args.unassign_threshold] = -1
all_preds.append(pred)
all_y.append(label)
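# In distributed mode, gather predictions and labels from all ranks before computing metrics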
if args.distributed:
dist.barrier()
all_preds = concat_reduce(all_preds, len(val_loader.dataset), args.world_size)
all_y = concat_reduce(all_y, len(val_loader.dataset), args.world_size)
else:
all_preds = torch.cat(all_preds, dim=0)
all_y = torch.cat(all_y, dim=0)
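# Debug output: print the first 20 predictions alongside the ground-truth labels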
for i in range(20):
print(all_preds[i].item(), all_y[i].item())
return {
"Accuracy": accuracy_score(all_preds, all_y),
"F1 Score": f1_score(all_preds, all_y, average='macro'),
}
def main(args, config):
# prepare dataset
dataset = SUPPORTED_CTC_DATASETS[args.dataset](args.dataset_path, config["data"], args.seed)
train_dataset = dataset.index_select(dataset.train_index)
val_dataset = dataset.index_select(dataset.val_index)
if args.distributed:
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, sampler=DistributedSampler(train_dataset, shuffle=True))
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, sampler=DistributedSampler(val_dataset, shuffle=False))
else:
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
# prepare model
device = torch.device(args.device)
model = CTCModel(config["network"], dataset.num_classes)
model.to(device)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.device])
seed_all(args.seed)
if args.mode == "train":
train_ctc(train_loader, val_loader, model, device, args)
results = val_ctc(val_loader, model, device, args)
print(results)
elif args.mode == "test":
results = val_ctc(val_loader, model, device, args)
print(results)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = add_arguments(parser)
args = parser.parse_args()
init_distributed_mode(args)
# Log at INFO level on the main process and at WARNING level on the other ranks
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process() else logging.WARN,
)
config = json.load(open(args.config_path, "r"))
main(args, config) | OpenBioMed-main | open_biomed/tasks/cell_task/ctc.py |
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import copy
import math
import numpy as np
import json
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from scipy.stats import pearsonr
from datasets.drp_dataset import SUPPORTED_DRP_DATASET, TCGA
# from models.drp_model import TGDRP
# from datasets.drp_dataset import SUPPORTED_DRP_DATASET, TCGA
from models.task_model.drp_model import TGDRP
from utils import EarlyStopping, AverageMeter, seed_all, roc_auc, metrics_average
from utils.collators import DRPCollator
SUPPORTED_DRP_MODEL = {
"TGDRP": TGDRP,
}
def train_drp(train_loader, val_loader, model, args):
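# Pick the loss function, validation metric and early-stopping direction according to the task type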
if args.task == "classification":
loss_fn = nn.BCEWithLogitsLoss()
metric = "roc_auc"
mode = "higher"
elif args.task == "regression":
loss_fn = nn.MSELoss()
metric = "rmse"
mode = "lower"
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
stopper = EarlyStopping(mode=mode, patience=args.patience)
running_loss = AverageMeter()
for i in range(args.epochs):
logger.info("========Epoch %d========" % (i + 1))
logger.info("Training...")
model.train()
step_loss = 0
running_loss.reset()
t = tqdm(train_loader, desc="Loss=%.4lf" % (step_loss))
for drug, cell, label in t:
if isinstance(cell, list):
drug, cell, label = drug.to(args.device), [feat.to(args.device) for feat in cell], label.to(args.device)
else:
drug, cell, label = drug.to(args.device), cell.to(args.device), label.to(args.device)
pred = model(drug, cell)
loss = loss_fn(pred, label.view(-1, 1).float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
step_loss = loss.item()
running_loss.update(step_loss)
t.set_description("Loss=%.4lf" % (step_loss))
del drug, cell, label
logger.info("Average training loss %.4lf" % (running_loss.get_average()))
val_metrics = val_drp(val_loader, model, args)
if stopper.step(val_metrics[metric], model):
break
return model
def val_drp(val_loader, model, args):
model.eval()
y_true, y_pred = [], []
logger.info("Validating...")
for drug, cell, label in tqdm(val_loader):
torch.cuda.empty_cache()
if isinstance(cell, list):
drug, cell, label = drug.to(args.device), [feat.to(args.device) for feat in cell], label.to(args.device)
else:
drug, cell, label = drug.to(args.device), cell.to(args.device), label.to(args.device)
with torch.no_grad():
pred = model(drug, cell)
y_true.append(label.view(-1, 1).detach().cpu())
y_pred.append(pred.detach().cpu())
y_true = torch.cat(y_true, dim=0).numpy().flatten()
y_pred = torch.cat(y_pred, dim=0).numpy().flatten()
if args.task == "classification":
results = {
"roc_auc": roc_auc(y_true, y_pred)
}
elif args.task == "regression":
results = {
"rmse": math.sqrt(mean_squared_error(y_true, y_pred)),
"mae": mean_absolute_error(y_true, y_pred),
"r2": r2_score(y_true, y_pred),
"pearson": pearsonr(y_true, y_pred)[0]
}
logger.info(" ".join(["%s: %.4lf" % (key, results[key]) for key in results]))
return results
def main(args, config):
# set random seed
seed_all(args.seed)
# build dataset
dataset = SUPPORTED_DRP_DATASET[args.dataset](args.dataset_path, config["data"], task=args.task)
collate_fn = DRPCollator(config["data"])
# build model
model = SUPPORTED_DRP_MODEL[config["model"]](config["network"])
if config["model"] in ["TGSA", "TGDRP"]:
if config["data"]["cell"]["featurizer"]["name"] == "TGSA":
model.cluster_predefine = {i: j.to(args.device) for i, j in dataset.predefined_cluster.items()}
model._build()
model = model.to(args.device)
if args.init_checkpoint != '':
ckpt = torch.load(args.init_checkpoint)
if args.param_key != '':
ckpt = ckpt[args.param_key]
model.load_state_dict(ckpt)
if args.mode == "train":
train_dataset = dataset.index_select(dataset.train_indexes)
val_dataset = dataset.index_select(dataset.val_indexes)
test_dataset = dataset.index_select(dataset.test_indexes)
logger.info("# Samples: Train - %d, Val - %d, Test - %d" % (len(train_dataset), len(val_dataset), len(test_dataset)))
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn)
val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=4, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, collate_fn=collate_fn)
model = train_drp(train_loader, val_loader, model, args)
val_drp(test_loader, model, args)
elif args.mode == "test":
val_dataset = dataset.index_select(dataset.val_indexes)
test_dataset = dataset.index_select(dataset.test_indexes)
val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=4, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, collate_fn=collate_fn)
model.load_state_dict(torch.load(args.init_checkpoint, map_location=args.device)['model_state_dict'])
val_drp(val_loader, model, args)
val_drp(test_loader, model, args)
elif args.mode == "zero_shot_transfer":
nfolds = config["data"]["split"]["nfolds"]
all_patients = ["BRCA_28", "CESC_24", "COAD_8", "GBM_69", "HNSC_45", "KIRC_47", "LUAD_23", "LUSC_20", "PAAD_55", "READ_8", "SARC_30", "SKCM_56", "STAD_30"]
results = {x: [] for x in all_patients}
for fold in range(nfolds):
train_fold = list(range(fold)) + list(range(fold + 1, nfolds))
train_dataset = dataset.index_select(np.concatenate([dataset.fold_indexes[i] for i in train_fold], axis=0))
val_dataset = dataset.index_select(dataset.fold_indexes[fold])
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn)
val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=4, collate_fn=collate_fn)
fold_model = copy.deepcopy(model)
fold_model = train_drp(train_loader, val_loader, fold_model, args)
val_drp(val_loader, fold_model, args)
for patient in all_patients:
test_dataset = TCGA(args.transfer_dataset_path, config["data"], subset=patient)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, collate_fn=collate_fn)
results[patient].append(val_drp(test_loader, fold_model, args))
for patient in all_patients:
mean, std = metrics_average(results[patient])["roc_auc"]
print("roc_auc on TCGA-%s: %.4lf±%.4lf" % (patient, mean, std))
def add_arguments(parser):
parser.add_argument('--seed', type=int, default=42, help='seed')
parser.add_argument('--device', type=str, default='cuda:0', help='device')
parser.add_argument('--task', type=str, default='regression', help='task type: classification or regression')
parser.add_argument('--dataset', type=str, default='GDSC', help='dataset')
parser.add_argument("--dataset_path", type=str, default='../datasets/drp/GDSC/', help='path to the dataset')
parser.add_argument('--config_path', type=str, help='path to the configuration file')
parser.add_argument('--batch_size', type=int, default=128, help='batch size (default: 128)')
parser.add_argument('--num_workers', type=int, default=4, help='number of workers (default: 4)')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay')
parser.add_argument('--epochs', type=int, default=300, help='maximum number of epochs (default: 300)')
parser.add_argument('--patience', type=int, default=10, help='patience for earlystopping (default: 10)')
parser.add_argument('--setup', type=str, default='known', help='experimental setup')
parser.add_argument('--init_checkpoint', type=str, default='', help='filepath for pretrained weights')
parser.add_argument('--param_key', type=str, default='', help='the key to obtain model state dict')
parser.add_argument('--mode', type=str, default='test', help='train, test or zero-shot transfer')
# arguments for zero-shot transfer
parser.add_argument("--transfer_dataset_path", type=str, default='../datasets/drp/tcga', help='path to the transfer dataset')
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
config = json.load(open(args.config_path, "r"))
main(args, config) | OpenBioMed-main | open_biomed/tasks/mol_task/drp.py |
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import random
import argparse
import json
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.dti_dataset import SUPPORTED_DTI_DATASETS
# from models.dti_model import DTIModel, DeepEIK4DTI
# from datasets.dti_dataset import SUPPORTED_DTI_DATASETS
from models.task_model.dti_model import DTIModel, DeepEIK4DTI
from utils import DTICollator, AverageMeter, EarlyStopping, ToDevice, metrics_average
from utils.metrics import roc_auc, pr_auc, concordance_index, rm2_index
from sklearn.metrics import f1_score, precision_score, recall_score, mean_squared_error
from scipy.stats import pearsonr, spearmanr
def train_dti(train_loader, val_loader, model, args, device):
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
if args.task == "classification":
loss_fn = nn.CrossEntropyLoss()
stop_mode = 'higher'
key = "ROC_AUC"
elif args.task == "regression":
loss_fn = nn.MSELoss()
stop_mode = 'lower'
key = "MSE"
running_loss = AverageMeter()
stopper = EarlyStopping(mode=stop_mode, patience=args.patience, filename=args.output_path)
for epoch in range(args.epochs):
logger.info("Epoch %d" % (epoch))
logger.info("Training...")
model.train()
step = 0
for mol, prot, label in train_loader:
mol = ToDevice(mol, device)
prot = ToDevice(prot, device)
label = label.to(device)
pred = model(mol, prot)
loss = loss_fn(pred, label)
loss.backward()
running_loss.update(loss.item(), label.size(0))
step += 1
#if step % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
if step % args.logging_steps == 0:
logger.info("Steps=%d Training Loss=%.4lf" % (step, running_loss.get_average()))
running_loss.reset()
if args.eval_train_epochs > 0 and epoch % args.eval_train_epochs == 0:
eval_dti("train", train_loader, model, args, device)
if val_loader is not None:
results = eval_dti("valid", val_loader, model, args, device)
logger.info(", ".join(["%s: %.4lf" % (k, v) for k, v in results.items()]))
if stopper.step(results[key], model):
break
if val_loader is not None:
model.load_state_dict(torch.load(args.output_path)["model_state_dict"])
return model
def eval_dti(split, val_loader, model, args, device):
model.eval()
logger.info("Validating...")
if args.task == "classification":
loss_fn = nn.CrossEntropyLoss()
elif args.task == "regression":
loss_fn = nn.MSELoss()
all_loss = 0
all_preds = []
all_labels = []
for mol, prot, label in val_loader:
mol = ToDevice(mol, device)
prot = ToDevice(prot, device)
label = label.to(device)
pred = model(mol, prot)
if args.task == "classification" and len(pred.shape) < 2:
pred = pred.unsqueeze(0)
all_loss += loss_fn(pred, label).item()
if args.task == "classification":
pred = F.softmax(pred, dim=-1)[:, 1]
all_preds.append(np.array(pred.detach().cpu()))
all_labels.append(np.array(label.detach().cpu()))
logger.info("Average %s loss: %.4lf" % (split, all_loss / len(val_loader)))
all_preds = np.concatenate(all_preds, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
if args.task == "classification":
outputs = np.array([1 if x >= 0.5 else 0 for x in all_preds])
results = {
"ROC_AUC": roc_auc(all_labels, all_preds),
"PR_AUC": pr_auc(all_labels, all_preds),
"F1": f1_score(all_labels, outputs),
"Precision": precision_score(all_labels, outputs),
"Recall": recall_score(all_labels, outputs),
}
elif args.task == "regression":
results = {
"MSE": mean_squared_error(all_labels, all_preds),
"Pearson": pearsonr(all_labels, all_preds)[0],
"Spearman": spearmanr(all_labels, all_preds)[0],
"CI": concordance_index(all_labels, all_preds),
"r_m^2": rm2_index(all_labels, all_preds)
}
return results
def main(args, config):
# configure seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# prepare dataset
if args.dataset in ['yamanishi08', 'bmkg-dti']:
args.task = "classification"
pred_dim = 2
else:
args.task = "regression"
pred_dim = 1
dataset = SUPPORTED_DTI_DATASETS[args.dataset](args.dataset_path, config["data"], args.split_strategy)
if args.mode == "train":
train_dataset = dataset.index_select(dataset.train_index)
if len(dataset.val_index) > 0:
val_dataset = dataset.index_select(dataset.val_index)
else:
val_dataset = None
test_dataset = dataset.index_select(dataset.test_index)
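# _build receives the test pair index and, when the KG modality is enabled, a cache path for knowledge-graph features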
train_dataset._build(
test_dataset.pair_index,
None if "kg" not in config["data"]["mol"]["modality"] else os.path.join(config["data"]["mol"]["featurizer"]["kg"]["save_path"], "dti-" + args.dataset + ".pkl")
)
if val_dataset is not None:
val_dataset._build(
test_dataset.pair_index,
None if "kg" not in config["data"]["mol"]["modality"] else os.path.join(config["data"]["mol"]["featurizer"]["kg"]["save_path"], "dti-" + args.dataset + ".pkl")
)
test_dataset._build(
test_dataset.pair_index,
None if "kg" not in config["data"]["mol"]["modality"] else os.path.join(config["data"]["mol"]["featurizer"]["kg"]["save_path"], "dti-" + args.dataset + ".pkl")
)
collator = DTICollator(config["data"])
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collator)
if val_dataset is not None:
val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=collator)
else:
val_loader = None
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=collator)
if len(config["data"]["mol"]["modality"]) > 1:
model = DeepEIK4DTI(config["network"], pred_dim)
else:
model = DTIModel(config["network"], pred_dim)
if args.init_checkpoint != "None":
ckpt = torch.load(args.init_checkpoint)
if args.param_key != "None":
ckpt = ckpt[args.param_key]
model.load_state_dict(ckpt)
device = torch.device(args.device)
model = model.to(device)
model = train_dti(train_loader, test_loader, model, args, device)
results = eval_dti("test", test_loader, model, args, device)
logger.info(", ".join(["%s: %.4lf" % (k, v) for k, v in results.items()]))
elif args.mode == "kfold":
results = []
for i in range(dataset.nfolds):
logger.info("Fold %d", i)
train_dataset = dataset.index_select(dataset.folds[i]["train"])
test_dataset = dataset.index_select(dataset.folds[i]["test"])
train_dataset._build(
test_dataset.pair_index,
None if "kg" not in config["data"]["mol"]["modality"] else os.path.join(config["data"]["mol"]["featurizer"]["kg"]["save_path"], "dti-" + args.dataset + "-" + args.split_strategy + "-fold" + str(i) + ".pkl")
)
test_dataset._build(
test_dataset.pair_index,
None if "kg" not in config["data"]["mol"]["modality"] else os.path.join(config["data"]["mol"]["featurizer"]["kg"]["save_path"], "dti-" + args.dataset + "-" + args.split_strategy + "-fold" + str(i) + ".pkl")
)
collator = DTICollator(config["data"])
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collator)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=collator)
# prepare model
if len(config["data"]["mol"]["modality"]) > 1:
model = DeepEIK4DTI(config["network"], pred_dim)
else:
model = DTIModel(config["network"], pred_dim)
#print(model)
if args.init_checkpoint != "None":
ckpt = torch.load(args.init_checkpoint)
if args.param_key != "None":
ckpt = ckpt[args.param_key]
model.load_state_dict(ckpt)
device = torch.device(args.device)
model = model.to(device)
model = train_dti(train_loader, test_loader, model, args, device)
results.append(eval_dti("test", test_loader, model, args, device))
results = metrics_average(results)
for key in results:
print("%s: %.4lf±%.4lf" % (key, results[key][0], results[key][1]))
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default="Yamanishi08")
parser.add_argument("--dataset_path", type=str, default="../datasets/dti/Yamanishi08/")
parser.add_argument("--split_strategy", type=str, default="random")
parser.add_argument("--init_checkpoint", type=str, default="None")
parser.add_argument("--param_key", type=str, default="None")
parser.add_argument("--output_path", type=str, default="../ckpts/finetune_ckpts/dp/finetune.pth")
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--weight_decay", type=float, default=1e-5)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--patience", type=int, default=10)
parser.add_argument("--logging_steps", type=int, default=50)
parser.add_argument("--eval_train_epochs", type=int, default=-1)
parser.add_argument("--seed", type=int, default=42)
return parser
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
parser = add_arguments(parser)
args = parser.parse_args()
output_dir = os.path.dirname(args.output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
config = json.load(open(args.config_path, "r"))
main(args, config)
| OpenBioMed-main | open_biomed/tasks/mol_task/dti.py |
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import argparse
import json
from tqdm import tqdm
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.metrics import mean_absolute_error, mean_squared_error, roc_auc_score
from datasets.dp_dataset import SUPPORTED_DP_DATASETS, Task
from utils import DPCollator, roc_auc, EarlyStopping, AverageMeter, ToDevice
from models.task_model.dp_model import DPModel, DeepEIK4DP
def add_arguments(parser):
parser.add_argument("--device", type=str, default="cuda:0")
parser.add_argument("--mode", type=str, default="train")
parser.add_argument("--config_path", type=str, default="")
parser.add_argument('--dataset', type=str, default='MoleculeNet')
parser.add_argument("--dataset_path", type=str,
default='../datasets/dp/MoleculeNet/')
parser.add_argument("--dataset_name", type=str, default='BBBP')
parser.add_argument("--init_checkpoint", type=str, default="")
parser.add_argument("--param_key", type=str, default="None")
parser.add_argument("--output_path", type=str,
default="../ckpts/finetune_ckpts/dp/finetune.pth")
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--weight_decay", type=float, default=1e-5)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--patience", type=int, default=10)
parser.add_argument("--logging_steps", type=int, default=50)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--dropout", type=float, default=0.0)
parser.add_argument("--classfy_type", type=str, default="multi")
return parser
def get_num_task(dataset):
"""Return the number of prediction targets for a MoleculeNet dataset (used in molecule_finetune.py)."""
dataset = dataset.lower()
if dataset == 'tox21':
return 12
elif dataset in ['hiv', 'bace', 'bbbp', 'donor']:
return 1
elif dataset == 'pcba':
return 92
elif dataset == 'muv':
return 17
elif dataset == 'toxcast':
return 617
elif dataset == 'sider':
return 27
elif dataset == 'clintox':
return 2
raise ValueError('Invalid dataset name.')
def eval(model, device, loader):
model.eval()
y_true = []
y_scores = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch.x, batch.edge_index,
batch.edge_attr, batch.batch)
y_true.append(batch.y.view(pred.shape))
y_scores.append(pred)
y_true = torch.cat(y_true, dim=0).cpu().numpy()
y_scores = torch.cat(y_scores, dim=0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
# AUC is only defined when there is at least one positive data.
if np.sum(y_true[:, i] == 1) > 0 and np.sum(y_true[:, i] == -1) > 0:
is_valid = y_true[:, i]**2 > 0
roc_list.append(roc_auc_score(
(y_true[is_valid, i] + 1)/2, y_scores[is_valid, i]))
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" % (1 - float(len(roc_list))/y_true.shape[1]))
return sum(roc_list)/len(roc_list) # y_true.shape[1]
def get_metric(task, name):
if task == Task.CLASSFICATION:
metric_name = "roc_auc"
metric = roc_auc
elif task == Task.REGRESSION:
if name in ["qm7", "qm8", "qm9"]:
metric_name = "MAE"
metric = mean_absolute_error
else:
metric_name = "MSE"
metric = mean_squared_error
return metric_name, metric
def train_dp(train_loader, val_loader, test_loader, model, task, args):
device = torch.device(args.device)
if task == Task.CLASSFICATION:
loss_fn = nn.BCEWithLogitsLoss(reduction="none")
mode = "higher"
elif task == Task.REGRESSION:
if args.dataset_name in ["qm7", "qm8", "qm9"]:
loss_fn = nn.L1Loss()
mode = "lower"
else:
loss_fn = nn.MSELoss()
mode = "lower"
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
stopper = EarlyStopping(
mode=mode, patience=args.patience, filename=args.output_path)
metric_name, _ = get_metric(task, args.dataset_name)
running_loss = AverageMeter()
for epoch in range(args.epochs):
logger.info("========Epoch %d========" % (epoch + 1))
model.train()
running_loss.reset()
for step, (drug, label) in enumerate(train_loader):
drug = ToDevice(drug, device)
pred = model(drug)
y = label.view(pred.shape).to(torch.float64).to(device)
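# Labels are encoded as {-1, +1}, with 0 marking missing targets; y**2 > 0 keeps only the labelled entries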
is_valid = y**2 > 0
# Loss matrix
loss_mat = loss_fn(pred.double(), (y+1)/2)
# loss matrix after removing null target
loss_mat = torch.where(is_valid, loss_mat, torch.zeros(
loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
optimizer.zero_grad()
loss = torch.sum(loss_mat)/torch.sum(is_valid)
loss.backward()
optimizer.step()
running_loss.update(loss.detach().cpu().item())
if (step + 1) % args.logging_steps == 0:
logger.info("Steps=%d Training Loss=%.4lf" %
(step, running_loss.get_average()))
running_loss.reset()
val_metrics = val_dp(val_loader, model, task, args)
test_metrics = val_dp(test_loader, model, task, args)
logger.info("%s val %s=%.4lf" %
(args.dataset_name, metric_name, val_metrics[metric_name]))
logger.info("%s test %s=%.4lf" % (args.dataset_name,
metric_name, test_metrics[metric_name]))
if stopper.step((val_metrics[metric_name]), model):
break
model.load_state_dict(torch.load(args.output_path)["model_state_dict"])
return model, epoch
def val_dp(val_loader, model, task, args):
device = torch.device(args.device)
metric_name, metric = get_metric(task, args.dataset_name)
model.eval()
all_preds, y_true = [], []
for drug, label in val_loader:
drug = ToDevice(drug, device)
pred = model(drug).detach().cpu()
label = label.view(pred.shape)
all_preds.append(pred)
y_true.append(label)
all_preds = torch.cat(all_preds, dim=0).cpu().numpy()
y_true = torch.cat(y_true, dim=0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
# AUC is only defined when there is at least one positive data.
if np.sum(y_true[:, i] == 1) > 0 and np.sum(y_true[:, i] == -1) > 0:
is_valid = y_true[:, i]**2 > 0
roc_list.append(roc_auc_score(
(y_true[is_valid, i] + 1)/2, all_preds[is_valid, i]))
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" % (1 - float(len(roc_list))/y_true.shape[1]))
return {metric_name: sum(roc_list)/len(roc_list)} # y_true.shape[1]
# return {metric_name: metric(all_y, all_preds)}
def main(args, config):
# prepare dataset
dataset = SUPPORTED_DP_DATASETS[args.dataset](
args.dataset_path, config["data"], args.dataset_name, 2)
task = dataset.task
train_dataset = dataset.index_select(dataset.train_index)
val_dataset = dataset.index_select(dataset.val_index)
test_dataset = dataset.index_select(dataset.test_index)
# _build
save_path = ""
if len(config["data"]["mol"]["modality"]) > 1 and "kg" in config["data"]["mol"]["featurizer"]:
save_path = os.path.join(
config["data"]["mol"]["featurizer"]["kg"]["save_path"], "dp-" + args.dataset + "_val.pkl")
train_dataset._build(save_path)
val_dataset._build(save_path)
test_dataset._build(save_path)
collator = DPCollator(config["data"]["mol"])
train_loader = DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
collate_fn=collator)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, collate_fn=collator)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, collate_fn=collator)
# prepare model
task_num = get_num_task(args.dataset_name)
if len(config["data"]["mol"]["modality"]) > 1 and config["model"] != "molalbef":
model = DeepEIK4DP(config["network"], task_num)
else:
model = DPModel(config, task_num)
if args.init_checkpoint != "":
ckpt = torch.load(args.init_checkpoint)
if args.param_key != "":
ckpt = ckpt[args.param_key]
model.load_state_dict(ckpt)
device = torch.device(args.device)
model = model.to(device)
# configure metric
metric_name, _ = get_metric(task, args.dataset_name)
# TODO: support binary and multi-class classification
if args.mode == "train":
model, epoch = train_dp(train_loader, val_loader,
test_loader, model, task, args)
results = val_dp(test_loader, model, task, args)
logger.info("%s test %s=%.4lf" %
(args.dataset_name, metric_name, results[metric_name]))
elif args.mode == "test":
results = val_dp(test_loader, model, task, args)
logger.info("%s test %s=%.4lf" %
(args.dataset_name, metric_name, results[metric_name]))
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
parser = argparse.ArgumentParser()
parser = add_arguments(parser)
args = parser.parse_args()
config = json.load(open(args.config_path, "r"))
# config['network']['structure']['drop_ratio'] = args.dropout
# set seed
random.seed(args.seed)
# np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
main(args, config) | OpenBioMed-main | open_biomed/tasks/mol_task/dp.py |
OpenBioMed-main | open_biomed/tasks/mol_task/__init__.py |
|
import logging
logger = logging.getLogger(__name__)
from abc import ABC, abstractmethod
import os
import csv
import torch
from torch.utils.data import Dataset
from feature.mol_featurizer import MolMultiModalFeaturizer
from feature.text_featurizer import TextTransformerTokFeaturizer
from utils.mol_utils import valid_smiles
class MolCapDataset(Dataset, ABC):
def __init__(self, path, config):
super(MolCapDataset, self).__init__()
self.path = path
self.config = config
self._load_data()
self._featurize()
@abstractmethod
def _load_data(self):
raise NotImplementedError
def _featurize(self):
featurizer = MolMultiModalFeaturizer(self.config)
featurizer.set_mol2text_dict(self.smi2text)
self.mols = [featurizer(smi) for smi in self.smiles]
if "additional_text" in self.config["modality"]:
featurizer = TextTransformerTokFeaturizer(self.config["featurizer"]["additional_text"])
for i, smi in enumerate(self.smiles):
self.mols[i]["additional_text"] = featurizer(self.smi2addtext[smi])
def __getitem__(self, index):
return self.mols[index]
def __len__(self):
return len(self.smiles)
class CheBI_20(MolCapDataset):
def __init__(self, path, config, split):
self.split = split
super(CheBI_20, self).__init__(path, config)
def _load_data(self):
self.smiles = []
self.texts = []
with open(os.path.join(self.path, self.split + ".txt")) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for line in reader:
if valid_smiles(line["SMILES"]):
self.smiles.append(line["SMILES"])
self.texts.append(line["description"])
else:
logger.warn("Failed to generate 2D Graph for %s" % (line["SMILES"]))
self.smi2text = dict(zip(self.smiles, self.texts))
logger.info("Split: %s, Num Samples: %d" % (self.split, len(self)))
if "additional_text" in self.config["modality"]:
self.smi2addtext = {}
with open(os.path.join(self.path, "molt5-captions-" + self.split + ".txt")) as f:
reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for line in reader:
self.smi2addtext[line["SMILES"]] = line["output"]
SUPPORTED_MOLCAP_DATASET = {
"chebi-20": CheBI_20
} | OpenBioMed-main | open_biomed/datasets/molcap_dataset.py |
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
import copy
import numpy as np
import json
import os.path as osp
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torch_geometric.data import Data
from feature.protein_featurizer import SUPPORTED_PROTEIN_FEATURIZER, ProteinMultiModalFeaturizer
from utils.kg_utils import subgraph_sample
class PPIDataset(Dataset, ABC):
def __init__(self, path, config, directed=False, make_network=False, split='random'):
super(PPIDataset, self).__init__()
self.path = path
self.config = config
self.directed = directed
self.make_network = make_network
self._load_proteins()
self._load_ppis()
self._featurize()
self._train_test_split(strategy=split)
@abstractmethod
def _load_proteins(self, path):
raise NotImplementedError
@abstractmethod
def _load_ppis(self):
raise NotImplementedError
def _featurize(self):
logger.info("Featurizing...")
if len(self.config["modality"]) > 1:
featurizer = ProteinMultiModalFeaturizer(self.config["featurizer"])
else:
featurizer = SUPPORTED_PROTEIN_FEATURIZER[self.config["featurizer"]["structure"]["name"]](self.config["featurizer"]["structure"])
self.proteins = [featurizer(protein) for protein in tqdm(self.proteins)]
self.labels = torch.tensor(self.labels, dtype=torch.float)
@abstractmethod
def _train_test_split(self, strategy='random'):
raise NotImplementedError
def index_select(self, indexes, split='train'):
new_dataset = copy.deepcopy(self)
new_dataset.pair_index = [self.pair_index[i] for i in indexes]
new_dataset.labels = [self.labels[i] for i in indexes]
if self.make_network:
if split == 'train':
# inductive setting, remove edges in the test set during training
new_dataset.network = Data(
x=torch.stack(self.proteins),
edge_index=torch.tensor(np.array(new_dataset.pair_index).T, dtype=torch.long)
)
else:
new_dataset.network = Data(
x=torch.stack(self.proteins),
edge_index=torch.tensor(np.array(self.pair_index).T, dtype=torch.long)
)
return new_dataset
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
if not self.make_network:
return self.proteins[self.pair_index[idx][0]], self.proteins[self.pair_index[idx][1]], self.labels[idx]
else:
return self.pair_index[idx][0], self.pair_index[idx][1], self.labels[idx]
class STRINGDataset(PPIDataset):
def __init__(self, path, config, directed=False, make_network=False, split='bfs'):
super(STRINGDataset, self).__init__(path, config, directed, make_network, split)
self.num_classes = 7
def _load_proteins(self):
self.proteins = []
self.protname2id = {}
with open(osp.join(self.path, "sequences.tsv")) as f:
for i, line in enumerate(f.readlines()):
line = line.rstrip("\n").split("\t")
self.protname2id[line[0]] = i
self.proteins.append(line[1])
logger.info("Num proteins: %d" % (len(self.proteins)))
def _load_ppis(self):
ppi_dict = {}
class_map = {'reaction': 0, 'binding': 1, 'ptmod': 2, 'activation': 3, 'inhibition': 4, 'catalysis': 5, 'expression': 6}
with open(osp.join(self.path, "interactions.txt")) as f:
for i, line in enumerate(f.readlines()):
if i == 0:
continue
line = line.rstrip("\n").split("\t")
prot1, prot2 = self.protname2id[line[0]], self.protname2id[line[1]]
if not self.directed and prot1 > prot2:
t = prot1
prot1 = prot2
prot2 = t
if (prot1, prot2) not in ppi_dict:
ppi_dict[(prot1, prot2)] = [0] * 7
ppi_dict[(prot1, prot2)][class_map[line[2]]] = 1
self.pair_index = []
self.labels = []
for prot_pair in ppi_dict:
self.pair_index.append(list(prot_pair))
self.labels.append(ppi_dict[prot_pair])
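# For undirected PPI graphs, store each interaction in both directions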
if not self.directed:
self.pair_index.append([prot_pair[1], prot_pair[0]])
self.labels.append(ppi_dict[prot_pair])
logger.info("Num ppis: %d" % (len(self.labels)))
def _train_test_split(self, strategy='bfs', test_ratio=0.2, random_new=False):
if random_new or not osp.exists(osp.join(self.path, "split_%s.json" % (strategy))):
self.test_indexes = subgraph_sample(len(self.proteins), self.pair_index, strategy, int(len(self.pair_index) * test_ratio), directed=False)
self.train_indexes = []
for i in range(len(self.pair_index)):
if i not in self.test_indexes:
self.train_indexes.append(i)
json.dump({
"train": self.train_indexes,
"test": self.test_indexes
}, open(osp.join(self.path, "split_%s.json" % (strategy)), "w"))
else:
split = json.load(open(osp.join(self.path, "split_%s.json" % (strategy)), "r"))
self.train_indexes = split["train"]
self.test_indexes = split["test"]
SUPPORTED_PPI_DATASETS = {
"SHS27k": STRINGDataset,
"SHS148k": STRINGDataset,
"STRING": STRINGDataset,
} | OpenBioMed-main | open_biomed/datasets/ppi_dataset.py |
import logging
logger = logging.getLogger(__name__)
from abc import ABC, abstractmethod
import os
import csv
import json
import torch
from torch.utils.data import Dataset
from feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER
from feature.text_featurizer import SUPPORTED_TEXT_FEATURIZER
from utils.mol_utils import valid_smiles
class MolQADataset(Dataset, ABC):
def __init__(self, path, config):
super(MolQADataset, self).__init__()
self.path = path
self.config = config
self._load_data()
self._featurize(self.split == "train")
@abstractmethod
def _load_data(self):
raise NotImplementedError
def _featurize(self, featurize_output=True):
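# Answers are only tokenized when featurize_output is True (i.e. for the training split); otherwise raw answer strings are kept for evaluation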
featurizer = SUPPORTED_MOL_FEATURIZER[self.config["mol"]["featurizer"]["structure"]["name"]](self.config["mol"]["featurizer"]["structure"])
self.mols = [featurizer(smi) for smi in self.smiles]
featurizer = SUPPORTED_TEXT_FEATURIZER[self.config["text"]["question"]["featurizer"]["name"]](self.config["text"]["question"]["featurizer"])
self.questions = [featurizer(text) for text in self.questions]
if featurize_output:
featurizer = SUPPORTED_TEXT_FEATURIZER[self.config["text"]["answer"]["featurizer"]["name"]](self.config["text"]["answer"]["featurizer"])
self.answers = [featurizer(text) for text in self.answers]
def __getitem__(self, index):
return self.mols[index], self.questions[index], self.answers[index]
def __len__(self):
return len(self.smiles)
class ChEMBLQA(MolQADataset):
def __init__(self, path, config, split):
self.split = split
super(ChEMBLQA, self).__init__(path, config)
def _load_data(self):
self.smiles = []
self.questions = []
self.answers = []
self.num_mols = 0
data = json.load(open(os.path.join(self.path, "ChEMBL_QA_" + self.split + ".json"), "r"))
for smi in data:
if valid_smiles(smi):
self.num_mols += 1
for question in data[smi]:
self.smiles.append(smi)
self.questions.append(question[0])
self.answers.append(str(question[1]))
else:
logger.debug("Failed to generate 2D Graph for %s" % (smi))
self.smi2text = dict(zip(self.smiles, self.questions))
logger.info("Split: %s, Num Molecules: %d, Num Questions: %d" % (self.split, self.num_mols, len(self)))
class BioMedQA(MolQADataset):
def __init__(self, path, config):
super(BioMedQA, self).__init__(path, config)
SUPPORTED_MOLQA_DATASET = {
"chembl-qa": ChEMBLQA
} | OpenBioMed-main | open_biomed/datasets/molqa_dataset.py |
OpenBioMed-main | open_biomed/datasets/__init__.py |
|
"""
Dataset for Molecule-Text Retrieval
"""
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
import os
import os.path as osp
import copy
import random
import rdkit.Chem as Chem
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
import numpy as np
import torch
from torch.utils.data import Dataset
from feature.mol_featurizer import MolMultiModalFeaturizer
from utils.split_utils import scaffold_split
class MTRDataset(Dataset, ABC):
def __init__(self, path, config):
super(MTRDataset, self).__init__()
self.path = path
self.config = config
self._load_data()
self._featurize()
@abstractmethod
def _load_data(self):
raise NotImplementedError
def _featurize(self):
# featurize mol with paired text
featurizer = MolMultiModalFeaturizer(self.config["mol"])
featurizer.set_mol2text_dict(self.mol2text)
self.mols = [featurizer(mol) for mol in self.mols]
def index_select(self, indexes):
new_dataset = copy.copy(self)
new_dataset.mols = [new_dataset.mols[i] for i in indexes]
return new_dataset
def __len__(self):
return len(self.mols)
def set_test(self):
self.test = True
if self.mode == "sentence":
rnd = 42
self.pseudorandom = []
for i in range(len(self)):
self.pseudorandom.append(rnd)
rnd = rnd * 16807 % ((1 << 31) - 1)
def __getitem__(self, index):
if self.mode == "sentence":
if not self.test:
ind = random.randint(0, len(self.mols[index]["text"]) - 1)
else:
ind = self.pseudorandom[index] % len(self.mols[index]["text"])
return {
"structure": self.mols[index]["structure"],
"text": self.mols[index]["text"][ind],
}
else:
return self.mols[index]
class PCdes(MTRDataset):
def __init__(self, path, config, mode='paragraph', filter=True, filter_path=""):
self.filter = filter
self.filter_path = filter_path
self.test = False
self.mode = mode
super(PCdes, self).__init__(path, config)
self._train_test_split()
def _load_data(self):
with open(osp.join(self.path, "align_smiles.txt"), "r") as f:
mols = f.readlines()
with open(osp.join(self.path, "align_des_filt3.txt"), "r") as f:
texts = f.readlines()[:len(mols)]
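# Optionally drop molecules whose canonical SMILES appear in the supplied filter file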
if self.filter:
with open(self.filter_path, "r") as f:
filter_mols = []
for line in f.readlines():
mol = line.rstrip("\n").split("\t")[1]
mol = Chem.MolFromSmiles(mol)
if mol is not None:
filter_mols.append(Chem.MolToSmiles(mol, isomericSmiles=True))
self.mols = []
self.texts = []
for i, mol in enumerate(mols):
try:
mol = Chem.MolFromSmiles(mol.strip("\n"))
smi_orig = Chem.MolToSmiles(mol, isomericSmiles=False)
smi = Chem.MolToSmiles(mol, isomericSmiles=True)
if mol is not None and smi not in filter_mols:
self.mols.append(smi_orig)
self.texts.append(texts[i].strip("\n"))
except Exception:
logger.debug("Failed to generate 2D graph; sample removed")
self.smiles = self.mols
self.mol2text = dict(zip(self.mols, self.texts))
logger.info("Num Samples: %d" % len(self))
def _train_test_split(self):
self.train_index, self.val_index, self.test_index = scaffold_split(self, 0.1, 0.2)
class PubChem15K(MTRDataset):
def __init__(self, path, config, mode, filter, filter_path):
self.mode = mode
self.test = False
super(PubChem15K, self).__init__(path, config)
self._train_test_split()
def _load_data(self):
random.seed(42)
self.mols, self.texts = [], []
with open(os.path.join(self.path, "pair.txt")) as f:
for line in f.readlines():
line = line.rstrip("\n").split("\t")
text_name, smi = line[0], line[1]
try:
mol = Chem.MolFromSmiles(smi)
if mol is not None:
self.mols.append(smi)
text_list = []
count = 0
for line in open(os.path.join(self.path, "text", "text_" + text_name + ".txt"), 'r', encoding='utf-8'):
count += 1
text_list.append(line)
if count > 500:
break
#text = random.sample(text_list, 1)[0]
text = text_list[0]
if len(text) > 256:
text = text[:256]
self.texts.append(text)
except:
continue
if len(self.mols) >= 480:
break
self.mol2text = dict(zip(self.mols, self.texts))
def _train_test_split(self):
self.train_index = np.arange(0, 480)
self.val_index = np.arange(0, 480)
self.test_index = np.arange(0, 480)
SUPPORTED_MTR_DATASETS = {
"PCdes": PCdes,
"PubChem15K": PubChem15K,
} | OpenBioMed-main | open_biomed/datasets/mtr_dataset.py |
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
import os
import copy
import scanpy
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
import torch
from torch.utils.data import Dataset
from feature.cell_featurizer import SUPPORTED_CELL_FEATURIZER
class CTCDataset(Dataset, ABC):
def __init__(self, path, config, seed):
super(CTCDataset, self).__init__()
self.config = config
self.path = path
self.seed = seed
self._load_data()
self._featurize()
@abstractmethod
def _load_data(self):
raise NotImplementedError
def _featurize(self):
feat_config = self.config["cell"]["featurizer"]["structure"]
featurizer = SUPPORTED_CELL_FEATURIZER[feat_config["name"]](feat_config)
self.cells = [featurizer(cell) for cell in self.cells]
def index_select(self, indexes):
new_dataset = copy.copy(self)
new_dataset.cells = [self.cells[i] for i in indexes]
new_dataset.labels = [self.labels[i] for i in indexes]
return new_dataset
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
return self.cells[index], self.labels[index]
class Zheng68k(CTCDataset):
def __init__(self, path, config, seed):
super(Zheng68k, self).__init__(path, config, seed)
self._train_test_split(seed)
def _load_data(self):
data = scanpy.read_h5ad(os.path.join(self.path, "Zheng68K.h5ad"))
self.cells = data.X
self.label_dict, self.labels = np.unique(np.array(data.obs['celltype']), return_inverse=True)
self.num_classes = self.label_dict.shape[0]
def _train_test_split(self, seed):
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=seed).split(self.cells, self.labels)
for train_index, val_index in split:
self.train_index = train_index
self.val_index = val_index
class Baron(CTCDataset):
def __init__(self, path, config, seed):
super(Baron, self).__init__(path, config, seed)
self._train_test_split(seed)
def _load_data(self):
data = scanpy.read_h5ad(os.path.join(self.path, "Baron.h5ad"))
self.cells = data.X
self.label_dict, self.labels = np.unique(np.array(data.obs['celltype']), return_inverse=True)
self.num_classes = self.label_dict.shape[0]
def _train_test_split(self, seed):
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=seed).split(self.cells, self.labels)
for train_index, val_index in split:
self.train_index = train_index
self.val_index = val_index
SUPPORTED_CTC_DATASETS = {
"zheng68k": Zheng68k,
"baron": Baron
} | OpenBioMed-main | open_biomed/datasets/ctc_dataset.py |
from abc import ABC, abstractmethod
import torch
from torch.utils.data import Dataset
from feature.text_featurizer import SUPPORTED_TEXT_FEATURIZER
class Text2MolGenDataset(Dataset, ABC):
def __init__(self, path, config):
super(Text2MolGenDataset, self).__init__()
self.path = path
self.config = config
self._load_data()
self._featurize()
@abstractmethod
def _load_data(self):
raise NotImplementedError
def _featurize(self):
feat_config = self.config["featurizer"]["text"]
featurizer = SUPPORTED_TEXT_FEATURIZER[feat_config["name"]](feat_config)
self.texts = [featurizer(text) for text in self.texts]
class PromptGenDataset(Text2MolGenDataset):
def __init__(self, path, config):
super().__init__(path, config)
def _load_data(self):
self.texts = [
            'The molecule is soluble in water.',
            'The molecule is not soluble in water.',
'The molecule has high permeability.',
'The molecule has low permeability.',
'The molecule is like Felodipine.',
'The molecule is like Lercanidipine.'
]
"""
self.texts = [
'The molecule is beautiful.',
'The molecule is versatile.',
'The molecule is strange.',
'fluorescent molecules',
'The molecule contains hydroxyl and carboxyl groups, which can be thermally decomposed to generate ammonia gas, and the oxygen content in the molecule is not less than 20%.',
'The molecule has high water solubility and barrier permeability with low toxicity.',
'molecules containing nucleophilic groups',
'molecules containing electrophilic groups',
'molecules containing hydrophilic groups',
'molecules containing lipophilic groups'
]
"""
self.smiles = None
class CheBI20(Text2MolGenDataset):
def __init__(self, path, config):
super().__init__(path, config)
def _load_data(self):
pass
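# Usage sketch (illustration only; the helper name is hypothetical). The registry
# below maps a dataset name from the experiment config to its class:
def _example_build_text2mol_dataset(path, config, name="prompt"):
    # config["featurizer"]["text"] must name an entry of SUPPORTED_TEXT_FEATURIZER
    dataset_cls = SUPPORTED_TEXT2MOLGEN_DATASET[name]
    return dataset_cls(path, config)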
SUPPORTED_TEXT2MOLGEN_DATASET = {
"prompt": PromptGenDataset,
"CheBI-20": CheBI20
} | OpenBioMed-main | open_biomed/datasets/text2mol_dataset.py |
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
import copy
import pickle
import json
import random
import numpy as np
import pandas as pd
import os.path as osp
import torch
from torch.utils.data import Dataset
from torch_geometric.data import Batch
from feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER, MolMultiModalFeaturizer
from feature.cell_featurizer import SUPPORTED_CELL_FEATURIZER
from utils.cell_utils import SUPPORTED_GENE_SELECTOR
class DRPDataset(Dataset, ABC):
def __init__(self, path, config, task="regression"):
super(DRPDataset, self).__init__()
self.path = path
self.config = config
self.task = task
self.gene_selector = SUPPORTED_GENE_SELECTOR[config["cell"]["gene_selector"]]()
self._load_data()
self._featurize()
@abstractmethod
def _load_data(self):
raise NotImplementedError
def _featurize(self):
# featurize drug
if len(self.config["mol"]["modality"]) > 1:
featurizer = MolMultiModalFeaturizer(self.config["mol"])
else:
featurizer = SUPPORTED_MOL_FEATURIZER[self.config["mol"]["featurizer"]["structure"]["name"]](self.config["mol"]["featurizer"]["structure"])
for key in self.drug_dict:
smi = self.drug_dict[key]
self.drug_dict[key] = featurizer(smi)
# featurize cell
featurizer = SUPPORTED_CELL_FEATURIZER[self.config["cell"]["featurizer"]["name"]](self.config["cell"]["featurizer"])
self.cell_dict = featurizer(self.cell_dict)
if self.config["cell"]["featurizer"]["name"] == "TGSA":
self.predefined_cluster = featurizer.predefined_cluster
# convert labels to tensor
if self.task == "regression":
self.IC = torch.FloatTensor(self.IC)
if self.task == "classification":
self.response = torch.FloatTensor(self.response)
def _train_test_split(self):
N = len(self)
if self.config["split"]["type"] == "random":
train_ratio, val_ratio = self.config["split"]["train"], self.config["split"]["val"]
indexes = np.random.permutation(N)
self.train_indexes = indexes[:int(N * train_ratio)]
self.val_indexes = indexes[int(N * train_ratio): int(N * (train_ratio + val_ratio))]
self.test_indexes = indexes[int(N * (train_ratio + val_ratio)):]
elif self.config["split"]["type"] == "kfold":
nfolds = self.config["split"]["nfolds"]
indexes = np.random.permutation(N)
self.fold_indexes = [indexes[int(N * k / nfolds): int(N * (k + 1) / nfolds)] for k in range(nfolds)]
elif self.config["split"]["type"] == "cell":
train_ratio, val_ratio = self.config["split"]["train"], self.config["split"]["val"]
cells = list(set(self.cell_dict.keys()))
random.shuffle(cells)
cell_num = len(cells)
train_cells = cells[:int(cell_num * train_ratio)]
val_cells = cells[int(cell_num * train_ratio): int(cell_num * (train_ratio + val_ratio))]
test_cells = cells[int(cell_num * (train_ratio + val_ratio)):]
self.train_indexes, self.val_indexes, self.test_indexes = [], [], []
for i in range(N):
if self.cell_index[i] in val_cells:
self.val_indexes.append(i)
elif self.cell_index[i] in test_cells:
self.test_indexes.append(i)
else:
self.train_indexes.append(i)
self.train_indexes, self.val_indexes, self.test_indexes = np.array(self.train_indexes), np.array(self.val_indexes), np.array(self.test_indexes)
def index_select(self, indexes):
new_dataset = copy.copy(self)
new_dataset.drug_index = new_dataset.drug_index[indexes]
new_dataset.cell_index = new_dataset.cell_index[indexes]
if self.task == "regression":
new_dataset.IC = new_dataset.IC[indexes]
if self.task == "classification":
new_dataset.response = new_dataset.response[indexes]
return new_dataset
def __getitem__(self, index):
return self.drug_dict[self.drug_index[index]], self.cell_dict[self.cell_index[index]], self.IC[index] if self.task == "regression" else self.response[index]
def __len__(self):
return len(self.IC) if self.task == "regression" else len(self.response)
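# Illustrative config shapes (placeholder values, not taken from any released
# config). `_train_test_split` above branches on config["split"]["type"],
# expecting roughly these layouts:
_EXAMPLE_DRP_SPLIT_CONFIGS = {
    "random": {"type": "random", "train": 0.8, "val": 0.1},  # remaining 0.1 is the test fraction
    "kfold": {"type": "kfold", "nfolds": 5},                 # fills self.fold_indexes instead
    "cell": {"type": "cell", "train": 0.8, "val": 0.1},      # splits by cell line rather than by pair
}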
class GDSC(DRPDataset):
def __init__(self, path, config, task="regression"):
super(GDSC, self).__init__(path, config, task)
self._train_test_split()
def _load_data(self):
# load drug information
data_drug = np.loadtxt(osp.join(self.path, "GDSC_MolAnnotation.csv"), dtype=str, delimiter=',', comments='?', skiprows=1)
self.drug_dict = dict([
(data_drug[i][0], data_drug[i][2]) for i in range(data_drug.shape[0])
])
# load cell-line information
save_path = osp.join(self.path, "celldict_%s_%s.pkl" % ("-".join(self.config["cell"]["gene_feature"]), "-".join([str(v) for v in self.config["cell"]["featurizer"].values()])))
if osp.exists(save_path):
self.cell_dict = pickle.load(open(save_path, "rb"))
else:
self.cell_dict = {}
for feat in self.config["cell"]["gene_feature"]:
data_cell = np.loadtxt(osp.join(self.path, "GDSC_%s.csv" % (feat)), dtype=str, delimiter=',')
gene_names = data_cell[0][1:]
# select genes strongly related to tumor expression
selected_idx = [0] + self.gene_selector(gene_names)
data_cell = data_cell[1:, selected_idx]
for cell in data_cell:
if cell[0] not in self.cell_dict:
                        self.cell_dict[cell[0]] = cell[1:].reshape(-1, 1).astype(float)
                    else:
                        self.cell_dict[cell[0]] = np.concatenate((self.cell_dict[cell[0]], cell[1:].reshape(-1, 1).astype(float)), axis=1)
pickle.dump(self.cell_dict, open(save_path, "wb"))
# load drug-cell response information
data_IC50 = pd.read_csv(osp.join(self.path, "GDSC_DR.csv"))
self.drug_index = data_IC50["MOL_NAME"].to_numpy()
self.cell_index = data_IC50["cell_tissue"].to_numpy()
        self.IC = data_IC50["LN_IC50"].astype(float)
resp2val = {'R': 1, 'S': 0}
self.response = np.array([resp2val[x] for x in data_IC50["BinaryResponse"]])
class TCGA(DRPDataset):
def __init__(self, path, config, subset="BRCA_28"):
self.subset = subset
super(TCGA, self).__init__(path, config, task="classification")
def _load_data(self):
# load cell-line data
feat2file = {"EXP": "xena_gex", "MUT": "xena_mutation"}
save_path = osp.join(self.path, "celldict_%s_%s.pkl" % ("-".join(self.config["cell"]["gene_feature"]), "-".join([str(v) for v in self.config["cell"]["featurizer"].values()])))
if osp.exists(save_path):
self.cell_dict = pickle.load(open(save_path, "rb"))
else:
self.cell_dict = {}
for feat in self.config["cell"]["gene_feature"]:
data_cell = np.loadtxt(osp.join(self.path, feat2file[feat] + ".csv"), dtype=str, delimiter=',')
gene_names = data_cell[0][1:]
selected_idx = [0] + self.gene_selector(gene_names)
data_cell = data_cell[1:, selected_idx]
cur_cell_feat = {}
for cell in data_cell:
cell_name = cell[0][:12]
if cell_name not in self.cell_dict:
                        cur_cell_feat[cell_name] = cell[1:].reshape(-1, 1).astype(float)
                    else:
                        cur_cell_feat[cell_name] = np.concatenate((cur_cell_feat[cell_name], cell[1:].reshape(-1, 1).astype(float)), axis=1)
for key in cur_cell_feat:
value = np.mean(cur_cell_feat[key], axis=1).reshape(-1, 1)
if key not in self.cell_dict:
self.cell_dict[key] = value
else:
self.cell_dict[key] = np.concatenate((self.cell_dict[key], value), axis=1)
pickle.dump(self.cell_dict, open(save_path, "wb"))
# load drug and its response data
df = pd.read_csv(osp.join(self.path, "tcga_clinical_data", self.subset + ".csv"))
drugs = df["smiles"].unique()
self.drug_dict = dict(zip(drugs, drugs))
self.drug_index = df["smiles"].to_numpy()
self.cell_index = df["bcr_patient_barcode"].to_numpy()
        self.response = df["label"].to_numpy().astype(float)
class GDSC2(DRPDataset):
def __init__(self, path, config, task="regression"):
super(GDSC2, self).__init__(path, config, task)
self._train_test_split()
def _load_data(self):
pertid_ach_smiles_ic50s = json.load(open(osp.join(self.path, "gdsc.json")))
self.drug_index = [i[2] for i in pertid_ach_smiles_ic50s]
self.drug_dict = {}
for smiles in set(self.drug_index):
self.drug_dict[smiles] = smiles
self.drug_index = np.array(self.drug_index)
self.cell_index = [i[1] for i in pertid_ach_smiles_ic50s]
self.cell_index = np.array(self.cell_index)
self.IC = [float(i[-1]) for i in pertid_ach_smiles_ic50s]
if 'lnIC' in self.config and self.config['lnIC']:
self.IC = [np.log(ic) for ic in self.IC]
self.IC = np.array(self.IC)
self.response = np.zeros_like(self.IC)
if 'ach2vec' in self.config['cell']:
self.cell_dict = json.load(open(osp.join(self.path, self.config['cell']['ach2vec'])))
else:
self.cell_dict = json.load(open(osp.join(self.path, "ach2gene.json")))
for k in self.cell_dict:
self.cell_dict[k] = np.array(self.cell_dict[k])
SUPPORTED_DRP_DATASET = {
"GDSC": GDSC,
"TCGA": TCGA,
"GDSC2": GDSC2
} | OpenBioMed-main | open_biomed/datasets/drp_dataset.py |
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
import copy
from enum import Enum
import numpy as np
import pandas as pd
import os
from rdkit import Chem
import os
import sys
import torch
from torch.utils.data import Dataset
from rdkit.Chem import AllChem, Descriptors
from feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER, MolMultiModalFeaturizer
from utils.kg_utils import SUPPORTED_KG, embed
from utils.split_utils import random_split, scaffold_split
from utils import Normalizer
sys.path.insert(0, os.path.dirname(__file__))
def _load_bbbp_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
preprocessed_rdkit_mol_objs_list = [m if m is not None else None
for m in rdkit_mol_objs_list]
preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None else None
for m in preprocessed_rdkit_mol_objs_list]
labels = input_df['p_np']
# convert 0 to -1
labels = labels.replace(0, -1)
# there are no nans
assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)
assert len(smiles_list) == len(preprocessed_smiles_list)
assert len(smiles_list) == len(labels)
return preprocessed_smiles_list, \
preprocessed_rdkit_mol_objs_list, labels.values
def _load_clintox_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
preprocessed_rdkit_mol_objs_list = [m if m is not None else None
for m in rdkit_mol_objs_list]
preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None else None
for m in preprocessed_rdkit_mol_objs_list]
tasks = ['FDA_APPROVED', 'CT_TOX']
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
# there are no nans
assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)
assert len(smiles_list) == len(preprocessed_smiles_list)
assert len(smiles_list) == len(labels)
return preprocessed_smiles_list, \
preprocessed_rdkit_mol_objs_list, labels.values
# input_path = 'dataset/clintox/raw/clintox.csv'
# smiles_list, rdkit_mol_objs_list, labels = _load_clintox_dataset(input_path)
def _load_esol_dataset(input_path):
# NB: some examples have multiple species
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['measured log solubility in mols per litre']
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
# input_path = 'dataset/esol/raw/delaney-processed.csv'
# smiles_list, rdkit_mol_objs_list, labels = _load_esol_dataset(input_path)
def _load_freesolv_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['expt']
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_lipophilicity_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['exp']
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_malaria_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['activity']
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_cep_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['PCE']
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_muv_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
tasks = ['MUV-466', 'MUV-548', 'MUV-600', 'MUV-644', 'MUV-652', 'MUV-689',
'MUV-692', 'MUV-712', 'MUV-713', 'MUV-733', 'MUV-737', 'MUV-810',
'MUV-832', 'MUV-846', 'MUV-852', 'MUV-858', 'MUV-859']
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
# convert nan to 0
labels = labels.fillna(0)
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def check_columns(df, tasks, N):
bad_tasks = []
total_missing_count = 0
for task in tasks:
value_list = df[task]
pos_count = sum(value_list == 1)
neg_count = sum(value_list == -1)
missing_count = sum(value_list == 0)
total_missing_count += missing_count
pos_ratio = 100. * pos_count / (pos_count + neg_count)
missing_ratio = 100. * missing_count / N
assert pos_count + neg_count + missing_count == N
if missing_ratio >= 50:
bad_tasks.append(task)
print('task {}\t\tpos_ratio: {:.5f}\tmissing ratio: {:.5f}'.format(task, pos_ratio, missing_ratio))
print('total missing ratio: {:.5f}'.format(100. * total_missing_count / len(tasks) / N))
return bad_tasks
def check_rows(labels, N):
from collections import defaultdict
p, n, m = defaultdict(int), defaultdict(int), defaultdict(int)
bad_count = 0
for i in range(N):
value_list = labels[i]
pos_count = sum(value_list == 1)
neg_count = sum(value_list == -1)
missing_count = sum(value_list == 0)
p[pos_count] += 1
n[neg_count] += 1
m[missing_count] += 1
if pos_count + neg_count == 0:
bad_count += 1
print('bad_count\t', bad_count)
print('pos\t', p)
print('neg\t', n)
print('missing\t', m)
return
def _load_pcba_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
tasks = list(input_df.columns)[:-2]
N = input_df.shape[0]
temp_df = input_df[tasks]
temp_df = temp_df.replace(0, -1)
temp_df = temp_df.fillna(0)
bad_tasks = check_columns(temp_df, tasks, N)
for task in bad_tasks:
tasks.remove(task)
print('good tasks\t', len(tasks))
labels = input_df[tasks]
labels = labels.replace(0, -1)
labels = labels.fillna(0)
labels = labels.values
print(labels.shape) # 439863, 92
check_rows(labels, N)
input_df.dropna(subset=tasks, how='all', inplace=True)
# convert 0 to -1
input_df = input_df.replace(0, -1)
# convert nan to 0
input_df = input_df.fillna(0)
labels = input_df[tasks].values
print(input_df.shape) # 435685, 92
N = input_df.shape[0]
check_rows(labels, N)
smiles_list = input_df['smiles'].tolist()
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_sider_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
tasks = ['Hepatobiliary disorders',
'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders',
'Investigations', 'Musculoskeletal and connective tissue disorders',
'Gastrointestinal disorders', 'Social circumstances',
'Immune system disorders', 'Reproductive system and breast disorders',
'Neoplasms benign, malignant and unspecified (incl cysts and polyps)',
'General disorders and administration site conditions',
'Endocrine disorders', 'Surgical and medical procedures',
'Vascular disorders', 'Blood and lymphatic system disorders',
'Skin and subcutaneous tissue disorders',
'Congenital, familial and genetic disorders',
'Infections and infestations',
'Respiratory, thoracic and mediastinal disorders',
'Psychiatric disorders', 'Renal and urinary disorders',
'Pregnancy, puerperium and perinatal conditions',
'Ear and labyrinth disorders', 'Cardiac disorders',
'Nervous system disorders',
'Injury, poisoning and procedural complications']
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_toxcast_dataset(input_path):
# NB: some examples have multiple species, some example smiles are invalid
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
    # Some SMILES could not be successfully converted
    # to rdkit mol objects, so we set them to None
preprocessed_rdkit_mol_objs_list = [m if m is not None else None
for m in rdkit_mol_objs_list]
preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None else None
for m in preprocessed_rdkit_mol_objs_list]
tasks = list(input_df.columns)[1:]
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
# convert nan to 0
labels = labels.fillna(0)
assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)
assert len(smiles_list) == len(preprocessed_smiles_list)
assert len(smiles_list) == len(labels)
return preprocessed_smiles_list, \
preprocessed_rdkit_mol_objs_list, labels.values
# root_path = 'dataset/chembl_with_labels'
def check_smiles_validity(smiles):
try:
m = Chem.MolFromSmiles(smiles)
if m:
return True
else:
return False
except:
return False
def split_rdkit_mol_obj(mol):
"""
Split rdkit mol object containing multiple species or one species into a
list of mol objects or a list containing a single object respectively """
smiles = AllChem.MolToSmiles(mol, isomericSmiles=True)
smiles_list = smiles.split('.')
mol_species_list = []
for s in smiles_list:
if check_smiles_validity(s):
mol_species_list.append(AllChem.MolFromSmiles(s))
return mol_species_list
def get_largest_mol(mol_list):
"""
Given a list of rdkit mol objects, returns mol object containing the
largest num of atoms. If multiple containing largest num of atoms,
picks the first one """
num_atoms_list = [len(m.GetAtoms()) for m in mol_list]
largest_mol_idx = num_atoms_list.index(max(num_atoms_list))
return mol_list[largest_mol_idx]
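# Usage sketch (illustration only; the SMILES and helper name are hypothetical).
# Some raw SMILES encode several '.'-separated species, e.g. a salt; the two
# helpers above recover the largest component:
def _example_keep_largest_component(smiles="CC(=O)O.[Na+]"):
    mol = AllChem.MolFromSmiles(smiles)
    if mol is None:
        return None
    species = split_rdkit_mol_obj(mol)  # one mol object per '.'-separated species
    largest = get_largest_mol(species)  # component with the most atoms
    return AllChem.MolToSmiles(largest, isomericSmiles=True)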
def _load_tox21_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
tasks = ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53']
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
# convert nan to 0
labels = labels.fillna(0)
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_hiv_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['HIV_active']
# convert 0 to -1
labels = labels.replace(0, -1)
# there are no nans
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels.values
def _load_bace_dataset(input_path):
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['mol']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['Class']
# convert 0 to -1
labels = labels.replace(0, -1)
# there are no nans
folds = input_df['Model']
folds = folds.replace('Train', 0) # 0 -> train
folds = folds.replace('Valid', 1) # 1 -> valid
folds = folds.replace('Test', 2) # 2 -> test
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
assert len(smiles_list) == len(folds)
# return smiles_list, rdkit_mol_objs_list, folds.values, labels.values
return smiles_list, rdkit_mol_objs_list, labels.values
datasetname2function = {
"bbbp": _load_bbbp_dataset,
"clintox": _load_clintox_dataset,
"tox21": _load_tox21_dataset,
"toxcast": _load_toxcast_dataset,
"sider": _load_sider_dataset,
"hiv": _load_hiv_dataset,
"bace": _load_bace_dataset,
"muv": _load_muv_dataset,
"freesolv": _load_freesolv_dataset,
"esol": _load_esol_dataset,
"lipophilicity": _load_lipophilicity_dataset,
}
class Task(Enum):
CLASSFICATION = 0
REGRESSION = 1
class DPDataset(Dataset, ABC):
def __init__(self, path, config, in_memory=True):
super(DPDataset, self).__init__()
self.path = path
self.config = config
self.in_memory = in_memory
self._load_data()
# self._featurize()
@abstractmethod
def _load_data(self, path):
raise NotImplementedError
def _featurize(self):
logger.info("Featurizing...")
        # self.featurized_drugs: a dict per drug in the multimodal case, a list of Data() objects in the structure-only case
self.featurized_drugs = [self.drug_featurizer(drug) for drug in self.drugs]
self.labels = [torch.tensor(label) for label in self.labels]
def _build(self, save_path=""):
if len(self.config["mol"]["modality"]) > 1 and save_path:
kg_config = self.config["mol"]["featurizer"]["kg"]
self.kg = SUPPORTED_KG[kg_config["kg_name"]](kg_config["kg_path"])
self.drug2kg, self.drug2text, _, _ = self.kg.link(self)
            # TODO: DP uses TransE; is filter_out unnecessary here?
filter_out = []
"""
for i_drug in data_index:
smi = self.smiles[i_drug]
#if smi in self.drug2kg:
# filter_out.append((self.drug2kg[smi], self.protein2kg[protein]))
"""
# embed once for consistency
try:
kge = embed(self.kg, 'ProNE', filter_out=filter_out, dim=kg_config["embed_dim"], save=True, save_path=save_path)
except Exception as e:
kge = None
self.config["mol"]["featurizer"]["kg"]["kge"] = kge
self._configure_featurizer()
        # featurize all data pairs in one pass for training efficiency
if self.in_memory:
self._featurize()
def _configure_featurizer(self):
if len(self.config["mol"]["modality"]) > 1:
self.drug_featurizer = MolMultiModalFeaturizer(self.config["mol"])
self.drug_featurizer.set_drug2kgid_dict(self.drug2kg)
self.drug_featurizer.set_drug2text_dict(self.drug2text)
else:
drug_feat_config = self.config["mol"]["featurizer"]["structure"]
self.drug_featurizer = SUPPORTED_MOL_FEATURIZER[drug_feat_config["name"]](drug_feat_config)
def index_select(self, indexes):
new_dataset = copy.copy(self)
new_dataset.drugs = [new_dataset.drugs[i] for i in indexes]
new_dataset.labels = [new_dataset.labels[i] for i in indexes]
return new_dataset
def __getitem__(self, index):
if not self.in_memory:
drug, label = self.drugs[index], self.labels[index]
return self.drug_featurizer(drug), label
else:
return self.featurized_drugs[index], self.labels[index]
def __len__(self):
return len(self.drugs)
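# Illustrative config shape (placeholder values, not taken from any released
# config). DPDataset switches between a single-modality structure featurizer
# and MolMultiModalFeaturizer based on config["mol"]["modality"]:
_EXAMPLE_DP_MOL_CONFIG = {
    "mol": {
        "modality": ["structure"],
        "featurizer": {
            "structure": {"name": "ogb"}  # assumed to be a key of SUPPORTED_MOL_FEATURIZER
        }
    }
}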
class MoleculeNetDataset(DPDataset):
name2target = {
"BBBP": ["p_np"],
"Tox21": ["NR-AR", "NR-AR-LBD", "NR-AhR", "NR-Aromatase", "NR-ER", "NR-ER-LBD",
"NR-PPAR-gamma", "SR-ARE", "SR-ATAD5", "SR-HSE", "SR-MMP", "SR-p53"],
"ClinTox": ["CT_TOX", "FDA_APPROVED"],
"HIV": ["HIV_active"],
"Bace": ["class"],
"SIDER": ["Hepatobiliary disorders", "Metabolism and nutrition disorders", "Product issues",
"Eye disorders", "Investigations", "Musculoskeletal and connective tissue disorders",
"Gastrointestinal disorders", "Social circumstances", "Immune system disorders",
"Reproductive system and breast disorders",
"Neoplasms benign, malignant and unspecified (incl cysts and polyps)",
"General disorders and administration site conditions", "Endocrine disorders",
"Surgical and medical procedures", "Vascular disorders",
"Blood and lymphatic system disorders", "Skin and subcutaneous tissue disorders",
"Congenital, familial and genetic disorders", "Infections and infestations",
"Respiratory, thoracic and mediastinal disorders", "Psychiatric disorders",
"Renal and urinary disorders", "Pregnancy, puerperium and perinatal conditions",
"Ear and labyrinth disorders", "Cardiac disorders",
"Nervous system disorders", "Injury, poisoning and procedural complications"],
"MUV": ['MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548', 'MUV-852',
'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858', 'MUV-713', 'MUV-733',
'MUV-652', 'MUV-466', 'MUV-832'],
"Toxcast": [""], # 617
"FreeSolv": ["expt"],
"ESOL": ["measured log solubility in mols per litre"],
"Lipo": ["exp"],
"qm7": ["u0_atom"],
"qm8": ["E1-CC2", "E2-CC2", "f1-CC2", "f2-CC2", "E1-PBE0", "E2-PBE0",
"f1-PBE0", "f2-PBE0", "E1-CAM", "E2-CAM", "f1-CAM","f2-CAM"],
"qm9": ['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'cv']
}
name2task = {
"BBBP": Task.CLASSFICATION,
"Tox21": Task.CLASSFICATION,
"ClinTox": Task.CLASSFICATION,
"HIV": Task.CLASSFICATION,
"Bace": Task.CLASSFICATION,
"SIDER": Task.CLASSFICATION,
"MUV": Task.CLASSFICATION,
"Toxcast": Task.CLASSFICATION,
"FreeSolv": Task.REGRESSION,
"ESOL": Task.REGRESSION,
"Lipo": Task.REGRESSION,
"qm7": Task.REGRESSION,
"qm8": Task.REGRESSION,
"qm9": Task.REGRESSION
}
def __init__(self, path, config, name="BBBP", label_type=1):
if name not in self.name2target:
raise ValueError("%s is not a valid moleculenet task!" % name)
file_name = os.listdir(os.path.join(path, name.lower(), "raw"))[0]
assert file_name[-4:] == ".csv"
path = os.path.join(path, name.lower(), "raw", file_name)
self.name = name
self.targets = self.name2target[name]
# TODO: del: no use
self.task = self.name2task[name]
# TODO: del label_type
self.label_type = label_type
super(MoleculeNetDataset, self).__init__(path, config)
self._train_test_split()
self._normalize()
def _load_data(self):
smiles_list, rdkit_mol_objs, labels = datasetname2function[self.name.lower()](self.path)
if labels.ndim == 1:
labels = np.expand_dims(labels, axis=1)
self.smiles, self.drugs, self.labels = [], [], []
for i in range(len(smiles_list)):
rdkit_mol = rdkit_mol_objs[i]
if rdkit_mol is None:
continue
# TODO: drugs and smiles are all get from AllChem.MolFromSmiles()
self.smiles.append(smiles_list[i])
self.drugs.append(smiles_list[i])
# self.drugs.append(rdkit_mol[i])
self.labels.append(labels[i])
def _train_test_split(self, strategy="scaffold"):
if strategy == "random":
self.train_index, self.val_index, self.test_index = random_split(len(self), 0.1, 0.1)
elif strategy == "scaffold":
self.train_index, self.val_index, self.test_index = scaffold_split(self, 0.1, 0.1, is_standard=True)
def _normalize(self):
if self.name in ["qm7", "qm9"]:
self.normalizer = []
for i in range(len(self.targets)):
self.normalizer.append(Normalizer(self.labels[:, i]))
self.labels[:, i] = self.normalizer[i].norm(self.labels[:, i])
else:
# TODO:
self.normalizer = [None] * len(self.targets)
SUPPORTED_DP_DATASETS = {
"MoleculeNet": MoleculeNetDataset
} | OpenBioMed-main | open_biomed/datasets/dp_dataset.py |
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
import copy
import numpy as np
import pandas as pd
import pickle
import os
import json
from tqdm import tqdm
from collections import OrderedDict
import torch
from torch.utils.data import Dataset
from feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER, MolMultiModalFeaturizer
from feature.protein_featurizer import SUPPORTED_PROTEIN_FEATURIZER, ProteinMultiModalFeaturizer
from utils.mol_utils import can_smiles
from utils.kg_utils import SUPPORTED_KG, embed
from utils.split_utils import kfold_split, cold_drug_split, cold_protein_split, cold_cluster_split
class DTIDataset(Dataset, ABC):
def __init__(self, path, config, split_strategy, in_memory=True):
super(DTIDataset, self).__init__()
self.path = path
self.config = config
self.split_strategy = split_strategy
self.in_memory = in_memory
self._load_data()
self._train_test_split()
@abstractmethod
def _load_data(self):
raise NotImplementedError
@abstractmethod
def _train_test_split(self):
raise NotImplementedError
def _build(self, eval_pair_index, save_path):
# build after train / test datasets are determined for filtering out edges
if len(self.config["mol"]["modality"]) > 1:
kg_config = self.config["mol"]["featurizer"]["kg"]
self.kg = SUPPORTED_KG[kg_config["kg_name"]](kg_config["kg_path"])
self.drug2kg, self.drug2text, self.protein2kg, self.protein2text = self.kg.link(self)
self.concat_text_first = self.config["concat_text_first"]
filter_out = []
for i_drug, i_protein in eval_pair_index:
smi = self.smiles[i_drug]
protein = self.proteins[i_protein]
if smi in self.drug2kg and protein in self.protein2kg:
filter_out.append((self.drug2kg[smi], self.protein2kg[protein]))
# embed once for consistency
kge = embed(self.kg, 'ProNE', filter_out=filter_out, dim=kg_config["embed_dim"], save=True, save_path=save_path)
self.config["mol"]["featurizer"]["kg"]["kge"] = kge
self.config["protein"]["featurizer"]["kg"]["kge"] = kge
else:
self.concat_text_first = False
self._configure_featurizer()
        # featurize all data pairs in one pass for training efficiency
if self.in_memory:
self._featurize()
def index_select(self, indexes):
new_dataset = copy.copy(self)
new_dataset.pair_index = [self.pair_index[i] for i in indexes]
new_dataset.labels = [self.labels[i] for i in indexes]
return new_dataset
def _configure_featurizer(self):
if len(self.config["mol"]["modality"]) > 1:
self.drug_featurizer = MolMultiModalFeaturizer(self.config["mol"])
self.protein_featurizer = ProteinMultiModalFeaturizer(self.config["protein"])
self.drug_featurizer.set_drug2kgid_dict(self.drug2kg)
self.protein_featurizer.set_protein2kgid_dict(self.protein2kg)
if not self.concat_text_first:
self.drug_featurizer.set_drug2text_dict(self.drug2text)
self.protein_featurizer.set_protein2text_dict(self.protein2text)
else:
drug_feat_config = self.config["mol"]["featurizer"]["structure"]
self.drug_featurizer = SUPPORTED_MOL_FEATURIZER[drug_feat_config["name"]](drug_feat_config)
protein_feat_config = self.config["protein"]["featurizer"]["structure"]
self.protein_featurizer = SUPPORTED_PROTEIN_FEATURIZER[protein_feat_config["name"]](protein_feat_config)
def _featurize(self):
logger.info("Featurizing...")
self.featurized_drugs = []
self.featurized_proteins = []
for i_drug, i_protein in tqdm(self.pair_index):
drug, protein = self.smiles[i_drug], self.proteins[i_protein]
if len(self.config["mol"]["modality"]) > 1 and self.concat_text_first:
processed_drug = self.drug_featurizer(drug, skip=["text"])
processed_protein = self.protein_featurizer(protein)
processed_drug["text"] = self.drug_featurizer["text"](self.drug2text[drug] + " [SEP] " + self.protein2text[protein])
else:
processed_drug = self.drug_featurizer(drug)
processed_protein = self.protein_featurizer(protein)
self.featurized_drugs.append(processed_drug)
self.featurized_proteins.append(processed_protein)
def __getitem__(self, index):
if not self.in_memory:
drug, protein, label = self.smiles[self.pair_index[index][0]], self.proteins[self.pair_index[index][1]], self.labels[index]
processed_drug = self.drug_featurizer(drug)
processed_protein = self.protein_featurizer(protein)
if self.concat_text_first:
processed_drug["text"] = self.drug_featurizer["text"](self.drug2text[drug] + " [SEP] " + self.protein2text[protein])
return processed_drug, processed_protein, label
else:
return self.featurized_drugs[index], self.featurized_proteins[index], self.labels[index]
def __len__(self):
return len(self.pair_index)
class DTIClassificationDataset(DTIDataset):
def __init__(self, path, config, split_strategy):
super(DTIClassificationDataset, self).__init__(path, config, split_strategy)
def _train_test_split(self):
if self.split_strategy in ["warm", "cold_drug", "cold_protein"]:
self.nfolds = 5
if self.split_strategy == "warm":
folds = kfold_split(len(self), 5)
elif self.split_strategy == "cold_drug":
folds = cold_drug_split(self, 5)
else:
folds = cold_protein_split(self, 5)
self.folds = []
for i in range(5):
self.folds.append({
"train": np.concatenate(folds[:i] + folds[i + 1:], axis=0).tolist(),
"test": folds[i]
})
elif self.split_strategy == "cold_cluster":
self.nfolds = 9
self.folds = cold_cluster_split(self, 3)
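# Usage sketch (illustration only; the helper and eval_fn are hypothetical, and a
# real task script would also call dataset._build before training). Each fold
# holds "train" and "test" index lists:
def _example_iterate_dti_folds(dataset, eval_fn):
    for fold in dataset.folds:
        train_set = dataset.index_select(fold["train"])
        test_set = dataset.index_select(fold["test"])
        eval_fn(train_set, test_set)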
class Yamanishi08(DTIClassificationDataset):
def __init__(self, path, config, split_strategy):
super(Yamanishi08, self).__init__(path, config, split_strategy)
def _load_data(self):
data = json.load(open(os.path.join(self.path, "drug.json")))
self.smiles = [data[item]["SMILES"] for item in data]
drugsmi2index = dict(zip(self.smiles, range(len(self.smiles))))
data = json.load(open(os.path.join(self.path, "protein.json")))
self.proteins = [data[item]["sequence"] for item in data]
proteinseq2index = dict(zip(self.proteins, range(len(self.proteins))))
df = pd.read_csv(os.path.join(self.path, "data.csv"))
self.pair_index, self.labels = [], []
for row in df.iterrows():
row = row[1]
self.pair_index.append((drugsmi2index[row["compound_iso_smiles"]], proteinseq2index[row["target_sequence"]]))
self.labels.append(int(row["affinity"]))
logger.info("Yamanishi08's dataset, total %d samples" % (len(self)))
class BMKG_DTI(DTIClassificationDataset):
def __init__(self, path, config, split_strategy):
super(BMKG_DTI, self).__init__(path, config, split_strategy)
def _load_data(self):
data = json.load(open(os.path.join(self.path, "drug.json")))
self.smiles = [data[item]["SMILES"] for item in data]
drugid2index = dict(zip(data.keys(), range(len(self.smiles))))
data = json.load(open(os.path.join(self.path, "protein.json")))
self.proteins = [data[item]["sequence"] for item in data]
proteinid2index = dict(zip(data.keys(), range(len(self.proteins))))
df = pd.read_csv(os.path.join(self.path, "data.csv"))
self.pair_index, self.labels = [], []
for row in df.iterrows():
row = row[1]
self.pair_index.append((drugid2index[row["drug_id"]], proteinid2index[str(row["protein_id"])]))
self.labels.append(int(row["affinity"]))
class DTIRegressionDataset(DTIDataset):
def __init__(self, path, config, split_strategy):
super(DTIRegressionDataset, self).__init__(path, config, split_strategy)
class Davis_KIBA(DTIRegressionDataset):
def __init__(self, path, config, split_strategy):
self.is_davis = "davis" in path
super(Davis_KIBA, self).__init__(path, config, split_strategy)
def _load_data(self):
Y = pickle.load(open(os.path.join(self.path, "Y"), "rb"), encoding='latin1')
label_row_inds, label_col_inds = np.where(np.isnan(Y) == False)
can_smis_dict = json.load(open(os.path.join(self.path, "ligands_can.txt")), object_pairs_hook=OrderedDict)
can_smis = list(can_smis_dict.values())
self.smiles = [can_smiles(smi) for smi in can_smis]
proteins_dic = json.load(open(os.path.join(self.path, "proteins.txt")), object_pairs_hook=OrderedDict)
self.proteins = list(proteins_dic.values())
# data:
self.pair_index = []
self.labels = []
train_folds = json.load(open(os.path.join(self.path, "folds/train_fold_setting1.txt")))
for fold in train_folds:
for i in fold:
self.pair_index.append((label_row_inds[i], label_col_inds[i]))
self.labels.append(Y[label_row_inds[i], label_col_inds[i]])
self.train_index = list(range(len(self.labels)))
test_fold = json.load(open(os.path.join(self.path, "folds/test_fold_setting1.txt")))
for i in test_fold:
self.pair_index.append((label_row_inds[i], label_col_inds[i]))
self.labels.append(Y[label_row_inds[i], label_col_inds[i]])
self.test_index = list(range(len(self.train_index), len(self.labels)))
if self.is_davis:
self.labels = [-float(np.log10(y / 1e9)) for y in self.labels]
logger.info("%s dataset, %d samples" % ("davis" if self.is_davis else "kiba", len(self)))
def _train_test_split(self):
self.val_index = []
SUPPORTED_DTI_DATASETS = {
"yamanishi08": Yamanishi08,
"bmkg-dti": BMKG_DTI,
"davis": Davis_KIBA,
"kiba": Davis_KIBA
} | OpenBioMed-main | open_biomed/datasets/dti_dataset.py |
import numpy as np
import sklearn.metrics as metrics
def roc_auc(y_true, y_pred):
fpr, tpr, _ = metrics.roc_curve(y_true, y_pred)
roc_auc = metrics.auc(fpr, tpr)
return roc_auc
def pr_auc(y_true, y_pred):
precision, recall, _ = metrics.precision_recall_curve(y_true, y_pred)
pr_auc = metrics.auc(recall, precision)
return pr_auc
def multilabel_f1(y_true, y_pred):
TP, FP, TN, FN = 0, 0, 0, 0
for i in range(y_true.shape[0]):
for j in range(y_true.shape[1]):
if y_true[i][j] == y_pred[i][j]:
if y_true[i][j] == 1:
TP += 1
else:
TN += 1
elif y_true[i][j] == 1:
FN += 1
else:
FP += 1
precision = 1.0 * TP / (TP + FP + 1e-10)
recall = 1.0 * TP / (TP + FN + 1e-10)
f1 = 2 * precision * recall / (precision + recall + 1e-10)
return f1, precision, recall
def get_k(y_obs, y_pred):
y_obs = np.array(y_obs)
y_pred = np.array(y_pred)
return sum(y_obs * y_pred) / float(sum(y_pred * y_pred))
def rm2_index(ys_orig, ys_line):
r2 = r_squared_error(ys_orig, ys_line)
r02 = squared_error_zero(ys_orig, ys_line)
return r2 * (1 - np.sqrt(np.absolute((r2 * r2)-(r02 * r02))))
def concordance_index(gt, pred):
gt_mask = gt.reshape((1, -1)) > gt.reshape((-1, 1))
diff = pred.reshape((1, -1)) - pred.reshape((-1, 1))
h_one = (diff > 0)
h_half = (diff == 0)
CI = np.sum(gt_mask * h_one * 1.0 + gt_mask * h_half * 0.5) / np.sum(gt_mask)
return CI
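# Worked example (illustration only; the helper name is hypothetical). For ground
# truth [1, 2, 3] and predictions [1, 3, 2] there are three pairs with a strict
# ground-truth ordering; two are predicted in the correct order, so CI = 2 / 3:
def _example_concordance_index():
    gt = np.array([1.0, 2.0, 3.0])
    pred = np.array([1.0, 3.0, 2.0])
    return concordance_index(gt, pred)  # ~0.6667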
def r_squared_error(y_obs, y_pred):
y_obs = np.array(y_obs)
y_pred = np.array(y_pred)
y_obs_mean = [np.mean(y_obs) for y in y_obs]
y_pred_mean = [np.mean(y_pred) for y in y_pred]
mult = sum((y_pred - y_pred_mean) * (y_obs - y_obs_mean))
mult = mult * mult
y_obs_sq = sum((y_obs - y_obs_mean)*(y_obs - y_obs_mean))
y_pred_sq = sum((y_pred - y_pred_mean) * (y_pred - y_pred_mean) )
return mult / float(y_obs_sq * y_pred_sq)
def squared_error_zero(y_obs, y_pred):
k = get_k(y_obs, y_pred)
y_obs = np.array(y_obs)
y_pred = np.array(y_pred)
y_obs_mean = [np.mean(y_obs) for y in y_obs]
upp = sum((y_obs - (k * y_pred)) * (y_obs - (k * y_pred)))
down= sum((y_obs - y_obs_mean)*(y_obs - y_obs_mean))
return 1 - (upp / float(down))
def recall_at_k(sorted, index, k):
for i in range(min(len(sorted), k)):
if sorted[i] == index:
return 1
return 0
def metrics_average(results):
metrics = {key: [] for key in results[0]}
for result in results:
for key in result:
metrics[key].append(result[key])
for key in metrics:
metrics[key] = (np.mean(metrics[key]), np.std(metrics[key]))
return metrics
| OpenBioMed-main | open_biomed/utils/metrics.py |
import logging
logger = logging.getLogger(__name__)
import math
import numpy as np
from rdkit import Chem
from rdkit.Chem.Scaffolds.MurckoScaffold import MurckoScaffoldSmiles
import json
import collections
from utils.cluster import cluster_with_sim_matrix, merge_cluster
from utils.prot_utils import get_normalized_ctd
def random_split(n, r_val, r_test):
r_train = 1 - r_val - r_test
perm = np.random.permutation(n)
    train_cutoff = int(r_train * n)  # cast to int so the cutoffs can be used as slice indices
    val_cutoff = int((r_train + r_val) * n)
return perm[:train_cutoff], perm[train_cutoff : val_cutoff], perm[val_cutoff:]
def kfold_split(n, k):
perm = np.random.permutation(n)
return [perm[i * n // k: (i + 1) * n // k] for i in range(k)]
def _generate_scaffold(smiles, include_chirality=False, is_standard=False):
if is_standard:
scaffold = MurckoScaffoldSmiles(smiles=smiles, includeChirality=True)
else:
mol = Chem.MolFromSmiles(smiles)
scaffold = MurckoScaffoldSmiles(mol=mol, includeChirality=include_chirality)
return scaffold
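# Worked example (illustration only; the helper name is hypothetical). The Murcko
# scaffold keeps ring systems and their linkers and drops acyclic side chains, so
# toluene reduces to benzene:
def _example_murcko_scaffold():
    return _generate_scaffold("Cc1ccccc1", is_standard=True)  # -> "c1ccccc1"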
def generate_scaffolds(dataset, log_every_n=1000, sort=True, is_standard=False):
scaffolds = {}
data_len = len(dataset)
logger.info("About to generate scaffolds")
for ind, smiles in enumerate(dataset.smiles):
if log_every_n > 0 and ind % log_every_n == 0:
logger.info("Generating scaffold %d/%d" % (ind, data_len))
scaffold = _generate_scaffold(smiles, is_standard=is_standard)
if scaffold not in scaffolds:
scaffolds[scaffold] = [ind]
else:
scaffolds[scaffold].append(ind)
if sort:
# Sort from largest to smallest scaffold sets
scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_sets = [
scaffold_set for (scaffold, scaffold_set) in sorted(
scaffolds.items(),
key=lambda x: (len(x[1]), x[1][0]),
reverse=True
)
]
else:
scaffold_sets = [value for key, value in scaffolds.items()]
# TODO: DEBUG
"""
scaffold_index = collections.OrderedDict()
for i, value in enumerate(scaffold_sets):
scaffold_index[i] = str(value)
scaffold_index = json.dumps(scaffold_index)
with open("scaffold_set_2.json","w") as f:
f.write(scaffold_index)
"""
return scaffold_sets
def scaffold_split(dataset, r_val, r_test, log_every_n=1000, is_standard=False):
r_train = 1.0 - r_val - r_test
scaffold_sets = generate_scaffolds(dataset, log_every_n, is_standard=is_standard)
train_cutoff = r_train * len(dataset)
valid_cutoff = (r_train + r_val) * len(dataset)
train_inds = []
valid_inds = []
test_inds = []
logger.info("About to sort in scaffold sets")
for scaffold_set in scaffold_sets:
if len(train_inds) + len(scaffold_set) > train_cutoff:
if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
test_inds += scaffold_set
else:
valid_inds += scaffold_set
else:
train_inds += scaffold_set
return train_inds, valid_inds, test_inds
def cold_drug_split(dataset, nfolds):
scaffold_sets = generate_scaffolds(dataset, -1, sort=False)
n_cutoff = len(dataset.pair_index) // nfolds
drug_pair_index = {}
for i, (i_drug, i_prot) in enumerate(dataset.pair_index):
if i_drug not in drug_pair_index:
drug_pair_index[i_drug] = [i]
else:
drug_pair_index[i_drug].append(i)
folds = [[] for i in range(nfolds)]
cur = 0
for scaffold_set in scaffold_sets:
pair_in_scaffold_set = []
for i_drug in scaffold_set:
pair_in_scaffold_set += drug_pair_index[i_drug]
if cur != nfolds - 1 and len(folds[cur]) + len(pair_in_scaffold_set) >= n_cutoff:
if len(folds[cur]) + len(pair_in_scaffold_set) - n_cutoff > n_cutoff - len(folds[cur]):
cur += 1
folds[cur] += pair_in_scaffold_set
else:
folds[cur] += pair_in_scaffold_set
cur += 1
else:
folds[cur] += pair_in_scaffold_set
return folds
def cold_protein_split(dataset, nfolds):
ctds = get_normalized_ctd(dataset.proteins)
prot_sim = ctds @ ctds.T
clusters = cluster_with_sim_matrix(prot_sim, 0.3)
prot_pair_index = {}
for i, (i_drug, i_prot) in enumerate(dataset.pair_index):
if i_prot not in prot_pair_index:
prot_pair_index[i_prot] = [i]
else:
prot_pair_index[i_prot].append(i)
n_cutoff = len(dataset.pair_index) // nfolds
folds = [[] for i in range(nfolds)]
cur = 0
for cluster in clusters:
pair_in_cluster = []
for i_protein in cluster:
if i_protein in prot_pair_index:
pair_in_cluster += prot_pair_index[i_protein]
if cur != nfolds - 1 and len(folds[cur]) + len(pair_in_cluster) >= n_cutoff:
if len(folds[cur]) + len(pair_in_cluster) - n_cutoff > n_cutoff - len(folds[cur]):
cur += 1
folds[cur] += pair_in_cluster
else:
folds[cur] += pair_in_cluster
cur += 1
else:
folds[cur] += pair_in_cluster
return folds
def cold_cluster_split(dataset, ngrids):
drug_clusters = generate_scaffolds(dataset, -1)
drug_clusters = merge_cluster(drug_clusters, ngrids)
ctds = get_normalized_ctd(dataset.proteins)
prot_sim = ctds @ ctds.T
prot_clusters = cluster_with_sim_matrix(prot_sim, 0.3)
prot_clusters = merge_cluster(prot_clusters, ngrids)
pair_in_grid = []
for i in range(ngrids):
pair_in_grid.append([])
for j in range(ngrids):
pair_in_grid[i].append([])
for k, (i_drug, i_prot) in enumerate(dataset.pair_index):
if i_drug in drug_clusters[i] and i_prot in prot_clusters[j]:
pair_in_grid[i][j].append(k)
folds = []
for i in range(ngrids):
for j in range(ngrids):
folds.append({"test": pair_in_grid[i][j]})
train = []
for k in range(ngrids):
if k != i:
for l in range(ngrids):
if l != j:
train += pair_in_grid[k][l]
folds[-1]["train"] = train
return folds | OpenBioMed-main | open_biomed/utils/split_utils.py |
import numpy as np
from tqdm import tqdm
def to_clu_sparse(data):
s = "%d %d %d" % (data.shape[0], data.shape[1], np.sum(data))
s_row = [""] * data.shape[0]
non_zero_row, non_zero_col = np.where(data > 0)
for i in tqdm(range(len(non_zero_row))):
s_row[non_zero_row[i]] += " %d %f" % (non_zero_col[i] + 1, data[non_zero_row[i], non_zero_col[i]])
return "\n".join([s] + s_row) | OpenBioMed-main | open_biomed/utils/matrix_utils.py |
import math
import torch
from torch.optim.lr_scheduler import _LRScheduler
class CosineAnnealingWarmupRestarts(_LRScheduler):
"""
optimizer (Optimizer): Wrapped optimizer.
first_cycle_steps (int): First cycle step size.
cycle_mult(float): Cycle steps magnification. Default: -1.
max_lr(float): First cycle's max learning rate. Default: 0.1.
min_lr(float): Min learning rate. Default: 0.001.
warmup_steps(int): Linear warmup step size. Default: 0.
gamma(float): Decrease rate of max learning rate by cycle. Default: 1.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(
self,
optimizer : torch.optim.Optimizer,
first_cycle_steps : int,
cycle_mult : float = 1.,
max_lr : float = 0.1,
min_lr : float = 0.001,
warmup_steps : int = 0,
gamma : float = 1.,
last_epoch : int = -1
):
assert warmup_steps < first_cycle_steps
self.first_cycle_steps = first_cycle_steps # first cycle step size
self.cycle_mult = cycle_mult # cycle steps magnification
self.base_max_lr = max_lr # first max learning rate
self.max_lr = max_lr # max learning rate in the current cycle
self.min_lr = min_lr # min learning rate
self.warmup_steps = warmup_steps # warmup step size
self.gamma = gamma # decrease rate of max learning rate by cycle
self.cur_cycle_steps = first_cycle_steps # first cycle step size
self.cycle = 0 # cycle count
self.step_in_cycle = last_epoch # step size of the current cycle
super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)
# set learning rate min_lr
self.init_lr()
def init_lr(self):
self.base_lrs = []
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.min_lr
self.base_lrs.append(self.min_lr)
def get_lr(self):
if self.step_in_cycle == -1:
return self.base_lrs
elif self.step_in_cycle < self.warmup_steps:
return [(self.max_lr - base_lr)*self.step_in_cycle / self.warmup_steps + base_lr for base_lr in self.base_lrs]
else:
return [base_lr + (self.max_lr - base_lr) \
* (1 + math.cos(math.pi * (self.step_in_cycle-self.warmup_steps) \
/ (self.cur_cycle_steps - self.warmup_steps))) / 2
for base_lr in self.base_lrs]
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.step_in_cycle = self.step_in_cycle + 1
if self.step_in_cycle >= self.cur_cycle_steps:
self.cycle += 1
self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps
self.cur_cycle_steps = int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps
else:
if epoch >= self.first_cycle_steps:
if self.cycle_mult == 1.:
self.step_in_cycle = epoch % self.first_cycle_steps
self.cycle = epoch // self.first_cycle_steps
else:
n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))
self.cycle = n
self.step_in_cycle = epoch - int(self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1))
self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n)
else:
self.cur_cycle_steps = self.first_cycle_steps
self.step_in_cycle = epoch
self.max_lr = self.base_max_lr * (self.gamma**self.cycle)
self.last_epoch = math.floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr | OpenBioMed-main | open_biomed/utils/schedulars.py |
#import os
#import sys
#sys.path.append(os.path.dirname(__file__))
import os
import numpy as np
import random
import torch
import datetime
from utils.distributed_utils import *
from utils.metrics import *
from utils.mol_utils import *
from utils.cell_utils import *
from utils.kg_utils import *
from utils.matrix_utils import *
from utils.collators import *
class BestMeter(object):
"""Computes and stores the best value"""
def __init__(self, best_type):
self.best_type = best_type
self.count = 0
self.reset()
def reset(self):
if self.best_type == 'min':
self.best = float('inf')
else:
self.best = -float('inf')
def update(self, best):
self.best = best
self.count = 0
def get_best(self):
return self.best
def counter(self):
self.count += 1
return self.count
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, distributed=False, local_rank=0, dest_device=0, world_size=1):
self.reset()
self.distributed = distributed
self.local_rank = local_rank
self.dest_device = dest_device
self.world_size = world_size
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
def get_average(self):
self.avg = self.sum / (self.count + 1e-12)
if self.distributed:
return mean_reduce(self.avg)
return self.avg
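# Usage sketch (illustration only; the helper name and loss list are hypothetical).
# `update` accumulates a sum weighted by batch size and `get_average` returns the
# per-sample mean:
def _example_average_meter(losses):
    meter = AverageMeter()
    for loss, batch_size in losses:  # e.g. [(0.9, 32), (0.7, 32)]
        meter.update(loss, n=batch_size)
    return meter.get_average()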
class EarlyStopping(object):
"""
Parameters
----------
mode : str
* 'higher': Higher metric suggests a better model
* 'lower': Lower metric suggests a better model
If ``metric`` is not None, then mode will be determined
automatically from that.
patience : int
The early stopping will happen if we do not observe performance
improvement for ``patience`` consecutive epochs.
filename : str or None
Filename for storing the model checkpoint. If not specified,
we will automatically generate a file starting with ``early_stop``
based on the current time.
metric : str or None
A metric name that can be used to identify if a higher value is
better, or vice versa. Default to None. Valid options include:
``'r2'``, ``'mae'``, ``'rmse'``, ``'roc_auc_score'``.
"""
def __init__(self, mode='higher', patience=10, filename=None, metric=None):
if filename is None:
dt = datetime.datetime.now()
folder = os.path.join(os.getcwd(), 'results')
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, 'early_stop_{}_{:02d}-{:02d}-{:02d}.pth'.format(
dt.date(), dt.hour, dt.minute, dt.second))
if metric is not None:
assert metric in ['r2', 'mae', 'rmse', 'roc_auc_score', 'pr_auc_score'], \
"Expect metric to be 'r2' or 'mae' or " \
"'rmse' or 'roc_auc_score', got {}".format(metric)
if metric in ['r2', 'roc_auc_score', 'pr_auc_score']:
print('For metric {}, the higher the better'.format(metric))
mode = 'higher'
if metric in ['mae', 'rmse']:
print('For metric {}, the lower the better'.format(metric))
mode = 'lower'
assert mode in ['higher', 'lower']
self.mode = mode
if self.mode == 'higher':
self._check = self._check_higher
else:
self._check = self._check_lower
self.patience = patience
self.counter = 0
self.filename = filename
self.best_score = None
self.early_stop = False
def _check_higher(self, score, prev_best_score):
"""Check if the new score is higher than the previous best score.
Parameters
----------
score : float
New score.
prev_best_score : float
Previous best score.
Returns
-------
bool
Whether the new score is higher than the previous best score.
"""
return score > prev_best_score
def _check_lower(self, score, prev_best_score):
"""Check if the new score is lower than the previous best score.
Parameters
----------
score : float
New score.
prev_best_score : float
Previous best score.
Returns
-------
bool
Whether the new score is lower than the previous best score.
"""
return score < prev_best_score
def step(self, score, model):
"""Update based on a new score.
The new score is typically model performance on the validation set
for a new epoch.
Parameters
----------
score : float
New score.
model : nn.Module
Model instance.
Returns
-------
bool
Whether an early stop should be performed.
"""
if self.best_score is None:
self.best_score = score
self.save_checkpoint(model)
elif self._check(score, self.best_score):
self.best_score = score
self.save_checkpoint(model)
self.counter = 0
else:
self.counter += 1
print(
f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
return self.early_stop
def save_checkpoint(self, model):
'''Saves model when the metric on the validation set gets improved.
Parameters
----------
model : nn.Module
Model instance.
'''
torch.save({'model_state_dict': model.state_dict()}, self.filename)
def load_checkpoint(self, model):
'''Load the latest checkpoint
Parameters
----------
model : nn.Module
Model instance.
'''
model.load_state_dict(torch.load(self.filename)['model_state_dict'])
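# Usage sketch (illustration only; the helper name and score list are hypothetical).
# `step` returns True once the metric has not improved for `patience` epochs, and
# the best weights can then be restored with `load_checkpoint`:
def _example_early_stopping(model, val_scores):
    stopper = EarlyStopping(mode="higher", patience=3)
    for score in val_scores:  # e.g. per-epoch ROC-AUC values
        if stopper.step(score, model):
            break
    stopper.load_checkpoint(model)
    return stopper.best_score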
class Normalizer(object):
"""Normalize a Tensor and restore it later. """
def __init__(self, tensor):
"""tensor is taken as a sample to calculate the mean and std"""
self.mean = torch.mean(tensor)
self.std = torch.std(tensor)
def norm(self, tensor):
return (tensor - self.mean) / self.std
def denorm(self, normed_tensor):
return normed_tensor * self.std + self.mean
def state_dict(self):
return {'mean': self.mean,
'std': self.std}
def load_state_dict(self, state_dict):
self.mean = state_dict['mean']
self.std = state_dict['std']
def normalize(x):
return (x - x.min()) / (x.max() - x.min())
def save_checkpoint(model, model_dir, epoch, val_loss, val_acc):
model_path = os.path.join(model_dir, 'epoch:%d-val_loss:%.3f-val_acc:%.3f.model' % (epoch, val_loss, val_acc))
torch.save(model, model_path)
def load_checkpoint(model_path):
return torch.load(model_path)
def save_model_dict(model, model_dir, msg):
model_path = os.path.join(model_dir, msg + '.pt')
torch.save(model.state_dict(), model_path)
print("model has been saved to %s." % (model_path))
def load_model_dict(model, ckpt):
model.load_state_dict(torch.load(ckpt))
def cycle(iterable):
while True:
for x in iterable:
yield x
def seed_all(seed_value, cuda_deterministic=False):
random.seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
if cuda_deterministic: # slower, more reproducible
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else: # faster, less reproducible
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
def fix_path_in_config(config, abs_path):
if isinstance(config, list):
for i in range(len(config)):
if isinstance(config[i], str):
if config[i].startswith("./"):
config[i] = abs_path + config[i][1:]
elif isinstance(config[i], list) or isinstance(config[i], dict):
fix_path_in_config(config[i], abs_path)
else:
for elem in config:
if isinstance(config[elem], str):
if config[elem].startswith("./"):
config[elem] = abs_path + config[elem][1:]
elif isinstance(config[elem], list) or isinstance(config[elem], dict):
fix_path_in_config(config[elem], abs_path) | OpenBioMed-main | open_biomed/utils/__init__.py |
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return max((x - 1. )/ (warmup - 1.), 0.)
def warmup_poly(x, warmup=0.002, degree=0.5):
if x < warmup:
return x/warmup
return (1.0 - x)**degree
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
'warmup_poly':warmup_poly,
}
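# Illustrative sketch (not part of the original module): each schedule above maps
# training progress x in [0, 1) to a learning-rate scale factor; during the first
# `warmup` fraction the factor ramps linearly from 0 to 1.
def _demo_schedules():
    for name, fct in SCHEDULES.items():
        # progress points: inside warmup, shortly after warmup, late in training
        print(name, [fct(x, warmup=0.1) for x in (0.05, 0.2, 0.9)])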
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss | OpenBioMed-main | open_biomed/utils/optimizers.py |
from feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER, MolMultiModalFeaturizer
from feature.protein_featurizer import SUPPORTED_PROTEIN_FEATURIZER, ProteinMultiModalFeaturizer
from feature.cell_featurizer import SUPPORTED_CELL_FEATURIZER
from feature.text_featurizer import SUPPORTED_TEXT_FEATURIZER
from utils.collators import MolCollator, ProteinCollator, CellCollator, TextCollator
entity_featurizer_map = {
"molecule": (SUPPORTED_MOL_FEATURIZER, MolMultiModalFeaturizer),
"protein": (SUPPORTED_PROTEIN_FEATURIZER, ProteinMultiModalFeaturizer),
"cell": (SUPPORTED_CELL_FEATURIZER, None),
"text": (SUPPORTED_TEXT_FEATURIZER, None),
}
entity_collator_map = {
"molecule": MolCollator,
"protein": ProteinCollator,
"cell": CellCollator,
"text": TextCollator
}
class DataProcessorFast(object):
def __init__(self, entity_type, config):
self.entity_type = entity_type
self.config = config
assert self.entity_type in entity_featurizer_map
# configure featurizer
feat = entity_featurizer_map[self.entity_type]
if entity_type in ["molecule", "protein"]:
if len(self.config["modality"]) > 1:
if feat[1] is None:
raise NotImplementedError("Multi-Modal featurizer for %s is not implemented!" % (self.entity_type))
self.featurizer = feat[1](config)
else:
feat_config = self.config["featurizer"][self.config["modality"][0]]
if feat_config["name"] not in feat[0]:
raise NotImplementedError("Featurizer %s for %s is not implemented!" % (feat_config["name"], self.entity_type))
self.featurizer = feat[0][feat_config["name"]](feat_config)
else:
self.featurizer = feat[0][config["name"]](config)
# configure collator
self.collator = entity_collator_map[self.entity_type](self.config)
def __call__(self, obj):
if not isinstance(obj, list):
obj = [self.featurizer(obj)]
else:
obj = [self.featurizer(x) for x in obj]
return self.collator(obj)
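# Illustrative sketch (not part of the original module): a minimal single-modality
# config for DataProcessorFast. The exact schema and featurizer names come from the
# project's config files; the values below (including the "ogb" featurizer name)
# are assumptions for demonstration only.
def _demo_data_processor():
    config = {
        "modality": ["structure"],
        "featurizer": {"structure": {"name": "ogb"}},  # hypothetical featurizer name
    }
    processor = DataProcessorFast("molecule", config)
    # featurize and collate a single SMILES string (aspirin)
    return processor("CC(=O)OC1=CC=CC=C1C(=O)O")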
| OpenBioMed-main | open_biomed/utils/data_utils.py |
import logging
logger = logging.getLogger(__name__)
from abc import ABC, abstractmethod
import os
import json
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm
import random
import torch
from rdkit import Chem
from utils.cell_utils import load_hugo2ncbi
class KG(object):
def __init__(self):
super(KG, self).__init__()
@abstractmethod
def __str__(self):
raise NotImplementedError
@abstractmethod
def link(self, dataset):
raise NotImplementedError
class BMKG(KG):
def __init__(self, path):
super(BMKG, self).__init__()
self.drugs = json.load(open(os.path.join(path, "drug.json"), "r"))
self.smi2drugid = {}
for key in self.drugs:
mol = Chem.MolFromSmiles(self.drugs[key]["SMILES"])
if mol is not None:
smi = Chem.MolToSmiles(mol, isomericSmiles=True)
self.smi2drugid[smi] = key
self.proteins = json.load(open(os.path.join(path, "protein.json"), "r"))
self.seq2proteinid = {}
for key in self.proteins:
self.seq2proteinid[self.proteins[key]["sequence"]] = key
self.edges = pd.read_csv(os.path.join(path, "links.csv"), dtype=str).values.tolist()
def link(self, dataset):
link_drug, link_protein = 0, 0
drug2kg, drug2text = {}, {}
for smi in dataset.smiles:
iso_smi = Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=True)
if iso_smi in self.smi2drugid:
link_drug += 1
drug2kg[smi] = self.smi2drugid[iso_smi]
drug2text[smi] = self.drugs[self.smi2drugid[iso_smi]]["text"].lower()
else:
drug2kg[smi] = None
drug2text[smi] = ""
protein2kg, protein2text = {}, {}
for seq in dataset.proteins:
if seq in self.seq2proteinid:
link_protein += 1
protein2kg[seq] = self.seq2proteinid[seq]
protein2text[seq] = self.proteins[self.seq2proteinid[seq]]["text"].lower()
else:
protein2kg[seq] = None
protein2text[seq] = ""
logger.info("Linked drug %d/%d" % (link_drug, len(dataset.smiles)))
logger.info("Linked protein %d/%d" % (link_protein, len(dataset.proteins)))
return drug2kg, drug2text, protein2kg, protein2text
class BMKGv2(KG):
def __init__(self, path):
super(BMKGv2, self).__init__()
self.kg = pickle.load(open(path, "rb"))
self.adj = {}
for triplet in self.kg["triplets"]:
if triplet[0] not in self.adj:
self.adj[triplet[0]] = [triplet]
else:
self.adj[triplet[0]].append(triplet)
if triplet[2] not in self.adj:
self.adj[triplet[2]] = [triplet]
else:
self.adj[triplet[2]].append(triplet)
class STRING(KG):
def __init__(self, path, thresh=0.95):
super(STRING, self).__init__()
self.thresh = thresh
_, self.hugo2ncbi = load_hugo2ncbi()
self._load_proteins(path)
self._load_edges(path)
def _load_proteins(self, path):
# self.proteins: Dict
# Key: ensp id
# Value: kg_id - index in the knowledge graph
# name - preferred name in HUGO
# sequence - amino acid sequence
# text - description
self.proteins = {}
self.ncbi2ensp = {}
df = pd.read_csv(os.path.join(path, "9606.protein.info.v11.0.txt"), sep='\t')
for index, protein in df.iterrows():
self.proteins[protein['protein_external_id']] = {
"kg_id": index,
"name": protein['preferred_name'],
"text": protein['annotation']
}
self.ncbi2ensp[protein['preferred_name']] = protein['protein_external_id']
# protein sequence
with open(os.path.join(path, "9606.protein.sequences.v11.0.fa"), 'r') as f:
id, buf = None, ''
for line in f.readlines():
if line.startswith('>'):
if id is not None:
self.proteins[id]["sequence"] = buf
id = line.lstrip('>').rstrip("\n")
buf = ''
                else:
                    buf = buf + line.rstrip("\n")
            if id is not None:
                # flush the last record: the loop only writes a sequence when the next header appears
                self.proteins[id]["sequence"] = buf
def _load_edges(self, path):
edges = pd.read_csv(os.path.join(path, "9606.protein.links.v11.0.txt"), sep=' ')
selected_edges = edges['combined_score'] > (self.thresh * 1000)
self.edges = edges[selected_edges][["protein1", "protein2"]].values.tolist()
for i in range(len(self.edges)):
self.edges[i][0] = self.proteins[self.edges[i][0]]["kg_id"]
self.edges[i][1] = self.proteins[self.edges[i][1]]["kg_id"]
def node_subgraph(self, node_idx, format="hugo"):
if format == "hugo":
node_idx = [self.hugo2ncbi[x] for x in node_idx]
node_idx = [self.ncbi2ensp[x] if x in self.ncbi2ensp else x for x in node_idx]
ensp2subgraphid = dict(zip(node_idx, range(len(node_idx))))
names_ensp = list(self.proteins.keys())
edge_index = []
for i in self.edges:
p0, p1 = names_ensp[i[0]], names_ensp[i[1]]
if p0 in node_idx and p1 in node_idx:
edge_index.append((ensp2subgraphid[p0], ensp2subgraphid[p1]))
edge_index.append((ensp2subgraphid[p1], ensp2subgraphid[p0]))
edge_index = list(set(edge_index))
return np.array(edge_index, dtype=np.int64).T
def __str__(self):
return "Collected from string v11.0 database, totally %d proteins and %d edges" % (len(self.proteins), len(self.edges))
SUPPORTED_KG = {"BMKG": BMKG, "STRING": STRING}
def subgraph_sample(num_nodes, edge_index, strategy, num_samples, directed=False):
### Inputs:
# edge_index: edge index
# strategy: sampling strategy, e.g. bfs
### Output:
# indexes of sampled edges
adj = []
for i in range(num_nodes):
adj.append([])
for i, edge in enumerate(edge_index):
adj[edge[0]].append(i)
node_queue = []
visited = [0] * num_nodes
selected_edges = []
random_node = random.randint(0, num_nodes - 1)
while len(adj[random_node]) > 5:
random_node = random.randint(0, num_nodes - 1)
node_queue.append(random_node)
visited[random_node] = 1
def dfs(u):
visited[u] = 1
for i in adj[u]:
if i not in selected_edges:
selected_edges.append(i)
selected_edges.append(i ^ 1)
if len(selected_edges) >= num_samples:
return
for i in adj[u]:
v = edge_index[i][1]
if visited[v]:
continue
dfs(v)
if len(selected_edges) >= num_samples:
return
if strategy == 'dfs':
dfs(random_node)
else:
while len(selected_edges) < num_samples:
u = node_queue.pop(0)
for i in adj[u]:
v = edge_index[i][1]
if i not in selected_edges:
selected_edges.append(i)
selected_edges.append(i ^ 1)
if not visited[v]:
visited[v] = 1
node_queue.append(v)
return selected_edges
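# Illustrative sketch (not part of the original module): sampling from a toy
# undirected 6-cycle. The edge list stores each undirected edge as two directed
# entries at indices 2k and 2k + 1, which is what the `i ^ 1` pairing above assumes.
def _demo_subgraph_sample():
    edge_index = []
    for u in range(6):
        v = (u + 1) % 6
        edge_index.append([u, v])
        edge_index.append([v, u])
    # returns the indexes of (at least) 4 sampled edges in `edge_index`
    return subgraph_sample(num_nodes=6, edge_index=edge_index, strategy="dfs", num_samples=4)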
def embed(graph, model='ProNE', filter_out={}, dim=256, save=True, save_path=''):
### Inputs:
# G: object of KG
# model: network embedding model, e.g. ProNE
### Outputs:
# emb: numpy array, |G| * dim
if save and os.path.exists(save_path):
logger.info("Load KGE from saved file.")
return pickle.load(open(save_path, "rb"))
from cogdl.data import Adjacency
from cogdl.models.emb.prone import ProNE
name2id = {}
cnt = 0
filtered = 0
row = []
col = []
for h, t, r in graph.edges:
if (h, t) in filter_out:
filtered += 1
continue
if h not in name2id:
cnt += 1
name2id[h] = cnt
if t not in name2id:
cnt += 1
name2id[t] = cnt
row.append(name2id[h])
col.append(name2id[t])
logger.info("Filtered out %d edges in val/test set" % (filtered))
row = torch.tensor(row)
col = torch.tensor(col)
graph = Adjacency(row, col)
emb_model = ProNE(dim, 5, 0.2, 0.5)
logger.info("Generating KGE...")
emb = emb_model(graph)
kg_emb = {}
for key in name2id:
kg_emb[key] = emb[name2id[key]]
if save:
pickle.dump(kg_emb, open(save_path, "wb"))
return kg_emb
def bfs(graph, node_id, max_depth):
### Inputs:
# G: object of KG
# node_id: the id of the starting node
# max_depth: the max number of steps to go
### Outputs:
# dist: a list, dist[i] is the list of i-hop neighbors
pass | OpenBioMed-main | open_biomed/utils/kg_utils.py |
import logging
logger = logging.getLogger(__name__)
from abc import ABC, abstractmethod
import numpy as np
def load_hugo2ncbi():
ncbi2hugo = {}
hugo2ncbi = {}
try:
with open("../assets/drp/enterez_NCBI_to_hugo_gene_symbol_march_2019.txt", "r") as f:
for line in f.readlines():
line = line.strip("\n").split("\t")
if len(line) > 1:
ncbi2hugo[line[0]] = line[1]
hugo2ncbi[line[1]] = line[0]
except:
logger.warn("NCBI2hugo gene not found")
return ncbi2hugo, hugo2ncbi
class GeneSelector(ABC):
def __init__(self):
super(GeneSelector, self).__init__()
@abstractmethod
def __call__(self, genes, format="NCBI"):
raise NotImplementedError
class TGSAGeneSelector(GeneSelector):
def __init__(self):
super(TGSAGeneSelector, self).__init__()
self.ncbi2hugo, self.hugo2ncbi = load_hugo2ncbi()
self.selected_index_hugo = []
with open("../assets/drp/selected_genes.txt", "r") as f:
line = f.readline().strip("\n").split(",")
for index in line:
self.selected_index_hugo.append(index.lstrip('(').rstrip(')'))
def __call__(self, genes, format="NCBI"):
if format == "NCBI":
genename2id = dict(zip(genes, range(len(genes))))
return [genename2id[self.hugo2ncbi[x]] for x in self.selected_index_hugo]
SUPPORTED_GENE_SELECTOR = {
"TGSA": TGSAGeneSelector,
} | OpenBioMed-main | open_biomed/utils/cell_utils.py |
import logging
logger = logging.getLogger(__name__)
import numpy as np
class UFS(object):
def __init__(self, n):
self.fa = list(range(n))
def merge(self, x, y):
self.fa[x] = self.find(y)
def find(self, x):
self.fa[x] = self.find(self.fa[x]) if self.fa[x] != x else x
return self.fa[x]
def cluster_with_sim_matrix(sim_matrix, threshold):
n = len(sim_matrix)
e = []
f = UFS(n)
for i in range(n):
for j in range(n):
x, y = f.find(i), f.find(j)
if x != y and sim_matrix[x][y] > threshold:
f.merge(x, y)
for k in range(n):
sim_matrix[y][k] = min(sim_matrix[y][k], sim_matrix[x][k])
clusters = [[] for i in range(n)]
for i in range(n):
clusters[f.find(i)].append(i)
return clusters
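# Illustrative sketch (not part of the original module): clustering four items from
# a toy similarity matrix. With threshold 0.5, items 0/1 and items 2/3 end up merged;
# the returned list has one (possibly empty) slot per item, and the members of each
# cluster are listed under the slot of its representative. Note that the matrix is
# modified in place.
def _demo_cluster():
    sim = [
        [1.0, 0.9, 0.1, 0.0],
        [0.9, 1.0, 0.2, 0.1],
        [0.1, 0.2, 1.0, 0.8],
        [0.0, 0.1, 0.8, 1.0],
    ]
    return cluster_with_sim_matrix(sim, threshold=0.5)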
def merge_cluster(clusters, n_cluster):
merged_clusters = [[] for i in range(n_cluster)]
n_cutoff = np.sum([len(cluster) for cluster in clusters]) // n_cluster
perm = np.random.permutation(len(clusters))
cur = 0
for i in perm:
if cur < n_cluster - 1 and len(merged_clusters[cur]) + len(clusters[i]) > n_cutoff:
if len(merged_clusters[cur]) + len(clusters[i]) - n_cutoff > n_cutoff - len(merged_clusters[cur]):
cur += 1
merged_clusters[cur].extend(clusters[i])
else:
merged_clusters[cur].extend(clusters[i])
cur += 1
else:
merged_clusters[cur].extend(clusters[i])
logger.info("cluster size: %s" % (", ".join([str(len(merged_cluster)) for merged_cluster in merged_clusters])))
return merged_clusters | OpenBioMed-main | open_biomed/utils/cluster.py |
import os
import torch
import torch.distributed as dist
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def is_main_process():
return get_rank() == 0
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
if not args.distributed:
args.device = 0
return
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.device = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(args.device)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
dist.barrier()
setup_for_distributed(args.rank == 0)
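# Illustrative sketch (not part of the original module): init_distributed_mode expects
# to be launched with torchrun (which sets RANK, WORLD_SIZE and LOCAL_RANK) and an args
# object carrying `distributed` and `dist_url`. The namespace below is hypothetical;
# with distributed=False the call is a no-op that simply sets args.device = 0.
def _demo_init_distributed():
    import argparse
    args = argparse.Namespace(distributed=False, dist_url="env://")
    init_distributed_mode(args)
    return args.device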
def mean_reduce(val, cur_device, dest_device, world_size):
val = val.clone().detach() if torch.is_tensor(val) else torch.tensor(val)
val = val.to(cur_device)
torch.distributed.reduce(val, dst=dest_device)
return val.item() / world_size
def concat_reduce(tensor, num_total_examples, world_size):
output_tensors = [tensor.clone() for _ in range(world_size)]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
return concat[:num_total_examples]
@torch.no_grad()
def concat_gather(tensor):
if not is_dist_avail_and_initialized():
return tensor
gather_tensor = [torch.zeros_like(tensor) for i in range(dist.get_world_size())]
dist.all_gather(gather_tensor, tensor, async_op=False)
return torch.cat(gather_tensor, dim=0)
def concat_gather_with_grad(tensor):
if not is_dist_avail_and_initialized():
return tensor
gather_tensor = GatherLayer.apply(tensor)
return torch.cat(gather_tensor, dim=0)
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [
torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
torch.distributed.all_reduce(all_gradients)
return all_gradients[torch.distributed.get_rank()] | OpenBioMed-main | open_biomed/utils/distributed_utils.py |
import numpy as np
import math
def get_normalized_ctd(proteins):
from PyBioMed.PyProtein import CTD
ctds = []
for prot in proteins:
ctds.append(np.array(list(CTD.CalculateCTD(prot).values())))
ctds = np.array(ctds)
for i in range(ctds.shape[1]):
mean = np.mean(ctds[:, i])
var = np.var(ctds[:, i])
ctds[:, i] = (ctds[:, i] - mean) / math.sqrt(var)
for i in range(ctds.shape[0]):
ctds[i] /= np.linalg.norm(ctds[i])
return ctds | OpenBioMed-main | open_biomed/utils/prot_utils.py |
from abc import ABC, abstractmethod
import torch
from torch_geometric.data import Data, Batch
from transformers import BatchEncoding, DataCollatorWithPadding, BertTokenizer, T5Tokenizer, GPT2Tokenizer, EsmTokenizer
from utils.mol_utils import SmilesTokenizer
name2tokenizer = {
"bert": BertTokenizer,
"t5": T5Tokenizer,
"gpt2": GPT2Tokenizer,
"esm": EsmTokenizer,
"unimap": SmilesTokenizer,
}
def ToDevice(obj, device):
if isinstance(obj, dict):
for k in obj:
obj[k] = ToDevice(obj[k], device)
return obj
    elif isinstance(obj, (tuple, list)):
        # tuples are immutable, so rebuild the container instead of assigning in place
        moved = [ToDevice(x, device) for x in obj]
        return tuple(moved) if isinstance(obj, tuple) else moved
else:
return obj.to(device)
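# Illustrative sketch (not part of the original module): ToDevice walks nested
# dicts/lists and moves every tensor it finds, which is how collated batches are
# sent to the GPU (or kept on the CPU, as here).
def _demo_to_device():
    batch = {"input_ids": torch.ones(2, 8, dtype=torch.long), "labels": [torch.zeros(2), torch.zeros(2)]}
    return ToDevice(batch, torch.device("cpu"))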
class BaseCollator(ABC):
def __init__(self, config):
self.config = config
self._build(config)
@abstractmethod
def __call__(self, data, **kwargs):
raise NotImplementedError
def _collate_single(self, data, config):
if isinstance(data[0], Data):
return Batch.from_data_list(data)
elif torch.is_tensor(data[0]):
return torch.stack([x.squeeze() for x in data])
elif isinstance(data[0], BatchEncoding):
return config["collator"](data)
elif isinstance(data[0], dict):
result = {}
for key in data[0]:
result[key] = self._collate_single([x[key] for x in data], config[key] if key in config else {})
return result
elif isinstance(data[0], int):
return torch.tensor(data).view((-1, 1))
def _collate_multiple(self, data, config):
cor = []
flatten_data = []
for x in data:
cor.append(len(flatten_data))
flatten_data += x
cor.append(len(flatten_data))
return (cor, self._collate_single(flatten_data, config),)
def _build(self, config):
if not isinstance(config, dict):
return
if "model_name_or_path" in config:
tokenizer = name2tokenizer[config["transformer_type"]].from_pretrained(config["model_name_or_path"])
if config["transformer_type"] == "gpt2":
tokenizer.pad_token = tokenizer.eos_token
config["collator"] = DataCollatorWithPadding(
tokenizer=tokenizer,
padding=True
)
return
for key in config:
self._build(config[key])
class MolCollator(BaseCollator):
def __init__(self, config):
super(MolCollator, self).__init__(config)
def __call__(self, mols):
if len(self.config["modality"]) > 1:
batch = {}
for modality in self.config["modality"]:
batch[modality] = self._collate_single([mol[modality] for mol in mols], self.config["featurizer"][modality])
else:
batch = self._collate_single(mols, self.config["featurizer"]["structure"])
return batch
class ProteinCollator(BaseCollator):
def __init__(self, config):
super(ProteinCollator, self).__init__(config)
def __call__(self, proteins):
if len(self.config["modality"]) > 1:
batch = {}
for modality in self.config["modality"]:
if isinstance(proteins[0][modality], list):
batch[modality] = self._collate_multiple([protein[modality] for protein in proteins], self.config["featurizer"][modality])
else:
batch[modality] = self._collate_single([protein[modality] for protein in proteins], self.config["featurizer"][modality])
else:
batch = self._collate_single(proteins, self.config["featurizer"]["structure"])
return batch
class CellCollator(BaseCollator):
def __init__(self, config):
super(CellCollator, self).__init__(config)
def __call__(self, cells):
batch = self._collate_single(cells, self.config["featurizer"])
return batch
class TextCollator(BaseCollator):
def __init__(self, config):
super(TextCollator, self).__init__(config)
def __call__(self, texts):
batch = self._collate_single(texts, self.config)
return batch
class TaskCollator(ABC):
def __init__(self, config):
super(TaskCollator, self).__init__()
self.config = config
@abstractmethod
def __call__(self, data, **kwargs):
raise NotImplementedError
class DPCollator(TaskCollator):
def __init__(self, config):
super(DPCollator, self).__init__(config)
self.mol_collator = MolCollator(config)
def __call__(self, data):
mols, labels = map(list, zip(*data))
return self.mol_collator(mols), torch.stack(labels)
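# Illustrative sketch (not part of the original module): collators are meant to be
# passed as `collate_fn` to a torch DataLoader. The dataset is assumed to yield
# (mol_features, label_tensor) pairs matching the config used to build the collator;
# both arguments below are placeholders.
def _demo_dp_collator(dataset, config):
    from torch.utils.data import DataLoader
    loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=DPCollator(config))
    for mols, labels in loader:
        return mols, labels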
class DTICollator(TaskCollator):
def __init__(self, config):
super(DTICollator, self).__init__(config)
self.mol_collator = MolCollator(config["mol"])
self.protein_collator = ProteinCollator(config["protein"])
def __call__(self, data):
mols, prots, labels = map(list, zip(*data))
return self.mol_collator(mols), self.protein_collator(prots), torch.tensor(labels)
class DRPCollator(TaskCollator):
def __init__(self, config):
super(DRPCollator, self).__init__(config)
self.mol_collator = MolCollator(config["mol"])
self.cell_collator = CellCollator(config["cell"])
def __call__(self, data):
mols, cells, labels = map(list, zip(*data))
return self.mol_collator(mols), self.cell_collator(cells), torch.tensor(labels)
class PPICollator(TaskCollator):
def __init__(self, config, graph_ppi):
super(PPICollator, self).__init__(config)
self.graph_ppi = graph_ppi
self.protein_collator = ProteinCollator(config)
def __call__(self, data):
prots1, prots2, labels = map(list, zip(*data))
if self.graph_ppi:
return torch.LongTensor(prots1), torch.LongTensor(prots2), torch.stack(labels)
else:
return self.protein_collator(prots1), self.protein_collator(prots2), torch.stack(labels)
class MolQACollator(TaskCollator):
def __init__(self, config, collate_outputs=True):
super(MolQACollator, self).__init__(config)
self.mol_collator = MolCollator(config["mol"])
self.question_collator = TextCollator(config["text"]["question"])
self.collate_outputs = collate_outputs
if self.collate_outputs:
self.answer_collator = TextCollator(config["text"]["answer"])
def __call__(self, data):
mols, questions, answers = map(list, zip(*data))
if self.collate_outputs:
return self.mol_collator(mols), self.question_collator(questions), self.answer_collator(answers)
else:
return self.mol_collator(mols), self.question_collator(questions), answers | OpenBioMed-main | open_biomed/utils/collators.py |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import torch
import logging
logger = logging.getLogger(__name__)
import re
from utils.data_utils import DataProcessorFast
from utils.mol_utils import valid_smiles
from utils.collators import ToDevice
class Conversation(object):
def __init__(self, model, processor_config, device, system, roles=("Human", "Assistant"), sep="###", max_length=512):
self.model = model
self.mol_processor = DataProcessorFast("molecule", processor_config["mol"])
self.prot_processor = DataProcessorFast("protein", processor_config["protein"])
#TODO: add cell
self.device = device
self.system = system
self.roles = roles
self.sep = sep
self.max_length = max_length
self.messages = []
self.mol_embs = []
self.prot_embs = []
self.cell_embs = []
def _wrap_prompt(self):
ret = self.system + self.sep + " "
for role, message in self.messages:
if message:
ret += self.roles[role] + ": " + message + " " + self.sep + " "
else:
ret += self.roles[role] + ": "
return ret
def _append_message(self, role, message):
self.messages.append([role, message])
def _get_context_emb(self):
prompt = self._wrap_prompt()
logger.debug("Prompt: %s" % (prompt))
pattern = re.compile("<moleculeHere>|<proteinHere>|<cellHere>")
p_text = pattern.split(prompt)
spec_tokens = pattern.findall(prompt)
assert len(p_text) == len(self.mol_embs) + len(self.prot_embs) + len(self.cell_embs) + 1, "Unmatched numbers of placeholders and molecules."
seg_tokens = [
self.model.llm_tokenizer([seg], return_tensors="pt", add_special_tokens=(i == 0)).to(self.device)
for i, seg in enumerate(p_text)
]
seg_embs = [self.model.llm.get_input_embeddings()(seg_token.input_ids) for seg_token in seg_tokens]
mixed_embs = []
cur_mol, cur_prot, cur_cell = 0, 0, 0
for i in range(len(p_text) - 1):
mixed_embs.append(seg_embs[i])
if spec_tokens[i] == "<moleculeHere>":
mixed_embs.append(self.mol_embs[cur_mol])
cur_mol += 1
elif spec_tokens[i] == "<proteinHere>":
mixed_embs.append(self.prot_embs[cur_prot])
cur_prot += 1
elif spec_tokens[i] == "<cellHere>":
mixed_embs.append(self.cell_embs[cur_cell])
cur_cell += 1
mixed_embs.append(seg_embs[-1])
return torch.cat(mixed_embs, dim=1)
def ask(self, text):
if len(self.messages) > 0 and (self.messages[-1][1].endswith("</molecule>") or self.messages[-1][1].endswith("</protein>")) and self.messages[-1][0] == 0:
self.messages[-1][1] = self.messages[-1][1] + " " + text
else:
self._append_message(0, text)
def append_molecule(self, smi):
if not valid_smiles(smi):
logger.error("Failed to generate molecule graph. Maybe the SMILES is invalid.")
return
mol_inputs = ToDevice(self.mol_processor(smi), self.device)
with self.model.maybe_autocast():
mol_embs = self.model.proj_mol(self.model.encode_mol(mol_inputs, ret_atom_feats=True))
self.mol_embs.append(mol_embs.unsqueeze(0))
self._append_message(0, "<molecule><moleculeHere></molecule>")
def append_protein(self, protein, from_file=False):
if from_file:
protein = open(protein, "r").readline()
prot_inputs = ToDevice(self.prot_processor(protein), self.device)
with self.model.maybe_autocast():
prot_embs = self.model.proj_prot(self.model.encode_protein(prot_inputs))
self.prot_embs.append(prot_embs)
self._append_message(0, "<protein><proteinHere></protein>")
def answer(self, max_new_tokens=256, num_beams=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1, temperature=1.0,):
self._append_message(1, None)
embs = self._get_context_emb()
if embs.shape[1] + max_new_tokens > self.max_length:
begin_idx = embs.shape[1] + max_new_tokens - self.max_length
            embs = embs[:, begin_idx:]  # keep only the most recent context that fits
            logger.warning("The number of tokens in current conversation exceeds the max length (%d). The model will not see the contexts outside the range." % (self.max_length))
output = self.model.llm.generate(
inputs_embeds=embs,
max_length=max_new_tokens,
num_beams=num_beams,
top_p=top_p,
do_sample=False,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
temperature=temperature
)[0]
if output[0] in [0, 1]:
output = output[1:]
output = output[:-1]
output_tokens = self.model.llm_tokenizer.decode(output, add_special_tokens=False)
output_tokens = output_tokens.split("Assistant:")[-1].strip()
self.messages[-1][1] = output_tokens
return output_tokens, output.cpu().numpy()
def reset(self):
self.messages = []
self.mol_embs = []
self.prot_embs = []
self.cell_embs = []
if __name__ == "__main__":
import json
from models.multimodal import BioMedGPTV
config = json.load(open("./configs/encoders/multimodal/biomedgptv.json", "r"))
device = torch.device("cuda:0")
config["network"]["device"] = device
model = BioMedGPTV(config["network"])
ckpt = torch.load("./ckpts/fusion_ckpts/biomedgpt_10b.pth")
model.load_state_dict(ckpt)
model = model.to(device)
model.eval()
prompt_sys = "You are working as an excellent assistant in biology. " + \
"Below a human gives the representation of a molecule or a protein. Answer some questions about it. "
chat = Conversation(
model=model,
processor_config=config["data"],
device=device,
system=prompt_sys,
roles=("Human", "Assistant"),
sep="###",
max_length=2048
)
chat.append_protein("MAKEDTLEFPGVVKELLPNATFRVELDNGHELIAVMAGKMRKNRIRVLAGDKVQVEMTPYDLSKGRINYRFK")
questions = ["What are the official names of this protein?", "What is the function of this protein?"]
for q in questions:
print("Human: ", q)
chat.ask(q)
print("Assistant: ", chat.answer()[0])
print("Chat reset.")
chat.reset()
chat.append_molecule("C[C@]12CCC(=O)C=C1CC[C@@H]3[C@@H]2C(=O)C[C@]\\\\4([C@H]3CC/C4=C/C(=O)OC)C")
questions = ["Please describe this drug."]
for q in questions:
print("Human: ", q)
chat.ask(q)
print("Assistant: ", chat.answer()[0]) | OpenBioMed-main | open_biomed/utils/chat_utils.py |
import logging
logger = logging.getLogger(__name__)
import argparse
import csv
import collections
import json
import numpy as np
import os
import pickle
import re
from typing import List, Optional
import rdkit.Chem as Chem
from rdkit.Chem import MolStandardize
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
import torch
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
def valid_smiles(smi):
try:
mol = Chem.MolFromSmiles(smi.strip("\n"))
if mol is not None:
return True
else:
return False
except:
return False
def can_smiles(smi):
try:
mol = Chem.MolFromSmiles(smi)
standardizer = MolStandardize.normalize
# standardize the molecule
standardized_mol = standardizer.Normalizer().normalize(mol)
# get the standardized SMILES string
standardized_smiles = Chem.MolToSmiles(standardized_mol, isomericSmiles=True)
except:
standardized_smiles = smi
return standardized_smiles
def write_sdf(cid2smiles_file, output_file, sdf_file):
cid2smiles = pickle.load(open(cid2smiles_file, "rb"))
smiles2cid = {}
for cid in cid2smiles:
if cid2smiles[cid] != '*':
smi = Chem.MolToSmiles(Chem.MolFromSmiles(cid2smiles[cid]))
smiles2cid[smi] = cid
all_mols = []
print("Loading output file...")
with open(output_file, "r") as f:
for i, line in enumerate(f.readlines()):
if i == 0:
continue
line = line.rstrip("\n").split("\t")
try:
gt_smi = Chem.MolToSmiles(Chem.MolFromSmiles(line[1]))
output_mol = Chem.MolFromSmiles(line[2])
if output_mol is not None:
output_mol.SetProp("CID", smiles2cid[gt_smi])
all_mols.append(output_mol)
except:
continue
print("Writing sdf file...")
with Chem.SDWriter(sdf_file) as f:
for mol in all_mols:
f.write(mol)
def load_mol2vec(file):
mol2vec = {}
with open(file, "r") as f:
reader = csv.reader(f, delimiter=',')
headers = next(reader)
for row in reader:
mol_str = " ".join(row[-300:])
mol2vec[row[3]] = np.fromstring(mol_str, sep=" ")
return mol2vec
def link_datasets(source, target):
targetsmi2id = {}
for i, smi in enumerate(target.smiles):
try:
smi = Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=True)
targetsmi2id[smi] = i
except:
continue
match_indexes = []
for smi in source.smiles:
try:
smi = Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=True)
match_indexes.append(targetsmi2id[smi])
except:
match_indexes.append(-1)
return match_indexes
def convert_pyg_batch(output, batch_idx, max_n_nodes):
batch_size = torch.max(batch_idx).item() + 1
batch_output = []
batch_attention_mask = []
for i in range(batch_size):
feat = output[torch.where(batch_idx == i)]
if feat.shape[0] < max_n_nodes:
batch_output.append(torch.cat((
feat,
torch.zeros(max_n_nodes - feat.shape[0], feat.shape[1]).to(feat.device)
), dim=0))
batch_attention_mask.append(torch.cat((
torch.ones(feat.shape[0]).to(feat.device),
torch.zeros(max_n_nodes - feat.shape[0]).to(feat.device)
), dim=0))
else:
batch_output.append(feat[:max_n_nodes, :])
batch_attention_mask.append(torch.ones(max_n_nodes).to(feat.device))
batch_output = torch.stack(batch_output, dim=0)
batch_attention_mask = torch.stack(batch_attention_mask, dim=0)
return batch_output, batch_attention_mask
def add_argument(parser):
parser.add_argument("--mode", type=str, choices=["write_sdf", "unittest"])
def add_sdf_argument(parser):
parser.add_argument("--cid2smiles_file", type=str, default="")
parser.add_argument("--output_file", type=str, default="")
parser.add_argument("--sdf_file", type=str, default="")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_argument(parser)
args, _ = parser.parse_known_args()
if args.mode == "write_sdf":
add_sdf_argument(parser)
args = parser.parse_args()
write_sdf(args.cid2smiles_file, args.output_file, args.sdf_file)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
def smiles_tokenizer(smi):
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
tokens = [token for token in regex.findall(smi)]
return tokens
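# Illustrative sketch (not part of the original module): the regex above splits a
# SMILES string into atom, bond and ring-closure tokens, e.g. for aspirin:
def _demo_smiles_tokenizer():
    tokens = smiles_tokenizer("CC(=O)OC1=CC=CC=C1C(=O)O")
    # tokens == ['C', 'C', '(', '=', 'O', ')', 'O', 'C', '1', '=', 'C', 'C',
    #            '=', 'C', 'C', '=', 'C', '1', 'C', '(', '=', 'O', ')', 'O']
    return tokens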
class SmilesTokenizer(PreTrainedTokenizer):
"""
Tokenizer in RobertaTokenizer style.
Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer
implementation found in Huggingface's transformers library. It runs a WordPiece tokenization
    algorithm over SMILES strings using the SMILES tokenization regex developed by Schwaller et al.
Please see https://github.com/huggingface/transformers
and https://github.com/rxn4chemistry/rxnfp for more details.
Examples
--------
>>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer
>>> current_dir = os.path.dirname(os.path.realpath(__file__))
>>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')
>>> tokenizer = SmilesTokenizer(vocab_path)
>>> print(tokenizer.encode("CC(=O)OC1=CC=CC=C1C(=O)O"))
[12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]
References
----------
.. [1] Schwaller, Philippe; Probst, Daniel; Vaucher, Alain C.; Nair, Vishnu H; Kreutter, David;
Laino, Teodoro; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural
Networks. ChemRxiv. Preprint. https://doi.org/10.26434/chemrxiv.9897365.v3
Note
----
This class requires huggingface's transformers and tokenizers libraries to be installed.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file: str = '',
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
**kwargs):
"""Constructs a SmilesTokenizer.
Parameters
----------
vocab_file: str
Path to a SMILES character per line vocabulary file.
Default vocab file is found in deepchem/feat/tests/data/vocab.txt
"""
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file=vocab_file,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
#super().__init__(vocab_file, **kwargs) #merges_file
# take into account special tokens in max length
# self.max_len_single_sentence = self.model_max_length - 2
# self.max_len_sentences_pair = self.model_max_length - 3
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocab file at path '{}'.".format(vocab_file))
with open(vocab_file, 'r') as vr:
self.vocab = json.load(vr)
# self.vocab = load_vocab(vocab_file)
# self.highest_unused_index = max(
# [i for i, v in enumerate(self.vocab.keys()) if v.startswith("[unused")])
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = smiles_tokenizer
self.init_kwargs["model_max_length"] = self.model_max_length
@property
def vocab_size(self):
return len(self.vocab)
@property
def vocab_list(self):
return list(self.vocab.keys())
def _tokenize(self, text: str):
"""Tokenize a string into a list of tokens.
Parameters
----------
text: str
Input string sequence to be tokenized.
"""
split_tokens = [token for token in self.basic_tokenizer(text)]
return split_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def _convert_token_to_id(self, token: str):
"""Converts a token (str/unicode) in an id using the vocab.
Parameters
----------
token: str
String token from a larger sequence to be converted to a numerical id.
"""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index: int):
"""Converts an index (integer) in a token (string/unicode) using the vocab.
Parameters
----------
index: int
Integer index to be converted back to a string-based token as part of a larger sequence.
"""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: List[str]):
"""Converts a sequence of tokens (string) in a single string.
Parameters
----------
tokens: List[str]
List of tokens for a given string sequence.
Returns
-------
out_string: str
Single string from combined tokens.
"""
out_string: str = " ".join(tokens).replace(" ##", "").strip()
return out_string
def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):
"""Adds special tokens to the a sequence for sequence classification tasks.
A BERT sequence has the following format: [CLS] X [SEP]
Parameters
----------
token_ids: list[int]
list of tokenized input ids. Can be obtained using the encode or encode_plus methods.
"""
return [self.cls_token_id] + token_ids + [self.sep_token_id]
def add_special_tokens_single_sequence(self, tokens: List[str]):
"""Adds special tokens to the a sequence for sequence classification tasks.
A BERT sequence has the following format: [CLS] X [SEP]
Parameters
----------
tokens: List[str]
List of tokens for a given string sequence.
"""
return [self.cls_token] + tokens + [self.sep_token]
def add_special_tokens_ids_sequence_pair(self, token_ids_0: List[int],
token_ids_1: List[int]) -> List[int]:
"""Adds special tokens to a sequence pair for sequence classification tasks.
A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
Parameters
----------
token_ids_0: List[int]
List of ids for the first string sequence in the sequence pair (A).
token_ids_1: List[int]
List of tokens for the second string sequence in the sequence pair (B).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def add_padding_tokens(self,
token_ids: List[int],
length: int,
right: bool = True) -> List[int]:
"""Adds padding tokens to return a sequence of length max_length.
By default padding tokens are added to the right of the sequence.
Parameters
----------
token_ids: list[int]
list of tokenized input ids. Can be obtained using the encode or encode_plus methods.
        length: int
            Target total length of the returned sequence after padding.
        right: bool, default True
            If True, append padding tokens on the right of the sequence; otherwise prepend them on the left.
        Returns
        -------
        List[int]
            The token ids padded with pad_token_id up to the requested length.
"""
padding = [self.pad_token_id] * (length - len(token_ids))
if right:
return token_ids + padding
else:
return padding + token_ids
def save_vocabulary(
self, vocab_path: str
): # -> tuple[str]: doctest issue raised with this return type annotation
"""Save the tokenizer vocabulary to a file.
Parameters
----------
vocab_path: obj: str
The directory in which to save the SMILES character per line vocabulary file.
Default vocab file is found in deepchem/feat/tests/data/vocab.txt
Returns
-------
vocab_file: Tuple
Paths to the files saved.
typle with string to a SMILES character per line vocabulary file.
Default vocab file is found in deepchem/feat/tests/data/vocab.txt
"""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
else:
vocab_file = vocab_path
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(
self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(
vocab_file))
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,) | OpenBioMed-main | open_biomed/utils/mol_utils.py |
import torch.nn as nn
activation = {
"sigmoid": nn.Sigmoid(),
"softplus": nn.Softplus(),
"relu": nn.ReLU(),
"gelu": nn.GELU(),
"tanh": nn.Tanh(),
}
class MLP(nn.Module):
def __init__(self, config, input_dim, output_dim):
super(MLP, self).__init__()
self.model = nn.Sequential()
hidden_dims = [input_dim] + config["hidden_size"] + [output_dim]
for i in range(len(hidden_dims) - 1):
self.model.append(nn.Linear(hidden_dims[i], hidden_dims[i + 1]))
if i != len(hidden_dims) - 2:
self.model.append(nn.Dropout(config["dropout"]))
if config["activation"] != "none":
self.model.append(activation[config["activation"]])
if config["batch_norm"]:
                    self.model.append(nn.BatchNorm1d(hidden_dims[i + 1]))  # BatchNorm1d requires num_features
def forward(self, h):
return self.model(h).squeeze() | OpenBioMed-main | open_biomed/models/predictor.py |
from abc import ABC, abstractmethod
import torch.nn as nn
class MolEncoder(nn.Module, ABC):
def __init__(self):
super(MolEncoder, self).__init__()
@abstractmethod
def encode_mol(self, mol):
raise NotImplementedError
class ProteinEncoder(nn.Module, ABC):
def __init__(self):
super(ProteinEncoder, self).__init__()
@abstractmethod
def encode_protein(self, prot):
raise NotImplementedError
class KnowledgeEncoder(nn.Module, ABC):
def __init__(self):
super(KnowledgeEncoder, self).__init__()
@abstractmethod
def encode_knowledge(self, kg):
raise NotImplementedError
class TextEncoder(nn.Module, ABC):
def __init__(self):
super(TextEncoder, self).__init__()
@abstractmethod
def encode_text(self, text):
raise NotImplementedError | OpenBioMed-main | open_biomed/models/base_models.py |
from models.molecule import *
from models.protein import *
from models.cell import *
from models.knowledge import *
from models.text import *
from models.multimodal import *
SUPPORTED_MOL_ENCODER = {
"cnn": MolCNN,
"tgsa": GINTGSA,
"graphcl": GraphCL,
"graphmvp": GraphMVP,
"molclr": MolCLR,
"mgnn": MGNN,
"molt5": MolT5,
"bert": MolBERT,
"biomedgpt-1.6b": BioMedGPTCLIP,
"biomedgpt-10b": BioMedGPTV,
"kv-plm": KVPLM,
"momu": MoMu,
"molfm": MolFM
}
SUPPORTED_MOL_DECODER = {
"moflow": MoFlow,
"molt5": MolT5
}
SUPPORTED_PROTEIN_ENCODER = {
"cnn": ProtCNN,
"cnn_gru": CNNGRU,
"mcnn": MCNN,
"pipr": CNNPIPR,
"prottrans": ProtTrans
}
SUPPORTED_CELL_ENCODER = {
"scbert": PerformerLM,
"celllm": PerformerLM_CellLM
}
SUPPORTED_TEXT_ENCODER = {
"base_transformer": BaseTransformers,
"biomedgpt-1.6b": BioMedGPTCLIP,
"biomedgpt-10b": BioMedGPTV,
"kv-plm": KVPLM,
"kv-plm*": KVPLM,
"molfm": MolFM,
"momu": MoMu,
"text2mol": Text2MolMLP,
"molt5": MolT5
}
SUPPORTED_TEXT_DECODER = {
"molt5": MolT5,
}
SUPPORTED_KNOWLEDGE_ENCODER = {
"TransE": TransE,
"gin": GIN
} | OpenBioMed-main | open_biomed/models/__init__.py |
from typing import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base_models import ProteinEncoder
class Conv1dReLU(nn.Module):
'''
kernel_size=3, stride=1, padding=1
kernel_size=5, stride=1, padding=2
kernel_size=7, stride=1, padding=3
'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
super().__init__()
self.inc = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU()
)
def forward(self, x):
return self.inc(x)
class LinearReLU(nn.Module):
def __init__(self,in_features, out_features, bias=True):
super().__init__()
self.inc = nn.Sequential(
nn.Linear(in_features=in_features, out_features=out_features, bias=bias),
nn.ReLU()
)
def forward(self, x):
return self.inc(x)
class StackCNN(nn.Module):
def __init__(self, layer_num, in_channels, out_channels, kernel_size, stride=1, padding=0):
super().__init__()
self.inc = nn.Sequential(OrderedDict([('conv_layer0', Conv1dReLU(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding))]))
for layer_idx in range(layer_num - 1):
self.inc.add_module('conv_layer%d' % (layer_idx + 1), Conv1dReLU(out_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding))
self.inc.add_module('pool_layer', nn.AdaptiveMaxPool1d(1))
def forward(self, x):
return self.inc(x).squeeze(-1)
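# Illustrative sketch (not part of the original module): a StackCNN block maps a
# (batch, channels, length) sequence to a fixed-size (batch, out_channels) vector
# via stacked Conv1d+ReLU layers and an adaptive max pool.
def _demo_stack_cnn():
    block = StackCNN(layer_num=3, in_channels=128, out_channels=96, kernel_size=3)
    x = torch.randn(4, 128, 200)   # batch of 4 embedded sequences of length 200
    return block(x).shape          # -> torch.Size([4, 96])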
class MCNN(ProteinEncoder):
def __init__(self, config):
super().__init__()
self.output_dim = config["output_size"]
self.embed = nn.Embedding(config["vocab_size"], config["embedding_num"], padding_idx=0)
self.block_list = nn.ModuleList()
for block_idx in range(config["block_num"]):
self.block_list.append(
StackCNN(block_idx + 1, config["embedding_num"], config["hidden_size"], config["kernel_size"])
)
self.linear = nn.Linear(config["block_num"] * config["hidden_size"], config["output_size"])
def forward(self, x):
x = self.embed(x).permute(0, 2, 1)
feats = [block(x) for block in self.block_list]
x = torch.cat(feats, -1)
x = self.linear(x)
return x
def encode_protein(self, prot):
return self.forward(prot) | OpenBioMed-main | open_biomed/models/protein/mcnn.py |
from models.protein.cnn import ProtCNN, CNNGRU, CNNPIPR
from models.protein.mcnn import MCNN
from models.protein.prottrans import ProtTrans
| OpenBioMed-main | open_biomed/models/protein/__init__.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.base_models import ProteinEncoder
class ProtCNN(ProteinEncoder):
def __init__(self, config):
super(ProtCNN, self).__init__()
self.output_dim = config["output_dim"]
layer_size = len(config["in_ch"]) - 1
self.conv = nn.ModuleList(
[nn.Conv1d(
in_channels = config["in_ch"][i],
out_channels = config["in_ch"][i + 1],
kernel_size = config["kernels"][i]
) for i in range(layer_size)]
)
self.conv = self.conv.double()
hidden_dim = self._get_conv_output((config["vocab_size"], config["max_length"]))
self.fc1 = nn.Linear(hidden_dim, config["output_dim"])
def _get_conv_output(self, shape):
bs = 1
input = Variable(torch.rand(bs, *shape))
output_feat = self._forward_features(input.double())
n_size = output_feat.data.view(bs, -1).size(1)
return n_size
def _forward_features(self, x):
for l in self.conv:
x = F.relu(l(x))
x = F.adaptive_max_pool1d(x, output_size=1)
return x
def forward(self, v):
v = self._forward_features(v.double())
v = v.view(v.size(0), -1)
v = self.fc1(v.float())
return v
def encode_protein(self, prot):
return self.forward(prot)
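# Illustrative sketch (not part of the original module): ProtCNN consumes a one-hot
# encoded protein of shape (batch, vocab_size, max_length), where in_ch[0] must equal
# vocab_size. The config values below are hypothetical.
def _demo_prot_cnn():
    config = {"output_dim": 128, "in_ch": [26, 32, 64, 96], "kernels": [4, 8, 12],
              "vocab_size": 26, "max_length": 1000}
    model = ProtCNN(config)
    x = torch.rand(2, 26, 1000)
    return model(x).shape   # -> torch.Size([2, 128])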
class CNNGRU(ProteinEncoder):
def __init__(self, config):
super(CNNGRU, self).__init__()
self.conv1d = nn.Conv1d(in_channels=config["input_dim"], out_channels=config["cnn_dim"], kernel_size=3, padding=0)
self.bn1 = nn.BatchNorm1d(config["cnn_dim"])
self.biGRU = nn.GRU(config["cnn_dim"], config["cnn_dim"], bidirectional=True, batch_first=True, num_layers=1)
self.maxpool1d = nn.MaxPool1d(config["pool_size"], stride=config["pool_size"])
self.global_avgpool1d = nn.AdaptiveAvgPool1d(1)
self.fc1 = nn.Linear(math.floor(config["input_len"] / config["pool_size"]), config["output_dim"])
def forward(self, prot):
x = prot.transpose(1, 2)
x = self.conv1d(x)
x = self.bn1(x)
x = self.maxpool1d(x)
x = x.transpose(1, 2)
x, _ = self.biGRU(x)
x = self.global_avgpool1d(x)
x = x.squeeze()
x = self.fc1(x)
return x
def encode_protein(self, prot):
return self.forward(prot)
class ResConvGRU(nn.Module):
def __init__(self, input_dim, hidden_dim, pool_size):
super(ResConvGRU, self).__init__()
self.conv1d = nn.Conv1d(in_channels=input_dim, out_channels=hidden_dim, kernel_size=3, padding=1)
self.biGRU = nn.GRU(hidden_dim, hidden_dim, bidirectional=True, batch_first=True, num_layers=1)
self.pool = nn.MaxPool1d(pool_size)
def forward(self, prot):
x = prot.transpose(1, 2)
x = self.conv1d(x)
x = self.pool(x)
x = x.transpose(1, 2)
h, _ = self.biGRU(x)
x = torch.cat([h, x], dim=2)
return x
class CNNPIPR(ProteinEncoder):
def __init__(self, config):
super(CNNPIPR, self).__init__()
self.convs = nn.Sequential(
ResConvGRU(config["input_dim"], config["hidden_dim"], 2),
ResConvGRU(3 * config["hidden_dim"], config["hidden_dim"], 2),
ResConvGRU(3 * config["hidden_dim"], config["hidden_dim"], 2),
)
self.last_conv = nn.Conv1d(in_channels=config["hidden_dim"] * 3, out_channels=config["hidden_dim"], kernel_size=3, padding=1)
self.pool = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Linear(config["input_len"] // 8, config["output_dim"])
self.dropout = nn.Dropout(config["dropout"])
self.act = nn.LeakyReLU(0.3)
self.output_dim = config["output_dim"]
def forward(self, prot):
x = self.convs(prot)
x = x.transpose(1, 2)
x = self.last_conv(x)
x = x.transpose(1, 2)
x = self.pool(x).squeeze()
x = self.fc(x)
return self.act(x)
def encode_protein(self, prot):
return self.forward(prot) | OpenBioMed-main | open_biomed/models/protein/cnn.py |
import torch
import torch.nn as nn
from transformers import AutoModel
from models.base_models import ProteinEncoder
class ProtTrans(ProteinEncoder):
def __init__(self, config):
super(ProtTrans, self).__init__()
self.max_length = config["max_length"]
self.output_dim = config["output_dim"]
self.model = AutoModel.from_pretrained(config["model_name_or_path"])
self.dropout = nn.Dropout(config["dropout"])
self.fc = nn.Linear(self.model.config.hidden_size, self.output_dim)
def forward(self, prot):
batch_size, model_max_length = prot["input_ids"].shape
h = self.model(**prot).last_hidden_state
h = h[:, 0, :].view(batch_size, self.max_length // model_max_length, -1)
h = torch.mean(h, dim=1)
h = self.dropout(h)
return self.fc(h) | OpenBioMed-main | open_biomed/models/protein/prottrans.py |
import logging
logger = logging.getLogger(__name__)
import json
import torch
import torch.nn as nn
from torch_geometric.data import Batch
from transformers.modeling_outputs import BaseModelOutput
from models import SUPPORTED_MOL_ENCODER, SUPPORTED_TEXT_ENCODER, SUPPORTED_TEXT_DECODER
from models.multimodal import KVPLM, MolT5, MolFM, DrugFM
from utils.mol_utils import convert_pyg_batch
class MolQASepRepModel(nn.Module):
def __init__(self, config):
super(MolQASepRepModel, self).__init__()
self.config = config
if "config_path" in config["mol"]:
mol_encoder_config = json.load(open(config["mol"]["config_path"], "r"))
else:
mol_encoder_config = config["mol"]
#for key, value in mol_encoder_config.items():
# self.config["mol"][key] = value
self.mol_encoder = SUPPORTED_MOL_ENCODER[config["mol"]["name"]](mol_encoder_config)
if "init_checkpoint" in config["mol"]:
logger.info("Loading molecule checkpoint from %s" % (config["mol"]["init_checkpoint"]))
state_dict = torch.load(config["mol"]["init_checkpoint"], map_location="cpu")
if "param_key" in config["mol"]:
state_dict = state_dict[config["mol"]["param_key"]]
self.mol_encoder.load_state_dict(state_dict)
if config["text_encoder"]["name"] == config["mol"]["name"]:
self.text_encoder = self.mol_encoder
else:
self.text_encoder = SUPPORTED_TEXT_ENCODER[config["text_encoder"]["name"]](config["text_encoder"])
self.mol_proj = nn.Linear(self.mol_encoder.output_dim, self.text_encoder.output_dim)
if config["text_decoder"]["name"] == config["text_encoder"]["name"]:
self.text_decoder = self.text_encoder
self.encoder_decoder_proj = None
else:
self.text_decoder = SUPPORTED_TEXT_DECODER[config["text_decoder"]["name"]](config["text_decoder"])
self.encoder_decoder_proj = nn.Linear(self.text_encoder.output_dim, self.text_decoder.hidden_size)
def _concat_mol_text(self, mol_embeds, mol_attention_mask, text_embeds, text_attention_mask):
# put <cls> first
bs = mol_embeds.shape[0]
num_atoms = torch.sum(mol_attention_mask, dim=1).int()
output_embeds, output_attention_mask = [], []
for i in range(bs):
output_embeds.append(torch.cat((
text_embeds[i, 0, :].unsqueeze(0),
mol_embeds[i, :num_atoms[i], :],
text_embeds[i, 1:, :],
mol_embeds[i, num_atoms[i]:, :]
), dim=0))
output_attention_mask.append(torch.cat((
text_attention_mask[i, 0].unsqueeze(0),
mol_attention_mask[i, :num_atoms[i]],
text_attention_mask[i, 1:],
mol_attention_mask[i, num_atoms[i]:]
), dim=0))
return torch.stack(output_embeds, dim=0), torch.stack(output_attention_mask, dim=0)
def _encode(self, mol, question):
_, mol_outputs = self.mol_encoder.encode_mol(mol, return_node_feats=True)
#mol_outputs = self.mol_encoder.encode_mol(mol)
if isinstance(mol, Batch):
mol_embeds, mol_attention_mask = convert_pyg_batch(mol_outputs, mol.batch, self.config["mol"]["max_n_nodes"])
else:
mol_embeds = mol_outputs
mol_attention_mask = mol.attention_mask
mol_embeds = self.mol_proj(mol_embeds)
text_embeds = self.text_encoder.main_model.get_input_embeddings()(question.input_ids)
embeds, attention_mask = self._concat_mol_text(mol_embeds, mol_attention_mask, text_embeds, question.attention_mask)
#text_outputs = self.text_encoder.encode_text(question)
#encoder_outputs, encoder_attention_mask = self._concat_mol_text(mol_embeds, mol_attention_mask, text_outputs.last_hidden_state, question.attention_mask)
encoder_outputs = self.text_encoder.main_model.encoder(
inputs_embeds=embeds,
attention_mask=attention_mask
)
        encoder_outputs = BaseModelOutput(
            last_hidden_state=encoder_outputs.last_hidden_state,  # unwrap the encoder's ModelOutput
            hidden_states=None,
            attentions=None
        )
return encoder_outputs, attention_mask
def forward(self, mol, question, answer):
encoder_outputs, encoder_attention_mask = self._encode(mol, question)
        if self.encoder_decoder_proj is not None:
            # project the hidden states (not the ModelOutput wrapper) into the decoder's hidden size
            encoder_outputs = BaseModelOutput(
                last_hidden_state=self.encoder_decoder_proj(encoder_outputs.last_hidden_state),
                hidden_states=None,
                attentions=None
            )
labels = answer.input_ids.masked_fill(~answer.attention_mask.bool(), -100)
return self.text_decoder(
encoder_outputs=encoder_outputs,
encoder_attention_mask=encoder_attention_mask,
decoder_attention_mask=answer.attention_mask,
labels=labels
)
def generate(self, mol, question, num_beams=5, max_length=256):
encoder_outputs, encoder_attention_mask = self._encode(mol, question)
return self.text_decoder.decode(
encoder_outputs=encoder_outputs,
encoder_attention_mask=encoder_attention_mask,
num_beams=num_beams,
max_length=max_length,
)
class MolQAJointRepModel(nn.Module):
SUPPORTED_JOINT_REP_MODEL = {
"kvplm": (KVPLM, "early"),
"kvplm*": (KVPLM, "early"),
"molt5": (MolT5, "early"),
"molfm": (MolFM, "feature"),
"drugfm": (DrugFM, "feature")
}
def __init__(self, config):
super(MolQAJointRepModel, self).__init__()
self.config = config
encoder_cls, fusion_type = self.SUPPORTED_JOINT_REP_MODEL[config["encoder"]["name"]]
if "config_path" in config["encoder"]:
encoder_config = json.load(open(config["encoder"]["config_path"], "r"))
else:
encoder_config = config["encoder"]
self.encoder = encoder_cls(encoder_config)
if "init_checkpoint" in config["encoder"]:
state_dict = torch.load(config["encoder"]["init_checkpoint"], map_location="cpu")
if "param_key" in config["encoder"]:
state_dict = state_dict[config["encoder"]["param_key"]]
self.encoder.load_state_dict(state_dict)
self.fusion_type = fusion_type
if config["encoder"]["name"] != config["text_decoder"]["name"]:
self.decoder = SUPPORTED_TEXT_DECODER[config["text_decoder"]["name"]](config["text_decoder"])
self.encoder_decoder_proj = nn.Linear(self.encoder.output_dim, self.decoder.hidden_size)
else:
self.decoder = self.encoder
self.encoder_decoder_proj = None
def _concat_smi_text(self, mol, text):
bs = mol.input_ids.shape[0]
num_atoms = torch.sum(mol.attention_mask, dim=1)
output_input_ids, output_attention_mask = [], []
# ignore <eos> in molecule and <cls> in text
for i in range(bs):
output_input_ids.append(torch.cat((
mol.input_ids[i, :num_atoms[i] - 1],
text.input_ids[i, 1:],
mol.input_ids[i, num_atoms[i]:]
), dim=0))
output_attention_mask.append(torch.cat((
mol.attention_mask[i, :num_atoms[i] - 1],
text.attention_mask[i, 1:],
mol.attention_mask[i, num_atoms[i]:]
), dim=0))
return torch.stack(output_input_ids, dim=0), torch.stack(output_attention_mask, dim=0)
def _encode(self, mol, question):
if self.fusion_type == "early":
input_ids, input_attention_mask = self._concat_smi_text(mol, question)
encoder_outputs = self.encoder.encode({
"input_ids": input_ids,
"attention_mask": input_attention_mask
})
encoder_attention_mask = input_attention_mask
elif self.fusion_type == "feature":
encoder_outputs = self.encoder(mol, question).last_hidden_state
encoder_attention_mask = question.attention_mask
if self.encoder_decoder_proj is not None:
encoder_outputs = self.encoder_decoder_proj(encoder_outputs)
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs,
hidden_states=None,
attentions=None
)
return encoder_outputs, encoder_attention_mask
def forward(self, mol, question, answer):
encoder_outputs, encoder_attention_mask = self._encode(mol, question)
labels = answer.input_ids.masked_fill(~answer.attention_mask.bool(), -100)
return self.decoder(
encoder_outputs=encoder_outputs,
encoder_attention_mask=encoder_attention_mask,
decoder_attention_mask=answer.attention_mask,
labels=labels
)
def generate(self, mol, question, num_beams=5, max_length=256):
encoder_outputs, encoder_attention_mask = self._encode(mol, question)
return self.decoder.decode(
encoder_outputs=encoder_outputs,
encoder_attention_mask=encoder_attention_mask,
num_beams=num_beams,
max_length=max_length
)
SUPPORTED_MOLQA_MODELS = {
"sep_rep": MolQASepRepModel,
"joint_rep": MolQAJointRepModel,
} | OpenBioMed-main | open_biomed/models/task_model/molqa_model.py |
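A minimal sketch of how the SUPPORTED_MOLQA_MODELS registry above might be consumed by task code; the "model_type" and "network" keys and the config path are assumptions, not necessarily the project's actual schema.

import json

def build_molqa_model(config_path, device="cpu"):
    # Look up the model class by an assumed "model_type" key and build it from
    # an assumed "network" sub-config; adapt the keys to the real JSON configs.
    config = json.load(open(config_path, "r"))
    model_cls = SUPPORTED_MOLQA_MODELS[config["model_type"]]  # "sep_rep" or "joint_rep"
    return model_cls(config["network"]).to(device)

# model = build_molqa_model("configs/molqa/sep_rep.json", device="cuda")   # hypothetical path
# loss_output = model(mol_batch, question_tokens, answer_tokens)           # training
# answers = model.generate(mol_batch, question_tokens, num_beams=5)        # inference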
import json
import torch
import torch.nn as nn
from models import SUPPORTED_MOL_ENCODER, SUPPORTED_TEXT_ENCODER
class MTRModel(nn.Module):
def __init__(self, config):
super(MTRModel, self).__init__()
mol_config = json.load(open(config["structure"]["config_path"], "r"))
text_config = json.load(open(config["text"]["config_path"], "r"))
self.mol_encoder = SUPPORTED_MOL_ENCODER[config["structure"]["name"]](mol_config)
self.text_encoder = SUPPORTED_TEXT_ENCODER[config["text"]["name"]](text_config)
self.mol_proj = nn.Linear(self.mol_encoder.output_dim, config["projection_dim"])
self.text_proj = nn.Linear(self.text_encoder.output_dim, config["projection_dim"])
def encode_mol(self, mol):
h = self.mol_encoder.encode_mol(mol)
return self.mol_proj(h)
def encode_text(self, text):
h = self.text_encoder.encode_text(text)
return self.text_proj(h) | OpenBioMed-main | open_biomed/models/task_model/mtr_model.py |
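MTRModel projects both modalities into a shared space; the sketch below shows a retrieval-style use of its two encode methods. The cosine-similarity scoring is an illustration, not necessarily the loss used in the repository.

import torch.nn.functional as F

def retrieval_scores(model, mol_batch, text_batch):
    # Encode and L2-normalize both modalities, then score every molecule
    # against every text with a dot product (cosine similarity).
    mol_feats = F.normalize(model.encode_mol(mol_batch), dim=-1)
    text_feats = F.normalize(model.encode_text(text_batch), dim=-1)
    return mol_feats @ text_feats.t()  # shape: (num_molecules, num_texts)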
import torch
import torch.nn as nn
from models import SUPPORTED_CELL_ENCODER
class CTCModel(nn.Module):
def __init__(self, config, num_labels):
super(CTCModel, self).__init__()
self.encoder_name = config["structure"]["name"]
self.encoder = SUPPORTED_CELL_ENCODER[config["structure"]["name"]](**config["structure"])
ckpt = torch.load(config["structure"]["ckpt_path"])
if config["structure"]["param_key"] != "":
ckpt = ckpt[config["structure"]["param_key"]]
self.encoder.load_state_dict(ckpt)
self.conv = nn.Conv2d(1, 1, (1, config["structure"]["dim"]))
self.act = nn.ReLU()
self.pred_head = nn.Sequential(
nn.ReLU(),
nn.Linear(config["structure"]["gene_num"] + 1, config["pred_head"]["hidden_size"][0])
)
config["pred_head"]["hidden_size"] += [num_labels]
for i in range(len(config["pred_head"]["hidden_size"]) - 1):
self.pred_head.append(nn.ReLU())
self.pred_head.append(nn.Dropout(config["pred_head"]["dropout"]))
self.pred_head.append(nn.Linear(config["pred_head"]["hidden_size"][i], config["pred_head"]["hidden_size"][i + 1]))
def forward(self, data):
batch_size = data.shape[0]
if self.encoder_name == "celllm":
h, gene_pos = self.encoder(data, return_encodings=True)
else:
h = self.encoder(data, return_encodings=True)
h = h[:,None,:,:]
h = self.conv(h)
h = self.act(h)
h = h.view(h.shape[0], -1)
if self.encoder_name == "celllm":
pad_gene_id = self.encoder.pad_gene_id
gene_num = pad_gene_id - 1
out_emb = torch.zeros(batch_size, gene_num + 1).to(h.device) # , dim
for batch in range(batch_size):
seq_len = (gene_pos[batch] != pad_gene_id).sum()
out_emb[batch][gene_pos[batch][:seq_len]] = h[batch][:seq_len]
h = out_emb
return self.pred_head(h) | OpenBioMed-main | open_biomed/models/task_model/ctc_model.py |
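To make the prediction-head wiring in CTCModel above concrete, here is a toy reconstruction with assumed config values; it mirrors the same append pattern: the first Linear maps gene_num + 1 features to hidden_size[0], and the loop appends ReLU/Dropout/Linear blocks ending in num_labels outputs.

import torch.nn as nn

pred_cfg = {"hidden_size": [512, 128], "dropout": 0.1}   # hypothetical values
gene_num, num_labels = 16906, 10
head = nn.Sequential(nn.ReLU(), nn.Linear(gene_num + 1, pred_cfg["hidden_size"][0]))
dims = pred_cfg["hidden_size"] + [num_labels]
for i in range(len(dims) - 1):
    head.append(nn.ReLU())
    head.append(nn.Dropout(pred_cfg["dropout"]))
    head.append(nn.Linear(dims[i], dims[i + 1]))
# head now maps (batch, 16907) -> (batch, 10)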
import torch
import torch.nn as nn
from transformers.modeling_outputs import BaseModelOutput
from models.multimodal.molt5 import MolT5
from models import SUPPORTED_MOL_ENCODER
from utils.mol_utils import convert_pyg_batch
class MolCapModel(nn.Module):
def __init__(self, config):
super(MolCapModel, self).__init__()
self.generate_model = MolT5(config["text"])
if "structure" in config:
self.encode_model = SUPPORTED_MOL_ENCODER[config["structure"]["name"]](**config["structure"])
self.max_n_nodes = config["structure"]["max_n_nodes"]
else:
self.encode_model = None
def forward(self, mol):
labels = mol["text"]["input_ids"].masked_fill(~mol["text"]["attention_mask"].bool(), -100)
h, encoder_attention_mask = self.encode(mol)
return self.generate_model(
encoder_outputs=h,
encoder_attention_mask=encoder_attention_mask,
decoder_attention_mask=mol["text"]["attention_mask"],
labels=labels
)
def encode(self, mol):
if self.encode_model is None:
h = self.generate_model.encode(mol["structure"])
encoder_attention_mask = mol["structure"]["attention_mask"]
else:
_, node_feats = self.encode_model(mol["structure"])
h, encoder_attention_mask = convert_pyg_batch(node_feats, mol["structure"].batch, max_n_nodes=self.max_n_nodes)
h = BaseModelOutput(
last_hidden_state=h,
hidden_states=None,
attentions=None
)
return h, encoder_attention_mask
def decode(self, mol, num_beams, max_length):
h, encoder_attention_mask = self.encode(mol)
return self.generate_model.decode(
encoder_outputs=h,
encoder_attention_mask=encoder_attention_mask,
num_beams=num_beams,
max_length=max_length
)
class GraphEnhancedMolCapModel(nn.Module):
def __init__(self, config):
super(GraphEnhancedMolCapModel, self).__init__()
self.generate_model = MolT5(config["text"])
self.graph_encoder = SUPPORTED_MOL_ENCODER[config["graph"]["name"]](config["graph"])
if "init_checkpoint" in config["graph"]:
ckpt = torch.load(config["graph"]["init_checkpoint"])
if "param_key" in config["graph"]:
ckpt = ckpt[config["graph"]["param_key"]]
self.graph_encoder.load_state_dict(ckpt)
if config["graph"]["stop_grad"]:
for k, v in self.graph_encoder.named_parameters():
v.requires_grad = False
self.graph_projector = nn.Sequential(
nn.Linear(config["graph"]["output_dim"], self.generate_model.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.generate_model.hidden_size, self.generate_model.hidden_size)
)
self.max_n_nodes = config["graph"]["max_n_nodes"]
self.use_node_embeds = config["graph"]["max_n_nodes"] > 0
def forward(self, mol):
h, encoder_attention_mask = self.encode(mol)
labels = mol["text"]["input_ids"].masked_fill(~mol["text"]["attention_mask"].bool(), -100)
return self.generate_model(
encoder_outputs=h,
encoder_attention_mask=encoder_attention_mask,
decoder_attention_mask=mol["text"]["attention_mask"],
labels=labels
)
def decode(self, mol, num_beams, max_length):
h, encoder_attention_mask = self.encode(mol)
return self.generate_model.decode(
encoder_outputs=h,
encoder_attention_mask=encoder_attention_mask,
num_beams=num_beams,
max_length=max_length
)
def encode(self, mol):
B, _ = mol["structure"]["SMILES"]["attention_mask"].shape
device = mol["structure"]["SMILES"]["attention_mask"].device
smi_feats = self.generate_model.encode(mol["structure"]["SMILES"])
if self.use_node_embeds:
graph_feats, node_feats = self.graph_encoder.encode_mol(mol["structure"]["graph"], proj=False, return_node_feats=True)
graph_feats = self.graph_projector(graph_feats)
node_feats, node_attention_mask = convert_pyg_batch(node_feats, mol["structure"]["graph"].batch, self.max_n_nodes)
node_feats = self.graph_projector(node_feats)
h = BaseModelOutput(
#last_hidden_state=torch.cat([graph_feats.unsqueeze(1), node_feats, smi_feats], dim=1),
last_hidden_state=torch.cat([graph_feats.unsqueeze(1), node_feats], dim=1),
hidden_states=None,
attentions=None
)
#encoder_attention_mask = torch.cat([torch.ones(B, 1).to(device), node_attention_mask, mol["structure"]["SMILES"]["attention_mask"]], dim=1)
encoder_attention_mask = torch.cat([torch.ones(B, 1).to(device), node_attention_mask], dim=1)
else:
graph_feats = self.graph_encoder.encode_structure(mol["structure"]["graph"], proj=False)
graph_feats = self.graph_projector(graph_feats)
h = BaseModelOutput(
last_hidden_state=torch.cat([graph_feats.unsqueeze(1), smi_feats], dim=1),
hidden_states=None,
attentions=None
)
encoder_attention_mask = torch.cat([torch.ones(B, 1).to(device), mol["structure"]["SMILES"]["attention_mask"]], dim=1)
return h, encoder_attention_mask | OpenBioMed-main | open_biomed/models/task_model/molcap_model.py |
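Both captioning models above mask padded label positions with -100 so that the HuggingFace language-modeling loss ignores them. A tiny self-contained illustration of that idiom (the token ids are made up):

import torch

input_ids = torch.tensor([[101, 7592, 2088, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = input_ids.masked_fill(~attention_mask.bool(), -100)
# labels -> tensor([[ 101, 7592, 2088, -100, -100]])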
import torch
import torch.nn as nn
from models.molecule.gin_tgsa import GINTGSA
from models.cell import CellGAT
from models.cell import SUPPORTED_CELL_ENCODER
class ConvPooler(nn.Module):
def __init__(self, dim, full_seq_len):
super().__init__()
self.full_seq_len = full_seq_len
self.pad_gene_id = full_seq_len
self.conv = nn.Conv2d(1, 1, (1, dim))
def forward(self, h, gene_pos):
batchsize, seqlen, dim = h.shape
h = h[:,None,:,:]
h = self.conv(h)
h = h.view(h.shape[0], -1)
out_emb = torch.zeros(batchsize, self.full_seq_len).to(h.device)#, dim
for batch in range(batchsize):
seq_len = (gene_pos[batch] != self.pad_gene_id).sum()
out_emb[batch][gene_pos[batch][:seq_len]] = h[batch][:seq_len]
h = out_emb
return h
class ConvPoolerShort(nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = nn.Conv2d(1, 1, (1, dim))
def forward(self, h):
h = h[:,None,:,:]
h = self.conv(h)
h = h.view(h.shape[0], -1)
return h
class TGDRP(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_drug = config["layer_drug"]
self.dim_drug = config["dim_drug"]
self.input_dim_cell = config["input_dim_cell"]
self.layer_cell = config["layer_cell"]
self.dim_cell = config["dim_cell"]
self.dropout = config["dropout"]
self.cell_encoder_config = config["cell_encoder"]
def _build(self):
# drug graph branch
self.GNN_drug = GINTGSA(self.layer_drug, self.dim_drug)
self.drug_emb = nn.Sequential(
nn.Linear(self.dim_drug * self.layer_drug, 256),
nn.ReLU(),
nn.Dropout(p=self.dropout),
)
# cell graph branch
if self.cell_encoder_config["name"] == 'gat':
self.cell_encoder = CellGAT(self.input_dim_cell, self.layer_cell, self.dim_cell, self.cluster_predefine)
cell_encode_dim = self.dim_cell * self.cell_encoder.final_node
elif self.cell_encoder_config["name"] == 'deepcdr':
self.cell_encoder = SUPPORTED_CELL_ENCODER[self.cell_encoder_config["name"]](**self.cell_encoder_config)
cell_encode_dim = self.cell_encoder_config['output_dim']
else:
self.cell_encoder = SUPPORTED_CELL_ENCODER[self.cell_encoder_config["name"]](**self.cell_encoder_config)
if "ckpt_path" in self.cell_encoder_config:
ckpt = torch.load(self.cell_encoder_config["ckpt_path"])
if self.cell_encoder_config["param_key"] != "":
ckpt = ckpt[self.cell_encoder_config["param_key"]]
self.cell_encoder.load_state_dict(ckpt)
if self.cell_encoder_config["name"] in ["scbert", "celllm"]:
if self.cell_encoder_config["name"] == "celllm":
self.cell_encoder.to_out = ConvPooler(self.cell_encoder_config["dim"], self.cell_encoder_config["gene_num"] + 1)
cell_encode_dim = self.cell_encoder_config["gene_num"] + 1
else:
self.cell_encoder.to_out = ConvPoolerShort(self.cell_encoder_config["dim"])
cell_encode_dim = self.cell_encoder_config["max_seq_len"]
else:
cell_encode_dim = self.dim_cell * self.cell_encoder.final_node
self.cell_emb = nn.Sequential(
nn.Linear(cell_encode_dim, 1024),
nn.ReLU(),
nn.Dropout(p=self.dropout),
nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(p=self.dropout),
)
self.regression = nn.Sequential(
nn.Linear(512, 512),
nn.ELU(),
nn.Dropout(p=self.dropout),
nn.Linear(512, 512),
nn.ELU(),
nn.Dropout(p=self.dropout),
nn.Linear(512, 1)
)
def forward(self, drug, cell):
# forward drug
x_drug = self.GNN_drug(drug)
x_drug = self.drug_emb(x_drug)
# forward cell
x_cell = self.cell_encoder(cell)
x_cell = self.cell_emb(x_cell)
# combine drug feature and cell line feature
x = torch.cat([x_drug, x_cell], -1)
x = self.regression(x)
return x
| OpenBioMed-main | open_biomed/models/task_model/drp_model.py |
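A shape walk-through for ConvPoolerShort defined above (the sizes are arbitrary, not the real scBERT/CellLM dimensions): the Conv2d with kernel (1, dim) collapses the per-gene embedding dimension, leaving one scalar per sequence position.

import torch

dim, seq_len, batch = 200, 512, 4
pool = ConvPoolerShort(dim)
h = torch.randn(batch, seq_len, dim)   # (batch, seq_len, dim) token encodings
out = pool(h)
print(out.shape)                       # torch.Size([4, 512])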
import json
import torch
import torch.nn as nn
from transformers.modeling_outputs import BaseModelOutput
from models import SUPPORTED_TEXT_ENCODER
from models.multimodal.molt5 import MolT5
class Text2SMILESModel(nn.Module):
def __init__(self, config):
super(Text2SMILESModel, self).__init__()
self.generate_model = MolT5(config["smiles"])
if "text" in config:
if "config_path" in config["text"]:
text_config = json.load(open(config["text"]["config_path"]))
text_config["name"] = config["text"]["name"]
text_config["use_num_layers"] = config["text"]["use_num_layers"]
else:
text_config = config["text"]
self.text_encoder = SUPPORTED_TEXT_ENCODER[config["text"]["name"]](text_config)
if "init_checkpoint" in config["text"]:
ckpt = torch.load(config["text"]["init_checkpoint"])
if "param_key" in config["text"]:
ckpt = ckpt[config["text"]["param_key"]]
self.text_encoder.load_state_dict(ckpt)
self.text_projector = nn.Sequential(
nn.Linear(config["text"]["output_dim"], self.generate_model.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.generate_model.hidden_size, self.generate_model.hidden_size)
)
else:
self.text_encoder = None
def forward(self, mol):
h, encoder_attention_mask = self._encode_text(mol)
labels = mol["structure"]["input_ids"].masked_fill(~mol["structure"]["attention_mask"].bool(), -100)
return self.generate_model(
encoder_outputs=h,
encoder_attention_mask=encoder_attention_mask,
decoder_attention_mask=mol["structure"]["attention_mask"],
labels=labels
)
def decode(self, mol, num_beams, max_length):
h, encoder_attention_mask = self._encode_text(mol)
return self.generate_model.decode(
encoder_outputs=h,
encoder_attention_mask=encoder_attention_mask,
num_beams=num_beams,
max_length=max_length
)
def _encode_text(self, mol):
if self.text_encoder is not None:
text_feats = self.text_encoder.encode_text(mol["text"], return_cls=False, proj=False)
text_feats = self.text_projector(text_feats)
else:
text_feats = self.generate_model.encode(mol["text"])
h = BaseModelOutput(
last_hidden_state=text_feats,
hidden_states=None,
attentions=None
)
text_attention_mask = mol["text"]["attention_mask"]
return h, text_attention_mask | OpenBioMed-main | open_biomed/models/task_model/text2smi_model.py |
import logging
logger = logging.getLogger(__name__)
import torch
import torch.nn as nn
import json
from transformers import AutoModel
from models import SUPPORTED_MOL_ENCODER, SUPPORTED_PROTEIN_ENCODER
from models.predictor import MLP
class DTIModel(nn.Module):
def __init__(self, config, pred_dim):
super(DTIModel, self).__init__()
drug_encoder_config = json.load(open(config["mol"]["config_path"], "r"))
self.drug_encoder = SUPPORTED_MOL_ENCODER[config["mol"]["name"]](drug_encoder_config)
if "ckpt" in drug_encoder_config:
state_dict = torch.load(open(drug_encoder_config["ckpt"], "rb"), map_location="cpu")
if "param_key" in drug_encoder_config:
state_dict = state_dict[drug_encoder_config["param_key"]]
self.drug_encoder.load_state_dict(state_dict)
logger.info("load drug encoder from %s" % (drug_encoder_config["ckpt"]))
protein_encoder_config = json.load(open(config["protein"]["config_path"], "r"))
self.protein_encoder = SUPPORTED_PROTEIN_ENCODER[config["protein"]["name"]](protein_encoder_config)
if "ckpt" in protein_encoder_config:
state_dict = torch.load(open(protein_encoder_config["ckpt"], "rb"), map_location="cpu")
if "param_key" in protein_encoder_config:
state_dict = state_dict[protein_encoder_config["param_key"]]
self.protein_encoder.load_state_dict(state_dict)
logger.info("load protein encoder from %s" % (protein_encoder_config["ckpt"]))
self.pred_head = MLP(config["pred_head"], self.drug_encoder.output_dim + self.protein_encoder.output_dim, pred_dim)
def forward(self, drug, protein):
h_drug = self.drug_encoder.encode_mol(drug)
h_protein = self.protein_encoder.encode_protein(protein)
h = torch.cat((h_drug, h_protein), dim=1)
return self.pred_head(h)
class DeepEIK4DTI(nn.Module):
def __init__(self, config, pred_dim):
super(DeepEIK4DTI, self).__init__()
self.use_attention = config["use_attention"]
self.projection_dim = config["projection_dim"]
drug_encoder_config = json.load(open(config["mol"]["structure"]["config_path"], "r"))
self.drug_structure_encoder = SUPPORTED_MOL_ENCODER[config["mol"]["structure"]["name"]](drug_encoder_config)
protein_encoder_config = json.load(open(config["protein"]["structure"]["config_path"], "r"))
self.protein_structure_encoder = SUPPORTED_PROTEIN_ENCODER[config["protein"]["structure"]["name"]](protein_encoder_config)
self.structure_hidden_dim = self.drug_structure_encoder.output_dim + self.protein_structure_encoder.output_dim
self.kg_project = nn.Sequential(
nn.Linear(config["mol"]["kg"]["embedding_dim"] + config["protein"]["kg"]["embedding_dim"], self.projection_dim),
nn.Dropout(config["projection_dropout"])
)
if "text" in config:
self.text_encoder = AutoModel.from_pretrained(config["text"]["model_name_or_path"])
else:
self.text_encoder = None
self.text_project = nn.Sequential(
nn.Linear(config["text_dim"], self.projection_dim),
nn.Dropout(config["projection_dropout"])
)
if self.use_attention:
self.attn = nn.MultiheadAttention(self.structure_hidden_dim + self.projection_dim, num_heads=config["num_attention_heads"], kdim=self.text_encoder.config.hidden_size, vdim=self.text_encoder.config.hidden_size)
self.pred_head = MLP(config["pred_head"], self.structure_hidden_dim + 2 * self.projection_dim, pred_dim)
def forward(self, drug, protein):
h_drug_structure = self.drug_structure_encoder.encode_mol(drug["structure"])
h_protein_structure = self.protein_structure_encoder.encode_protein(protein["structure"])
h_structure = torch.cat((h_drug_structure, h_protein_structure), dim=1)
h_kg = self.kg_project(torch.cat((drug["kg"], protein["kg"]), dim=1))
if self.text_encoder is not None:
h_text = self.text_encoder(**drug["text"]).last_hidden_state[:, 0, :]
else:
h_text = drug["text"]
if self.use_attention:
_, attn = self.attn(torch.cat((h_structure, h_kg), dim=1).unsqueeze(1), h_text, h_text)
h_text = torch.matmul(attn * drug["text"].unsqueeze(1), h_text)
h_text = self.text_project(h_text)
h = torch.cat((h_structure, h_kg, h_text), dim=1)
return self.pred_head(h)
| OpenBioMed-main | open_biomed/models/task_model/dti_model.py |
import torch
import torch.nn as nn
import json
from transformers import AutoModel
from models import SUPPORTED_MOL_ENCODER
from models.multimodal.molfm.molfm import MolFM
activation = {
"sigmoid": nn.Sigmoid(),
"softplus": nn.Softplus(),
"relu": nn.ReLU(),
"gelu": nn.GELU(),
"tanh": nn.Tanh(),
}
class MLP(nn.Module):
def __init__(self, config, input_dim, output_dim):
super(MLP, self).__init__()
self.model = nn.Sequential()
hidden_dims = [input_dim] + config["hidden_size"] + [output_dim]
for i in range(len(hidden_dims) - 1):
self.model.append(nn.Linear(hidden_dims[i], hidden_dims[i + 1]))
if i != len(hidden_dims) - 2:
self.model.append(nn.Dropout(config["dropout"]))
if config["activation"] != "none":
self.model.append(activation[config["activation"]])
if config["batch_norm"]:
self.model.append(nn.BatchNorm1d(hidden_dims[i + 1]))
def forward(self, h):
return self.model(h)
# TODO: choose header for different encoder
HEAD4ENCODER = {
"deepeik": MLP,
"momu": nn.Linear,
"molfm": nn.Linear,
"molclr": nn.Linear,
"graphmvp": nn.Linear,
"biomedgpt-1.6b": nn.Linear,
"kvplm": MLP
}
class DPModel(nn.Module):
def __init__(self, config, out_dim):
super(DPModel, self).__init__()
# prepare model
if config["model"] == "DeepEIK":
self.encoder = SUPPORTED_MOL_ENCODER[config["model"]](config["network"])
else:
self.encoder = SUPPORTED_MOL_ENCODER[config["model"]](config["network"]["structure"])
encoder_ckpt = config["network"]["structure"]["init_checkpoint"]
if encoder_ckpt != "":
ckpt = torch.load(encoder_ckpt, map_location="cpu")
param_key = config["network"]["structure"]["param_key"]
if param_key != "":
ckpt = ckpt[param_key]
missing_keys, unexpected_keys = self.encoder.load_state_dict(ckpt, strict=False)
print("missing_keys: ", missing_keys)
print("unexpected_keys: ", unexpected_keys)
self.proj_head = HEAD4ENCODER[config["network"]["structure"]["name"]](self.encoder.output_dim, out_dim)
def forward(self, drug):
if not isinstance(self.encoder, MolFM):
h = self.encoder.encode_mol(drug, proj=False, return_node_feats=False) # encoder_struct
else:
h = self.encoder.encode_structure_with_kg(drug["structure"], drug["kg"])
return self.proj_head(h)
class DeepEIK4DP(nn.Module):
def __init__(self, config, out_dim):
super(DeepEIK4DP, self).__init__()
self.use_attention = config["use_attention"]
self.projection_dim = config["projection_dim"]
drug_encoder_config = json.load(open(config["mol"]["structure"]["config_path"], "r"))
self.drug_structure_encoder = SUPPORTED_MOL_ENCODER[config["mol"]["structure"]["name"]](drug_encoder_config)
self.structure_hidden_dim = self.drug_structure_encoder.output_dim
self.kg_project = nn.Sequential(
nn.Linear(config["mol"]["kg"]["embedding_dim"], self.projection_dim),
nn.Dropout(config["projection_dropout"])
)
# TODO: need to update based on different text_tokenizer
if "text" in config["mol"]:
self.text_encoder = AutoModel.from_pretrained(config["mol"]["text"]["model_name_or_path"])
else:
self.text_encoder = None
# get the embeding of a sentence
self.text_project = nn.Sequential(
nn.Linear(config["text_dim"], self.projection_dim),
nn.Dropout(config["projection_dropout"])
)
if self.use_attention:
self.attn = nn.MultiheadAttention(self.structure_hidden_dim + self.projection_dim,
num_heads=config["num_attentin_heads"],
kdim=self.text_encoder.config.hidden_size,
vdim=self.text_encoder.config.hidden_size
)
# structure + kg + text
self.pred_head = MLP(config["pred_head"], self.structure_hidden_dim + 2 * self.projection_dim, out_dim)
def forward(self, drug):
self.h_drug_structure = self.drug_structure_encoder(drug["structure"])
h_kg = drug["kg"]
if self.text_encoder is not None:
h_text = self.text_encoder(**drug["text"]).last_hidden_state[:, 0, :]
else:
h_text = drug["text"]
# TODO:
if self.use_attention:
_, attn = self.attn(torch.cat((self.h_drug_structure, h_kg), dim=1).unsqueeze(1), h_text, h_text)
h_text = torch.matmul(attn * drug["text"].unsqueeze(1), h_text)
h_text = self.text_project(h_text)
h = torch.cat((self.h_drug_structure, h_kg, h_text), dim=1)
return self.pred_head(h)
| OpenBioMed-main | open_biomed/models/task_model/dp_model.py |
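A minimal usage sketch for the MLP predictor defined above; the config values are assumptions chosen only to show the expected keys.

import torch

mlp_cfg = {"hidden_size": [256, 64], "dropout": 0.1, "activation": "relu", "batch_norm": False}
head = MLP(mlp_cfg, input_dim=512, output_dim=2)
logits = head(torch.randn(8, 512))     # -> shape (8, 2)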
import json
import torch
import torch.nn as nn
from models import SUPPORTED_PROTEIN_ENCODER, SUPPORTED_KNOWLEDGE_ENCODER
from models.predictor import MLP
class PPISeqModel(nn.Module):
def __init__(self, config, num_classes):
super(PPISeqModel, self).__init__()
protein_encoder_config = json.load(open(config["encoder"]["config_path"], "r"))
self.protein_encoder = SUPPORTED_PROTEIN_ENCODER[config["encoder"]["name"]](protein_encoder_config)
self.feature_fusion = config["feature_fusion"]
if self.feature_fusion == 'concat':
in_dim = self.protein_encoder.output_dim * 2
else:
in_dim = self.protein_encoder.output_dim
self.pred_head = MLP(config["pred_head"], in_dim, num_classes)
def forward(self, prot1, prot2):
x1 = self.protein_encoder(prot1)
x2 = self.protein_encoder(prot2)
#print(x1, x2)
if self.feature_fusion == 'concat':
x = torch.cat([x1, x2], dim=1)
else:
x = torch.mul(x1, x2)
return self.pred_head(x)
class PPIGraphModel(nn.Module):
def __init__(self, config, num_classes):
super(PPIGraphModel, self).__init__()
self.graph_encoder = SUPPORTED_KNOWLEDGE_ENCODER[config["name"]](config)
self.feature_fusion = config["feature_fusion"]
if self.feature_fusion == 'concat':
in_dim = self.graph_encoder.output_dim * 2
else:
in_dim = self.graph_encoder.output_dim
self.fc = nn.Linear(in_dim, num_classes)
def forward(self, prot1, prot2, graph):
x = self.graph_encoder(graph)
x1, x2 = x[prot1], x[prot2]
if self.feature_fusion == 'concat':
x = torch.cat([x1, x2], dim=1)
else:
x = torch.mul(x1, x2)
x = self.fc(x)
return x
class DeepEIK4PPI(nn.Module):
def __init__(self, config, num_classes):
super(DeepEIK4PPI, self).__init__()
def forward(self, prot1, prot2):
pass | OpenBioMed-main | open_biomed/models/task_model/ppi_model.py |
import torch
import torch.nn as nn
class DeepCDR(torch.nn.Module):
def __init__(self, input_dim, output_dim=100, **kwargs):
super().__init__()
self.linear1 = nn.Linear(input_dim, 256)
self.dropout = nn.Dropout(0.1)
self.norm = nn.BatchNorm1d(256)
self.linear2 = nn.Linear(256, output_dim)
def forward(self, gexpr_input):
# print(gexpr_input)
gexpr_input = gexpr_input.float()
x_gexpr = self.linear1(gexpr_input)
x_gexpr = torch.tanh(x_gexpr)
x_gexpr = self.norm(x_gexpr)
x_gexpr = self.dropout(x_gexpr)
x_gexpr = self.linear2(x_gexpr)
return x_gexpr | OpenBioMed-main | open_biomed/models/cell/deepcdr.py |
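A quick smoke test for DeepCDR above; the 697-dimensional gene-expression input is an assumption for illustration, not necessarily this repository's preprocessing.

import torch

enc = DeepCDR(input_dim=697, output_dim=100)   # 697 is an assumed gene-expression size
enc.eval()                                     # BatchNorm1d needs batch > 1 in train mode
feats = enc(torch.randn(2, 697))
print(feats.shape)                             # torch.Size([2, 100])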
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import autocast
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from einops import rearrange, repeat
from operator import itemgetter
from functools import partial
from contextlib import contextmanager
from local_attention import LocalAttention
try:
from apex import amp
APEX_AVAILABLE = True
except ImportError:
APEX_AVAILABLE = False
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
def forward(self, x, output_attentions = False, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if output_attentions:
attn_weights = []
for (f, g), (f_args, g_args) in layers_and_args:
if output_attentions:
x = x + f(x, output_attentions = output_attentions, **f_args)[0]
attn_weights.append(f(x, output_attentions = output_attentions, **f_args)[1].unsqueeze(0))
else:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
if output_attentions:
attn_weights = torch.transpose(torch.cat(attn_weights, dim=0), 0, 1) # the final dim is (batch, layer, head, len, len)
attn_weights = torch.mean(attn_weights, dim=1) # the dim is (batch, head, len, len)
return x, attn_weights
else:
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}):
super().__init__()
self.args_route = args_route
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
# helpers
def exists(val):
return val is not None
def empty(tensor):
return tensor.numel() == 0
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def cast_tuple(val):
return (val,) if not isinstance(val, tuple) else val
# def get_module_device(module):
# return next(module.parameters).device
def get_module_device(module):
try:
return next(module.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = module._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, *args, **kwargs):
return self.val
# kernel functions
# transcribed from jax to pytorch from
# https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device = None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps)
return data_dash.type_as(data)
def generalized_kernel(data, *, projection_matrix, kernel_fn = nn.ReLU(), kernel_epsilon = 0.001, normalize_data = True, device = None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime.type_as(data)
def orthogonal_matrix_chunk(cols, device = None):
unstructured_block = torch.randn((cols, cols), device = device)
q, r = torch.qr(unstructured_block.cpu(), some = True)
q, r = map(lambda t: t.to(device), (q, r))
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, device = None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, device = device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, device = device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1)
elif scaling == 1:
multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
# linear attention classes with softmax kernel
# non-causal linear attention
def linear_attention(q, k, v):
k_cumsum = k.sum(dim = -2)
D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
return out
# efficient causal linear attention, created by EPFL
# TODO: rewrite EPFL's CUDA kernel to do mixed precision and remove half to float conversion and back
def causal_linear_attention(q, k, v, eps = 1e-6):
from fast_transformers.causal_product import CausalDotProduct
autocast_enabled = torch.is_autocast_enabled()
is_half = isinstance(q, torch.cuda.HalfTensor)
assert not is_half or APEX_AVAILABLE, 'half tensors can only be used if nvidia apex is available'
cuda_context = null_context if not autocast_enabled else partial(autocast, enabled = False)
causal_dot_product_fn = amp.float_function(CausalDotProduct.apply) if is_half else CausalDotProduct.apply
k_cumsum = k.cumsum(dim=-2) + eps
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
with cuda_context():
if autocast_enabled:
q, k, v = map(lambda t: t.float(), (q, k, v))
out = causal_dot_product_fn(q, k, v)
out = torch.einsum('...nd,...n->...nd', out, D_inv)
return out
# inefficient causal linear attention, without cuda code, for reader's reference
# not being used
def causal_linear_attention_noncuda(q, k, v, chunk_size = 128):
last_k_cumsum = 0
last_context_cumsum = 0
outs = []
for q, k, v in zip(*map(lambda t: t.chunk(chunk_size, dim = -2), (q, k, v))):
k_cumsum = last_k_cumsum + k.cumsum(dim=-2)
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...nde', k, v)
context_cumsum = last_context_cumsum + context.cumsum(dim=-3)
out = torch.einsum('...nde,...nd,...n->...ne', context_cumsum, q, D_inv)
last_k_cumsum = k_cumsum[:, :, -1:]
last_context_cumsum = context_cumsum[:, :, -1:]
outs.append(out)
return torch.cat(outs, dim = -2)
def norm_tensor(tensor, dim=-1):
return tensor / tensor.sum(dim=dim).unsqueeze(dim)
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features = None, ortho_scaling = 0, causal = False, generalized_attention = False, kernel_fn = nn.ReLU(), no_projection = False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows = self.nb_features, nb_columns = dim_heads, scaling = ortho_scaling)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
# if this is turned on, no projection will be used
# queries and keys will be softmax-ed as in the original efficient attention paper
self.no_projection = no_projection
self.causal = causal
if causal:
try:
import fast_transformers.causal_product.causal_product_cuda
self.causal_linear_fn = partial(causal_linear_attention)
except ImportError:
print('unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
self.causal_linear_fn = causal_linear_attention_noncuda
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device = device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v, output_attentions = False):
device = q.device
# inds = [8060, 8064, 6243, 8575, 10342, 10913, 9366, 993, 7796, 5210, 5212, 5504, 6851, 6559, 5508, 13107, 13820]
if self.no_projection:
q = q.softmax(dim = -1)
k = torch.exp(k) if self.causal else k.softmax(dim = -2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn = self.kernel_fn, projection_matrix = self.projection_matrix, device = device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix = self.projection_matrix, device = device)
q = create_kernel(q, is_query = True)
k = create_kernel(k, is_query = False)
attn_fn = linear_attention if not self.causal else self.causal_linear_fn
out = attn_fn(q, k, v)
if output_attentions:
v_diag = torch.eye(v.shape[-2]).to(device)
v_diag = v_diag.unsqueeze(0).unsqueeze(0).repeat(v.shape[0],v.shape[1],1,1)
# attn_weights = torch.zeros(1, 1, len(inds), len(inds)).to(device).to(torch.float16)
# attn_weights = torch.zeros(1, q.shape[1], len(inds), len(inds)).to(device).to(torch.float16)
attn_weights = torch.zeros(1, 1, q.shape[2], q.shape[2]).to(device).to(torch.float16)
for head_dim in range(q.shape[1]):
# attn_weights[0, head_dim] = torch.abs(attn_fn(q[:,head_dim].to(torch.float16), k[:,head_dim].to(torch.float16), v_diag[:,head_dim].to(torch.float16)))[0, inds][:, inds]
attn_weights += torch.abs(attn_fn(q[:,head_dim].to(torch.float16), k[:,head_dim].to(torch.float16), v_diag[:,head_dim].to(torch.float16)))
# attn_weights += norm_tensor(torch.abs(attn_fn(q[:,head_dim].to(torch.float16), k[:,head_dim].to(torch.float16), v_diag[:,head_dim].to(torch.float16))), dim=-1)
attn_weights /= q.shape[1]
return out, attn_weights
else:
return out
# classes
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.tensor(1e-3))
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.g
class PreScaleNorm(nn.Module):
def __init__(self, dim, fn, eps=1e-5):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x, **kwargs):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
x = x / n * self.g
return self.fn(x, **kwargs)
class PreLayerNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, nn.GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
class SelfAttention(nn.Module):
def __init__(
self,
dim,
causal = False,
heads = 8,
dim_head = 64,
local_heads = 0,
local_window_size = 256,
nb_features = None,
feature_redraw_interval = 1000,
generalized_attention = False,
kernel_fn = nn.ReLU(),
dropout = 0.,
no_projection = False,
qkv_bias = False
):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = default(dim_head, dim // heads)
inner_dim = dim_head * heads
self.fast_attention = FastAttention(dim_head, nb_features, causal = causal, generalized_attention = generalized_attention, kernel_fn = kernel_fn, no_projection = no_projection)
self.heads = heads
self.global_heads = heads - local_heads
self.local_attn = LocalAttention(window_size = local_window_size, causal = causal, autopad = True, dropout = dropout, look_forward = int(not causal), rel_pos_emb_config = (dim_head, local_heads)) if local_heads > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_k = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_v = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, pos_emb = None, context = None, mask = None, context_mask = None, output_attentions = False, **kwargs):
b, n, _, h, gh = *x.shape, self.heads, self.global_heads
cross_attend = exists(context)
context = default(context, x)
context_mask = default(context_mask, mask) if not cross_attend else context_mask
q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
(q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
attn_outs = []
if not empty(q):
if exists(context_mask):
global_mask = context_mask[:, None, :, None]
v.masked_fill_(~global_mask, 0.)
if exists(pos_emb) and not cross_attend:
q, k, = apply_rotary_pos_emb(q, k, pos_emb)
if output_attentions:
out, attn_weights = self.fast_attention(q, k, v, output_attentions)
else:
out = self.fast_attention(q, k, v)
attn_outs.append(out)
if not empty(lq):
assert not cross_attend, 'local attention is not compatible with cross attention'
out = self.local_attn(lq, lk, lv, input_mask = mask)
attn_outs.append(out)
out = torch.cat(attn_outs, dim = 1) # combine global and local attention outputs; with no local heads there is only one entry, so the concat is a no-op
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
if output_attentions:
return self.dropout(out), attn_weights
else:
return self.dropout(out)
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(q, k, sinu_pos):
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2)
sin, cos = sinu_pos.unbind(dim = -2)
sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j = 2), (sin, cos))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, k
# sinusoidal positional embeddings
class Gene2VecPositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, gene2vec_path):
super().__init__()
gene2vec_weight = np.load(gene2vec_path)
gene2vec_weight = np.concatenate((gene2vec_weight, np.zeros((1, gene2vec_weight.shape[1]))), axis=0)
gene2vec_weight = torch.from_numpy(gene2vec_weight)
self.emb = nn.Embedding.from_pretrained(gene2vec_weight)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
# performer
class Performer(nn.Module):
def __init__(
self,
dim, # dimension
depth, # layers
heads, # heads
dim_head, # dim of head
local_attn_heads = 0, # num of local attention heads, (heads - local_attn_heads) is num of global performers
local_window_size = 256, # window size of local attention
causal = False, # autoregressive or not
ff_mult = 4, # dim of intermediate features after attention / dim of input features
nb_features = None, # number of random features used to approximate the softmax kernel; defaults to d * log(d), where d is the dimension of each head
feature_redraw_interval = 1000, # how frequently to redraw the projection matrix, the more frequent, the slower the training
reversible = False, # reversible layers, from Reformer (save memory)
ff_chunks = 1, # chunk feedforward layer, from Reformer
generalized_attention = False, # defaults to the softmax-kernel approximation; set True to use an arbitrary kernel (kernel_fn) instead
kernel_fn = nn.ReLU(), # the kernel function to be used, if generalized attention is turned on, defaults to Relu
use_scalenorm = False, # use scale norm, from 'Transformers without Tears' paper, a substitute for LayerNorm, priority: scalenorm.rezero.layernorm
use_rezero = False, # use Rezero or not, from 'Rezero is all you need' paper, a substitute for LayerNorm, priority: scalenorm.rezero.layernorm
ff_glu = False, # use GLU (Gated Linear Units) variant for feedforward
ff_dropout = 0., # feedforward dropout
attn_dropout = 0., # post-attention dropout
cross_attend = False, # interleave cross-attention layers (decoder-style) after each self-attention layer
no_projection = False, # skip the random-feature projection and apply softmax directly to queries/keys
auto_check_redraw = True, # automatically redraw the projection matrices every feature_redraw_interval forward calls
qkv_bias = True, # add bias terms to the query/key/value projections
):
super().__init__()
layers = nn.ModuleList([])
local_attn_heads = cast_tuple(local_attn_heads)
local_attn_heads = local_attn_heads * depth if len(local_attn_heads) == 1 else local_attn_heads
assert len(local_attn_heads) == depth, 'tuple specifying number of local attention heads per depth must be equal to the total depth'
assert all(map(lambda n: n >= 0 and n <= heads, local_attn_heads)), 'local attention head value must be less than the total number of heads'
if use_scalenorm:
wrapper_fn = partial(PreScaleNorm, dim)
elif use_rezero:
wrapper_fn = ReZero
else:
wrapper_fn = partial(PreLayerNorm, dim)
for _, local_heads in zip(range(depth), local_attn_heads):
layers.append(nn.ModuleList([
wrapper_fn(SelfAttention(dim, causal = causal, heads = heads, dim_head = dim_head, local_heads = local_heads, local_window_size = local_window_size, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection, qkv_bias = qkv_bias)),
wrapper_fn(Chunk(ff_chunks, FeedForward(dim, mult = ff_mult, dropout = ff_dropout, glu = ff_glu), along_dim = 1))
]))
# if no need cross_attend(decoder), begin next cycle
if not cross_attend:
continue
layers.append(nn.ModuleList([
wrapper_fn(SelfAttention(dim, heads = heads, dim_head = dim_head, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection)),
wrapper_fn(Chunk(ff_chunks, FeedForward(dim, mult = ff_mult, dropout = ff_dropout, glu = ff_glu), along_dim = 1))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth * (2 if cross_attend else 1) # ((True, False), (True, False), (True, False), (True, False), (True, False), (True, False))
route_context = ((False, False), (True, False)) * depth
attn_route_map = {'mask': route_attn, 'pos_emb': route_attn}
context_route_map = {'context': route_context, 'context_mask': route_context} if cross_attend else {}
self.net = execute_type(layers, args_route = {**attn_route_map, **context_route_map})
# keeping track of when to redraw projections for all attention layers
self.auto_check_redraw = auto_check_redraw
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
def fix_projection_matrices_(self):
self.feature_redraw_interval = None
def check_redraw_projections(self):
if not self.training:
return
if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def forward(self, x, output_attentions = False, **kwargs):
if self.auto_check_redraw:
self.check_redraw_projections()
return self.net(x, output_attentions = output_attentions, **kwargs)
class PerformerLM(nn.Module):
def __init__(
self,
*,
num_tokens, # num of tokens
max_seq_len, # max length of sequence
dim, # dim of tokens
depth, # layers
heads, # num of heads
dim_head = 64, # dim of heads
local_attn_heads = 0,
local_window_size = 256,
causal = False,
ff_mult = 4,
nb_features = None,
feature_redraw_interval = 1000,
reversible = False,
ff_chunks = 1,
ff_glu = False,
emb_dropout = 0.,
ff_dropout = 0.,
attn_dropout = 0.,
generalized_attention = False,
kernel_fn = nn.ReLU(),
use_scalenorm = False,
use_rezero = False,
cross_attend = False,
no_projection = False,
tie_embed = False, # False: a separate output Linear maps to num_tokens; True: reuse the token-embedding weights for the logits, like a GPT decoder
g2v_position_emb = True, # True: use pretrained Gene2Vec positional embeddings; False: positions receive a zero embedding
auto_check_redraw = True,
qkv_bias = False,
tune_layer = [-1],
gene2vec_path = "../assets/gene2vec_16906.npy",
**kwargs
):
super().__init__()
local_attn_heads = cast_tuple(local_attn_heads)
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
if g2v_position_emb:
self.pos_emb = Gene2VecPositionalEmbedding(dim, max_seq_len, gene2vec_path)
self.layer_pos_emb = Always(None)
else:
self.pos_emb = torch.zeros_like
self.layer_pos_emb = Always(None)
self.dropout = nn.Dropout(emb_dropout)
self.performer = Performer(dim, depth, heads, dim_head, local_attn_heads, local_window_size, causal, ff_mult, nb_features, feature_redraw_interval, reversible, ff_chunks, generalized_attention, kernel_fn, use_scalenorm, use_rezero, ff_glu, ff_dropout, attn_dropout, cross_attend, no_projection, auto_check_redraw, qkv_bias)
self.norm = nn.LayerNorm(dim)
self.to_out = nn.Linear(dim, num_tokens) if not tie_embed else None
for param in self.parameters():
param.requires_grad = False
for param in self.norm.parameters():
param.requires_grad = True
for layer in tune_layer:
for param in self.performer.net.layers[layer].parameters():
param.requires_grad = True
def check_redraw_projections(self):
self.performer.check_redraw_projections()
def fix_projection_matrices_(self):
self.performer.fix_projection_matrices_()
def forward(self, x, return_encodings = False, output_attentions = False, **kwargs):
b, n, device = *x.shape, x.device
assert n <= self.max_seq_len, f'sequence length {n} must be less than the max sequence length {self.max_seq_len}'
# token and positional embedding
x = self.token_emb(x)
if output_attentions:
x.requires_grad_() # used for attn_map output
x += self.pos_emb(x)
x = self.dropout(x)
# performer layers
layer_pos_emb = self.layer_pos_emb(x)
if output_attentions:
x, attn_weights = self.performer(x, pos_emb = layer_pos_emb, output_attentions = output_attentions, **kwargs)
# norm and to logits
x = self.norm(x)
if return_encodings:
return x, attn_weights
if exists(self.to_out):
return self.to_out(x), attn_weights
return (x @ self.token_emb.weight.t()), attn_weights
else:
x = self.performer(x, pos_emb = layer_pos_emb, output_attentions = output_attentions, **kwargs)
# norm and to logits
x = self.norm(x)
if return_encodings:
return x
if exists(self.to_out):
x = self.to_out(x)
return x
return x @ self.token_emb.weight.t()
| OpenBioMed-main | open_biomed/models/cell/performer.py |
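A small sanity check that the linear_attention routine in performer.py above matches explicit attention after the kernel feature map: forming the full n x n score matrix and row-normalizing it gives the same result up to floating-point error. The shapes and random inputs are arbitrary.

import torch

b, h, n, d, e = 2, 4, 32, 16, 16
q = torch.rand(b, h, n, d) + 1e-3      # positive "random features", as produced by softmax_kernel
k = torch.rand(b, h, n, d) + 1e-3
v = torch.randn(b, h, n, e)

fast = linear_attention(q, k, v)       # O(n * d * e) formulation
attn = q @ k.transpose(-2, -1)         # explicit (n x n) attention scores
slow = (attn @ v) / attn.sum(dim=-1, keepdim=True)
assert torch.allclose(fast, slow, atol=1e-5)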
from models.cell.gat import CellGAT
from models.cell.performer import PerformerLM
from models.cell.performer_celllm import PerformerLM_CellLM
from models.cell.deepcdr import DeepCDR
SUPPORTED_CELL_ENCODER = {
"scbert": PerformerLM,
"celllm": PerformerLM_CellLM,
"gat": CellGAT,
"deepcdr": DeepCDR
} | OpenBioMed-main | open_biomed/models/cell/__init__.py |
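The registry above is consumed by instantiating an encoder from a config dict, e.g. SUPPORTED_CELL_ENCODER[name](**config["structure"]) as CTCModel does. A minimal sketch with assumed config values:

cell_cfg = {"name": "deepcdr", "input_dim": 697, "output_dim": 100}  # assumed values
encoder_cls = SUPPORTED_CELL_ENCODER[cell_cfg["name"]]
encoder = encoder_cls(**cell_cfg)      # DeepCDR absorbs the extra "name" key via **kwargs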
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import autocast
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from einops import rearrange, repeat
from operator import itemgetter
from functools import partial
from contextlib import contextmanager
from local_attention import LocalAttention
try:
from apex import amp
APEX_AVAILABLE = True
except ImportError:
APEX_AVAILABLE = False
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
def forward(self, x, output_attentions = False, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if output_attentions:
attn_weights = []
for (f, g), (f_args, g_args) in layers_and_args:
if output_attentions:
x = x + f(x, output_attentions = output_attentions, **f_args)[0]
attn_weights.append(f(x, output_attentions = output_attentions, **f_args)[1].unsqueeze(0))
else:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
if output_attentions:
attn_weights = torch.transpose(torch.cat(attn_weights, dim=0), 0, 1) # the final dim is (batch, layer, head, len, len)
attn_weights = torch.mean(attn_weights, dim=1) # the dim is (batch, head, len, len)
return x, attn_weights
else:
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}):
super().__init__()
self.args_route = args_route
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
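# Illustrative sketch of ReversibleSequence on toy modules (all sizes are arbitrary): the input
# is duplicated along the feature dim, each (f, g) pair acts on one half, and activations are
# recomputed from the outputs during backward instead of being stored.
def _reversible_sequence_example():
    dim = 8
    blocks = [(nn.Linear(dim, dim), nn.Linear(dim, dim)) for _ in range(2)]
    seq = ReversibleSequence(blocks)
    x = torch.randn(4, 10, dim, requires_grad=True)
    out = seq(x)          # -> (4, 10, dim)
    out.sum().backward()  # gradients are reconstructed block by block
    return x.grad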
# helpers
def exists(val):
return val is not None
def empty(tensor):
return tensor.numel() == 0
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def cast_tuple(val):
return (val,) if not isinstance(val, tuple) else val
# def get_module_device(module):
# return next(module.parameters).device
def get_module_device(module):
try:
return next(module.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module):
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = module._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, *args, **kwargs):
return self.val
# kernel functions
# transcribed from jax to pytorch from
# https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device = None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps)
return data_dash.type_as(data)
def generalized_kernel(data, *, projection_matrix, kernel_fn = nn.ReLU(), kernel_epsilon = 0.001, normalize_data = True, device = None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime.type_as(data)
def orthogonal_matrix_chunk(cols, device = None):
unstructured_block = torch.randn((cols, cols), device = device)
q, r = torch.linalg.qr(unstructured_block.cpu(), mode='reduced')
q, r = map(lambda t: t.to(device), (q, r))
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, device = None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, device = device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, device = device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1)
elif scaling == 1:
multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
# linear attention classes with softmax kernel
# non-causal linear attention
def linear_attention(q, k, v):
k_cumsum = k.sum(dim = -2)
D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
return out
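# Illustrative sketch of non-causal FAVOR+ attention built from the helpers above (all shapes
# and the random-feature count below are arbitrary): queries/keys are mapped through the
# random-feature softmax kernel, then combined with linear_attention without materializing
# the full n-by-n attention matrix.
def _favor_attention_example():
    b, h, n, d, m = 2, 4, 16, 32, 64  # batch, heads, sequence length, head dim, random features
    q, k, v = (torch.randn(b, h, n, d) for _ in range(3))
    projection = gaussian_orthogonal_random_matrix(nb_rows=m, nb_columns=d)
    q_prime = softmax_kernel(q, projection_matrix=projection, is_query=True)
    k_prime = softmax_kernel(k, projection_matrix=projection, is_query=False)
    return linear_attention(q_prime, k_prime, v)  # -> (b, h, n, d)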
# efficient causal linear attention, created by EPFL
# TODO: rewrite EPFL's CUDA kernel to do mixed precision and remove half to float conversion and back
def causal_linear_attention(q, k, v, eps = 1e-6):
from fast_transformers.causal_product import CausalDotProduct
autocast_enabled = torch.is_autocast_enabled()
is_half = isinstance(q, torch.cuda.HalfTensor)
assert not is_half or APEX_AVAILABLE, 'half tensors can only be used if nvidia apex is available'
cuda_context = null_context if not autocast_enabled else partial(autocast, enabled = False)
causal_dot_product_fn = amp.float_function(CausalDotProduct.apply) if is_half else CausalDotProduct.apply
k_cumsum = k.cumsum(dim=-2) + eps
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
with cuda_context():
if autocast_enabled:
q, k, v = map(lambda t: t.float(), (q, k, v))
out = causal_dot_product_fn(q, k, v)
out = torch.einsum('...nd,...n->...nd', out, D_inv)
return out
# inefficient causal linear attention, without cuda code, for reader's reference
# not being used
def causal_linear_attention_noncuda(q, k, v, chunk_size = 128):
last_k_cumsum = 0
last_context_cumsum = 0
outs = []
for q, k, v in zip(*map(lambda t: t.chunk(chunk_size, dim = -2), (q, k, v))):
k_cumsum = last_k_cumsum + k.cumsum(dim=-2)
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...nde', k, v)
context_cumsum = last_context_cumsum + context.cumsum(dim=-3)
out = torch.einsum('...nde,...nd,...n->...ne', context_cumsum, q, D_inv)
last_k_cumsum = k_cumsum[:, :, -1:]
last_context_cumsum = context_cumsum[:, :, -1:]
outs.append(out)
return torch.cat(outs, dim = -2)
def norm_tensor(tensor, dim=-1):
return tensor / tensor.sum(dim=dim).unsqueeze(dim)
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features = None, ortho_scaling = 0, causal = False, generalized_attention = False, kernel_fn = nn.ReLU(), no_projection = False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows = self.nb_features, nb_columns = dim_heads, scaling = ortho_scaling)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
# if this is turned on, no projection will be used
# queries and keys will be softmax-ed as in the original efficient attention paper
self.no_projection = no_projection
self.causal = causal
if causal:
try:
import fast_transformers.causal_product.causal_product_cuda
self.causal_linear_fn = partial(causal_linear_attention)
except ImportError:
print('unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
self.causal_linear_fn = causal_linear_attention_noncuda
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device = device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v, output_attentions = False):
device = q.device
# inds = [8060, 8064, 6243, 8575, 10342, 10913, 9366, 993, 7796, 5210, 5212, 5504, 6851, 6559, 5508, 13107, 13820]
if self.no_projection:
q = q.softmax(dim = -1)
k = torch.exp(k) if self.causal else k.softmax(dim = -2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn = self.kernel_fn, projection_matrix = self.projection_matrix, device = device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix = self.projection_matrix, device = device)
q = create_kernel(q, is_query = True)
k = create_kernel(k, is_query = False)
attn_fn = linear_attention if not self.causal else self.causal_linear_fn
out = attn_fn(q, k, v)
if output_attentions:
v_diag = torch.eye(v.shape[-2]).to(device)
v_diag = v_diag.unsqueeze(0).unsqueeze(0).repeat(v.shape[0],v.shape[1],1,1)
# attn_weights = torch.zeros(1, 1, len(inds), len(inds)).to(device).to(torch.float16)
# attn_weights = torch.zeros(1, q.shape[1], len(inds), len(inds)).to(device).to(torch.float16)
attn_weights = torch.zeros(1, 1, q.shape[2], q.shape[2]).to(device).to(torch.float16)
for head_dim in range(q.shape[1]):
# attn_weights[0, head_dim] = torch.abs(attn_fn(q[:,head_dim].to(torch.float16), k[:,head_dim].to(torch.float16), v_diag[:,head_dim].to(torch.float16)))[0, inds][:, inds]
attn_weights += torch.abs(attn_fn(q[:,head_dim].to(torch.float16), k[:,head_dim].to(torch.float16), v_diag[:,head_dim].to(torch.float16)))
# attn_weights += norm_tensor(torch.abs(attn_fn(q[:,head_dim].to(torch.float16), k[:,head_dim].to(torch.float16), v_diag[:,head_dim].to(torch.float16))), dim=-1)
attn_weights /= q.shape[1]
return out, attn_weights
else:
return out
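# Illustrative sketch of FastAttention on random tensors (head dim, feature count and shapes
# are arbitrary): the module draws its own projection matrix at construction time and can
# refresh it later via redraw_projection_matrix.
def _fast_attention_example():
    attn = FastAttention(dim_heads=64, nb_features=128)
    q, k, v = (torch.randn(1, 8, 32, 64) for _ in range(3))  # (batch, heads, seq, dim_heads)
    return attn(q, k, v)  # -> (1, 8, 32, 64)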
# classes
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.tensor(1e-3))
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.g
class PreScaleNorm(nn.Module):
def __init__(self, dim, fn, eps=1e-5):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x, **kwargs):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
x = x / n * self.g
return self.fn(x, **kwargs)
class PreLayerNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, nn.GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
class SelfAttention(nn.Module):
def __init__(
self,
dim,
causal = False,
heads = 8,
dim_head = 64,
local_heads = 0,
local_window_size = 256,
nb_features = None,
feature_redraw_interval = 1000,
generalized_attention = False,
kernel_fn = nn.ReLU(),
dropout = 0.,
no_projection = False,
qkv_bias = False
):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = default(dim_head, dim // heads)
inner_dim = dim_head * heads
self.fast_attention = FastAttention(dim_head, nb_features, causal = causal, generalized_attention = generalized_attention, kernel_fn = kernel_fn, no_projection = no_projection)
self.heads = heads
self.global_heads = heads - local_heads
self.local_attn = LocalAttention(window_size = local_window_size, causal = causal, autopad = True, dropout = dropout, look_forward = int(not causal), rel_pos_emb_config = (dim_head, local_heads)) if local_heads > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_k = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_v = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, pos_emb = None, context = None, mask = None, context_mask = None, output_attentions = False, **kwargs):
b, n, _, h, gh = *x.shape, self.heads, self.global_heads
cross_attend = exists(context)
context = default(context, x)
context_mask = default(context_mask, mask) if not cross_attend else context_mask
q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
(q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
attn_outs = []
if not empty(q):
if exists(context_mask):
global_mask = context_mask[:, None, :, None]
v.masked_fill_(~global_mask, 0.)
if exists(pos_emb) and not cross_attend:
q, k, = apply_rotary_pos_emb(q, k, pos_emb)
if output_attentions:
out, attn_weights = self.fast_attention(q, k, v, output_attentions)
else:
out = self.fast_attention(q, k, v)
attn_outs.append(out)
if not empty(lq):
assert not cross_attend, 'local attention is not compatible with cross attention'
out = self.local_attn(lq, lk, lv, input_mask = mask)
attn_outs.append(out)
out = torch.cat(attn_outs, dim = 1) # combine attn_out and cross_attn_out, here we have only attn_out, that means this line does nothing
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
if output_attentions:
return self.dropout(out), attn_weights
else:
return self.dropout(out)
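# Illustrative sketch of SelfAttention on a random batch (width, head count and sequence
# length are arbitrary); with local_heads=0 (the default) every head uses FastAttention.
def _self_attention_example():
    attn = SelfAttention(dim=512, heads=8, dim_head=64)
    x = torch.randn(2, 128, 512)  # (batch, seq, dim)
    return attn(x)  # -> (2, 128, 512)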
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(q, k, sinu_pos):
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2)
sin, cos = sinu_pos.unbind(dim = -2)
sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j = 2), (sin, cos))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, k
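# Illustrative sketch of the rotary helpers on random tensors (shapes are arbitrary): sinu_pos
# is read with sin in the first half of its last dim and cos in the second half. Note that in
# PerformerLM_CellLM below, layer_pos_emb is Always(None), so this path is not exercised there.
def _rotary_example():
    b, h, n, d = 1, 2, 8, 16
    q, k = torch.randn(b, h, n, d), torch.randn(b, h, n, d)
    sinu_pos = torch.randn(1, n, d)
    return apply_rotary_pos_emb(q, k, sinu_pos)  # each -> (b, h, n, d)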
# sinusoidal positional embeddings
class Gene2VecPositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, gene2vec_path):
super().__init__()
gene2vec_weight = np.load(gene2vec_path)
gene2vec_weight = np.concatenate((gene2vec_weight, np.ones((1, gene2vec_weight.shape[1]))), axis=0) # CLS
gene2vec_weight = np.concatenate((gene2vec_weight, np.zeros((1, gene2vec_weight.shape[1]))), axis=0) # PAD
gene2vec_weight = torch.from_numpy(gene2vec_weight)
self.emb = nn.Embedding.from_pretrained(gene2vec_weight)
def forward(self, x, pos = None):
if pos is None:
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
else:
return self.emb(pos)
# performer
class Performer(nn.Module):
def __init__(
self,
dim, # dimension
depth, # layers
heads, # heads
dim_head, # dim of head
local_attn_heads = 0, # num of local attention heads, (heads - local_attn_heads) is num of global performers
local_window_size = 256, # window size of local attention
causal = False, # autoregressive or not
ff_mult = 4, # dim of intermediate features after attention / dim of input features
nb_features = None, # number of random features, if not set, will default to (d * log(d)), where d is the dimension of each head ?? what is random feature ??
feature_redraw_interval = 1000, # how frequently to redraw the projection matrix, the more frequent, the slower the training
reversible = False, # reversible layers, from Reformer (save memory)
ff_chunks = 1, # chunk feedforward layer, from Reformer
generalized_attention = False, # defaults to softmax approximation, but can be set to True for generalized attention ?? what is generalized attention ??
kernel_fn = nn.ReLU(), # the kernel function to be used, if generalized attention is turned on, defaults to Relu
use_scalenorm = False, # use scale norm, from 'Transformers without Tears' paper, a substitute for LayerNorm, priority: scalenorm.rezero.layernorm
use_rezero = False, # use Rezero or not, from 'Rezero is all you need' paper, a substitute for LayerNorm, priority: scalenorm.rezero.layernorm
ff_glu = False, # use GLU (Gated Linear Units) variant for feedforward
ff_dropout = 0., # feedforward dropout
attn_dropout = 0., # post-attention dropout
cross_attend = False, # ??
no_projection = False, # ??
auto_check_redraw = True, # ??
qkv_bias = True, # ??
):
super().__init__()
layers = nn.ModuleList([])
local_attn_heads = cast_tuple(local_attn_heads)
local_attn_heads = local_attn_heads * depth if len(local_attn_heads) == 1 else local_attn_heads
assert len(local_attn_heads) == depth, 'tuple specifying number of local attention heads per depth must be equal to the total depth'
assert all(map(lambda n: n >= 0 and n <= heads, local_attn_heads)), 'local attention head value must be less than the total number of heads'
if use_scalenorm:
wrapper_fn = partial(PreScaleNorm, dim)
elif use_rezero:
wrapper_fn = ReZero
else:
wrapper_fn = partial(PreLayerNorm, dim)
for _, local_heads in zip(range(depth), local_attn_heads):
layers.append(nn.ModuleList([
wrapper_fn(SelfAttention(dim, causal = causal, heads = heads, dim_head = dim_head, local_heads = local_heads, local_window_size = local_window_size, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection, qkv_bias = qkv_bias)),
wrapper_fn(Chunk(ff_chunks, FeedForward(dim, mult = ff_mult, dropout = ff_dropout, glu = ff_glu), along_dim = 1))
]))
# if no need cross_attend(decoder), begin next cycle
if not cross_attend:
continue
layers.append(nn.ModuleList([
wrapper_fn(SelfAttention(dim, heads = heads, dim_head = dim_head, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection)),
wrapper_fn(Chunk(ff_chunks, FeedForward(dim, mult = ff_mult, dropout = ff_dropout, glu = ff_glu), along_dim = 1))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth * (2 if cross_attend else 1) # ((True, False), (True, False), (True, False), (True, False), (True, False), (True, False))
route_context = ((False, False), (True, False)) * depth
attn_route_map = {'mask': route_attn, 'pos_emb': route_attn}
context_route_map = {'context': route_context, 'context_mask': route_context} if cross_attend else {}
self.net = execute_type(layers, args_route = {**attn_route_map, **context_route_map})
# keeping track of when to redraw projections for all attention layers
self.auto_check_redraw = auto_check_redraw
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
def fix_projection_matrices_(self):
self.feature_redraw_interval = None
def check_redraw_projections(self):
if not self.training:
return
if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def forward(self, x, output_attentions = False, **kwargs):
if self.auto_check_redraw:
self.check_redraw_projections()
return self.net(x, output_attentions = output_attentions, **kwargs)
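# Illustrative sketch of a tiny Performer stack on random features (all sizes are arbitrary
# and far smaller than the pretrained scBERT/CellLM configurations used elsewhere in this repo).
def _performer_example():
    model = Performer(dim=128, depth=2, heads=4, dim_head=32)
    x = torch.randn(1, 64, 128)  # (batch, seq, dim)
    return model(x)  # -> (1, 64, 128)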
class PerformerLM_CellLM(nn.Module):
def __init__(
self,
*,
num_tokens, # num of tokens
max_seq_len, # max length of sequence
dim, # dim of tokens
depth, # layers
heads, # num of heads
dim_head = 64, # dim of heads
local_attn_heads = 0,
local_window_size = 256,
causal = False,
ff_mult = 4,
nb_features = None,
feature_redraw_interval = 1000,
reversible = False,
ff_chunks = 1,
ff_glu = False,
emb_dropout = 0.,
ff_dropout = 0.,
attn_dropout = 0.,
generalized_attention = False,
kernel_fn = nn.ReLU(),
use_scalenorm = False,
use_rezero = False,
cross_attend = False,
no_projection = False,
tie_embed = False, # False: output is num of tokens, True: output is dim of tokens //multiply final embeddings with token weights for logits, like gpt decoder//
g2v_position_emb = True, # priority: gene2vec, no embedding
auto_check_redraw = True,
qkv_bias = False,
tune_layer = [-1],
gene2vec_path = '../assets/gene2vec_19379_512.npy',
gene_num = 19379,
**kwargs
):
super().__init__()
local_attn_heads = cast_tuple(local_attn_heads)
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pad_token_id = num_tokens - 1
self.cls_token_id = num_tokens - 2
self.pad_gene_id = gene_num + 1
self.cls_gene_id = gene_num
if g2v_position_emb:
self.pos_emb = Gene2VecPositionalEmbedding(dim, max_seq_len, gene2vec_path)
self.layer_pos_emb = Always(None)
else:
            # NOTE: torch.zeros_like cannot accept the (x, gene_pos) call used in forward();
            # use a zero positional embedding that simply ignores the position argument.
            self.pos_emb = lambda x, pos=None: torch.zeros_like(x)
self.layer_pos_emb = Always(None)
self.dropout = nn.Dropout(emb_dropout)
self.performer = Performer(dim, depth, heads, dim_head, local_attn_heads, local_window_size, causal, ff_mult, nb_features, feature_redraw_interval, reversible, ff_chunks, generalized_attention, kernel_fn, use_scalenorm, use_rezero, ff_glu, ff_dropout, attn_dropout, cross_attend, no_projection, auto_check_redraw, qkv_bias)
self.norm = nn.LayerNorm(dim)
self.to_out = nn.Linear(dim, num_tokens) if not tie_embed else None
for param in self.parameters():
param.requires_grad = False
for param in self.norm.parameters():
param.requires_grad = True
for layer in tune_layer:
for param in self.performer.net.layers[layer].parameters():
param.requires_grad = True
def check_redraw_projections(self):
self.performer.check_redraw_projections()
def fix_projection_matrices_(self):
self.performer.fix_projection_matrices_()
def forward(self, x, return_encodings = False, output_attentions = False, **kwargs):
input_exp = torch.ones(x.shape[0], self.max_seq_len).long().to(x.device) * self.pad_token_id
gene_pos = torch.ones(x.shape[0], self.max_seq_len).long().to(x.device) * self.pad_gene_id
mask = torch.zeros(x.shape[0], self.max_seq_len).to(x.device).bool()
for i, input_seq in enumerate(x):
exp = input_seq[input_seq > 0]
pos = torch.tensor(range(input_seq.shape[0])).to(x.device)[input_seq > 0]
exp[exp > (self.cls_token_id - 1)] = self.cls_token_id - 1
exp = exp.long()
exp = torch.cat((torch.tensor([self.cls_token_id]).to(x.device), exp))
pos = torch.cat((torch.tensor([self.cls_gene_id]).to(x.device), pos))
actual_len = min(exp.shape[0], self.max_seq_len)
input_exp[i, :actual_len] = exp[:actual_len]
gene_pos[i, :actual_len] = pos[:actual_len]
mask[i, :actual_len] = True
x = input_exp
b, n, device = *x.shape, x.device
        assert n <= self.max_seq_len, f'sequence length {n} must not exceed the max sequence length {self.max_seq_len}'
# token and positional embedding
x = self.token_emb(x)
if output_attentions:
x.requires_grad_() # used for attn_map output
x += self.pos_emb(x, gene_pos)
x = self.dropout(x)
# performer layers
layer_pos_emb = self.layer_pos_emb(x)
if output_attentions:
x, attn_weights = self.performer(x, pos_emb = layer_pos_emb, output_attentions = output_attentions, mask = mask, **kwargs)
# norm and to logits
x = self.norm(x)
if return_encodings:
return x, attn_weights
if exists(self.to_out):
return self.to_out(x), attn_weights
return (x @ self.token_emb.weight.t()), attn_weights
else:
x = self.performer(x, pos_emb = layer_pos_emb, output_attentions = output_attentions, mask = mask, **kwargs)
# norm and to logits
x = self.norm(x)
x *= mask.unsqueeze(-1)
if return_encodings:
return x, gene_pos
if exists(self.to_out):
                try:
                    x = self.to_out(x)
                except TypeError:
                    # some output heads may additionally expect the gene positions
                    x = self.to_out(x, gene_pos)
return x
return x @ self.token_emb.weight.t()
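# Illustrative sketch of running a tiny PerformerLM_CellLM on random expression values (every
# size below is made up and much smaller than the released CellLM checkpoints). Using
# g2v_position_emb=False avoids loading the gene2vec .npy file and relies on the zero
# positional-embedding fallback above accepting the gene positions.
def _celllm_example():
    model = PerformerLM_CellLM(
        num_tokens=7, max_seq_len=32, dim=64, depth=2, heads=4,
        g2v_position_emb=False, gene_num=30, tune_layer=[-1],
    )
    expressions = torch.randint(0, 5, (2, 30)).float()  # (batch, gene_num) raw counts
    return model(expressions)                            # -> (2, 32, num_tokens)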
| OpenBioMed-main | open_biomed/models/cell/performer_celllm.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch_geometric.nn import GATConv, max_pool
class CellGAT(torch.nn.Module):
def __init__(self, num_feature, layer_cell, dim_cell, cluster_predefine):
super().__init__()
self.num_feature = num_feature
self.layer_cell = layer_cell
self.dim_cell = dim_cell
self.cluster_predefine = cluster_predefine
self.final_node = len(self.cluster_predefine[self.layer_cell - 1].unique())
self.convs_cell = torch.nn.ModuleList()
self.bns_cell = torch.nn.ModuleList()
# self.activations = torch.nn.ModuleList()
for i in range(self.layer_cell):
if i:
conv = GATConv(self.dim_cell, self.dim_cell)
else:
conv = GATConv(self.num_feature, self.dim_cell)
bn = torch.nn.BatchNorm1d(self.dim_cell, affine=False) # True or False
# activation = nn.PReLU(self.dim_cell)
self.convs_cell.append(conv)
self.bns_cell.append(bn)
# self.activations.append(activation)
def forward(self, cell):
for i in range(self.layer_cell):
cell.x = F.relu(self.convs_cell[i](cell.x, cell.edge_index))
num_node = int(cell.x.size(0) / cell.num_graphs)
cluster = torch.cat([self.cluster_predefine[i] + j * num_node for j in range(cell.num_graphs)])
cell = max_pool(cluster, cell, transform=None)
cell.x = self.bns_cell[i](cell.x)
node_representation = cell.x.reshape(-1, self.final_node * self.dim_cell)
return node_representation
def grad_cam(self, cell):
for i in range(self.layer_cell):
cell.x = F.relu(self.convs_cell[i](cell.x, cell.edge_index))
if i == 0:
cell_node = cell.x
cell_node.retain_grad()
num_node = int(cell.x.size(0) / cell.num_graphs)
cluster = torch.cat([self.cluster_predefine[i] + j * num_node for j in range(cell.num_graphs)])
cell = max_pool(cluster, cell, transform=None)
cell.x = self.bns_cell[i](cell.x)
node_representation = cell.x.reshape(-1, self.final_node * self.dim_cell)
return cell_node, node_representation | OpenBioMed-main | open_biomed/models/cell/gat.py |
from models.multimodal.bert import MolBERT
from models.multimodal.biomedgpt import BioMedGPTCLIP, BioMedGPTV
from models.multimodal.kv_plm import KVPLM
from models.multimodal.momu import MoMu
from models.multimodal.molfm.molfm import MolFM
from models.multimodal.molfm.drugfm import DrugFM
from models.multimodal.molt5 import MolT5
from models.multimodal.text2mol import Text2MolMLP | OpenBioMed-main | open_biomed/models/multimodal/__init__.py |
import torch
import torch.nn as nn
from transformers import T5Tokenizer, T5ForConditionalGeneration
from models.base_models import MolEncoder, TextEncoder
class MolT5(MolEncoder, TextEncoder):
def __init__(self, config):
super(MolT5, self).__init__()
self.main_model = T5ForConditionalGeneration.from_pretrained(config['model_name_or_path'])
self.tokenizer = T5Tokenizer.from_pretrained(config['model_name_or_path'])
if "stop_grad" in config:
for k, v in self.main_model.named_parameters():
v.requires_grad = False
self.hidden_size = self.main_model.config.hidden_size
self.output_dim = self.hidden_size
def forward(self, encoder_outputs, encoder_attention_mask, decoder_attention_mask, labels):
return self.main_model(
encoder_outputs=encoder_outputs,
attention_mask=encoder_attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=labels
).loss
def encode(self, text):
return self.main_model.encoder(**text).last_hidden_state
def decode(self, encoder_outputs, encoder_attention_mask, num_beams, max_length):
outputs = self.main_model.generate(
encoder_outputs = encoder_outputs,
attention_mask = encoder_attention_mask,
num_beams=num_beams,
max_length=max_length,
)
return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
def encode_mol(self, mol):
return self.encode(mol)
def encode_text(self, text):
return self.main_model.encoder(**text) | OpenBioMed-main | open_biomed/models/multimodal/molt5.py |