python_code (stringlengths 0-992k) | repo_name (stringlengths 8-46) | file_path (stringlengths 5-162)
---|---|---|
import pickle
import csv
import numpy as np
from rdkit import Chem
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, BertTokenizer
from utils import ToDevice
from utils.mol_utils import load_mol2vec
from models.base_models import MolEncoder, TextEncoder
class Text2MolMLP(MolEncoder, TextEncoder):
def __init__(self, ninp, nout, nhid, model_name_or_path, cid2smiles_path, cid2vec_path, mol2vec_output_path=None):
super(Text2MolMLP, self).__init__()
if mol2vec_output_path is not None:
self.smiles2vec = load_mol2vec(mol2vec_output_path)
else:
self._prepare_smi2vec(cid2smiles_path, cid2vec_path)
self.text_hidden1 = nn.Linear(ninp, nout)
self.ninp = ninp
self.nhid = nhid
self.nout = nout
self.mol_hidden1 = nn.Linear(nout, nhid)
self.mol_hidden2 = nn.Linear(nhid, nhid)
self.mol_hidden3 = nn.Linear(nhid, nout)
self.temp = nn.Parameter(torch.Tensor([0.07]))
self.register_parameter('temp', self.temp)
self.ln1 = nn.LayerNorm((nout))
self.ln2 = nn.LayerNorm((nout))
self.relu = nn.ReLU()
self.selu = nn.SELU()
self.other_params = list(self.parameters())  # all parameters registered so far, i.e. everything except the BERT weights created below
self.text_transformer_model = BertModel.from_pretrained(model_name_or_path)
self.text_tokenizer = BertTokenizer.from_pretrained(model_name_or_path)
def _prepare_smi2vec(self, cid2smiles_path, cid2vec_path):
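# Builds self.smiles2vec (canonical SMILES -> mol2vec embedding) from two inputs:
# cid2smiles_path, a pickle mapping CIDs to SMILES ('*' placeholder entries are dropped),
# and cid2vec_path, a tab-separated file with columns (cid, mol2vec, desc).
# SMILES keys are canonicalized with RDKit (isomeric SMILES) before being stored.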
cid2smiles = pickle.load(open(cid2smiles_path, "rb"))
cid2remove = []
for cid in cid2smiles:
if cid2smiles[cid] == '*':
cid2remove.append(cid)
for cid in cid2remove:
cid2smiles.pop(cid, None)
smiles2cid = {}
for cid in cid2smiles:
smi = cid2smiles[cid]
smi = smi.replace("\\\\", "\\")
if cid2smiles[cid] == '*':
continue
mol = Chem.MolFromSmiles(smi)
smi = Chem.MolToSmiles(mol, isomericSmiles=True)
smiles2cid[smi] = cid
cid2vec = {}
with open(cid2vec_path, "r") as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE, fieldnames = ['cid', 'mol2vec', 'desc'])
for line in reader:
cid2vec[line['cid']] = np.fromstring(line['mol2vec'], sep=" ")
self.smiles2vec = {}
for smi in smiles2cid:
if smiles2cid[smi] in cid2vec:
self.smiles2vec[smi] = cid2vec[smiles2cid[smi]]
def forward(self, smi, text, device):
text = self.text_tokenizer(text, max_length=512, padding='max_length', truncation=True, return_tensors='pt')
text = ToDevice(text, device)
text_encoder_output = self.text_transformer_model(**text)
text_x = text_encoder_output['pooler_output']
text_x = self.text_hidden1(text_x)
smi = smi.replace("\\\\", "\\")
mol_x = torch.from_numpy(self.smiles2vec[smi]).reshape((1, -1)).to(device).float()
x = self.relu(self.mol_hidden1(mol_x))
x = self.relu(self.mol_hidden2(x))
x = self.mol_hidden3(x)
x = self.ln1(x)
text_x = self.ln2(text_x)
x = x * torch.exp(self.temp)
text_x = text_x * torch.exp(self.temp)
return F.cosine_similarity(x, text_x)
def encode_mol(self, mol):
x = self.relu(self.mol_hidden1(mol))
x = self.relu(self.mol_hidden2(x))
x = self.mol_hidden3(x)
return self.ln1(x)
def encode_text(self, text):
text_encoder_output = self.text_transformer_model(**text)
text_x = text_encoder_output['pooler_output']
text_x = self.text_hidden1(text_x)
return self.ln2(text_x)

| OpenBioMed-main | open_biomed/models/multimodal/text2mol.py |
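A minimal usage sketch for the Text2MolMLP scorer defined above. The dimensions, checkpoint name, and file paths are illustrative assumptions rather than values taken from this repository's configs, and the SMILES passed to the model must already be a key of self.smiles2vec:

```python
# Illustrative sketch only: paths, checkpoint name and dimensions are assumptions.
from models.multimodal.text2mol import Text2MolMLP  # assumed import path

model = Text2MolMLP(
    ninp=768,    # text encoder (BERT) hidden size
    nout=300,    # shared embedding size, must match the mol2vec dimension
    nhid=600,    # hidden width of the molecule MLP
    model_name_or_path="allenai/scibert_scivocab_uncased",  # hypothetical checkpoint
    cid2smiles_path="data/cid2smiles.pkl",                  # hypothetical path
    cid2vec_path="data/cid2vec.tsv",                        # hypothetical path
)
score = model("CCO", "The molecule is ethanol, a simple primary alcohol.", device="cpu")
```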
import logging
logger = logging.getLogger(__name__)
import torch
import torch.nn as nn
from transformers import BertModel
from models.base_models import MolEncoder, TextEncoder
class MolBERT(MolEncoder, TextEncoder):
def __init__(self, config):
super(MolBERT, self).__init__()
self.text_encoder = BertModel.from_pretrained(config["model_name_or_path"])
self.dropout = nn.Dropout(config["dropout"])
def forward(self, drug):
return self.encode_structure(drug["strcture"]), self.encode_text(drug["text"])
def encode_structure(self, structure):
h = self.text_encoder(**structure)["pooler_output"]
return self.dropout(h)
def encode_text(self, text):
h = self.text_encoder(**text)["pooler_output"]
return self.dropout(h)

| OpenBioMed-main | open_biomed/models/multimodal/bert.py |
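A hedged construction sketch for the MolBERT wrapper above; the config keys are the ones read in __init__, while the checkpoint name and dropout value are placeholders:

```python
# Illustrative sketch only: the checkpoint name and dropout value are placeholders.
config = {
    "model_name_or_path": "bert-base-uncased",  # any BERT-style checkpoint
    "dropout": 0.1,
}
molbert = MolBERT(config)
# forward() expects a dict with tokenized "strcture" and "text" entries (keys as spelled
# in forward above) and returns a pair of pooled, dropout-regularized BERT embeddings.
```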
import logging
logger = logging.getLogger(__name__)
import torch
import torch.nn as nn
from transformers import BertConfig, BertForPreTraining, BertModel
from models.base_models import MolEncoder, TextEncoder
class KVPLMStarEncoder(nn.Module):
def __init__(self, bert_config):
super(KVPLMStarEncoder, self).__init__()
self.ptmodel = BertForPreTraining(bert_config)
self.emb = nn.Embedding(390, 768)
def forward(self, input_ids, attention_mask, token_type_ids, output_hidden_states=False):
embs = self.ptmodel.bert.embeddings.word_embeddings(input_ids)
msk = torch.where(input_ids >= 30700)
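# Token ids >= 30700 fall outside the backbone BERT vocabulary; the loop below swaps
# their word embeddings for rows of the auxiliary 390-entry table (self.emb), indexed
# by (input_id - 30700), before the sequence is fed to the BERT encoder.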
for k in range(msk[0].shape[0]):
i = msk[0][k].item()
j = msk[1][k].item()
embs[i, j] = self.emb(input_ids[i, j] - 30700)
return self.ptmodel.bert(inputs_embeds=embs, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=output_hidden_states)
class KVPLM(MolEncoder, TextEncoder):
def __init__(self, config):
super(KVPLM, self).__init__()
bert_config = BertConfig.from_json_file(config["bert_config_path"])
if config["name"] == "kv-plm*":
self.text_encoder = KVPLMStarEncoder(bert_config)
else:
self.text_encoder = BertModel(bert_config)
self.use_num_layers = config["use_num_layers"] if "use_num_layers" in config else -1
ckpt = torch.load(config["init_checkpoint"])
processed_ckpt = {}
if 'module.ptmodel.bert.embeddings.word_embeddings.weight' in ckpt:
processed_ckpt = {k[7:]: v for k, v in ckpt.items()}
elif 'bert.embeddings.word_embeddings.weight' in ckpt:
for k, v in ckpt.items():
if k.startswith("bert."):
processed_ckpt[k[5:]] = v
missing_keys, unexpected_keys = self.text_encoder.load_state_dict(processed_ckpt, strict=False)
logger.info("missing_keys: %s" % " ".join(missing_keys))
logger.info("unexpected_keys: %s" % " ".join(unexpected_keys))
self.dropout = nn.Dropout(config["dropout"])
def forward(self, drug):
return self.encode_mol(drug["strcture"]), self.encode_text(drug["text"])
def encode_mol(self, structure):
h = self.text_encoder(**structure)["pooler_output"]
return self.dropout(h)
def encode_text(self, text, return_cls=False, proj=False):
if self.use_num_layers != -1:
text["output_hidden_states"] = True
output = self.text_encoder(**text)
if return_cls:
logits = output["pooler_output"]
logits = self.dropout(logits)
elif self.use_num_layers == -1:
logits = output["last_hidden_state"]
else:
logits = output["hidden_states"][self.use_num_layers]
return logits

| OpenBioMed-main | open_biomed/models/multimodal/kv_plm.py |
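A hedged config sketch for the KVPLM wrapper above; only the keys come from __init__, the file names and values are placeholders:

```python
# Illustrative sketch only: file names and values are placeholders.
config = {
    "name": "kv-plm",                    # "kv-plm*" selects KVPLMStarEncoder instead of plain BertModel
    "bert_config_path": "bert_config.json",
    "init_checkpoint": "kvplm_checkpoint.pt",
    "dropout": 0.1,
    # "use_num_layers": 6,               # optional: read hidden_states from a specific layer in encode_text
}
kvplm = KVPLM(config)
```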
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertConfig, BertModel
from models.base_models import MolEncoder, TextEncoder
from models.molecule.gnn_graphcl import GNNGraphCL
class MoMuTextEncoder(nn.Module):
def __init__(self, pretrained=True, model_name_or_path=None, use_num_layers=-1, dropout=0.0):
super(MoMuTextEncoder, self).__init__()
if pretrained:  # load a pretrained SciBERT checkpoint from model_name_or_path
self.main_model = BertModel.from_pretrained(model_name_or_path)
else:
config = BertConfig(vocab_size=31090, )
self.main_model = BertModel(config)
self.dropout = nn.Dropout(dropout)
self.use_num_layers = use_num_layers
def forward(self, text, return_cls=True):
if self.use_num_layers != -1:
text["output_hidden_states"] = True
output = self.main_model(**text)
if return_cls:
logits = output["pooler_output"]
logits = self.dropout(logits)
elif self.use_num_layers == -1:
logits = output["last_hidden_state"]
else:
logits = output["hidden_states"][self.use_num_layers]
return logits
class MoMu(MolEncoder, TextEncoder):
def __init__(self, config):
super(MoMu, self).__init__()
self.gin_hidden_dim = config["gin_hidden_dim"]
self.gin_num_layers = config["gin_num_layers"]
self.drop_ratio = config["drop_ratio"]
self.graph_pooling = config["graph_pooling"]
self.graph_self = config["graph_self"]
self.bert_dropout = config["bert_dropout"]
self.bert_hidden_dim = config["bert_hidden_dim"]
self.projection_dim = config["projection_dim"]
self.graph_encoder = GNNGraphCL(
num_layer=self.gin_num_layers,
emb_dim=self.gin_hidden_dim,
gnn_type='gin',
drop_ratio=self.drop_ratio,
JK='last',
)
self.text_encoder = MoMuTextEncoder(pretrained=False, dropout=self.bert_dropout, use_num_layers=-1 if "use_num_layers" not in config else config["use_num_layers"])
self.graph_proj_head = nn.Sequential(
nn.Linear(self.gin_hidden_dim, self.gin_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(self.gin_hidden_dim, self.projection_dim)
)
self.text_proj_head = nn.Sequential(
nn.Linear(self.bert_hidden_dim, self.bert_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(self.bert_hidden_dim, self.projection_dim)
)
self.output_dim = self.projection_dim
# self.output_dim = self.gin_hidden_dim
def forward(self, features_graph, features_text):
batch_size = features_graph.size(0)
# normalized features
features_graph = F.normalize(features_graph, dim=-1)
features_text = F.normalize(features_text, dim=-1)
# cosine similarity as logits
logits_per_graph = features_graph @ features_text.t() / self.temperature
logits_per_text = logits_per_graph.t()
labels = torch.arange(batch_size, dtype=torch.long, device=self.device)  # shape (B,): the i-th graph matches the i-th text
loss_graph = F.cross_entropy(logits_per_graph, labels)
loss_text = F.cross_entropy(logits_per_text, labels)
loss = (loss_graph + loss_text) / 2
return logits_per_graph, logits_per_text, loss
def encode_mol(self, structure, proj=True, return_node_feats=False):
mol_feats, node_feats = self.graph_encoder(structure)
if proj:
mol_feats = self.graph_proj_head(mol_feats)
node_feats = self.graph_proj_head(node_feats)
if return_node_feats:
return mol_feats, node_feats
else:
return mol_feats
def encode_structure_with_prob(self, structure, x, atomic_num_list, device):
h, _ = self.graph_encoder(structure, x, atomic_num_list, device)
return self.graph_proj_head(h)
def encode_text(self, text, return_cls=True, proj=True):
h = self.text_encoder(text, return_cls)
if proj:
h = self.text_proj_head(h)
return h

| OpenBioMed-main | open_biomed/models/multimodal/momu.py |
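A hedged configuration sketch for MoMu; the keys mirror those read in __init__ above and the values are typical-looking placeholders, not the repository's actual settings. Note that forward() also uses self.temperature and self.device, which this class does not set itself, so they are assumed to be supplied by the surrounding training code:

```python
# Illustrative sketch only: values are placeholders, not the repository's configs.
config = {
    "gin_hidden_dim": 300,
    "gin_num_layers": 5,
    "drop_ratio": 0.0,
    "graph_pooling": "sum",
    "graph_self": False,
    "bert_dropout": 0.0,
    "bert_hidden_dim": 768,
    "projection_dim": 256,
}
momu = MoMu(config)
# graph_batch: a torch_geometric Batch; tokenized_text: a BertTokenizer output dict (assumed inputs)
graph_emb = momu.encode_mol(graph_batch)
text_emb = momu.encode_text(tokenized_text)
```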
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base_models import MolEncoder, TextEncoder
from models.molecule.gnn_graphmvp import GNNGraphMVP
from models.multimodal.molfm.xbert import BertConfig, BertForMaskedLM
from models.knowledge.transe import TransE
from utils.mol_utils import convert_pyg_batch
class MolFM(MolEncoder, TextEncoder):
def __init__(self, config):
super().__init__()
self.config = config
self.max_n_nodes = config["max_n_nodes"]
bert_config = BertConfig.from_json_file(config["bert_config_path"])
self.output_dim = config["gin_hidden_dim"]
self.structure_encoder = GNNGraphMVP(
num_layer=config["gin_num_layers"],
emb_dim=config["gin_hidden_dim"],
gnn_type="gin",
drop_ratio=config["drop_ratio"],
JK="last",
)
if "gin_ckpt" in config:
self.structure_encoder.load_state_dict(torch.load(config["gin_ckpt"]))
self.structure_proj_head = nn.Linear(config["gin_hidden_dim"], config["projection_dim"])
self.structure_linear = nn.Linear(config["gin_hidden_dim"], bert_config.hidden_size)
self.text_encoder = BertForMaskedLM(bert_config)
if "bert_ckpt" in config:
ckpt = torch.load(config["bert_ckpt"])
processed_ckpt = {}
if 'module.ptmodel.bert.embeddings.word_embeddings.weight' in ckpt:
for k, v in ckpt.items():
if k.startswith("module.ptmodel."):
processed_ckpt[k[15:]] = v
else:
processed_ckpt[k] = v
missing_keys, unexpected_keys = self.text_encoder.load_state_dict(processed_ckpt, strict=False)
print("missing keys:", missing_keys)
print("unexpected keys:", unexpected_keys)
self.text_proj_head = nn.Linear(bert_config.hidden_size, config["projection_dim"])
#if "kge" in config:
# self.kg_encoder = TransE(**config["kge"])
# self.kg_linear = nn.Linear(config["kge"]["hidden_size"], bert_config.hidden_size)
self.mtm_head = nn.Linear(bert_config.hidden_size, 2)
def forward(self, mol, text, kg=None, cal_loss=False, output_attentions=False):
mol_embeds, node_embeds = self.structure_encoder(mol)
mol_feats = F.normalize(self.structure_proj_head(mol_embeds), dim=-1)
all_node_feats = self.structure_linear(node_embeds)
# serialize node feature
batch_size = mol_feats.shape[0]
node_feats, node_attention_mask = convert_pyg_batch(all_node_feats, mol.batch, self.max_n_nodes)
#node_feats = self.structure_linear(mol_embeds).unsqueeze(1)
#node_attention_mask = torch.ones(batch_size, 1).to(node_feats)
text_outputs = self.text_encoder.bert(text["input_ids"], attention_mask=text["attention_mask"], mode='text', return_dict=True)
seq_feats = text_outputs["last_hidden_state"]
if kg is not None:
neigh_feats = self.kg_encoder.predict(kg["neigh_indice"])
neigh_feats = self.kg_linear(neigh_feats)
node_feats = torch.cat((node_feats, neigh_feats), dim=1)
node_attention_mask = torch.cat((node_attention_mask, kg["neigh_attn"]), dim=1)
output = self.text_encoder.bert(
encoder_embeds=seq_feats,
attention_mask=text["attention_mask"],
encoder_hidden_states=node_feats,
encoder_attention_mask=node_attention_mask,
mode='fusion',
return_dict=True,
output_attentions=output_attentions,
)
if cal_loss:
perm = []
for i in range(batch_size):
j = i
while j == i:
j = random.randint(0, batch_size - 1)
perm.append(j)
perm = torch.LongTensor(perm).to(seq_feats.device)
output_neg = self.text_encoder.bert(
encoder_embeds=seq_feats,
attention_mask=text["attention_mask"],
encoder_hidden_states=node_feats[perm],
encoder_attention_mask=node_attention_mask[perm],
mode='fusion',
return_dict=True
)
label = torch.cat((torch.ones(batch_size), torch.zeros(batch_size)), dim=0).long().to(seq_feats.device)
logits = self.mtm_head(torch.cat((output["last_hidden_state"][:, 0, :], output_neg["last_hidden_state"][:, 0, :]), dim=0))
return F.cross_entropy(logits, label)
else:
return output
def encode_mol(self, structure, proj=False, return_node_feats=False):
mol_embeds, node_embeds = self.structure_encoder(structure)
if proj:
mol_embeds = self.structure_proj_head(mol_embeds)
if not return_node_feats:
return mol_embeds
else:
return mol_embeds, node_embeds
def encode_structure_with_prob(self, structure, x, atomic_num_list, device):
drug_embeds, _ = self.structure_encoder(structure, x, atomic_num_list, device)
return self.structure_proj_head(drug_embeds)
def encode_text(self, text, return_cls=True, proj=True):
text_embeds = self.text_encoder.bert(text["input_ids"], attention_mask=text["attention_mask"], mode='text', return_dict=True)["last_hidden_state"]
if return_cls:
text_embeds = text_embeds[:, 0, :]
if proj:
return self.text_proj_head(text_embeds)
else:
return text_embeds
def encode_knowledge(self, kg):
return self.predict(kg)
def predict_similarity_score(self, data):
preds = self.forward(data["structure"], data["text"])["last_hidden_state"][:, 0, :]
return F.softmax(self.mtm_head(preds), dim=-1)[:, 1]
def calculate_matching_loss(self, drug, text):
return self.forward(drug, text, cal_loss=True)

| OpenBioMed-main | open_biomed/models/multimodal/molfm/molfm.py |
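The matching loss in MolFM above builds in-batch negatives by pairing each text with a randomly chosen non-matching molecule from the same batch. A standalone sketch of that permutation logic, independent of the model weights:

```python
# Standalone illustration of the negative-sampling permutation used in forward(cal_loss=True).
import random
import torch

batch_size = 4
perm = []
for i in range(batch_size):
    j = i
    while j == i:                 # resample until the index differs from i
        j = random.randint(0, batch_size - 1)
    perm.append(j)
perm = torch.LongTensor(perm)     # e.g. tensor([2, 0, 3, 1]); perm[i] != i for every i
# Positive pairs use each text's own node_feats; negatives use node_feats[perm], and the
# matching head is trained against labels [1] * batch_size + [0] * batch_size.
```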
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers import BertConfig
import transformers
#transformers.logging.set_verbosity_error()
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.config = config
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, is_cross_attention):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_width, self.all_head_size)
self.value = nn.Linear(config.encoder_width, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.self = BertSelfAttention(config, is_cross_attention)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, layer_num):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.has_cross_attention = (layer_num >= config.fusion_layer)
#self.has_cross_attention = False
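# Only layers at or above config.fusion_layer get a cross-attention block; in MolFM above,
# these fusion layers let text tokens attend to the molecule node features (and optional
# knowledge-graph neighbor features) passed in as encoder_hidden_states.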
if self.has_cross_attention:
self.layer_num = layer_num
self.crossattention = BertAttention(config, is_cross_attention=True)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if self.has_cross_attention:
assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
if type(encoder_hidden_states) == list:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
encoder_attention_mask[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1]
else:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
mode='multi_modal',
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
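# The layer stack is partitioned by config.fusion_layer:
#   mode == 'text'        -> text-only layers [0, fusion_layer)
#   mode == 'fusion'      -> cross-attention (fusion) layers [fusion_layer, num_hidden_layers)
#   mode == 'multi_modal' -> the full stack [0, num_hidden_layers)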
if mode=='text':
start_layer = 0
output_layer = self.config.fusion_layer
elif mode=='fusion':
start_layer = self.config.fusion_layer
output_layer = self.config.num_hidden_layers
elif mode=='multi_modal':
start_layer = 0
output_layer = self.config.num_hidden_layers
for i in range(start_layer, output_layer):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
if all_cross_attentions is not None:
logger.debug("collected %d cross-attention outputs", len(all_cross_attentions))
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.BertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set
to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint="bert-base-uncased",
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
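# Worked example (encoder case, 2-D padding mask): attention_mask = [[1, 1, 0]] yields
# extended_attention_mask = [[[[0.0, 0.0, -10000.0]]]] with shape (1, 1, 1, 3); adding it
# to the raw attention scores effectively removes the padded position from the softmax.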
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode='multi_modal',
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif encoder_embeds is not None:
input_shape = encoder_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = encoder_embeds.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
device, is_decoder)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if encoder_embeds is None:
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
else:
embedding_output = encoder_embeds
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
mode=mode,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=True,
reduction='mean',
mode='multi_modal',
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(reduction=reduction)
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
        if soft_labels is not None:
            loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1) * soft_labels, dim=-1)
            loss_distill = (loss_distill * (labels != -100)).sum(1)
            lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past,
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
"is_decoder": True,
}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
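# Editor's illustrative sketch (not part of the original file): a minimal reproduction of how
# BertLMHeadModel.forward above blends the shifted next-token cross-entropy with the optional
# soft-label distillation term. All shapes, values and the alpha default are made up for demonstration.
def _demo_shifted_lm_loss_with_distillation(alpha=0.4):
    import torch
    import torch.nn.functional as F
    batch, seq_len, vocab_size = 2, 5, 11
    prediction_scores = torch.randn(batch, seq_len, vocab_size)
    labels = torch.randint(0, vocab_size, (batch, seq_len))
    # token t predicts token t + 1, so drop the last logit and the first label
    shifted_scores = prediction_scores[:, :-1, :].contiguous()
    shifted_labels = labels[:, 1:].contiguous()
    lm_loss = F.cross_entropy(
        shifted_scores.view(-1, vocab_size), shifted_labels.view(-1), reduction="none"
    ).view(batch, -1).sum(1)
    # optional distillation: cross-entropy against a teacher distribution over the same positions,
    # with -100 (ignored) positions masked out of the sum
    soft_labels = F.softmax(torch.randn(batch, seq_len - 1, vocab_size), dim=-1)
    loss_distill = -(F.log_softmax(shifted_scores, dim=-1) * soft_labels).sum(-1)
    loss_distill = (loss_distill * (shifted_labels != -100)).sum(1)
    return (1 - alpha) * lm_loss + alpha * loss_distill  # one value per sequence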
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint="bert-base-uncased",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode='multi_modal',
soft_labels=None,
alpha=0,
return_logits=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_embeds=encoder_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
mode=mode,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if soft_labels is not None:
            loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1)
            loss_distill = loss_distill[labels != -100].mean()
            masked_lm_loss = (1 - alpha) * masked_lm_loss + alpha * loss_distill
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint="bert-base-uncased",
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
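# Editor's illustrative sketch (not part of the original file): how BertForSequenceClassification
# above switches between regression and classification depending on ``num_labels``. Logits and
# labels are made up for demonstration.
def _demo_sequence_classification_losses():
    import torch
    from torch.nn import CrossEntropyLoss, MSELoss
    # num_labels == 1: regression, mean-squared error on the single logit
    reg_logits = torch.randn(4, 1)
    reg_labels = torch.randn(4)
    reg_loss = MSELoss()(reg_logits.view(-1), reg_labels.view(-1))
    # num_labels > 1: classification, cross-entropy over the label dimension
    cls_logits = torch.randn(4, 3)
    cls_labels = torch.tensor([0, 2, 1, 2])
    cls_loss = CrossEntropyLoss()(cls_logits.view(-1, 3), cls_labels.view(-1))
    return reg_loss, cls_loss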
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
checkpoint="bert-base-uncased",
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
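# Editor's illustrative sketch (not part of the original file): the reshaping used by
# BertForMultipleChoice.forward above -- the (batch, num_choices, seq_len) inputs are flattened so
# every choice is encoded independently, and the per-choice scores are folded back into
# (batch, num_choices) before the cross-entropy. Shapes and the stand-in tensors are made up.
def _demo_multiple_choice_reshape():
    import torch
    import torch.nn as nn
    batch, num_choices, seq_len, hidden = 2, 4, 7, 8
    input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))
    flat_input_ids = input_ids.view(-1, input_ids.size(-1))   # (batch * num_choices, seq_len)
    pooled = torch.randn(flat_input_ids.size(0), hidden)      # stand-in for BERT's pooled output
    logits = nn.Linear(hidden, 1)(pooled)                     # one score per choice
    reshaped_logits = logits.view(-1, num_choices)            # (batch, num_choices)
    return flat_input_ids.shape, reshaped_logits.shape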
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint="bert-base-uncased",
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
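# Editor's illustrative sketch (not part of the original file): the "active loss" masking in
# BertForTokenClassification.forward above -- labels at padded positions are rewritten to the loss
# function's ignore_index so they contribute nothing. Values are made up for demonstration.
def _demo_token_classification_active_loss():
    import torch
    from torch.nn import CrossEntropyLoss
    num_labels = 3
    logits = torch.randn(1, 4, num_labels)
    labels = torch.tensor([[1, 0, 2, 2]])
    attention_mask = torch.tensor([[1, 1, 0, 0]])  # last two positions are padding
    loss_fct = CrossEntropyLoss()
    active_loss = attention_mask.view(-1) == 1
    active_labels = torch.where(
        active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
    )
    return loss_fct(logits.view(-1, num_labels), active_labels)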
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint="bert-base-uncased",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # on multi-GPU the start/end positions may carry an extra trailing dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
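# Editor's illustrative sketch (not part of the original file): the span loss computed by
# BertForQuestionAnswering.forward above -- out-of-range start/end positions are clamped to an
# ignored index, and the final loss is the mean of the start and end cross-entropies. Values are
# made up for demonstration.
def _demo_qa_span_loss():
    import torch
    from torch.nn import CrossEntropyLoss
    seq_len = 6
    start_logits = torch.randn(2, seq_len)
    end_logits = torch.randn(2, seq_len)
    start_positions = torch.tensor([1, 9])   # 9 lies outside the sequence
    end_positions = torch.tensor([3, 9])
    ignored_index = start_logits.size(1)     # == seq_len
    start_positions = start_positions.clamp(0, ignored_index)
    end_positions = end_positions.clamp(0, ignored_index)
    loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
    return (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2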
| OpenBioMed-main | open_biomed/models/multimodal/molfm/xbert.py |
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import RobertaConfig
from models.base_models import MolEncoder, TextEncoder
from models.molecule.unimap import UniMAP
from models.multimodal.molfm.xbert import BertConfig, BertForMaskedLM
from models.knowledge.transe import TransE
class DrugFM(MolEncoder, TextEncoder):
def __init__(self, config):
super().__init__()
self.config = config
#self.max_n_nodes = config["max_n_nodes"]
bert_config = BertConfig.from_json_file(config["bert_config_path"])
roberta_config = RobertaConfig(
vocab_size=config["roberta"]["vocab_size"],
max_length=config["roberta"]["max_length"],
max_position_embeddings=config["roberta"]["max_position_embeddings"],
type_vocab_size=config["roberta"]["type_vocab_size"],
contrastive_class_num=2,
pooler_type=config["roberta"]["pooler_type"]
)
self.structure_encoder = UniMAP(
roberta_config,
config["gnn"],
config["atom_vocab_size"]
)
self.structure_proj_head = nn.Linear(self.structure_encoder.output_dim, config["projection_dim"])
self.structure_linear = nn.Linear(self.structure_encoder.output_dim, bert_config.hidden_size)
self.text_encoder = BertForMaskedLM(bert_config)
self.text_proj_head = nn.Linear(bert_config.hidden_size, config["projection_dim"])
#self.kg_encoder = TransE(**config["kge"])
#self.kg_linear = nn.Linear(config["kge"]["hidden_size"], bert_config.hidden_size)
self.mtm_head = nn.Linear(bert_config.hidden_size, 2)
def forward(self, mol, text, kg=None, cal_loss=False, output_attentions=False):
batch_size = text["input_ids"].shape[0]
mol_embeds, node_embeds, node_attention_mask = self.structure_encoder(mol["smiles"], mol["graph"])
"""
if node_embeds.shape[1] > 32:
node_embeds = node_embeds[:, :32, :]
node_attention_mask = node_attention_mask[:, :32]
"""
#node_feats = self.structure_linear(node_embeds)[:, :128, :]
#node_attention_mask = node_attention_mask[:, :128, :]
mol_feats = F.normalize(self.structure_proj_head(mol_embeds), dim=-1)
node_feats = self.structure_linear(node_embeds)
text_outputs = self.text_encoder.bert(text["input_ids"], attention_mask=text["attention_mask"], mode='text', return_dict=True)
seq_feats = text_outputs["last_hidden_state"]
if kg is not None:
neigh_feats = self.kg_encoder.predict(kg["neigh_indice"])
neigh_feats = self.kg_linear(neigh_feats)
node_feats = torch.cat((node_feats, neigh_feats), dim=1)
node_attention_mask = torch.cat((node_attention_mask, kg["neigh_attn"]), dim=1)
output = self.text_encoder.bert(
encoder_embeds=seq_feats,
attention_mask=text["attention_mask"],
encoder_hidden_states=node_feats,
encoder_attention_mask=node_attention_mask,
mode='fusion',
return_dict=True,
output_attentions=output_attentions,
)
if cal_loss:
perm = []
for i in range(batch_size):
j = i
while j == i:
j = random.randint(0, batch_size - 1)
perm.append(j)
perm = torch.LongTensor(perm).to(seq_feats.device)
output_neg = self.text_encoder.bert(
encoder_embeds=seq_feats,
attention_mask=text["attention_mask"],
encoder_hidden_states=node_feats[perm],
encoder_attention_mask=node_attention_mask[perm],
mode='fusion',
return_dict=True
)
label = torch.cat((torch.ones(batch_size), torch.zeros(batch_size)), dim=0).long().to(seq_feats.device)
logits = self.mtm_head(torch.cat((output["last_hidden_state"][:, 0, :], output_neg["last_hidden_state"][:, 0, :]), dim=0))
return F.cross_entropy(logits, label)
else:
return output
def encode_mol(self, mol, proj=True, return_node_feats=False):
mol_embeds, node_embeds, _ = self.structure_encoder(mol["smiles"], mol["graph"])
if proj:
mol_embeds = self.structure_proj_head(mol_embeds)
if not return_node_feats:
return mol_embeds
else:
return mol_embeds, node_embeds
def encode_text(self, text, return_cls=True, proj=True):
text_embeds = self.text_encoder.bert(text["input_ids"], attention_mask=text["attention_mask"], mode='text', return_dict=True)["last_hidden_state"]
if return_cls:
text_embeds = text_embeds[:, 0, :]
if proj:
return self.text_proj_head(text_embeds)
else:
return text_embeds
def encode_knowledge(self, kg):
return self.predict(kg)
def predict_similarity_score(self, data):
preds = self.forward(data["structure"], data["text"])["last_hidden_state"][:, 0, :]
return F.softmax(self.mtm_head(preds), dim=-1)[:, 1]
def calculate_matching_loss(self, drug, text):
return self.forward(drug, text, cal_loss=True) | OpenBioMed-main | open_biomed/models/multimodal/molfm/drugfm.py |
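# Editor's illustrative sketch (placed between the two files; not part of either original file):
# the in-batch negative sampling used for the molecule--text matching loss in DrugFM.forward above.
# Each text keeps its paired molecule as the positive and is additionally matched with a randomly
# permuted (j != i) molecule as the negative, and a 2-way head is trained with cross-entropy.
# The fused features are replaced here by stand-in tensors, so shapes and values are made up.
def _demo_matching_loss_negative_sampling(batch_size=4, hidden=8):
    import random
    import torch
    import torch.nn.functional as F
    pos_feats = torch.randn(batch_size, hidden)    # stand-in for fused [CLS] features of true pairs
    perm = []
    for i in range(batch_size):
        j = i
        while j == i:
            j = random.randint(0, batch_size - 1)
        perm.append(j)
    neg_feats = pos_feats[torch.LongTensor(perm)]  # stand-in for fused features of mismatched pairs
    head = torch.nn.Linear(hidden, 2)
    logits = head(torch.cat((pos_feats, neg_feats), dim=0))
    labels = torch.cat((torch.ones(batch_size), torch.zeros(batch_size))).long()
    return F.cross_entropy(logits, labels)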
import logging
logger = logging.getLogger(__name__)
import contextlib
import torch
import torch.nn as nn
import re
import os
from transformers import LlamaTokenizer, EsmModel, EsmConfig
from models.base_models import MolEncoder, ProteinEncoder, TextEncoder
from models.molecule.gnn_graphmvp import GNNGraphMVP
from models.multimodal.biomedgpt.modeling_llama import LlamaForCausalLM, LlamaConfig
from utils.mol_utils import convert_pyg_batch
class BioMedGPTBase(MolEncoder, ProteinEncoder, TextEncoder):
def __init__(self):
super(BioMedGPTBase, self).__init__()
def maybe_autocast(self, dtype=torch.float16):
# if on cpu, don't use autocast
# if on gpu, use autocast with dtype if provided, otherwise use torch.float16
enable_autocast = self.device != torch.device("cpu")
if enable_autocast:
return torch.cuda.amp.autocast(dtype=dtype)
else:
return contextlib.nullcontext()
@classmethod
    def from_pretrained(cls, model_name_or_path):
raise NotImplementedError
def add_padding(self, wrapped_embeds, wrapped_attention_mask, targets=None, padding="right"):
batch_size = len(wrapped_embeds)
max_length_batch = 0
for i in range(batch_size):
if wrapped_embeds[i].shape[1] > max_length_batch:
max_length_batch = wrapped_embeds[i].shape[1]
for i in range(batch_size):
if wrapped_embeds[i].shape[1] < max_length_batch:
pad_len = max_length_batch - wrapped_embeds[i].shape[1]
if padding == "right":
wrapped_embeds[i] = torch.cat((
wrapped_embeds[i],
torch.zeros((1, pad_len, wrapped_embeds[i].shape[2]), dtype=wrapped_embeds[i].dtype).to(wrapped_embeds[i].device)
), dim=1)
wrapped_attention_mask[i] = torch.cat((
wrapped_attention_mask[i],
torch.zeros((1, pad_len), dtype=wrapped_attention_mask[i].dtype).to(wrapped_attention_mask[i].device)
), dim=1)
if targets is not None:
targets[i] = torch.cat((
targets[i],
torch.ones((1, pad_len), dtype=targets[i].dtype).to(targets[i].device).fill_(-100)
), dim=1)
else:
wrapped_embeds[i] = torch.cat((
torch.zeros((1, pad_len, wrapped_embeds[i].shape[2]), dtype=wrapped_embeds[i].dtype).to(wrapped_embeds[i].device),
wrapped_embeds[i],
), dim=1)
wrapped_attention_mask[i] = torch.cat((
torch.zeros((1, pad_len), dtype=wrapped_attention_mask[i].dtype).to(wrapped_attention_mask[i].device),
wrapped_attention_mask[i],
), dim=1)
if targets is not None:
targets[i] = torch.cat((
torch.ones((1, pad_len), dtype=targets[i].dtype).to(targets[i].device).fill_(-100),
targets[i],
), dim=1)
if targets is not None:
return torch.cat(wrapped_embeds, dim=0), torch.cat(wrapped_attention_mask, dim=0), torch.cat(targets, dim=0)
else:
return torch.cat(wrapped_embeds, dim=0), torch.cat(wrapped_attention_mask, dim=0)
def encode_text(self, text):
return self.llm(text)
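# Editor's illustrative sketch (not part of the original file): what BioMedGPTBase.add_padding
# above does for a toy batch -- right-pad variable-length (1, L_i, H) embeddings to the longest
# length in the batch, extend the attention masks with zeros, and (when targets are given) extend
# the targets with -100 so padded steps are ignored by the LM loss. Shapes are made up.
def _demo_add_padding_right():
    import torch
    hidden = 4
    embeds = [torch.randn(1, 3, hidden), torch.randn(1, 5, hidden)]
    masks = [torch.ones(1, 3, dtype=torch.long), torch.ones(1, 5, dtype=torch.long)]
    max_len = max(e.shape[1] for e in embeds)
    for i in range(len(embeds)):
        pad = max_len - embeds[i].shape[1]
        if pad > 0:
            embeds[i] = torch.cat((embeds[i], torch.zeros(1, pad, hidden)), dim=1)
            masks[i] = torch.cat((masks[i], torch.zeros(1, pad, dtype=torch.long)), dim=1)
    return torch.cat(embeds, dim=0), torch.cat(masks, dim=0)   # (2, 5, 4) and (2, 5)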
class BioMedGPTV(BioMedGPTBase):
def __init__(self, config):
super(BioMedGPTV, self).__init__()
self.device = config["device"]
self.mol_structure_config = config["mol"]
# load molecule structure encoder
self.mol_structure_encoder = GNNGraphMVP(
num_layer=self.mol_structure_config["gin_num_layers"],
emb_dim=self.mol_structure_config["gin_hidden_dim"],
gnn_type="gin",
drop_ratio=self.mol_structure_config["drop_ratio"],
JK="last",
)
if config["mol"]["freeze"]:
logger.info("freeze molecule structure encoder")
for name, param in self.mol_structure_encoder.named_parameters():
param.requires_grad = False
self.mol_structure_encoder = self.mol_structure_encoder.eval()
# load protein structure encoder
self.prot_structure_config = EsmConfig.from_json_file(os.path.join(config["protein"]["model_name_or_path"], "config.json"))
self.prot_structure_encoder = EsmModel(self.prot_structure_config)
if config["protein"]["use_float16"]:
self.prot_structure_encoder = self.prot_structure_encoder.half()
if config["protein"]["lora"]:
from peft import get_peft_model, LoraConfig, TaskType
logger.info("applying lora to protein structure encoder")
lora_config = LoraConfig(peft_type=TaskType.FEATURE_EXTRACTION, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["query", "value"])
self.prot_structure_encoder = get_peft_model(self.prot_structure_encoder, lora_config)
self.prot_structure_encoder.print_trainable_parameters()
elif config["protein"]["freeze"]:
logger.info("freeze protein structure encoder")
for name, param in self.prot_structure_encoder.named_parameters():
param.requires_grad = False
self.prot_structure_encoder = self.prot_structure_encoder.eval()
# load llm
self.llm_tokenizer = LlamaTokenizer.from_pretrained(config["llm"]["model_name_or_path"], use_fast=False, truncation_side="left")
self.llm_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
self.llm_tokenizer.add_special_tokens({'bos_token': '<s>'})
self.llm_tokenizer.add_special_tokens({'eos_token': '</s>'})
self.llm_tokenizer.add_special_tokens({'unk_token': '<unk>'})
logger.info("loading llm")
self.llm_config = LlamaConfig.from_json_file(os.path.join(config["llm"]["model_name_or_path"], "config.json"))
self.llm = LlamaForCausalLM(self.llm_config)
if config["llm"]["use_float16"]:
self.llm = self.llm.half()
for name, param in self.llm.named_parameters():
param.requires_grad = False
#self.llm = LlamaForCausalLM.from_pretrained(config["llm"]["ckpt"], torch_dtype=torch.float16)
self.llm.resize_token_embeddings(len(self.llm_tokenizer))
self.proj_mol = nn.Linear(self.mol_structure_encoder.output_dim, self.llm.config.hidden_size)
self.proj_prot = nn.Linear(self.prot_structure_encoder.config.hidden_size, self.llm.config.hidden_size)
def _prompt_wrap(self, mol_feats, mol_batch, prot_feats, cell_feats, text_input, prompt):
        device = self.device  # prompts and text inputs are raw strings, so take the device from the model config
        batch_size = len(prompt)  # one prompt per sample; mol_feats holds node-level features for the whole batch
wrapped_embeds_batch, wrapped_attention_mask_batch = [], []
cur_mol, cur_prot, cur_cell = 0, 0, 0
for i in range(batch_size):
text = prompt[i].format(text_input=text_input[i])
            bos_token = torch.ones((1, 1), dtype=torch.long, device=device)  # id tensor for the BOS token
wrapped_embeds = [bos_token * self.llm_tokenizer.bos_token_id]
pattern = re.compile("<moleculeHere>|<proteinHere>|<cellHere>")
p_text = pattern.split(text)
spec_tokens = pattern.findall(text)
for j in range(len(p_text)):
p_tokens = self.llm_tokenizer(
p_text[j],
return_tensors='pt',
add_special_tokens=False
).to(device)
p_embeds = self.llm.get_input_embeddings()(p_tokens.input_ids)
wrapped_embeds.append(p_embeds)
if j < len(spec_tokens):
if spec_tokens[j] == "<moleculeHere>":
                        wrapped_embeds.append(mol_feats[mol_batch == cur_mol].unsqueeze(0))
cur_mol += 1
elif spec_tokens[j] == "<proteinHere>":
wrapped_embeds.append(prot_feats[cur_prot].unsqueeze(0))
cur_prot += 1
elif spec_tokens[j] == "<cellHere>":
wrapped_embeds.append(cell_feats[cur_cell].unsqueeze(0))
cur_cell += 1
wrapped_embeds_batch.append(torch.cat(wrapped_embeds, dim=1))
            wrapped_attention_mask_batch.append(torch.ones(wrapped_embeds_batch[-1].shape[:-1]).to(device))  # mask covers the full concatenated sequence
return wrapped_embeds_batch, wrapped_attention_mask_batch
def _get_inputs_embeds(self, samples):
with self.maybe_autocast():
if "mol" in samples:
mol_batch = samples["mol"].batch
_, mol_feats = self.mol_structure_encoder(samples["mol"])
mol_feats = self.proj_mol(mol_feats)
else:
mol_feats, mol_batch = None, None
            if "protein" in samples:
                prot_feats = self.prot_structure_encoder(**samples["protein"]).last_hidden_state
                prot_feats = self.proj_prot(prot_feats)
            else:
                prot_feats = None  # no protein inputs in this batch
# TODO: Add cell features
cell_feats = None
return self._prompt_wrap(
mol_feats=mol_feats,
mol_batch=mol_batch,
prot_feats=prot_feats,
cell_feats=cell_feats,
text_input=samples["text_inputs"],
prompt=samples["prompt"]
)
def forward(self, samples):
inputs_embeds, inputs_attention_mask = self._get_inputs_embeds(samples)
wrapped_embeds, wrapped_attention_mask, wrapped_targets = [], [], []
for i in range(len(inputs_embeds)):
output_tokens = self.llm_tokenizer(
samples["text_outputs"][i],
return_tensors='pt',
add_special_tokens=False
).to(inputs_embeds[i].device)
eos_token = torch.ones((1, 1), dtype=output_tokens.input_ids.dtype, device=output_tokens.input_ids.device)
output_tokens.input_ids = torch.cat([output_tokens.input_ids, eos_token * self.llm_tokenizer.eos_token_id], dim=1)
output_tokens.attention_mask = torch.cat([output_tokens.attention_mask, eos_token], dim=1)
output_embeds = self.llm.get_input_embeddings()(output_tokens.input_ids)
wrapped_embeds.append(torch.cat([inputs_embeds[i], output_embeds], dim=1))
wrapped_attention_mask.append(torch.cat([inputs_attention_mask[i], output_tokens.attention_mask], dim=1))
# do not apply loss to the padding
targets = output_tokens.input_ids.masked_fill(
output_tokens.input_ids == self.llm_tokenizer.pad_token_id, -100
)
# do not apply loss to the text inputs (i.e., instruction)
empty_targets = torch.ones(inputs_attention_mask[i].shape, dtype=torch.long).to(inputs_embeds[i].device).fill_(-100)
wrapped_targets.append(torch.cat([empty_targets, targets], dim=1))
inputs_embeds, inputs_attention_mask, targets = self.add_padding(wrapped_embeds, wrapped_attention_mask, wrapped_targets)
with self.maybe_autocast():
outputs = self.llm(
inputs_embeds=inputs_embeds,
attention_mask=inputs_attention_mask,
labels=targets,
return_dict=True
)
return outputs.loss
@torch.no_grad()
def generate(
self,
samples,
use_nucleus_sampling=False,
num_beams=5,
max_length=256,
min_length=1,
top_p=0.9,
repetition_penalty=1.5,
length_penalty=1,
num_captions=1,
temperature=1,
):
with self.maybe_autocast():
inputs_embeds, inputs_attention_mask = self._get_inputs_embeds(samples)
inputs_embeds, inputs_attention_mask = self.add_padding(inputs_embeds, inputs_attention_mask)
outputs = self.llm.generate(
inputs_embeds=inputs_embeds,
attention_mask=inputs_attention_mask,
do_sample=use_nucleus_sampling,
top_p=top_p,
temperature=temperature,
num_beams=num_beams,
max_length=max_length,
min_length=min_length,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
num_return_sequences=num_captions,
)
outputs[outputs == 0] = 2 # convert output id 0 to 2 (eos_token_id)
output_text = self.llm_tokenizer.batch_decode(outputs, skip_special_tokens=True)
output_text = [text.strip() for text in output_text]
return output_text
def encode_mol(self, mol, ret_atom_feats=False):
with self.maybe_autocast():
mol_feats, node_feats = self.mol_structure_encoder(mol)
if ret_atom_feats:
return node_feats
#if self.mol_structure_config["name"] == "graphmvp":
# return convert_pyg_batch(node_feats, mol.batch, self.mol_structure_config["max_n_nodes"])
else:
return mol_feats
def encode_protein(self, protein):
with self.maybe_autocast():
return self.prot_structure_encoder(**protein).last_hidden_state | OpenBioMed-main | open_biomed/models/multimodal/biomedgpt/biomedgpt.py |
from models.multimodal.biomedgpt.biomedgpt_clip import BioMedGPTCLIP
from models.multimodal.biomedgpt.biomedgpt import BioMedGPTV | OpenBioMed-main | open_biomed/models/multimodal/biomedgpt/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base_models import MolEncoder, TextEncoder
from models.molecule.gnn_graphcl import GNNGraphCL
from models.text.base_transformers import BaseTransformers
class BioMedGPTCLIP(MolEncoder, TextEncoder):
def __init__(self, config):
super(BioMedGPTCLIP, self).__init__()
self.graph_encoder = GNNGraphCL(
num_layer=config["structure"]["gin_num_layers"],
emb_dim=config["structure"]["gin_hidden_dim"],
gnn_type='gin',
drop_ratio=config["structure"]["dropout"],
JK='last',
)
self.text_encoder = BaseTransformers(config["text"])
for param in self.text_encoder.parameters():
param.requires_grad = False
for param in self.text_encoder.main_model.h[-16:].parameters():
param.requires_grad = True
self.graph_proj_head = nn.Linear(self.graph_encoder.output_dim, config["projection_dim"])
self.text_proj_head = nn.Linear(self.text_encoder.output_dim, config["projection_dim"])
def forward(self, features_graph, features_text):
batch_size = features_graph.size(0)
# normalized features
features_graph = F.normalize(features_graph, dim=-1)
features_text = F.normalize(features_text, dim=-1)
# cosine similarity as logits
logits_per_graph = features_graph @ features_text.t() / self.temperature
logits_per_text = logits_per_graph.t()
        labels = torch.arange(batch_size, dtype=torch.long, device=features_graph.device)  # one matching label per sample (size B)
loss_graph = F.cross_entropy(logits_per_graph, labels)
loss_text = F.cross_entropy(logits_per_text, labels)
loss = (loss_graph + loss_text) / 2
return logits_per_graph, logits_per_text, loss
def encode_mol(self, structure, proj=True, return_node_feats=False):
h, node_feats = self.graph_encoder(structure)
if proj:
h = self.graph_proj_head(h)
node_feats = self.graph_proj_head(node_feats)
if return_node_feats:
return h, node_feats
else:
return h
def encode_structure_with_prob(self, structure, x, atomic_num_list, device):
h, _ = self.graph_encoder(structure, x, atomic_num_list, device)
return self.graph_proj_head(h)
def encode_text(self, text, proj=True):
h = self.text_encoder(text)
if proj:
h = self.text_proj_head(h)
return h | OpenBioMed-main | open_biomed/models/multimodal/biomedgpt/biomedgpt_clip.py |
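# Editor's illustrative sketch (placed between the two files; not part of either original file):
# the symmetric contrastive loss in BioMedGPTCLIP.forward above -- L2-normalized graph and text
# features give a B x B similarity matrix whose diagonal holds the positive pairs in both
# directions. The temperature value and shapes below are made up for demonstration.
def _demo_graph_text_contrastive_loss(batch_size=4, dim=8, temperature=0.07):
    import torch
    import torch.nn.functional as F
    features_graph = F.normalize(torch.randn(batch_size, dim), dim=-1)
    features_text = F.normalize(torch.randn(batch_size, dim), dim=-1)
    logits_per_graph = features_graph @ features_text.t() / temperature
    logits_per_text = logits_per_graph.t()
    labels = torch.arange(batch_size)
    loss_graph = F.cross_entropy(logits_per_graph, labels)
    loss_text = F.cross_entropy(logits_per_text, labels)
    return (loss_graph + loss_text) / 2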
# This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
""" PyTorch LLaMA model."""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers.models.llama.configuration_llama import LlamaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LlamaConfig"
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
    Make the causal mask used for auto-regressive (uni-directional) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
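# Editor's illustrative sketch (not part of the original file): what the two mask helpers above
# produce for a tiny input. The causal mask keeps 0.0 on and below the diagonal and a very large
# negative value above it; the expanded padding mask turns masked (0) positions into the same
# large negative value, so both can simply be added to the attention scores. Sizes are made up.
def _demo_attention_masks():
    import torch
    causal = _make_causal_mask((1, 3), torch.float32, device=torch.device("cpu"))
    # causal[0, 0] is lower-triangular in 0.0 with torch.finfo(torch.float32).min above the diagonal
    padding = torch.tensor([[1, 1, 0]])               # last position is padding
    expanded = _expand_mask(padding, torch.float32)   # (1, 1, 3, 3); the last column is masked
    return causal.shape, expanded.shape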
class LlamaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
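# Editor's illustrative sketch (not part of the original file): LlamaRMSNorm above rescales each
# position to (approximately) unit root-mean-square before applying the learned weight, which is
# initialised to ones. Shapes and the scaling factor are made up for demonstration.
def _demo_rmsnorm():
    import torch
    norm = LlamaRMSNorm(hidden_size=8)
    x = torch.randn(2, 3, 8) * 5.0
    y = norm(x)
    return y.pow(2).mean(-1).sqrt()   # all entries are close to 1.0 with the default weight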
class LlamaRotaryEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
self.register_buffer("inv_freq", inv_freq)
# Build here to make `torch.jit.trace` work.
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
# This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
if seq_len > self.max_seq_len_cached:
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
return (
self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
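# Editor's illustrative sketch (not part of the original file): applying the rotary position
# embedding defined above to toy query/key tensors. The rotation mixes each dimension with its
# partner in the other half of the head, so per-position vector norms are preserved. Shapes are
# made up for demonstration.
def _demo_rotary_position_embedding():
    import torch
    bsz, num_heads, seq_len, head_dim = 1, 2, 6, 8
    q = torch.randn(bsz, num_heads, seq_len, head_dim)
    k = torch.randn(bsz, num_heads, seq_len, head_dim)
    rope = LlamaRotaryEmbedding(head_dim)
    cos, sin = rope(q, seq_len=seq_len)
    position_ids = torch.arange(seq_len).unsqueeze(0)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
    assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5)
    return q_rot.shape, k_rot.shape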
class LlamaMLP(nn.Module):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
hidden_act: str,
):
super().__init__()
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
class LlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: LlamaConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.max_position_embeddings = config.max_position_embeddings
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
# [bsz, nh, t, hd]
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights + attention_mask
attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
class LlamaDecoderLayer(nn.Module):
def __init__(self, config: LlamaConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = LlamaAttention(config=config)
self.mlp = LlamaMLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
)
self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LlamaConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class LlamaPreTrainedModel(PreTrainedModel):
config_class = LlamaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LlamaModel):
module.gradient_checkpointing = value
LLAMA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
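        query_embeds (`torch.FloatTensor` of shape `(batch_size, num_query_tokens, hidden_size)`, *optional*):
            Embeddings that are concatenated in front of `inputs_embeds` along the sequence dimension before the
            decoder layers are run. This argument is an addition in this repository (it is not part of the upstream
            LLaMA docstring) and is how externally computed query/prefix representations are injected.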
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class LlamaModel(LlamaPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if query_embeds is not None:
inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1)
batch_size, seq_length, _ = inputs_embeds.shape
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
)
hidden_states = inputs_embeds
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, None)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
position_ids,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
class LlamaForCausalLM(LlamaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.model = LlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
query_embeds=query_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
query_embeds = None
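                # query_embeds from the first step are already reflected in the cached
                # key/value states, so they must not be prepended again on later steps.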
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"position_ids": position_ids,
"query_embeds": query_embeds,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
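

if __name__ == "__main__":
    # Hedged smoke-test sketch added by the editor (not part of the original file).
    # It assumes the LlamaConfig imported at the top of this module accepts the
    # standard field names below; all sizes are arbitrary toy values.
    tiny_config = LlamaConfig(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
    )
    model = LlamaForCausalLM(tiny_config).eval()
    input_ids = torch.randint(0, 128, (1, 6))
    # Four "query" embeddings are prepended to the six token embeddings,
    # so the logits cover a sequence of length 10.
    query_embeds = torch.zeros(1, 4, 64)
    with torch.no_grad():
        out = model(input_ids=input_ids, query_embeds=query_embeds, use_cache=False)
    assert out.logits.shape == (1, 10, 128)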
| OpenBioMed-main | open_biomed/models/multimodal/biomedgpt/modeling_llama.py |
from models.knowledge.transe import TransE
from models.knowledge.gin import GIN
| OpenBioMed-main | open_biomed/models/knowledge/__init__.py |
import math
import torch
import torch.nn as nn
from models.base_models import KnowledgeEncoder
class TransE(KnowledgeEncoder):
def __init__(self, n_ents, n_rels, norm=1, hidden_size=256, margin=1.0):
super().__init__()
self.n_ents = n_ents
self.n_rels = n_rels
self.norm = norm
self.hidden_size = hidden_size
self.margin = margin
self.uniform_range = 6 / math.sqrt(self.hidden_size)
self.loss_fn = nn.MarginRankingLoss(margin=margin)
self.ent_emb = nn.Embedding(
num_embeddings=self.n_ents + 1,
embedding_dim=self.hidden_size,
padding_idx=self.n_ents
)
self.ent_emb.weight.data.uniform_(-self.uniform_range, self.uniform_range)
self.rel_emb = nn.Embedding(
num_embeddings=self.n_rels + 1,
embedding_dim=self.hidden_size,
padding_idx=self.n_rels
)
self.rel_emb.weight.data.uniform_(-self.uniform_range, self.uniform_range)
self.rel_emb.weight.data[:-1, :].div_(self.rel_emb.weight.data[:-1, :].norm(p=1, dim=1, keepdim=True))
def forward(self, pos, neg):
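        # pos / neg: LongTensors of shape (batch, 3) holding (head, relation, tail)
        # entity/relation indices. With a target of -1, MarginRankingLoss pushes the
        # translation distance of the true triple to be at least `margin` smaller
        # than that of the corrupted triple.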
self.ent_emb.weight.data[:-1, :].div_(self.ent_emb.weight.data[:-1, :].norm(p=2, dim=1, keepdim=True))
self.rel_emb.weight.data[:-1, :].div_(self.rel_emb.weight.data[:-1, :].norm(p=1, dim=1, keepdim=True))
pos_dist = self._distance(pos)
neg_dist = self._distance(neg)
return self.loss_fn(pos_dist, neg_dist, -1 * torch.ones_like(pos_dist).to(pos_dist.device)), pos_dist, neg_dist
def predict(self, batch):
return self.ent_emb(batch)
def _distance(self, triplets):
return (self.ent_emb(triplets[:, 0]) + self.rel_emb(triplets[:, 1]) - self.ent_emb(triplets[:, 2])).norm(p=self.norm, dim=1)
def encode_knowledge(self, kg):
return self.predict(kg) | OpenBioMed-main | open_biomed/models/knowledge/transe.py |
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
import random
from torch_geometric.nn import GINConv, JumpingKnowledge
from models.base_models import KnowledgeEncoder
from models.protein.cnn import CNNGRU
SUPPORTED_FEATURE_NETWORK = {
"cnn_gru": CNNGRU,
"linear": lambda x: nn.Linear(x["input_dim"], x["output_dim"]),
}
class GIN(KnowledgeEncoder):
def __init__(self, config):
super(GIN, self).__init__()
self.use_jk = config["gnn"]["use_jk"]
self.train_eps = config["gnn"]["train_eps"]
self.hidden_dim = config["gnn"]["hidden_dim"]
self.fn = SUPPORTED_FEATURE_NETWORK[config["feature_network"]["name"]](config["feature_network"])
self.gin_conv1 = GINConv(
nn.Sequential(
nn.Linear(config["feature_network"]["output_dim"], self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(config["gnn"]["hidden_dim"]),
), train_eps=self.train_eps
)
self.gin_convs = torch.nn.ModuleList()
for i in range(config["gnn"]["num_layers"] - 1):
self.gin_convs.append(
GINConv(
nn.Sequential(
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(self.hidden_dim),
), train_eps=self.train_eps
)
)
if self.use_jk:
mode = 'cat'
self.jump = JumpingKnowledge(mode)
self.lin1 = nn.Linear(config["gnn"]["num_layers"] * self.hidden_dim, self.hidden_dim)
else:
self.lin1 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.dropout = nn.Dropout(config["dropout"])
self.lin2 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.output_dim = self.hidden_dim
def reset_parameters(self):
self.fn.reset_parameters()
self.gin_conv1.reset_parameters()
for gin_conv in self.gin_convs:
gin_conv.reset_parameters()
if self.use_jk:
self.jump.reset_parameters()
self.lin1.reset_parameters()
self.lin2.reset_parameters()
def forward(self, graph):
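        # graph: a torch_geometric Data/Batch; graph.x holds raw node features that
        # are first mapped by the feature network self.fn, and graph.edge_index holds
        # the edges of the knowledge graph.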
x = graph.x
edge_index = graph.edge_index
x = x.squeeze()
x = self.fn(x)
x = self.gin_conv1(x, edge_index)
xs = [x]
for conv in self.gin_convs:
x = conv(x, edge_index)
xs += [x]
if self.use_jk:
x = self.jump(xs)
x = F.relu(self.lin1(x))
x = self.dropout(x)
x = self.lin2(x)
# x = torch.add(x, x_)
return x
def encode_knowledge(self, kg):
return self.forward(kg)
| OpenBioMed-main | open_biomed/models/knowledge/gin.py |
from models.text.base_transformers import BaseTransformers
| OpenBioMed-main | open_biomed/models/text/__init__.py |
import torch
import torch.nn as nn
from transformers import AutoModel, AutoConfig
from models.base_models import TextEncoder
class BaseTransformers(TextEncoder):
def __init__(self, config):
super(BaseTransformers, self).__init__()
transformer_config = AutoConfig.from_pretrained(config["model_name_or_path"])
if "load_model" in config:
self.main_model = AutoModel.from_pretrained(config["model_name_or_path"])
#for name, param in self.main_model.named_parameters():
# print(name, param)
else:
            # build the architecture from the config without loading pretrained weights
            self.main_model = AutoModel.from_config(transformer_config)
if "init_checkpoint" in config:
ckpt = torch.load(config["init_checkpoint"])
self.main_model.load_state_dict(ckpt)
if "use_num_layers" in config:
self.use_num_layers = config["use_num_layers"]
else:
self.use_num_layers = -1
self.dropout = nn.Dropout(config["dropout"])
self.pooler = config["pooler"]
self.output_dim = transformer_config.hidden_size
def pool(self, h):
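        # h is the Hugging Face model output: 'default' uses the pooler head,
        # 'mean' averages the token embeddings of the last hidden state, and
        # 'cls' takes its first ([CLS]) token.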
if self.pooler == 'default':
h = h['pooler_output']
elif self.pooler == 'mean':
h = torch.mean(h['last_hidden_state'], dim=-2)
elif self.pooler == 'cls':
h = h['last_hidden_state'][:, 0, :]
return h
def forward(self, text):
        result = self.main_model(**text)
        # the transformer returns a ModelOutput rather than a tensor, so pool it
        # before applying dropout (mirrors the default path of encode_text below)
        return self.dropout(self.pool(result))
def encode_text(self, text, pool=True, proj=False):
if self.use_num_layers == -1:
result = self.main_model(**text)
if pool:
h = self.pool(result)
return self.dropout(h)
else:
return self.dropout(result['last_hidden_state'])
else:
text["output_hidden_states"] = True
result = self.main_model(**text)
return result['hidden_states'][self.use_num_layers] | OpenBioMed-main | open_biomed/models/text/base_transformers.py |
import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool
from models.base_models import MolEncoder
num_atom_type = 119 # including the extra mask tokens
num_chirality_tag = 3
num_bond_type = 5 # including aromatic and self-loop edge
num_bond_direction = 3
class GINEConv(MessagePassing):
def __init__(self, emb_dim):
super(GINEConv, self).__init__()
self.mlp = nn.Sequential(
nn.Linear(emb_dim, 2*emb_dim),
nn.ReLU(),
nn.Linear(2*emb_dim, emb_dim)
)
self.edge_embedding1 = nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = nn.Embedding(num_bond_direction, emb_dim)
nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
def forward(self, x, edge_index, edge_attr):
# add self loops in the edge space
edge_index = add_self_loops(edge_index, num_nodes=x.size(0))[0]
# add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:,0] = 4 #bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return self.mlp(aggr_out)
class GINet(nn.Module):
"""
Args:
num_layer (int): the number of GNN layers
emb_dim (int): dimensionality of embeddings
max_pool_layer (int): the layer from which we use max pool rather than add pool for neighbor aggregation
drop_ratio (float): dropout rate
gnn_type: gin, gcn, graphsage, gat
Output:
node representations
"""
def __init__(self, num_layer=5, emb_dim=300, feat_dim=256, drop_ratio=0, pool='mean', **kwargs):
super(GINet, self).__init__()
self.num_layer = num_layer
self.emb_dim = emb_dim
self.feat_dim = feat_dim
self.drop_ratio = drop_ratio
self.x_embedding1 = nn.Embedding(num_atom_type, emb_dim)
self.x_embedding2 = nn.Embedding(num_chirality_tag, emb_dim)
nn.init.xavier_uniform_(self.x_embedding1.weight.data)
nn.init.xavier_uniform_(self.x_embedding2.weight.data)
# List of MLPs
self.gnns = nn.ModuleList()
for layer in range(num_layer):
self.gnns.append(GINEConv(emb_dim))
# List of batchnorms
self.batch_norms = nn.ModuleList()
for layer in range(num_layer):
self.batch_norms.append(nn.BatchNorm1d(emb_dim))
if pool == 'mean':
self.pool = global_mean_pool
elif pool == 'max':
self.pool = global_max_pool
elif pool == 'add':
self.pool = global_add_pool
def forward(self, data):
x = data.x
edge_index = data.edge_index
edge_attr = data.edge_attr
h = self.x_embedding1(x[:,0]) + self.x_embedding2(x[:,1])
for layer in range(self.num_layer):
h = self.gnns[layer](h, edge_index, edge_attr)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
h = F.dropout(h, self.drop_ratio, training=self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)
out = self.pool(h, data.batch)
return out, h
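
# Illustrative usage sketch (added comment, not part of the original file). MolCLR
# consumes torch_geometric molecule graphs whose node features are
# [atom_type, chirality_tag] index pairs and whose edge features are
# [bond_type, bond_direction] index pairs, e.g.:
#
#   encoder = MolCLR({"gin_num_layers": 5, "gin_hidden_dim": 300, "drop_ratio": 0.0})
#   graph_emb = encoder.encode_mol(batched_graph)                      # (batch, 300)
#   graph_emb, node_emb = encoder.encode_mol(batched_graph, return_node_feats=True)
#
# where `batched_graph` is a torch_geometric.data.Batch produced by a SMILES
# featurizer elsewhere in the repository.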
class MolCLR(MolEncoder):
def __init__(self, config):
super(MolCLR, self).__init__()
self.main_model = GINet(
num_layer=config["gin_num_layers"],
emb_dim=config["gin_hidden_dim"],
JK='last',
drop_ratio=config["drop_ratio"],
gnn_type='gin'
)
if "projection_dim" in config:
self.projector = nn.Linear(config["gin_hidden_dim"], config["projection_dim"])
self.output_dim = config["projection_dim"]
else:
self.projector = None
self.output_dim = config["gin_hidden_dim"]
def forward(self, mol):
h_graph, h_node = self.main_model(mol)
return h_graph, h_node
def encode_mol(self, mol, proj=False, return_node_feats=False):
h_graph, h_node = self.forward(mol)
if proj and self.projector is not None:
h_graph = self.projector(h_graph)
h_node = self.projector(h_node)
if return_node_feats:
return h_graph, h_node
else:
return h_graph
def load_state_dict(self, state_dict, strict=True):
return self.main_model.load_state_dict(state_dict, strict) | OpenBioMed-main | open_biomed/models/molecule/gnn_molclr.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GINConv, JumpingKnowledge, global_max_pool
from models.base_models import MolEncoder
class GINTGSA(MolEncoder):
def __init__(self, layer_drug, dim_drug):
super().__init__()
self.layer_drug = layer_drug
self.dim_drug = dim_drug
self.JK = JumpingKnowledge('cat')
self.convs_drug = nn.ModuleList()
self.bns_drug = nn.ModuleList()
for i in range(self.layer_drug):
if i:
block = nn.Sequential(nn.Linear(self.dim_drug, self.dim_drug), nn.ReLU(),
nn.Linear(self.dim_drug, self.dim_drug))
else:
block = nn.Sequential(nn.Linear(77, self.dim_drug), nn.ReLU(), nn.Linear(self.dim_drug, self.dim_drug))
conv = GINConv(block)
bn = nn.BatchNorm1d(self.dim_drug)
self.convs_drug.append(conv)
self.bns_drug.append(bn)
def forward(self, drug):
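        # drug: a torch_geometric Batch of molecule graphs with 77-dimensional atom
        # features. Per-layer node embeddings are concatenated by JumpingKnowledge('cat')
        # and reduced to one vector per molecule via global max pooling.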
x, edge_index, batch = drug.x, drug.edge_index, drug.batch
x_drug_list = []
for i in range(self.layer_drug):
x = F.relu(self.convs_drug[i](x, edge_index))
x = self.bns_drug[i](x)
x_drug_list.append(x)
node_representation = self.JK(x_drug_list)
x_drug = global_max_pool(node_representation, batch)
return x_drug
def encode_mol(self, mol):
return self.forward(mol) | OpenBioMed-main | open_biomed/models/molecule/gin_tgsa.py |
from models.molecule.cnn import MolCNN
from models.molecule.gin_tgsa import GINTGSA
from models.molecule.gnn_graphcl import GraphCL
from models.molecule.gnn_graphmvp import GraphMVP
from models.molecule.gnn_molclr import MolCLR
from models.molecule.mgnn import MGNN
from models.molecule.unimap import UniMAP
from models.molecule.moflow import MoFlow | OpenBioMed-main | open_biomed/models/molecule/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.base_models import MolEncoder
class MolCNN(MolEncoder):
def __init__(self, config):
super(MolCNN, self).__init__()
self.output_dim = config["output_dim"]
layer_size = len(config["in_ch"]) - 1
self.conv = nn.ModuleList(
[nn.Conv1d(
in_channels = config["in_ch"][i],
out_channels = config["in_ch"][i + 1],
kernel_size = config["kernels"][i]
) for i in range(layer_size)]
)
self.conv = self.conv.double()
hidden_dim = self._get_conv_output((config["vocab_size"], config["max_length"]))
self.fc1 = nn.Linear(hidden_dim, config["output_dim"])
def _get_conv_output(self, shape):
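        # Pass a dummy (vocab_size, max_length) input through the conv stack once to
        # infer the flattened feature size, so fc1 can be sized without hand-deriving
        # the post-convolution dimensions.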
bs = 1
input = Variable(torch.rand(bs, *shape))
output_feat = self._forward_features(input.double())
n_size = output_feat.data.view(bs, -1).size(1)
return n_size
def _forward_features(self, x):
for l in self.conv:
x = F.relu(l(x))
x = F.adaptive_max_pool1d(x, output_size=1)
return x
def forward(self, v):
v = self._forward_features(v.double())
v = v.view(v.size(0), -1)
v = self.fc1(v.float())
return v
def encode_mol(self, mol, proj=False, return_node_feats=False):
return self.forward(mol) | OpenBioMed-main | open_biomed/models/molecule/cnn.py |
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
import re
import math
from rdkit import Chem
import json
from scipy import linalg as la
import torch
import torch.nn as nn
import torch.nn.functional as F
atom_decoder_m = {0: 6, 1: 7, 2: 8, 3: 9}
bond_decoder_m = {1: Chem.rdchem.BondType.SINGLE, 2: Chem.rdchem.BondType.DOUBLE, 3: Chem.rdchem.BondType.TRIPLE}
ATOM_VALENCY = {6:4, 7:3, 8:2, 9:1, 15:3, 16:2, 17:1, 35:1, 53:1}
atomic_num_list = [6, 7, 8, 9, 15, 16, 17, 35, 53, 0]
def check_valency(mol):
"""
Checks that no atoms in the mol have exceeded their possible
valency
:return: True if no valency issues, False otherwise
"""
try:
Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
return True, None
except ValueError as e:
e = str(e)
p = e.find('#')
e_sub = e[p:]
atomid_valence = list(map(int, re.findall(r'\d+', e_sub)))
return False, atomid_valence
def valid_mol_can_with_seg(x, largest_connected_comp=True):
# mol = None
if x is None:
return None
sm = Chem.MolToSmiles(x, isomericSmiles=True)
mol = Chem.MolFromSmiles(sm)
if largest_connected_comp and '.' in sm:
vsm = [(s, len(s)) for s in sm.split('.')] # 'C.CC.CCc1ccc(N)cc1CCC=O'.split('.')
vsm.sort(key=lambda tup: tup[1], reverse=True)
mol = Chem.MolFromSmiles(vsm[0][0])
return mol
def valid_mol(x):
s = Chem.MolFromSmiles(Chem.MolToSmiles(x, isomericSmiles=True)) if x is not None else None
if s is not None and '.' not in Chem.MolToSmiles(s, isomericSmiles=True):
return s
return None
def correct_mol(x):
mol = x
while True:
flag, atomid_valence = check_valency(mol)
if flag:
break
else:
            assert len(atomid_valence) == 2
idx = atomid_valence[0]
v = atomid_valence[1]
queue = []
for b in mol.GetAtomWithIdx(idx).GetBonds():
queue.append(
(b.GetIdx(), int(b.GetBondType()), b.GetBeginAtomIdx(), b.GetEndAtomIdx())
)
queue.sort(key=lambda tup: tup[1], reverse=True)
if len(queue) > 0:
start = queue[0][2]
end = queue[0][3]
t = queue[0][1] - 1
mol.RemoveBond(start, end)
if t >= 1:
mol.AddBond(start, end, bond_decoder_m[t])
return mol
def check_validity(adj, x, atomic_num_list, gpu=-1, return_unique=True,
correct_validity=True, largest_connected_comp=True, debug=True):
"""
:param adj: (100,4,9,9)
:param x: (100.9,5)
:param atomic_num_list: [6,7,8,9,0]
:param gpu: e.g. gpu0
:param return_unique:
:return:
"""
adj = np.array(adj) # (1000,4,9,9)
x = np.array(x) # (1000,9,5)
if correct_validity:
valid = []
for x_elem, adj_elem in zip(x, adj):
mol = construct_mol(x_elem, adj_elem, atomic_num_list)
cmol = correct_mol(mol)
vcmol = valid_mol_can_with_seg(cmol, largest_connected_comp=largest_connected_comp) # valid_mol_can_with_seg(cmol) # valid_mol(cmol) # valid_mol_can_with_seg
valid.append(vcmol)
else:
valid = [valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list))
for x_elem, adj_elem in zip(x, adj)] #len()=1000
valid = [mol for mol in valid if mol is not None] #len()=valid number, say 794
if debug:
print("valid molecules: {}/{}".format(len(valid), adj.shape[0]))
for i, mol in enumerate(valid):
print("[{}] {}".format(i, Chem.MolToSmiles(mol, isomericSmiles=False)))
n_mols = x.shape[0]
valid_ratio = len(valid)/n_mols # say 794/1000
valid_smiles = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in valid]
unique_smiles = list(set(valid_smiles)) # unique valid, say 788
unique_ratio = 0.
if len(valid) > 0:
unique_ratio = len(unique_smiles)/len(valid) # say 788/794
if return_unique:
valid_smiles = unique_smiles
valid_mols = [Chem.MolFromSmiles(s) for s in valid_smiles]
abs_unique_ratio = len(unique_smiles)/n_mols
if debug:
print("valid: {:.3f}%, unique: {:.3f}%, abs unique: {:.3f}%".
format(valid_ratio * 100, unique_ratio * 100, abs_unique_ratio * 100))
results = dict()
results['valid_mols'] = valid_mols
results['valid_smiles'] = valid_smiles
results['valid_ratio'] = valid_ratio*100
results['unique_ratio'] = unique_ratio*100
results['abs_unique_ratio'] = abs_unique_ratio * 100
return results
def construct_mol(x, A, atomic_num_list):
"""
:param x: (9,5)
:param A: (4,9,9)
:param atomic_num_list: [6,7,8,9,0]
:return:
"""
mol = Chem.RWMol()
# x (ch, num_node)
atoms = np.argmax(x, axis=1)
# last a
atoms_exist = atoms != len(atomic_num_list) - 1
atoms = atoms[atoms_exist]
# print('num atoms: {}'.format(sum(atoms>0)))
for atom in atoms:
mol.AddAtom(Chem.Atom(int(atomic_num_list[atom])))
# A (edge_type, num_node, num_node)
adj = np.argmax(A, axis=0)
adj = np.array(adj)
adj = adj[atoms_exist, :][:, atoms_exist]
adj[adj == 3] = -1
adj += 1
for start, end in zip(*np.nonzero(adj)):
if start > end:
mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]])
# add formal charge to atom: e.g. [O+], [N+] [S+]
# not support [O-], [N-] [S-] [NH+] etc.
flag, atomid_valence = check_valency(mol)
if flag:
continue
else:
assert len(atomid_valence) == 2
idx = atomid_valence[0]
v = atomid_valence[1]
an = mol.GetAtomWithIdx(idx).GetAtomicNum()
if an in (7, 8, 16) and (v - ATOM_VALENCY[an]) == 1:
mol.GetAtomWithIdx(idx).SetFormalCharge(1)
return mol
def gaussian_nll(x, mean, ln_var, reduce='sum'):
"""Computes the negative log-likelihood of a Gaussian distribution.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function computes in
elementwise manner the negative log-likelihood of :math:`x` on a
Gaussian distribution :math:`N(\\mu, S)`,
.. math::
-\\log N(x; \\mu, \\sigma^2) =
\\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) +
\\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu),
where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal
matrix where :math:`S_{ii} = \\sigma_i^2`.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'sum'``, loss values are summed up.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
mean (:class:`~chainer.Variable` or :ref:`ndarray`): A variable
representing mean of a Gaussian distribution, :math:`\\mu`.
ln_var (:class:`~chainer.Variable` or :ref:`ndarray`): A variable
representing logarithm of variance of a Gaussian distribution,
:math:`\\log(\\sigma^2)`.
reduce (str): Reduction option. Its value must be either
``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable representing the negative log-likelihood.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'sum'``, the output variable holds a scalar value.
"""
if reduce not in ('sum', 'no'):
raise ValueError(
"only 'sum' and 'no' are valid for 'reduce', but '%s' is "
'given' % reduce)
x_prec = torch.exp(-ln_var) # 324
x_diff = x - mean # (256,324) - (324,) --> (256,324)
x_power = (x_diff * x_diff) * x_prec * -0.5
loss = (ln_var + math.log(2 * (math.pi))) / 2 - x_power
if reduce == 'sum':
return loss.sum()
else:
return loss
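# Illustrative check (comment added by the editor, not executed): for a standard
# normal (mean = 0, ln_var = 0) every element where x = 0 contributes
# 0.5 * log(2 * pi) ≈ 0.9189 to the summed NLL, e.g.
#   gaussian_nll(torch.zeros(2, 3), torch.zeros(3), torch.zeros(3))  # ≈ 6 * 0.9189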
def rescale_adj(adj, type='all'):
# Previous paper didn't use rescale_adj.
# In their implementation, the normalization sum is: num_neighbors = F.sum(adj, axis=(1, 2))
    # In this implementation, the normalization term is different
# raise NotImplementedError
# (256,4,9, 9):
# 4: single, double, triple, and bond between disconnected atoms (negative mask of sum of previous)
# 1-adj[i,:3,:,:].sum(dim=0) == adj[i,4,:,:]
# usually first 3 matrices have no diagnal, the last has.
# A_prime = self.A + sp.eye(self.A.shape[0])
if type == 'view':
out_degree = adj.sum(dim=-1)
out_degree_sqrt_inv = out_degree.pow(-1)
out_degree_sqrt_inv[out_degree_sqrt_inv == float('inf')] = 0
adj_prime = out_degree_sqrt_inv.unsqueeze(-1) * adj # (256,4,9,1) * (256, 4, 9, 9) = (256, 4, 9, 9)
else: # default type all
num_neighbors = adj.sum(dim=(1, 2)).float()
num_neighbors_inv = num_neighbors.pow(-1)
num_neighbors_inv[num_neighbors_inv == float('inf')] = 0
adj_prime = num_neighbors_inv[:, None, None, :] * adj
return adj_prime
logabs = lambda x: torch.log(torch.abs(x))
class ActNorm(nn.Module):
def __init__(self, in_channel, logdet=True):
super().__init__()
self.loc = nn.Parameter(torch.zeros(1, in_channel, 1, 1))
self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1))
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
self.logdet = logdet
def initialize(self, input):
with torch.no_grad():
flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
mean = (
flatten.mean(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
std = (
flatten.std(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
self.loc.data.copy_(-mean)
self.scale.data.copy_(1 / (std + 1e-6))
def forward(self, input):
_, _, height, width = input.shape
if self.initialized.item() == 0:
self.initialize(input)
self.initialized.fill_(1)
log_abs = logabs(self.scale)
logdet = height * width * torch.sum(log_abs)
if self.logdet:
return self.scale * (input + self.loc), logdet
else:
return self.scale * (input + self.loc)
def reverse(self, output):
return output / self.scale - self.loc
class ActNorm2D(nn.Module):
def __init__(self, in_dim, logdet=True):
super().__init__()
self.loc = nn.Parameter(torch.zeros(1, in_dim, 1))
self.scale = nn.Parameter(torch.ones(1, in_dim, 1))
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
self.logdet = logdet
def initialize(self, input):
with torch.no_grad():
flatten = input.permute(1, 0, 2).contiguous().view(input.shape[1], -1)
mean = (
flatten.mean(1)
.unsqueeze(1)
.unsqueeze(2)
.permute(1, 0, 2)
)
std = (
flatten.std(1)
.unsqueeze(1)
.unsqueeze(2)
.permute(1, 0, 2)
)
self.loc.data.copy_(-mean)
self.scale.data.copy_(1 / (std + 1e-6))
def forward(self, input):
_, _, height = input.shape
if self.initialized.item() == 0:
self.initialize(input)
self.initialized.fill_(1)
log_abs = logabs(self.scale)
logdet = height * torch.sum(log_abs)
if self.logdet:
return self.scale * (input + self.loc), logdet
else:
return self.scale * (input + self.loc)
def reverse(self, output):
return output / self.scale - self.loc
class InvConv2d(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = torch.randn(in_channel, in_channel)
q, _ = torch.qr(weight)
weight = q.unsqueeze(2).unsqueeze(3)
self.weight = nn.Parameter(weight)
def forward(self, input):
_, _, height, width = input.shape
out = F.conv2d(input, self.weight)
logdet = (
height * width * torch.slogdet(self.weight.squeeze().double())[1].float()
)
return out, logdet
def reverse(self, output):
return F.conv2d(
output, self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
)
class InvConv2dLU(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = np.random.randn(in_channel, in_channel)
q, _ = la.qr(weight)
w_p, w_l, w_u = la.lu(q.astype(np.float32))
w_s = np.diag(w_u)
w_u = np.triu(w_u, 1)
u_mask = np.triu(np.ones_like(w_u), 1)
l_mask = u_mask.T
w_p = torch.from_numpy(w_p)
w_l = torch.from_numpy(w_l)
w_s = torch.from_numpy(w_s)
w_u = torch.from_numpy(w_u)
self.register_buffer('w_p', w_p)
self.register_buffer('u_mask', torch.from_numpy(u_mask))
self.register_buffer('l_mask', torch.from_numpy(l_mask))
self.register_buffer('s_sign', torch.sign(w_s))
self.register_buffer('l_eye', torch.eye(l_mask.shape[0]))
self.w_l = nn.Parameter(w_l)
self.w_s = nn.Parameter(logabs(w_s))
self.w_u = nn.Parameter(w_u)
def forward(self, input):
_, _, height, width = input.shape
weight = self.calc_weight()
out = F.conv2d(input, weight)
logdet = height * width * torch.sum(self.w_s)
return out, logdet
def calc_weight(self):
weight = (
self.w_p
@ (self.w_l * self.l_mask + self.l_eye)
@ ((self.w_u * self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s)))
)
return weight.unsqueeze(2).unsqueeze(3)
def reverse(self, output):
weight = self.calc_weight()
return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
class InvRotationLU(nn.Module):
def __init__(self, dim):
super(InvRotationLU, self).__init__()
# (9*9) * (bs*9*5)
weight = np.random.randn(dim, dim) # (9,9)
q, _ = la.qr(weight)
w_p, w_l, w_u = la.lu(q.astype(np.float32))
w_s = np.diag(w_u)
w_u = np.triu(w_u, 1)
u_mask = np.triu(np.ones_like(w_u), 1)
l_mask = u_mask.T
w_p = torch.from_numpy(w_p) # a Permutation matrix
w_l = torch.from_numpy(w_l) # L matrix from PLU
        w_s = torch.from_numpy(w_s) # diagonal of the U matrix from PLU
        w_u = torch.from_numpy(w_u) # U minus the diagonal of the U matrix from PLU
        self.register_buffer('w_p', w_p) # (12,12)
        self.register_buffer('u_mask', torch.from_numpy(u_mask)) # (12,12) upper-triangular ones with zero diagonal
        self.register_buffer('l_mask', torch.from_numpy(l_mask)) # (12,12) lower-triangular ones with zero diagonal
        self.register_buffer('s_sign', torch.sign(w_s)) # (12,) sign of the diagonal of the U matrix from PLU
        self.register_buffer('l_eye', torch.eye(l_mask.shape[0])) # (12,12) identity (ones on the diagonal)
self.w_l = nn.Parameter(w_l) # (12,12)
self.w_s = nn.Parameter(logabs(w_s)) # (12, )
self.w_u = nn.Parameter(w_u) # (12,12)
def forward(self, input):
bs, height, width = input.shape # (bs, 9, 5)
weight = self.calc_weight() # 9,9
# out = F.conv2d(input, weight) # (2,12,32,32), (12,12,1,1) --> (2,12,32,32)
# logdet = height * width * torch.sum(self.w_s)
out = torch.matmul(weight, input) # (1, 9,9) * (bs, 9, 5) --> (bs, 9, 5)
logdet = width * torch.sum(self.w_s)
return out, logdet
def calc_weight(self):
weight = (
self.w_p
@ (self.w_l * self.l_mask + self.l_eye)
@ ((self.w_u * self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s)))
)
# weight = torch.matmul(torch.matmul(self.w_p, (self.w_l * self.l_mask + self.l_eye)),
# ((self.w_u * self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s))))
# weight = self.w_p
return weight.unsqueeze(0) # weight.unsqueeze(2).unsqueeze(3)
def reverse(self, output):
weight = self.calc_weight()
# return weight.inverse() @ output
return torch.matmul(weight.inverse(), output)
# return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)) #np.linalg.det(weight.data.numpy())
class InvRotation(nn.Module):
def __init__(self, dim):
super().__init__()
weight = torch.randn(dim, dim)
q, _ = torch.qr(weight)
weight = q.unsqueeze(0)
self.weight = nn.Parameter(weight)
def forward(self, input):
_, height, width = input.shape
# out = F.conv2d(input, self.weight)
out = self.weight @ input
logdet = (
width * torch.slogdet(self.weight.squeeze().double())[1].float()
)
return out, logdet
def reverse(self, output):
return self.weight.squeeze().inverse().unsqueeze(0) @ output
# Basic non-invertible layers in coupling _s_t_function, or for transforming Gaussian distribution
class ZeroConv2d(nn.Module):
def __init__(self, in_channel, out_channel, padding=1):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0) # in:512, out:12
self.conv.weight.data.zero_() # (12,512,3,3)
self.conv.bias.data.zero_() # 12
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) # (1,12,1,1)
def forward(self, input):
out = F.pad(input, [1, 1, 1, 1], value=1) # input: (2,512,32,32) --> (2,512,34,34)
out = self.conv(out) # (2,12,32,32)
out = out * torch.exp(self.scale * 3) # (2,12,32,32) * (1,12,1,1) = (2,12,32,32)
return out
# Basic non-invertible layers in coupling _s_t_function,
class GraphLinear(nn.Module):
"""Graph Linear layer.
This function assumes its input is 3-dimensional. Or 4-dim or whatever, only last dim are changed
Differently from :class:`nn.Linear`, it applies an affine
transformation to the third axis of input `x`.
Warning: original Chainer.link.Link use i.i.d. Gaussian initialization as default,
while default nn.Linear initialization using init.kaiming_uniform_
.. seealso:: :class:`nn.Linear`
"""
def __init__(self, in_size, out_size, bias=True):
super(GraphLinear, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.linear = nn.Linear(in_size, out_size, bias) # Warning: differential initialization from Chainer
def forward(self, x):
"""Forward propagation.
Args:
x (:class:`chainer.Variable`, or :class:`numpy.ndarray`\
or :class:`cupy.ndarray`):
Input array that should be a float array whose ``ndim`` is 3.
It represents a minibatch of atoms, each of which consists
of a sequence of molecules. Each molecule is represented
by integer IDs. The first axis is an index of atoms
(i.e. minibatch dimension) and the second one an index
of molecules.
Returns:
:class:`chainer.Variable`:
A 3-dimeisional array.
"""
# h = x
# s0, s1, s2 = h.shape
# h = h.reshape(-1, s2) # shape: (s0*s1, s2)
# h = self.linear(h)
# h = h.reshape(s0, s1, self.out_size)
h = x
h = h.reshape(-1, x.shape[-1]) # shape: (s0*s1, s2)
h = self.linear(h)
h = h.reshape(tuple(x.shape[:-1] + (self.out_size,)))
return h
class GraphConv(nn.Module):
def __init__(self, in_channels, out_channels, num_edge_type=4):
"""
:param in_channels: e.g. 8
:param out_channels: e.g. 64
:param num_edge_type: e.g. 4 types of edges/bonds
"""
super(GraphConv, self).__init__()
self.graph_linear_self = GraphLinear(in_channels, out_channels)
self.graph_linear_edge = GraphLinear(in_channels, out_channels * num_edge_type)
self.num_edge_type = num_edge_type
self.in_ch = in_channels
self.out_ch = out_channels
def forward(self, adj, h):
"""
graph convolution over batch and multi-graphs
:param h: shape: (256,9, 8)
:param adj: shape: (256,4,9,9)
:return:
"""
mb, node, ch = h.shape # 256, 9, 8
# --- self connection, apply linear function ---
hs = self.graph_linear_self(h) # (256,9, 8) --> (256,9, 64)
# --- relational feature, from neighbor connection ---
# Expected number of neighbors of a vertex
        # Since you have to divide by it, if it's 0, you need to arbitrarily set it to 1
m = self.graph_linear_edge(h) # (256,9, 8) --> (256,9, 64*4), namely (256,9, 256)
m = m.reshape(mb, node, self.out_ch, self.num_edge_type) # (256,9, 256) --> (256,9, 64, 4)
m = m.permute(0, 3, 1, 2) # (256,9, 64, 4) --> (256, 4, 9, 64)
# m: (batchsize, edge_type, node, ch)
# hr: (batchsize, edge_type, node, ch)
hr = torch.matmul(adj, m) # (256,4,9,9) * (256, 4, 9, 64) = (256, 4, 9, 64)
# hr: (batchsize, node, ch)
hr = hr.sum(dim=1) # (256, 4, 9, 64) --> (256, 9, 64)
return hs + hr #
class AffineCoupling(nn.Module): # delete
def __init__(self, in_channel, hidden_channels, affine=True, mask_swap=False): # filter_size=512, --> hidden_channels =(512, 512)
super(AffineCoupling, self).__init__()
self.affine = affine
self.layers = nn.ModuleList()
self.norms = nn.ModuleList()
self.mask_swap=mask_swap
# self.norms_in = nn.ModuleList()
last_h = in_channel // 2
if affine:
vh = tuple(hidden_channels) + (in_channel,)
else:
vh = tuple(hidden_channels) + (in_channel // 2,)
for h in vh:
self.layers.append(nn.Conv2d(last_h, h, kernel_size=3, padding=1))
self.norms.append(nn.BatchNorm2d(h)) # , momentum=0.9 may change norm later, where to use norm? for the residual? or the sum
# self.norms.append(ActNorm(in_channel=h, logdet=False)) # similar but not good
last_h = h
def forward(self, input):
in_a, in_b = input.chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
if self.mask_swap:
in_a, in_b = in_b, in_a
if self.affine:
# log_s, t = self.net(in_a).chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
s, t = self._s_t_function(in_a)
out_b = (in_b + t) * s # different affine bias , no difference to the log-det # (2,6,32,32) More stable, less error
# out_b = in_b * s + t
logdet = torch.sum(torch.log(torch.abs(s)).view(input.shape[0], -1), 1)
else: # add coupling
# net_out = self.net(in_a)
_, t = self._s_t_function(in_a)
out_b = in_b + t
logdet = None
if self.mask_swap:
result = torch.cat([out_b, in_a], 1)
else:
result = torch.cat([in_a, out_b], 1)
return result, logdet
def reverse(self, output):
out_a, out_b = output.chunk(2, 1)
if self.mask_swap:
out_a, out_b = out_b, out_a
if self.affine:
s, t = self._s_t_function(out_a)
in_b = out_b / s - t # More stable, less error s must not equal to 0!!!
# in_b = (out_b - t) / s
else:
_, t = self._s_t_function(out_a)
in_b = out_b - t
if self.mask_swap:
result = torch.cat([in_b, out_a], 1)
else:
result = torch.cat([out_a, in_b], 1)
return result
def _s_t_function(self, x):
h = x
for i in range(len(self.layers)-1):
h = self.layers[i](h)
h = self.norms[i](h)
# h = torch.tanh(h) # tanh may be more stable?
h = torch.relu(h) #
h = self.layers[-1](h)
s = None
if self.affine:
# residual net for doubling the channel. Do not use residual, unstable
log_s, t = h.chunk(2, 1)
s = torch.sigmoid(log_s) # works good when actnorm
else:
t = h
return s, t
class GraphAffineCoupling(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, affine=True):
super(GraphAffineCoupling, self).__init__()
self.n_node = n_node
self.in_dim = in_dim
self.hidden_dim_dict = hidden_dim_dict
self.masked_row = masked_row
self.affine = affine
self.hidden_dim_gnn = hidden_dim_dict['gnn']
self.hidden_dim_linear = hidden_dim_dict['linear']
self.net = nn.ModuleList()
self.norm = nn.ModuleList()
last_dim = in_dim
for out_dim in self.hidden_dim_gnn: # What if use only one gnn???
self.net.append(GraphConv(last_dim, out_dim))
self.norm.append(nn.BatchNorm1d(n_node)) # , momentum=0.9 Change norm!!!
# self.norm.append(ActNorm2D(in_dim=n_node, logdet=False))
last_dim = out_dim
self.net_lin = nn.ModuleList()
self.norm_lin = nn.ModuleList()
for out_dim in self.hidden_dim_linear: # What if use only one gnn???
self.net_lin.append(GraphLinear(last_dim, out_dim))
self.norm_lin.append(nn.BatchNorm1d(n_node)) # , momentum=0.9 Change norm!!!
# self.norm_lin.append(ActNorm2D(in_dim=n_node, logdet=False))
last_dim = out_dim
if affine:
self.net_lin.append(GraphLinear(last_dim, in_dim*2))
else:
self.net_lin.append(GraphLinear(last_dim, in_dim))
self.scale = nn.Parameter(torch.zeros(1)) # nn.Parameter(torch.ones(1)) #
mask = torch.ones(n_node, in_dim)
mask[masked_row, :] = 0 # masked_row are kept same, and used for _s_t for updating the left rows
self.register_buffer('mask', mask)
def forward(self, adj, input):
masked_x = self.mask * input
s, t = self._s_t_function(adj, masked_x) # s must not equal to 0!!!
if self.affine:
out = masked_x + (1-self.mask) * (input + t) * s
# out = masked_x + (1-self.mask) * (input * s + t)
logdet = torch.sum(torch.log(torch.abs(s)).view(input.shape[0], -1), 1) # possibly wrong answer
else: # add coupling
out = masked_x + t*(1-self.mask)
logdet = None
return out, logdet
def reverse(self, adj, output):
masked_y = self.mask * output
s, t = self._s_t_function(adj, masked_y)
if self.affine:
input = masked_y + (1 - self.mask) * (output / s - t)
# input = masked_x + (1 - self.mask) * ((output-t) / s)
else:
input = masked_y + (1 - self.mask) * (output - t)
return input
def _s_t_function(self, adj, x):
# adj: (2,4,9,9) x: # (2,9,5)
s = None
h = x
for i in range(len(self.net)):
h = self.net[i](adj, h) # (2,1,9,hidden_dim)
h = self.norm[i](h)
# h = torch.tanh(h) # tanh may be more stable
h = torch.relu(h) # use relu!!!
for i in range(len(self.net_lin)-1):
h = self.net_lin[i](h) # (2,1,9,hidden_dim)
h = self.norm_lin[i](h)
# h = torch.tanh(h)
h = torch.relu(h)
h = self.net_lin[-1](h)
# h =h * torch.exp(self.scale*2)
if self.affine:
log_s, t = h.chunk(2, dim=-1)
s = torch.sigmoid(log_s) # better validity + actnorm
else:
t = h
return s, t
class Flow(nn.Module):
def __init__(self, in_channel, hidden_channels, affine=True, conv_lu=2, mask_swap=False):
super(Flow, self).__init__()
# More stable to support more flows
self.actnorm = ActNorm(in_channel)
if conv_lu == 0:
self.invconv = InvConv2d(in_channel)
elif conv_lu == 1:
self.invconv = InvConv2dLU(in_channel)
elif conv_lu == 2:
self.invconv = None
else:
raise ValueError("conv_lu in {0,1,2}, 0:InvConv2d, 1:InvConv2dLU, 2:none-just swap to update in coupling")
# May add more parameter to further control net in the coupling layer
self.coupling = AffineCoupling(in_channel, hidden_channels, affine=affine, mask_swap=mask_swap)
def forward(self, input):
out, logdet = self.actnorm(input)
# out = input
# logdet = 0
if self.invconv:
out, det1 = self.invconv(out)
else:
det1 = 0
out, det2 = self.coupling(out)
logdet = logdet + det1
if det2 is not None:
logdet = logdet + det2
return out, logdet
def reverse(self, output):
input = self.coupling.reverse(output)
if self.invconv:
input = self.invconv.reverse(input)
input = self.actnorm.reverse(input)
return input
class FlowOnGraph(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, affine=True):
super(FlowOnGraph, self).__init__()
self.n_node = n_node
self.in_dim = in_dim
self.hidden_dim_dict = hidden_dim_dict
self.masked_row = masked_row
self.affine = affine
# self.conv_lu = conv_lu
self.actnorm = ActNorm2D(in_dim=n_node) # May change normalization later, column norm, or row norm
# self.invconv = InvRotationLU(n_node) # Not stable for inverse!!! delete!!!
self.coupling = GraphAffineCoupling(n_node, in_dim, hidden_dim_dict, masked_row, affine=affine)
def forward(self, adj, input): # (2,4,9,9) (2,2,9,5)
# if input are two channel identical, normalized results are 0
# change other normalization for input
out, logdet = self.actnorm(input)
# out = input
# logdet = torch.zeros(1).to(input)
# out, det1 = self.invconv(out)
det1 = 0
out, det2 = self.coupling(adj, out)
logdet = logdet + det1
if det2 is not None:
logdet = logdet + det2
return out, logdet
def reverse(self, adj, output):
input = self.coupling.reverse(adj, output)
# input = self.invconv.reverse(input)
input = self.actnorm.reverse(input) # change other normalization for input
return input
class Block(nn.Module):
def __init__(self, in_channel, n_flow, squeeze_fold, hidden_channels, affine=True, conv_lu=2): # in_channel: 3, n_flow: 32
super(Block, self).__init__()
# squeeze_fold = 3 for qm9 (bs,4,9,9), squeeze_fold = 2 for zinc (bs, 4, 38, 38)
# (bs,4*3*3,3,3) (bs,4*2*2,19,19)
self.squeeze_fold = squeeze_fold
squeeze_dim = in_channel * self.squeeze_fold * self.squeeze_fold
self.flows = nn.ModuleList()
for i in range(n_flow):
if conv_lu in (0, 1):
self.flows.append(Flow(squeeze_dim, hidden_channels,
affine=affine, conv_lu=conv_lu, mask_swap=False))
else:
self.flows.append(Flow(squeeze_dim, hidden_channels,
affine=affine, conv_lu=2, mask_swap=bool(i % 2)))
# self.prior = ZeroConv2d(squeeze_dim, squeeze_dim*2)
def forward(self, input):
out = self._squeeze(input)
logdet = 0
for flow in self.flows:
out, det = flow(out)
logdet = logdet + det
out = self._unsqueeze(out)
return out, logdet # , log_p, z_new
def reverse(self, output): # , eps=None, reconstruct=False):
input = self._squeeze(output)
for flow in self.flows[::-1]:
input = flow.reverse(input)
unsqueezed = self._unsqueeze(input)
return unsqueezed
def _squeeze(self, x):
"""Trade spatial extent for channels. In forward direction, convert each
1x4x4 volume of input into a 4x1x1 volume of output.
Args:
x (torch.Tensor): Input to squeeze or unsqueeze.
reverse (bool): Reverse the operation, i.e., unsqueeze.
Returns:
x (torch.Tensor): Squeezed or unsqueezed tensor.
"""
# b, c, h, w = x.size()
assert len(x.shape) == 4
b_size, n_channel, height, width = x.shape
fold = self.squeeze_fold
squeezed = x.view(b_size, n_channel, height // fold, fold, width // fold, fold)
squeezed = squeezed.permute(0, 1, 3, 5, 2, 4).contiguous()
out = squeezed.view(b_size, n_channel * fold * fold, height // fold, width // fold)
return out
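    # Shape example (illustrative): with squeeze_fold=3, a qm9 bond tensor of shape
    # (bs, 4, 9, 9) is folded to (bs, 4*3*3, 3, 3) = (bs, 36, 3, 3) before the flows,
    # and _unsqueeze below restores the original (bs, 4, 9, 9) layout.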
def _unsqueeze(self, x):
assert len(x.shape) == 4
b_size, n_channel, height, width = x.shape
fold = self.squeeze_fold
unsqueezed = x.view(b_size, n_channel // (fold * fold), fold, fold, height, width)
unsqueezed = unsqueezed.permute(0, 1, 4, 2, 5, 3).contiguous()
out = unsqueezed.view(b_size, n_channel // (fold * fold), height * fold, width * fold)
return out
class BlockOnGraph(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, n_flow, mask_row_size=1, mask_row_stride=1, affine=True): #, conv_lu=True):
"""
:param n_node:
:param in_dim:
:param hidden_dim:
:param n_flow:
:param mask_row_size: number of rows to be masked for update
:param mask_row_stride: number of steps between two masks' firs row
:param affine:
"""
# in_channel=2 deleted. in_channel: 3, n_flow: 32
super(BlockOnGraph, self).__init__()
assert 0 < mask_row_size < n_node
self.flows = nn.ModuleList()
for i in range(n_flow):
start = i * mask_row_stride
masked_row =[r % n_node for r in range(start, start+mask_row_size)]
self.flows.append(FlowOnGraph(n_node, in_dim, hidden_dim_dict, masked_row=masked_row, affine=affine))
# self.prior = ZeroConv2d(2, 4)
def forward(self, adj, input):
out = input
logdet = 0
for flow in self.flows:
out, det = flow(adj, out)
logdet = logdet + det
# it seems logdet is not influenced
return out, logdet
def reverse(self, adj, output):
input = output
for flow in self.flows[::-1]:
input = flow.reverse(adj, input)
return input
class Glow(nn.Module):
def __init__(self, in_channel, n_flow, n_block, squeeze_fold, hidden_channel, affine=True, conv_lu=2): # in_channel: 3, n_flow:32, n_block:4
super(Glow, self).__init__()
self.blocks = nn.ModuleList()
n_channel = in_channel # 3
for i in range(n_block):
self.blocks.append(Block(n_channel, n_flow, squeeze_fold, hidden_channel, affine=affine, conv_lu=conv_lu)) # 3,6,12
# self.blocks.append(
# Block2(n_channel, n_flow, squeeze_fold, hidden_channel, affine=affine, conv_lu=conv_lu)) # delete
def forward(self, input):
logdet = 0
out = input
for block in self.blocks:
out, det = block(out)
logdet = logdet + det
return out, logdet
def reverse(self, z): # _list, reconstruct=False):
h = z
for i, block in enumerate(self.blocks[::-1]):
h = block.reverse(h)
return h
class GlowOnGraph(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, n_flow, n_block,
mask_row_size_list=[2], mask_row_stride_list=[1], affine=True): # , conv_lu=True): # in_channel: 2 default
super(GlowOnGraph, self).__init__()
assert len(mask_row_size_list) == n_block or len(mask_row_size_list) == 1
assert len(mask_row_stride_list) == n_block or len(mask_row_stride_list) == 1
if len(mask_row_size_list) == 1:
mask_row_size_list = mask_row_size_list * n_block
if len(mask_row_stride_list) == 1:
mask_row_stride_list = mask_row_stride_list * n_block
self.blocks = nn.ModuleList()
for i in range(n_block):
mask_row_size = mask_row_size_list[i]
mask_row_stride = mask_row_stride_list[i]
self.blocks.append(BlockOnGraph(n_node, in_dim, hidden_dim_dict, n_flow, mask_row_size, mask_row_stride, affine=affine))
def forward(self, adj, x):
# adj (bs, 4,9,9), xx:(bs, 9,5)
logdet = 0
out = x
for block in self.blocks:
out, det = block(adj, out)
logdet = logdet + det
return out, logdet
def reverse(self, adj, z):
# (bs, 4,9,9), zz: (bs, 9, 5)
input = z
for i, block in enumerate(self.blocks[::-1]):
input = block.reverse(adj, input)
return input
class MoFlow(nn.Module):
def __init__(self, config):
super(MoFlow, self).__init__()
hyper_params = json.load(open(os.path.join(config["model_path"], config["hparams"]), "r"))
self.hyper_params = hyper_params # hold all the parameters, easy for save and load for further usage
logger.info("MoFlow hparams: %s" % (str(hyper_params)))
# More parameters derived from hyper_params for easy use
self.b_n_type = hyper_params["b_n_type"]
self.a_n_node = hyper_params["a_n_node"]
self.a_n_type = hyper_params["a_n_type"]
self.b_size = self.a_n_node * self.a_n_node * self.b_n_type
self.a_size = self.a_n_node * self.a_n_type
self.noise_scale = hyper_params["noise_scale"]
if hyper_params["learn_dist"]:
self.ln_var = nn.Parameter(torch.zeros(1)) # (torch.zeros(2)) 2 is worse than 1
else:
self.register_buffer('ln_var', torch.zeros(1)) # self.ln_var = torch.zeros(1)
self.bond_model = Glow(
in_channel=hyper_params["b_n_type"], # 4,
n_flow=hyper_params["b_n_flow"], # 10, # n_flow 10-->20 n_flow=20
n_block=hyper_params["b_n_block"], # 1,
squeeze_fold=hyper_params["b_n_squeeze"], # 3,
hidden_channel=hyper_params["b_hidden_ch"], # [128, 128],
affine=hyper_params["b_affine"], # True,
conv_lu=hyper_params["b_conv_lu"] # 0,1,2
) # n_flow=9, n_block=4
self.atom_model = GlowOnGraph(
n_node=hyper_params["a_n_node"], # 9,
in_dim=hyper_params["a_n_type"], # 5,
hidden_dim_dict={'gnn': hyper_params["a_hidden_gnn"], 'linear': hyper_params["a_hidden_lin"]}, # {'gnn': [64], 'linear': [128, 64]},
n_flow=hyper_params["a_n_flow"], # 27,
n_block=hyper_params["a_n_block"], # 1,
mask_row_size_list=hyper_params["mask_row_size_list"], # [1],
mask_row_stride_list=hyper_params["mask_row_stride_list"], # [1],
affine=hyper_params["a_affine"] # True
)
if config["snapshot"] != "None":
ckpt = torch.load(os.path.join(config["model_path"], config["snapshot"]))
self.load_state_dict(ckpt)
def forward(self, adj, x, adj_normalized):
"""
:param adj: (256,4,9,9)
:param x: (256,9,5)
:return:
"""
h = x # (256,9,5)
# add uniform noise to node feature matrices
# + noise didn't change log-det. 1. change to logit transform 2. *0.9 ---> *other value???
if self.training:
if self.noise_scale == 0:
h = h/2.0 - 0.5 + torch.rand_like(x) * 0.4 #/ 2.0 similar to X + U(0, 0.8) *0.5*0.8=0.4
else:
h = h + torch.rand_like(x) * self.noise_scale # noise_scale default 0.9
# h, log_det_logit_x = logit_pre_process(h) # to delete
h, sum_log_det_jacs_x = self.atom_model(adj_normalized, h)
# sum_log_det_jacs_x = sum_log_det_jacs_x + log_det_logit_x # to delete
# add uniform noise to adjacency tensors
if self.training:
if self.noise_scale == 0:
adj = adj/2.0 - 0.5 + torch.rand_like(adj) * 0.4 #/ 2.0
else:
adj = adj + torch.rand_like(adj) * self.noise_scale # (256,4,9,9) noise_scale default 0.9
# adj, log_det_logit_adj = logit_pre_process(adj) # to delete
adj_h, sum_log_det_jacs_adj = self.bond_model(adj)
# sum_log_det_jacs_adj = log_det_logit_adj + sum_log_det_jacs_adj # to delete
out = [h, adj_h] # combine to one tensor later bs * dim tensor
return out, [sum_log_det_jacs_x, sum_log_det_jacs_adj]
def reverse(self, z, true_adj=None): # change!!! z[0] --> for z_x, z[1] for z_adj, a list!!!
"""
Returns a molecule, given its latent vector.
:param z: latent vector. Shape: [B, N*N*M + N*T] (100,369) 369=9*9 * 4 + 9*5
B = Batch size, N = number of atoms, M = number of bond types,
T = number of atom types (Carbon, Oxygen etc.)
:param true_adj: used for testing. An adjacency matrix of a real molecule
:return: adjacency matrix and feature matrix of a molecule
"""
batch_size = z.shape[0] # 100, z.shape: (100,369)
z_x = z[:, :self.a_size] # (100, 45)
z_adj = z[:, self.a_size:] # (100, 324)
if true_adj is None:
h_adj = z_adj.reshape(batch_size, self.b_n_type, self.a_n_node, self.a_n_node) # (100,4,9,9)
h_adj = self.bond_model.reverse(h_adj)
if self.noise_scale == 0:
h_adj = (h_adj + 0.5) * 2
# decode adjacency matrix from h_adj
adj = h_adj
adj = adj + adj.permute(0, 1, 3, 2)
adj = adj / 2
adj = adj.softmax(dim=1) # (100,4!!!,9,9) prob. for edge 0-3 for every pair of nodes
max_bond = adj.max(dim=1).values.reshape(batch_size, -1, self.a_n_node, self.a_n_node) # (100,1,9,9)
adj = torch.floor(adj / max_bond) # (100,4,9,9) / (100,1,9,9) --> (100,4,9,9)
else:
adj = true_adj
h_x = z_x.reshape(batch_size, self.a_n_node, self.a_n_type)
adj_normalized = rescale_adj(adj).to(h_x)
h_x = self.atom_model.reverse(adj_normalized, h_x)
if self.noise_scale == 0:
h_x = (h_x + 0.5) * 2
# h_x = torch.sigmoid(h_x) # to delete for logit
return adj, h_x # (100,4,9,9), (100,9,5)
def decode(self, z):
adj, x = self.reverse(z)
adj = adj.squeeze(0)
x = x.squeeze(0)
return adj, x
def log_prob(self, z, logdet): # z:[(256,45), (256,324)] logdet:[(256,),(256,)]
        # If we didn't use self.ln_var here, this code could be parallelized.
z[0] = z[0].reshape(z[0].shape[0],-1)
z[1] = z[1].reshape(z[1].shape[0], -1)
logdet[0] = logdet[0] - self.a_size * math.log(2.) # n_bins = 2**n_bit = 2**1=2
logdet[1] = logdet[1] - self.b_size * math.log(2.)
if len(self.ln_var) == 1:
ln_var_adj = self.ln_var * torch.ones([self.b_size]).to(z[0]) # (324,)
ln_var_x = self.ln_var * torch.ones([self.a_size]).to(z[0]) # (45)
else:
ln_var_adj = self.ln_var[0] * torch.ones([self.b_size]).to(z[0]) # (324,) 0 for bond
ln_var_x = self.ln_var[1] * torch.ones([self.a_size]).to(z[0]) # (45) 1 for atom
nll_adj = torch.mean(
torch.sum(gaussian_nll(z[1], torch.zeros(self.b_size).to(z[0]), ln_var_adj, reduce='no'), dim=1)
- logdet[1])
nll_adj = nll_adj / (self.b_size * math.log(2.)) # the negative log likelihood per dim with log base 2
nll_x = torch.mean(torch.sum(
gaussian_nll(z[0], torch.zeros(self.a_size).to(z[0]), ln_var_x, reduce='no'),
dim=1) - logdet[0])
nll_x = nll_x / (self.a_size * math.log(2.)) # the negative log likelihood per dim with log base 2
if nll_x.item() < 0:
print('nll_x:{}'.format(nll_x.item()))
return [nll_x, nll_adj]
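# Minimal sampling sketch (not from the original repository): given a trained
# MoFlow instance, molecules are generated by drawing a Gaussian latent vector of
# size a_size + b_size and passing it through reverse(). The temperature value is
# an illustrative assumption, not a repository default.
def _moflow_sampling_sketch(model, batch_size=4, temperature=0.7, device="cpu"):
    z_dim = model.a_size + model.b_size  # e.g. 45 + 324 = 369 for the 9-node setting
    sigma = temperature * torch.exp(0.5 * model.ln_var.mean())
    z = torch.randn(batch_size, z_dim, device=device) * sigma
    adj, x = model.reverse(z)  # (batch_size, b_n_type, n_node, n_node), (batch_size, n_node, a_n_type)
    return adj, x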
| OpenBioMed-main | open_biomed/models/molecule/moflow.py |
import logging
logger = logging.getLogger(__name__)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import (MessagePassing, global_add_pool, global_max_pool, global_mean_pool)
from torch_geometric.nn.inits import glorot, zeros
from torch_geometric.utils import add_self_loops, softmax
from torch_scatter import scatter_add
from models.base_models import MolEncoder
num_atom_type = 120 # including the extra mask tokens
num_chirality_tag = 3
num_bond_type = 6 # including aromatic and self-loop edge, and extra masked tokens
num_bond_direction = 3
class GINConv(MessagePassing):
def __init__(self, emb_dim, aggr="add"):
super(GINConv, self).__init__()
self.aggr = aggr
self.mlp = nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim),
nn.ReLU(),
nn.Linear(2 * emb_dim, emb_dim))
self.edge_embedding1 = nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = nn.Embedding(num_bond_direction, emb_dim)
nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
def forward(self, x, edge_index, edge_attr):
edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:, 0] = 4 # bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
edge_embeddings = self.edge_embedding1(edge_attr[:, 0]) + \
self.edge_embedding2(edge_attr[:, 1])
return self.propagate(edge_index[0], x=x, edge_attr=edge_embeddings)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return self.mlp(aggr_out)
class GCNConv(MessagePassing):
def __init__(self, emb_dim, aggr="add"):
super(GCNConv, self).__init__()
self.aggr = aggr
self.emb_dim = emb_dim
self.linear = nn.Linear(emb_dim, emb_dim)
self.edge_embedding1 = nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = nn.Embedding(num_bond_direction, emb_dim)
nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
def norm(self, edge_index, num_nodes, dtype):
### assuming that self-loops have been already added in edge_index
edge_weight = torch.ones((edge_index.size(1),), dtype=dtype, device=edge_index.device)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def forward(self, x, edge_index, edge_attr):
# add self loops in the edge space
edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
# add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:, 0] = 4 # bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
edge_embeddings = self.edge_embedding1(edge_attr[:, 0]) + \
self.edge_embedding2(edge_attr[:, 1])
norm = self.norm(edge_index[0], x.size(0), x.dtype)
x = self.linear(x)
return self.propagate(edge_index[0], x=x, edge_attr=edge_embeddings, norm=norm)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * (x_j + edge_attr)
class GATConv(MessagePassing):
def __init__(self, emb_dim, heads=2, negative_slope=0.2, aggr="add"):
super(GATConv, self).__init__(node_dim=0)
self.aggr = aggr
self.heads = heads
self.emb_dim = emb_dim
self.negative_slope = negative_slope
self.weight_linear = nn.Linear(emb_dim, heads * emb_dim)
self.att = nn.Parameter(torch.Tensor(1, heads, 2 * emb_dim))
self.bias = nn.Parameter(torch.Tensor(emb_dim))
self.edge_embedding1 = nn.Embedding(num_bond_type, heads * emb_dim)
self.edge_embedding2 = nn.Embedding(num_bond_direction, heads * emb_dim)
nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
self.reset_parameters()
def reset_parameters(self):
glorot(self.att)
zeros(self.bias)
def forward(self, x, edge_index, edge_attr):
# add self loops in the edge space
edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
# add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:, 0] = 4 # bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
edge_embeddings = self.edge_embedding1(edge_attr[:, 0]) + \
self.edge_embedding2(edge_attr[:, 1])
x = self.weight_linear(x)
return self.propagate(edge_index[0], x=x, edge_attr=edge_embeddings)
def message(self, edge_index, x_i, x_j, edge_attr):
x_i = x_i.view(-1, self.heads, self.emb_dim)
x_j = x_j.view(-1, self.heads, self.emb_dim)
edge_attr = edge_attr.view(-1, self.heads, self.emb_dim)
x_j += edge_attr
alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, edge_index[0])
return x_j * alpha.view(-1, self.heads, 1)
def update(self, aggr_out):
aggr_out = aggr_out.mean(dim=1)
aggr_out += self.bias
return aggr_out
class GraphSAGEConv(MessagePassing):
def __init__(self, emb_dim, aggr="mean"):
super(GraphSAGEConv, self).__init__()
self.aggr = aggr
self.emb_dim = emb_dim
self.linear = nn.Linear(emb_dim, emb_dim)
self.edge_embedding1 = nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = nn.Embedding(num_bond_direction, emb_dim)
nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
def forward(self, x, edge_index, edge_attr):
# add self loops in the edge space
edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
# add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:, 0] = 4 # bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
edge_embeddings = self.edge_embedding1(edge_attr[:, 0]) + \
self.edge_embedding2(edge_attr[:, 1])
x = self.linear(x)
return self.propagate(edge_index[0], x=x, edge_attr=edge_embeddings)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return F.normalize(aggr_out, p=2, dim=-1)
class GNNGraphMVP(nn.Module):
def __init__(self, num_layer, emb_dim, JK="last", drop_ratio=0., gnn_type="gin", **kwargs):
if num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
super(GNNGraphMVP, self).__init__()
self.drop_ratio = drop_ratio
self.num_layer = num_layer
self.JK = JK
self.output_dim = emb_dim
self.x_embedding1 = nn.Embedding(num_atom_type, emb_dim)
self.x_embedding2 = nn.Embedding(num_chirality_tag, emb_dim)
nn.init.xavier_uniform_(self.x_embedding1.weight.data)
nn.init.xavier_uniform_(self.x_embedding2.weight.data)
###List of MLPs
self.gnns = nn.ModuleList()
for layer in range(num_layer):
if gnn_type == "gin":
self.gnns.append(GINConv(emb_dim, aggr="add"))
elif gnn_type == "gcn":
self.gnns.append(GCNConv(emb_dim))
elif gnn_type == "gat":
self.gnns.append(GATConv(emb_dim))
elif gnn_type == "graphsage":
self.gnns.append(GraphSAGEConv(emb_dim))
###List of batchnorms
self.batch_norms = nn.ModuleList()
for layer in range(num_layer):
self.batch_norms.append(nn.BatchNorm1d(emb_dim))
self.pooling = global_mean_pool
# def forward(self, x, edge_index, edge_attr):
def forward(self, *argv):
x_prob = None
if len(argv) == 3:
x, edge_index, edge_attr = argv[0], argv[1], argv[2]
elif len(argv) == 1:
data = argv[0]
x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
elif len(argv) == 4:
data = argv[0]
x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
x_prob = argv[1]
atomic_num_list = argv[2]
device = argv[3]
else:
raise ValueError("unmatched number of arguments.")
if x_prob is not None:
            x = (
                torch.matmul(x_prob[:, :-1], self.x_embedding1(torch.tensor(atomic_num_list[:-1]).to(device) - 1))
                + torch.matmul(x_prob[:, -1:], self.x_embedding1(torch.arange(119, 120).to(device)))
                + self.x_embedding2(x[:, 1])
            )
else:
x = self.x_embedding1(x[:, 0]) + self.x_embedding2(x[:, 1])
h_list = [x]
for layer in range(self.num_layer):
h = self.gnns[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
# h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
if layer == self.num_layer - 1:
# remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training=self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)
h_list.append(h)
### Different implementations of Jk-concat
if self.JK == "concat":
node_representation = torch.cat(h_list, dim=1)
elif self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "max":
h_list = [h.unsqueeze_(0) for h in h_list]
node_representation = torch.max(torch.cat(h_list, dim=0), dim=0)[0]
elif self.JK == "sum":
h_list = [h.unsqueeze_(0) for h in h_list]
node_representation = torch.sum(torch.cat(h_list, dim=0), dim=0)[0]
else:
raise ValueError("not implemented.")
return self.pooling(node_representation, batch), node_representation
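# Illustrative usage sketch (not part of the original file): build a two-layer GIN
# encoder and run it on a toy batched graph. The atom/bond feature values below are
# arbitrary small indices chosen only to satisfy the embedding tables.
def _graphmvp_encoder_demo():
    from torch_geometric.data import Batch, Data
    model = GNNGraphMVP(num_layer=2, emb_dim=300, JK="last", drop_ratio=0.0, gnn_type="gin")
    graph = Data(
        x=torch.tensor([[5, 0], [7, 0], [5, 1]], dtype=torch.long),  # [atom_type, chirality]
        edge_index=torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long),
        edge_attr=torch.tensor([[0, 0], [0, 0], [1, 0], [1, 0]], dtype=torch.long),  # [bond_type, bond_direction]
    )
    batch = Batch.from_data_list([graph, graph])
    graph_feat, node_feat = model(batch)
    return graph_feat.shape, node_feat.shape  # torch.Size([2, 300]), torch.Size([6, 300])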
class GraphMVP(MolEncoder):
def __init__(self, config):
super(GraphMVP, self).__init__()
self.main_model = GNNGraphMVP(
num_layer=config["gin_num_layers"],
emb_dim=config["gin_hidden_dim"],
JK='last',
drop_ratio=config["drop_ratio"],
gnn_type='gin'
)
if "projection_dim" in config:
self.projector = nn.Linear(config["gin_hidden_dim"], config["projection_dim"])
self.output_dim = config["projection_dim"]
else:
self.projector = None
self.output_dim = config["gin_hidden_dim"]
if "init_checkpoint" in config:
logger.info("GraphMVP: load checkpoint from %s" % (config["init_checkpoint"]))
self.main_model.load_state_dict(torch.load(config["init_checkpoint"], map_location="cpu"))
def forward(self, mol):
h_graph, h_node = self.main_model(mol)
if self.projector is not None:
h_graph = self.projector(h_graph)
h_node = self.projector(h_node)
return h_graph, h_node
def encode_mol(self, mol, proj=False, return_node_feats=False):
h_graph, h_node = self.forward(mol)
if proj and self.projector is not None:
h_graph = self.projector(h_graph)
h_node = self.projector(h_node)
if return_node_feats:
return h_graph, h_node
else:
return h_graph
def load_state_dict(self, state_dict, strict=True):
return self.main_model.load_state_dict(state_dict, strict) | OpenBioMed-main | open_biomed/models/molecule/gnn_graphmvp.py |
'''
Implementation of MGNN in MGraphDTA: Deep Multiscale Graph Neural Network for Explainable Drug-target binding affinity Prediction
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
import torch_geometric.nn as gnn
from torch import Tensor
from collections import OrderedDict
from models.base_models import MolEncoder
class NodeLevelBatchNorm(_BatchNorm):
r"""
Applies Batch Normalization over a batch of graph data.
Shape:
- Input: [batch_nodes_dim, node_feature_dim]
- Output: [batch_nodes_dim, node_feature_dim]
batch_nodes_dim: all nodes of a batch graph
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super(NodeLevelBatchNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
if input.dim() != 2:
raise ValueError('expected 2D input (got {}D input)'
.format(input.dim()))
def forward(self, input):
self._check_input_dim(input)
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None:
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else:
exponential_average_factor = self.momentum
return torch.functional.F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
def extra_repr(self):
return 'num_features={num_features}, eps={eps}, ' \
'affine={affine}'.format(**self.__dict__)
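# Small illustrative check (not in the original file): NodeLevelBatchNorm normalizes
# over all nodes of a batched graph at once, so it takes a plain
# [num_nodes, num_features] tensor no matter how many graphs contributed the nodes.
def _node_level_batchnorm_demo():
    norm = NodeLevelBatchNorm(num_features=8)
    node_feats = torch.randn(30, 8)  # e.g. 30 nodes collected from several graphs
    return norm(node_feats).shape    # torch.Size([30, 8])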
class GraphConvBn(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = gnn.GraphConv(in_channels, out_channels)
self.norm = NodeLevelBatchNorm(out_channels)
def forward(self, data):
x, edge_index, batch = data.x, data.edge_index, data.batch
data.x = F.relu(self.norm(self.conv(x, edge_index)))
return data
class DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate=32, bn_size=4):
super().__init__()
self.conv1 = GraphConvBn(num_input_features, int(growth_rate * bn_size))
self.conv2 = GraphConvBn(int(growth_rate * bn_size), growth_rate)
def bn_function(self, data):
concated_features = torch.cat(data.x, 1)
data.x = concated_features
data = self.conv1(data)
return data
def forward(self, data):
if isinstance(data.x, Tensor):
data.x = [data.x]
data = self.bn_function(data)
data = self.conv2(data)
return data
class DenseBlock(nn.ModuleDict):
def __init__(self, num_layers, num_input_features, growth_rate=32, bn_size=4):
super().__init__()
for i in range(num_layers):
layer = DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size)
self.add_module('layer%d' % (i + 1), layer)
def forward(self, data):
features = [data.x]
for name, layer in self.items():
data = layer(data)
features.append(data.x)
data.x = features
data.x = torch.cat(data.x, 1)
return data
class MGNN(MolEncoder):
def __init__(self, config):
super().__init__()
self.output_dim = config["out_dim"]
self.features = nn.Sequential(OrderedDict([('conv0', GraphConvBn(config["num_input_features"], 32))]))
num_input_features = 32
for i, num_layers in enumerate(config["block_config"]):
block = DenseBlock(
num_layers, num_input_features, growth_rate=config["growth_rate"], bn_size=config["bn_sizes"][i]
)
self.features.add_module('block%d' % (i+1), block)
num_input_features += int(num_layers * config["growth_rate"])
trans = GraphConvBn(num_input_features, num_input_features // 2)
self.features.add_module("transition%d" % (i+1), trans)
num_input_features = num_input_features // 2
self.classifer = nn.Linear(num_input_features, config["out_dim"])
def forward(self, data):
data = self.features(data)
x = gnn.global_mean_pool(data.x, data.batch)
x = self.classifer(x)
return x
def encode_mol(self, mol, proj=False, return_node_feats=False):
return self.forward(mol) | OpenBioMed-main | open_biomed/models/molecule/mgnn.py |
import torch
import torch.nn as nn
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree, softmax
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
import torch.nn.functional as F
from torch_scatter import scatter_add
from torch_geometric.nn.inits import glorot, zeros
from models.base_models import MolEncoder
num_atom_type = 120 #including the extra mask tokens
num_chirality_tag = 3
num_bond_type = 6 #including aromatic and self-loop edge, and extra masked tokens
num_bond_direction = 3
class GINConv(MessagePassing):
"""
    Extension of GIN aggregation to incorporate edge information by adding edge embeddings to the neighbor messages.
    Args:
        emb_dim (int): dimensionality of embeddings for nodes and edges.
        aggr (str): neighborhood aggregation scheme ("add" by default).
    See https://arxiv.org/abs/1810.00826
"""
def __init__(self, emb_dim, aggr = "add"):
super(GINConv, self).__init__(aggr = "add")
#multi-layer perceptron
self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))
self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
self.aggr = aggr
def forward(self, x, edge_index, edge_attr):
#add self loops in the edge space
# print('--------------------')
# print('x:', x.shape)
# print('edge_index:',edge_index.shape)
edge_index, edge_attr = add_self_loops(edge_index, edge_attr, fill_value=0, num_nodes = x.size(0))
#add features corresponding to self-loop edges.
# self_loop_attr = torch.zeros(x.size(0), 2)
# self_loop_attr[:,0] = 4 #bond type for self-loop edge
# self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
# print('edge_attr:',edge_attr.shape)
# print('self_loop_attr:',self_loop_attr.shape)
# print('--------------------')
# edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return self.mlp(aggr_out)
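# Illustrative sketch (not part of the original file): a single GINConv call on a toy
# graph. Node features are assumed to be already embedded to emb_dim, and edge_attr
# holds integer [bond_type, bond_direction] indices.
def _ginconv_demo(emb_dim=16):
    conv = GINConv(emb_dim)
    x = torch.randn(4, emb_dim)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]], dtype=torch.long)
    edge_attr = torch.zeros(4, 2, dtype=torch.long)
    return conv(x, edge_index, edge_attr).shape  # torch.Size([4, emb_dim])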
class GCNConv(MessagePassing):
def __init__(self, emb_dim, aggr = "add"):
super(GCNConv, self).__init__()
self.emb_dim = emb_dim
self.linear = torch.nn.Linear(emb_dim, emb_dim)
self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
self.aggr = aggr
def norm(self, edge_index, num_nodes, dtype):
### assuming that self-loops have been already added in edge_index
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def forward(self, x, edge_index, edge_attr):
#add self loops in the edge space
        edge_index, _ = add_self_loops(edge_index, num_nodes = x.size(0))
#add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:,0] = 4 #bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
norm = self.norm(edge_index, x.size(0), x.dtype)
x = self.linear(x)
        return self.propagate(edge_index, x=x, edge_attr=edge_embeddings, norm = norm)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * (x_j + edge_attr)
class GATConv(MessagePassing):
def __init__(self, emb_dim, heads=2, negative_slope=0.2, aggr = "add"):
        super(GATConv, self).__init__(node_dim=0)
self.aggr = aggr
self.emb_dim = emb_dim
self.heads = heads
self.negative_slope = negative_slope
self.weight_linear = torch.nn.Linear(emb_dim, heads * emb_dim)
self.att = torch.nn.Parameter(torch.Tensor(1, heads, 2 * emb_dim))
self.bias = torch.nn.Parameter(torch.Tensor(emb_dim))
self.edge_embedding1 = torch.nn.Embedding(num_bond_type, heads * emb_dim)
self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, heads * emb_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
self.reset_parameters()
def reset_parameters(self):
glorot(self.att)
zeros(self.bias)
def forward(self, x, edge_index, edge_attr):
#add self loops in the edge space
        edge_index, _ = add_self_loops(edge_index, num_nodes = x.size(0))
#add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:,0] = 4 #bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
x = self.weight_linear(x).view(-1, self.heads, self.emb_dim)
        return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
def message(self, edge_index, x_i, x_j, edge_attr):
edge_attr = edge_attr.view(-1, self.heads, self.emb_dim)
x_j += edge_attr
alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, edge_index[0])
return x_j * alpha.view(-1, self.heads, 1)
def update(self, aggr_out):
aggr_out = aggr_out.mean(dim=1)
aggr_out = aggr_out + self.bias
return aggr_out
class GraphSAGEConv(MessagePassing):
def __init__(self, emb_dim, aggr = "mean"):
        super(GraphSAGEConv, self).__init__(aggr=aggr)
self.emb_dim = emb_dim
self.linear = torch.nn.Linear(emb_dim, emb_dim)
self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
self.aggr = aggr
def forward(self, x, edge_index, edge_attr):
#add self loops in the edge space
        edge_index, _ = add_self_loops(edge_index, num_nodes = x.size(0))
#add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:,0] = 4 #bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
x = self.linear(x)
        return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return F.normalize(aggr_out, p = 2, dim = -1)
class GNNGraphCL(nn.Module):
"""
Args:
num_layer (int): the number of GNN layers
emb_dim (int): dimensionality of embeddings
        JK (str): jumping-knowledge mode for combining layer outputs: "last", "concat", "max" or "sum"
        drop_ratio (float): dropout rate
        gnn_type (str): gin, gcn, graphsage or gat
    Output:
        graph representation and node representations
"""
def __init__(self, num_layer, emb_dim, JK = "last", drop_ratio = 0, gnn_type = "gin", **kwargs):
super(GNNGraphCL, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
self.output_dim = emb_dim
if self.num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
self.x_embedding1 = torch.nn.Embedding(num_atom_type, emb_dim)
self.x_embedding2 = torch.nn.Embedding(num_chirality_tag, emb_dim)
torch.nn.init.xavier_uniform_(self.x_embedding1.weight.data)
torch.nn.init.xavier_uniform_(self.x_embedding2.weight.data)
###List of MLPs
self.gnns = torch.nn.ModuleList()
for layer in range(num_layer):
if gnn_type == "gin":
self.gnns.append(GINConv(emb_dim, aggr = "add"))
elif gnn_type == "gcn":
self.gnns.append(GCNConv(emb_dim))
elif gnn_type == "gat":
self.gnns.append(GATConv(emb_dim))
elif gnn_type == "graphsage":
self.gnns.append(GraphSAGEConv(emb_dim))
self.pool = global_mean_pool
###List of batchnorms
self.batch_norms = torch.nn.ModuleList()
for layer in range(num_layer):
self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
#def forward(self, x, edge_index, edge_attr):
def forward(self, *argv):
x_prob = None
if len(argv) == 3:
x, edge_index, edge_attr = argv[0], argv[1], argv[2]
elif len(argv) == 1:
data = argv[0]
x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
elif len(argv) == 4:
data = argv[0]
x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
x_prob = argv[1]
atomic_num_list = argv[2]
device = argv[3]
else:
raise ValueError("unmatched number of arguments.")
if x_prob is not None:
            x = (
                torch.matmul(x_prob[:, :-1], self.x_embedding1(torch.tensor(atomic_num_list[:-1]).to(device) - 1))
                + torch.matmul(x_prob[:, -1:], self.x_embedding1(torch.arange(119, 120).to(device)))
                + self.x_embedding2(x[:, 1])
            )
else:
x = self.x_embedding1(x[:, 0]) + self.x_embedding2(x[:, 1])
h_list = [x]
for layer in range(self.num_layer):
h = self.gnns[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
#h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training = self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
h_list.append(h)
### Different implementations of Jk-concat
if self.JK == "concat":
node_representation = torch.cat(h_list, dim = 1)
elif self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "max":
h_list = [h.unsqueeze_(0) for h in h_list]
node_representation = torch.max(torch.cat(h_list, dim = 0), dim = 0)[0]
elif self.JK == "sum":
h_list = [h.unsqueeze_(0) for h in h_list]
node_representation = torch.sum(torch.cat(h_list, dim = 0), dim = 0)[0]
h_graph = self.pool(node_representation, batch)
return h_graph, node_representation
def encode_mol(self, mol):
return self.forward(mol)
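# Illustrative sketch (not from the original file): the GraphCL wrapper defined below
# is driven by a config dict; the keys here mirror the ones read in GraphCL.__init__,
# while the numeric values are arbitrary example choices.
_EXAMPLE_GRAPHCL_CONFIG = {
    "gin_num_layers": 5,
    "gin_hidden_dim": 300,
    "drop_ratio": 0.1,
    "projection_dim": 256,
}
# model = GraphCL(_EXAMPLE_GRAPHCL_CONFIG)
# h_graph, h_node = model.encode_mol(pyg_batch, proj=True, return_node_feats=True)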
class GraphCL(MolEncoder):
def __init__(self, config):
super(GraphCL, self).__init__()
self.main_model = GNNGraphCL(
num_layer=config["gin_num_layers"],
emb_dim=config["gin_hidden_dim"],
JK='last',
drop_ratio=config["drop_ratio"],
gnn_type='gin'
)
if "projection_dim" in config:
self.projector = nn.Linear(config["gin_hidden_dim"], config["projection_dim"])
else:
self.projector = None
def forward(self, mol):
h_graph, h_node = self.main_model(mol)
return h_graph, h_node
def encode_mol(self, mol, proj=False, return_node_feats=True):
h_graph, h_node = self.forward(mol)
if proj and self.projector is not None:
h_graph = self.projector(h_graph)
h_node = self.projector(h_node)
if return_node_feats:
return h_graph, h_node
else:
return h_node
def load_state_dict(self, state_dict, strict=True):
return self.main_model.load_state_dict(state_dict, strict) | OpenBioMed-main | open_biomed/models/molecule/gnn_graphcl.py |
from typing import Optional, Callable
import torch
import torch.nn.functional as F
from torch.nn.modules.sparse import Embedding
from torch_geometric.nn import MessagePassing
from torch_scatter import scatter
from torch import nn, Tensor
# from fairseq import utils
from torch_geometric.nn import global_max_pool, global_mean_pool, global_sort_pool
from torch_geometric.utils import add_self_loops
class GINConv(MessagePassing):
"""
    Extension of GIN aggregation to incorporate edge information by adding edge embeddings to the neighbor messages.
    Args:
        emb_dim (int): dimensionality of the node and edge embeddings.
        out_dim (int): output dimensionality of the MLP applied after aggregation.
        num_bond_type (int): vocabulary size of the bond-type embedding.
        num_bond_direction (int): vocabulary size of the bond-direction embedding.
        aggr (str): neighborhood aggregation scheme ("add" by default).
See https://arxiv.org/abs/1810.00826
"""
def __init__(self, emb_dim, out_dim, num_bond_type, num_bond_direction, aggr = "add", **kwargs):
kwargs.setdefault('aggr', aggr)
self.aggr = aggr
super(GINConv, self).__init__(**kwargs)
#multi-layer perceptron
self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, out_dim))
self.edge_embedding1 = torch.nn.Embedding(num_bond_type, emb_dim)
self.edge_embedding2 = torch.nn.Embedding(num_bond_direction, emb_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding1.weight.data)
torch.nn.init.xavier_uniform_(self.edge_embedding2.weight.data)
def forward(self, x, edge_index, edge_attr):
#add self loops in the edge space
edge_index, _ = add_self_loops(edge_index, num_nodes = x.size(0))
#add features corresponding to self-loop edges.
self_loop_attr = torch.zeros(x.size(0), 2)
self_loop_attr[:,0] = 4 #bond type for self-loop edge
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding1(edge_attr[:,0]) + self.edge_embedding2(edge_attr[:,1])
# return self.propagate(self.aggr, edge_index, x=x, edge_attr=edge_embeddings)
return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return self.mlp(aggr_out)
class CustomMessagePassing(MessagePassing):
def __init__(self, aggr: Optional[str] = "maxminmean", embed_dim: Optional[int] = None):
if aggr in ['maxminmean']:
super().__init__(aggr=None)
self.aggr = aggr
assert embed_dim is not None
self.aggrmlp = nn.Linear(3 * embed_dim, embed_dim)
else:
super().__init__(aggr=aggr)
def aggregate(self, inputs: Tensor, index: Tensor, ptr: Optional[Tensor],
dim_size: Optional[int]) -> Tensor:
if self.aggr in ['maxminmean']:
inputs_fp32 = inputs.float()
input_max = scatter(inputs_fp32,
index,
dim=self.node_dim,
dim_size=dim_size,
reduce='max')
input_min = scatter(inputs_fp32,
index,
dim=self.node_dim,
dim_size=dim_size,
reduce='min')
input_mean = scatter(inputs_fp32,
index,
dim=self.node_dim,
dim_size=dim_size,
reduce='mean')
aggr_out = torch.cat([input_max, input_min, input_mean], dim=-1).type_as(inputs)
aggr_out = self.aggrmlp(aggr_out)
return aggr_out
else:
return super().aggregate(inputs, index, ptr, dim_size)
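# Illustrative sketch (not in the original file): the "maxminmean" aggregation above
# concatenates per-node max, min and mean of the incoming messages before a linear
# projection; the snippet reproduces that concatenation directly with torch_scatter.
def _maxminmean_demo():
    msgs = torch.randn(6, 4)                  # 6 messages with 4 features each
    index = torch.tensor([0, 0, 1, 1, 1, 2])  # target node of each message
    parts = [scatter(msgs, index, dim=0, dim_size=3, reduce=r) for r in ("max", "min", "mean")]
    return torch.cat(parts, dim=-1).shape     # torch.Size([3, 12])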
class MulOnehotEncoder(nn.Module):
def __init__(self, embed_dim, feature_dims):
super().__init__()
self.atom_embedding_list = nn.ModuleList()
for dim in feature_dims:
emb = nn.Embedding(dim, embed_dim)
nn.init.xavier_uniform_(emb.weight.data)
self.atom_embedding_list.append(emb)
def forward(self, x):
x_embedding = 0
for i in range(x.shape[1]):
x_embedding = x_embedding + self.atom_embedding_list[i](x[:, i])
return x_embedding
class ResidualGINLayer(CustomMessagePassing):
def __init__(self,
in_dim,
emb_dim,
aggr='add',
encode_edge=False,
bond_encoder=False,
edge_feat_dim=None):
super().__init__(aggr, embed_dim=in_dim)
# self.mlp = nn.Linear(in_dim, emb_dim)
self.mlp = torch.nn.Sequential(torch.nn.Linear(in_dim, 2*in_dim), torch.nn.GELU(), torch.nn.Linear(2*in_dim, emb_dim))
self.encode_edge = encode_edge
if encode_edge:
if bond_encoder:
self.edge_encoder = MulOnehotEncoder(in_dim, [7, 7, 2])
else:
self.edge_encoder = nn.Linear(edge_feat_dim, in_dim)
def forward(self, x, edge_index, edge_attr=None):
if self.encode_edge and edge_attr is not None:
edge_emb = self.edge_encoder(edge_attr)
else:
edge_emb = None
m = self.propagate(edge_index, x=x, edge_attr=edge_emb)
h = x + m
out = self.mlp(h)
return out
def message(self, x_j, edge_attr=None):
if edge_attr is not None:
msg = x_j + edge_attr
else:
msg = x_j
return msg
class ResidualConvLayer(CustomMessagePassing):
def __init__(self,
in_dim,
emb_dim,
aggr,
encode_edge=False,
bond_encoder=False,
edge_feat_dim=None):
super().__init__(aggr, embed_dim=in_dim)
self.mlp = nn.Linear(in_dim, emb_dim)
self.encode_edge = encode_edge
if encode_edge:
if bond_encoder:
self.edge_encoder = MulOnehotEncoder(in_dim, [7, 7, 2])
else:
self.edge_encoder = nn.Linear(edge_feat_dim, in_dim)
def forward(self, x, edge_index, edge_attr=None):
if self.encode_edge and edge_attr is not None:
edge_emb = self.edge_encoder(edge_attr)
else:
edge_emb = None
m = self.propagate(edge_index, x=x, edge_attr=edge_emb)
h = x + m
out = self.mlp(h)
return out
def message(self, x_j, edge_attr=None):
if edge_attr is not None:
msg = x_j + edge_attr
else:
msg = x_j
return msg
def get_norm_layer(norm, fea_dim):
norm = norm.lower()
if norm == 'layer':
return nn.LayerNorm(fea_dim)
elif norm == "batch":
return nn.BatchNorm1d(fea_dim)
else:
raise NotImplementedError()
class AtomHead(nn.Module):
def __init__(self, emb_dim, output_dim, activation_fn, weight=None, norm=None):
super().__init__()
self.dense = nn.Linear(emb_dim, emb_dim)
# self.activation_fn = utils.get_activation_fn(activation_fn)
if activation_fn == 'gelu':
self.activation_fn = nn.GELU()
else:
self.activation_fn = nn.ReLU()
self.norm = get_norm_layer(norm, emb_dim)
if weight is None:
weight = nn.Linear(emb_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, node_features, cls_features, masked_atom=None):
if cls_features is not None:
node_features = torch.cat((node_features, cls_features), 1)
if masked_atom is not None:
node_features = node_features[masked_atom, :]
x = self.dense(node_features)
x = self.activation_fn(x)
x = self.norm(x)
x = F.linear(x, self.weight) + self.bias
return x
class DeeperGCN(nn.Module):
def __init__(self, args):
super().__init__()
self.num_layers = args['gnn_number_layer']
self.dropout = args['gnn_dropout']
self.conv_encode_edge = args['conv_encode_edge']
self.embed_dim = args['gnn_embed_dim']
self.aggr = args['gnn_aggr']
self.norm = args['gnn_norm']
self.act = args['gnn_act']
self.gcns = nn.ModuleList()
self.norms = nn.ModuleList()
# self.activation_fn = utils.get_activation_fn(getattr(args, 'gnn_activation_fn', 'relu'))
# self.activation_fn = nn.ReLU()
if self.act == 'relu':
self.activation_fn = nn.ReLU()
else:
self.activation_fn = nn.GELU()
for layer in range(self.num_layers):
self.gcns.append(
ResidualConvLayer(
self.embed_dim,
self.embed_dim,
self.aggr,
encode_edge=self.conv_encode_edge,
bond_encoder=True,
))
self.norms.append(get_norm_layer(self.norm, self.embed_dim))
self.atom_encoder = MulOnehotEncoder(self.embed_dim, [120, 4, 12, 12, 10, 6, 6, 2, 2])
if not self.conv_encode_edge:
self.bond_encoder = MulOnehotEncoder(self.embed_dim, [7, 7, 2])
self.graph_pred_linear = nn.Identity()
self.output_features = 2 * self.embed_dim
# self.atom_head = AtomHead(self.embed_dim,
# get_atom_feature_dims()[0],
# getattr(args, 'gnn_activation_fn', 'relu'),
# norm=self.norm,
# weight=self.atom_encoder.atom_embedding_list[0].weight)
def forward(self, graph, masked_tokens=None, features_only=False):
x = graph.x
edge_index = graph.edge_index
edge_attr = graph.edge_attr
batch = graph.batch
h = self.atom_encoder(x)
if self.conv_encode_edge:
edge_emb = edge_attr
else:
edge_emb = self.bond_encoder(edge_attr)
h = self.gcns[0](h, edge_index, edge_emb)
for layer in range(1, self.num_layers):
residual = h
h = self.norms[layer](h)
h = self.activation_fn(h)
h = F.dropout(h, p=self.dropout, training=self.training)
h = self.gcns[layer](h, edge_index, edge_emb)
h = h + residual
h = self.norms[0](h)
h = self.activation_fn(h)
node_fea = F.dropout(h, p=self.dropout, training=self.training)
graph_fea = self.pool(node_fea, batch)
# if not features_only:
# atom_pred = self.atom_head(node_fea, masked_tokens)
# else:
# atom_pred = None
return graph_fea, node_fea
# return (graph_fea, node_fea), atom_pred
def pool(self, h, batch):
h_fp32 = h.float()
h_max = global_max_pool(h_fp32, batch)
h_mean = global_mean_pool(h_fp32, batch)
h = torch.cat([h_max, h_mean], dim=-1).type_as(h)
h = self.graph_pred_linear(h)
return h
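# Illustrative sketch (not in the original file): DeeperGCN is configured with a plain
# dict; the keys below are the ones read in __init__ and the values mirror the example
# UniMAP configuration used elsewhere in this repository.
_EXAMPLE_DEEPERGCN_ARGS = {
    "gnn_number_layer": 3,
    "gnn_dropout": 0.1,
    "conv_encode_edge": True,
    "gnn_embed_dim": 384,
    "gnn_aggr": "maxminmean",
    "gnn_norm": "layer",
    "gnn_act": "gelu",
}
# encoder = DeeperGCN(_EXAMPLE_DEEPERGCN_ARGS)
# graph_feature, node_feature = encoder(pyg_batch)  # pooled [B, 2 * 384] and per-node [N, 384]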
| OpenBioMed-main | open_biomed/models/molecule/unimap/gcn.py |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import copy
import transformers
from transformers import RobertaTokenizer
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel
from transformers.activations import gelu
from models.base_models import MolEncoder
from models.molecule.unimap.gcn import DeeperGCN, AtomHead
from models.molecule.unimap.modeling_roberta import RobertaLayer, RobertaModel
def collate_tokens(
values,
pad_idx,
left_pad=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
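# Illustrative sketch (not in the original file): collate_tokens right-pads a list of
# variable-length id sequences into one [batch, max_len] tensor, here rounding the
# length up to a multiple of 4 via pad_to_multiple.
def _collate_tokens_demo():
    seqs = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
    return collate_tokens(seqs, pad_idx=0, pad_to_multiple=4)  # tensor([[5, 6, 7, 0], [8, 9, 0, 0]])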
class Similarity(nn.Module):
"""
Dot product or cosine similarity
"""
def __init__(self, temp):
super().__init__()
self.temp = temp
self.cos = nn.CosineSimilarity(dim=-1)
def forward(self, x, y):
return self.cos(x, y) / self.temp
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config, vocab_size_sp=None, graph_hidden_size=None, atom_vocab_size=None):
super().__init__()
first_hidden_size = config.hidden_size
if graph_hidden_size is not None:
first_hidden_size += graph_hidden_size
self.dense = nn.Linear(first_hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if vocab_size_sp is not None:
vocab_size = vocab_size_sp
elif atom_vocab_size is not None:
vocab_size = config.vocab_size + atom_vocab_size
else:
vocab_size = config.vocab_size
self.decoder = nn.Linear(config.hidden_size, vocab_size)
self.bias = nn.Parameter(torch.zeros(vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
class RobertaHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config, regression=False):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
output_dim = config.task_output_dim
self.out_proj = nn.Linear(config.hidden_size, output_dim)
self.regression = regression
def forward(self, features, only_feat=False):
x = self.dropout(features)
x = self.dense(x)
if self.regression:
x = torch.relu(x)
else:
x = torch.tanh(x)
# try diff norm: batch norm, layernorm
if only_feat:
return x
x = self.dropout(x)
x = self.out_proj(x)
return x
class Pooler(nn.Module):
"""
Parameter-free poolers to get the sentence embedding
'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
'cls_before_pooler': [CLS] representation without the original MLP pooler.
'avg': average of the last layers' hidden states at each token.
'avg_top2': average of the last two layers.
'avg_first_last': average of the first and the last layers.
"""
def __init__(self, pooler_type):
super().__init__()
self.pooler_type = pooler_type
assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type
def forward(self, attention_mask, outputs):
last_hidden = outputs.last_hidden_state
pooler_output = outputs.pooler_output
hidden_states = outputs.hidden_states
if self.pooler_type in ['cls_before_pooler', 'cls']:
return last_hidden[:, 0]
elif self.pooler_type == "avg":
return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))
elif self.pooler_type == "avg_first_last":
first_hidden = hidden_states[0]
last_hidden = hidden_states[-1]
pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
return pooled_result
elif self.pooler_type == "avg_top2":
second_last_hidden = hidden_states[-2]
last_hidden = hidden_states[-1]
pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
return pooled_result
else:
raise NotImplementedError
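# Illustrative sketch (not in the original file): the "avg" branch above is a
# mask-weighted mean over token positions; the equivalent standalone computation is
# shown here on random hidden states.
def _avg_pooling_demo():
    last_hidden = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
    pooled = (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
    return pooled.shape                 # torch.Size([2, 8])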
class UniMAP(RobertaPreTrainedModel, MolEncoder):
def __init__(self, multilingua_config, gcn_config, atom_vocab_size, fg_labels_num=85, fingerprint_len=2048, temp=0.05):
        # temp: temperature for the contrastive loss
super(UniMAP, self).__init__(multilingua_config)
self.multilingua_config = multilingua_config
self.lang_roberta = RobertaModel(multilingua_config, add_pooling_layer=True)
self.gnn = DeeperGCN(gcn_config)
# mask prediction for the lang model
# self.lm_head = RobertaLMHead(multilingua_config, atom_vocab_size=atom_vocab_size)
self.lm_head = RobertaLMHead(multilingua_config)
# + atom_vocab_size
self.lang_gcn_vocab_size = multilingua_config.vocab_size
        # atom mask prediction for the gnn, maybe another RobertaLMHead???
# self.atom_head = AtomHead(multilingua_config.hidden_size,
# atom_vocab_size, 'gelu', norm=gcn_config['gnn_norm'])
        # todo: 1. head for fingerprint regression 2. head for functional group prediction 3. head for pair matching
self.lang_pooler = Pooler(multilingua_config.pooler_type)
self.config = multilingua_config
self.gcn_config = gcn_config
self.loss_mlm = nn.CrossEntropyLoss()
# transfer from gcn embeddings to lang shape
self.gcn_embedding = nn.Linear(self.gcn_config['gnn_embed_dim'], self.config.hidden_size, bias=True)
self.dropout = nn.Dropout(multilingua_config.hidden_dropout_prob)
self.LayerNorm = torch.nn.LayerNorm(multilingua_config.hidden_size, eps=1e-12)
# contrastive head:
# if smiles_graph: 0, 1; smiles_iupac_graph: 0, 1, 2; 0 means pair match
contrastive_class_num = multilingua_config.contrastive_class_num
multilingua_config.task_output_dim = contrastive_class_num
self.contrastive_head = RobertaHead(multilingua_config)
self.contrastive_loss = nn.CrossEntropyLoss()
# self.contrastive_head = nn.Linear(multilingua_config.hidden_size, contrastive_classs_num)
        # functional group prediction head:
multilingua_config.task_output_dim = fg_labels_num
self.fg_task_head = RobertaHead(multilingua_config)
self.fg_task_loss = nn.BCEWithLogitsLoss(reduction="mean")
        # fingerprint regression
multilingua_config.task_output_dim = fingerprint_len
self.fingerprint_head = RobertaHead(multilingua_config, regression=True)
self.fingerprint_loss = nn.MSELoss()
# self.fingerprint_loss = nn.SmoothL1Loss()
# for output token group alignment
self.lang_group_layer = RobertaLayer(multilingua_config)
self.sim = Similarity(temp=temp)
self.ctr_loss = nn.CrossEntropyLoss()
self.output_dim = self.config.hidden_size
def forward(self, lingua=None, graph=None,):
return_dict = {}
if lingua['input_ids'].device != self.device:
lingua['input_ids'] = lingua['input_ids'].to(self.device)
if 'mlm_input_ids' in lingua:
lingua['mlm_input_ids'] = lingua['mlm_input_ids'].to(self.device)
lingua['mlm_labels'] = lingua['mlm_labels'].to(self.device)
lingua['attention_mask'] = lingua['attention_mask'].to(self.device)
graph.to(self.device)
gcn_output = self.gnn(graph)
        # concatenate graph atom embeddings and language embeddings
gcn_embedding_output = self.gcn_embedding(gcn_output[1])
gcn_embedding_output = self.LayerNorm(gcn_embedding_output)
gcn_embedding_output = self.dropout(gcn_embedding_output)
assert 'mlm_input_ids' not in lingua
graph_attention_mask = []
batch_size = lingua['input_ids'].shape[0]
batch_idx = graph.batch
gcn_embedding_lst = []
for bs in range(batch_size):
gcn_embedding_lst.append(gcn_embedding_output[batch_idx == bs])
atom_num = (batch_idx == bs).sum().item()
graph_attention_mask.append(torch.tensor([1 for _ in range(atom_num)]).to(self.device))
graph_attention_mask = collate_tokens(graph_attention_mask, pad_idx=0, pad_to_multiple=8)
graph_attention_mask = graph_attention_mask.to(torch.bool)
lang_gcn_outputs, lang_gcn_attention_mask = self.lang_roberta(
lingua['input_ids'],
attention_mask=lingua['attention_mask'],
# token_type_ids=lingua['token_type_ids'],
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=True if self.multilingua_config.pooler_type in ['avg_top2', 'avg_first_last'] else False,
return_dict=True,
# graph_input = gcn_embedding_output,
graph_input = gcn_embedding_lst,
graph_batch = graph.batch,
graph_max_seq_size = self.gcn_config['graph_max_seq_size'],
gnn_mask_labels = None,
graph_attention_mask = graph_attention_mask,
)
last_hidden_embedding = lang_gcn_outputs['last_hidden_state']
graph_batch = graph.batch
lang_input_dim = lingua['input_ids'].shape[1]
lang_gcn_pooler_output = self.lang_pooler(lang_gcn_attention_mask, lang_gcn_outputs)
if last_hidden_embedding.shape[1] < self.multilingua_config.max_length + self.gcn_config['graph_max_seq_size']:
bs = last_hidden_embedding.shape[0]
hidden_size = last_hidden_embedding.shape[2]
pad_len = self.multilingua_config.max_length + self.gcn_config['graph_max_seq_size'] - last_hidden_embedding.shape[1]
last_hidden_embedding = torch.cat([
last_hidden_embedding,
torch.zeros(bs, pad_len, hidden_size).to(last_hidden_embedding)
], dim=1)
lang_gcn_attention_mask = torch.cat([
lang_gcn_attention_mask,
torch.zeros(bs, pad_len).to(lang_gcn_attention_mask)
], dim=1)
return lang_gcn_pooler_output, last_hidden_embedding, lang_gcn_attention_mask
"""
graph_attention_start = lingua['attention_mask'].shape[1]
out_hidden_embedding = last_hidden_embedding[:, graph_attention_start:, :].contiguous()
out_attention_mask = lang_gcn_attention_mask[:, graph_attention_start:].contiguous()
if out_hidden_embedding.shape[1] < self.gcn_config['graph_max_seq_size']:
bs = out_hidden_embedding.shape[0]
hidden_size = out_hidden_embedding.shape[2]
pad_len = self.gcn_config['graph_max_seq_size'] - out_hidden_embedding.shape[1]
out_hidden_embedding = torch.cat([
out_hidden_embedding,
torch.zeros(bs, pad_len, hidden_size).to(out_hidden_embedding)
], dim=1)
out_attention_mask = torch.cat([
out_attention_mask,
torch.zeros(bs, pad_len).to(out_attention_mask)
], dim=1)
return lang_gcn_pooler_output, out_hidden_embedding, out_attention_mask
"""
def encode_mol(self, mol):
h, _, _ = self.forward(mol["smiles"], mol["graph"])
return h
if __name__ == "__main__":
config = {
"data": {
"mol": {
"modality": ["structure"],
"featurizer": {
"structure": {
"name": "MultiScale",
"scales": ["smiles", "graph"],
"smiles": {
"name": "transformer",
"transformer_type": "unimap",
"max_length": 128,
"model_name_or_path": "../assets/unimap/smiles_tokenizer/"
},
"graph": {
"name": "unimap"
}
}
}
}
},
"network": {
"atom_vocab_size": 10535,
"roberta": {
"vocab_size": 2426,
"max_position_embeddings": 515,
"type_vocab_size": 1,
"pooler_type": "avg"
},
"gnn": {
"gnn_number_layer": 3,
"gnn_dropout": 0.1,
"conv_encode_edge": True,
"gnn_embed_dim": 384,
"gnn_aggr": "maxminmean",
"gnn_norm": "layer",
"gnn_act": "gelu",
"atom_vocab_size": 10535,
"graph_max_seq_size": 128
},
}
}
from transformers import RobertaConfig
from feature.mol_featurizer import MolMultiScaleFeaturizer
from utils.collators import MolCollator
roberta_config = RobertaConfig(
vocab_size=config["network"]["roberta"]["vocab_size"],
max_position_embeddings=config["network"]["roberta"]["max_position_embeddings"],
type_vocab_size=config["network"]["roberta"]["type_vocab_size"],
contrastive_class_num=2,
pooler_type=config["network"]["roberta"]["pooler_type"]
)
featurizer = MolMultiScaleFeaturizer(config["data"]["mol"]["featurizer"]["structure"])
collator = MolCollator(config["data"]["mol"])
model = UniMAP(roberta_config, config["network"]["gnn"], config["network"]["atom_vocab_size"])
model.load_state_dict(torch.load("/share/project/task_3/PUBLIC/Shikun/train_1kw_gcn_3_8gpu_check_frag_final/pytorch_model.bin", map_location="cpu"), strict=False)
model = model.to("cuda:0")
model.eval()
smi1 = "O=C(Nc1ccnc(NC(=O)C2CC2)c1)c1c(Cl)cccc1Cl"
mol1 = featurizer(smi1)
smi2 = "CC(C)(C)OC(=O)N[C@H]1CC[C@H](n2nnc3cnc4[nH]ccc4c32)CC1"
mol2 = featurizer(smi2)
mol = collator([mol1, mol2])
print(mol["smiles"], mol["graph"])
print(model(mol["smiles"], mol["graph"]))
| OpenBioMed-main | open_biomed/models/molecule/unimap/unimap.py |
from models.molecule.unimap.unimap import UniMAP | OpenBioMed-main | open_biomed/models/molecule/unimap/__init__.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import math
from unicodedata import decimal
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN, gelu
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.roberta.configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
        # If token_type_ids is not provided, fall back to the all-zero buffer registered in the constructor
        # (the usual case when it is auto-generated). The registered buffer lets users trace the model without
        # passing token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
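# Illustrative note: position ids in RoBERTa are offset by padding_idx, so with padding_idx=1 and a
# sequence of 4 non-padding embeddings, create_position_ids_from_inputs_embeds yields [2, 3, 4, 5];
# positions derived from input_ids keep the value padding_idx at padded slots (see
# create_position_ids_from_input_ids at the bottom of this file).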
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in RobertaModel's forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
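# Shape walk-through (illustrative): hidden_states (batch, seq_len, hidden_size) is projected to
# query/key/value and reshaped by transpose_for_scores to (batch, num_heads, seq_len, head_size);
# attention_scores = Q @ K^T has shape (batch, num_heads, seq_len, seq_len) and is scaled by
# sqrt(head_size) before the softmax; the context layer is reshaped back to (batch, seq_len, all_head_size).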
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RobertaSelfAttention(config)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RobertaAttention(config)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
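# Note: apply_chunking_to_forward splits attention_output along the sequence dimension (seq_len_dim == 1)
# into chunks of config.chunk_size_feed_forward and runs feed_forward_chunk on each chunk to trade compute
# for memory; with chunk_size_feed_forward == 0 the whole tensor is processed in a single call.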
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
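# Note: the pooler maps (batch, seq_len, hidden_size) -> (batch, hidden_size) by taking the hidden state
# of the first token (<s>) and passing it through a dense layer with a tanh activation.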
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
ROBERTA_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def _concat_graph_embeddings(self, embedding_output, gnn_embedding, gnn_batch, attention_mask, pad_embed, gnn_mask_labels, graph_max_seq_size=56, only_graphs=None, only_smiles=None):
        # group gnn_embeddings according to batch
# pad_embed_size: 768
batch_size = embedding_output.shape[0]
emb_size = pad_embed.shape[0]
gnn_pad_embedding = pad_embed.expand(batch_size, graph_max_seq_size, emb_size).clone()
gnn_mask_attention = torch.full((batch_size, graph_max_seq_size), 0, device=gnn_pad_embedding.device)
graph_mm_labels = torch.full((batch_size, graph_max_seq_size), -100, device=gnn_pad_embedding.device)
bidx = 0
for i in range(batch_size):
atom_num = (gnn_batch == i).sum()
pad_size = min(atom_num, graph_max_seq_size)
gnn_pad_embedding[i][:pad_size] = gnn_embedding[bidx: bidx+pad_size]
gnn_mask_attention[i][:pad_size] = 1 # no sep token
if gnn_mask_labels is not None:
g_mm_labels = gnn_mask_labels[bidx: bidx+pad_size].clone()
g_mm_labels[g_mm_labels != -100] += self.config.vocab_size
graph_mm_labels[i][:pad_size] = g_mm_labels # shift label
bidx += atom_num
if only_graphs:
graph_mm_labels[graph_mm_labels != -100] -= self.config.vocab_size # only the graph as input
return gnn_pad_embedding, gnn_mask_attention, graph_mm_labels
if only_smiles:
return embedding_output, attention_mask, graph_mm_labels
concat_embedding_output = torch.cat((embedding_output, gnn_pad_embedding), 1)
concat_attention_mask = torch.cat((attention_mask, gnn_mask_attention), 1)
return concat_embedding_output, concat_attention_mask, graph_mm_labels
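    # Sketch of the data flow above: gnn_embedding holds per-atom GNN features for the whole batch,
    # (total_atoms, emb_size), and gnn_batch maps each atom to its molecule. For molecule i, up to
    # graph_max_seq_size atom embeddings are copied into a pad_embed-initialized tensor of shape
    # (batch_size, graph_max_seq_size, emb_size), a matching attention mask is built, and any masked-atom
    # labels are shifted by vocab_size so text tokens and graph "tokens" share one label space. Unless
    # only_graphs/only_smiles is set, the padded graph sequence is concatenated after the SMILES token
    # embeddings along the sequence dimension.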
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
graph_input = None,
graph_batch = None,
graph_max_seq_size = 128,
gnn_mask_labels = None,
graph_attention_mask = None,
only_graphs = None,
only_smiles = None
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
# if have graph input: concat the graph and smiles
if graph_input is not None:
padding_vocab_idx = 1
            padding_embedding = self.embeddings.word_embeddings(torch.tensor(padding_vocab_idx, device=embedding_output.device))
if graph_attention_mask.shape[1] > graph_max_seq_size:
graph_attention_mask = graph_attention_mask[:, :graph_max_seq_size]
graph_pad_emb = torch.zeros((embedding_output.shape[0], graph_attention_mask.shape[1], embedding_output.shape[2]), dtype=embedding_output.dtype, device=embedding_output.device)
            for idx, graph_ele_emb in enumerate(graph_input):
                cur = graph_ele_emb
if cur.shape[0] > graph_max_seq_size:
cur = cur[:graph_max_seq_size]
graph_pad_emb[idx][graph_attention_mask[idx]] = cur
padding_start_idx = graph_attention_mask[idx].sum().item()
graph_pad_emb[idx][padding_start_idx:] = padding_embedding
# concat embedding and attention
graph_attention_mask_int = graph_attention_mask.to(attention_mask.dtype)
attention_mask = torch.cat((attention_mask, graph_attention_mask_int), 1)
embedding_output = torch.cat((embedding_output, graph_pad_emb), 1)
input_shape = attention_mask.size()
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
), attention_mask
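    # Note: unlike the upstream HuggingFace implementation, this forward returns a tuple
    # (BaseModelOutputWithPoolingAndCrossAttentions, attention_mask) when return_dict=True; when
    # graph_input is given, the returned attention_mask is the concatenated text+graph mask, so
    # downstream heads can align labels with the extended sequence.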
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, an extra dimension may have been added when splitting the batch; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
        input_ids: torch.Tensor
        padding_idx: int
        past_key_values_length: int
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
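# Worked example: with padding_idx=1 and input_ids [[0, 31414, 232, 2, 1, 1]], the mask is
# [1, 1, 1, 1, 0, 0], the cumulative sum is [1, 2, 3, 4, 4, 4], and after masking and adding padding_idx
# the position ids are [[2, 3, 4, 5, 1, 1]] -- padding tokens keep position padding_idx.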
| OpenBioMed-main | open_biomed/models/molecule/unimap/modeling_roberta.py |
OpenBioMed-main | open_biomed/feature/__init__.py |
|
from abc import ABC, abstractmethod
from transformers import BertModel, BertTokenizer, T5Model, T5Tokenizer, GPT2Model, GPT2Tokenizer
from feature.base_featurizer import BaseFeaturizer
from utils import ToDevice
# Warning: it seems that the results of AutoTokenizer and the specified tokenizer are different
name2tokenizer = {
"bert": BertTokenizer,
"t5": T5Tokenizer,
"gpt2": GPT2Tokenizer
}
name2model = {
"bert": BertModel,
"t5": T5Model,
"gpt2": GPT2Model
}
class TextFeaturizer(BaseFeaturizer, ABC):
def __init__(self):
        super(TextFeaturizer, self).__init__()
self.transform = None
def set_transform(self, transform):
self.transform = transform
@abstractmethod
def __call__(self, data):
raise NotImplementedError
class TextTransformerTokFeaturizer(TextFeaturizer):
def __init__(self, config):
super(TextTransformerTokFeaturizer, self).__init__()
self.max_length = config["max_length"]
self.tokenizer = name2tokenizer[config["transformer_type"]].from_pretrained(config["model_name_or_path"], model_max_length=self.max_length)
if config["transformer_type"] in ["gpt2"]:
self.tokenizer.pad_token = self.tokenizer.eos_token
def __call__(self, data):
if self.transform is not None:
data = self.transform[data]
return self.tokenizer(data, truncation=True, padding=True)
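# Illustrative usage sketch (the config keys mirror those read in __init__; the checkpoint name is only
# an assumed example):
#   featurizer = TextTransformerTokFeaturizer({
#       "transformer_type": "bert",
#       "max_length": 512,
#       "model_name_or_path": "bert-base-uncased",  # assumed checkpoint
#   })
#   encoded = featurizer("aspirin is a nonsteroidal anti-inflammatory drug")
#   # `encoded` holds the tokenizer output (input_ids, attention_mask, ...)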
class TextTransformerSentFeaturizer(TextFeaturizer):
def __init__(self, config):
super(TextTransformerSentFeaturizer, self).__init__()
self.max_length = config["max_length"]
self.min_sentence_length = config["min_sentence_length"]
self.tokenizer = name2tokenizer[config["transformer_type"]].from_pretrained(config["model_name_or_path"], model_max_length=self.max_length)
if config["transformer_type"] in ["gpt2"]:
self.tokenizer.pad_token = self.tokenizer.eos_token
def __call__(self, data):
if self.transform is not None:
data = self.transform[data]
sents = []
for sent in data.split("."):
            if len(sent.split(" ")) < self.min_sentence_length:
continue
sents.append(self.tokenizer(sent, truncation=True, padding=True))
return sents
class TextTransformerEncFeaturizer(TextFeaturizer):
def __init__(self, config):
super(TextTransformerEncFeaturizer, self).__init__()
self.device = config["device"]
self.max_length = config["max_length"]
self.tokenizer = name2tokenizer[config["transformer_type"]].from_pretrained(config["model_name_or_path"], model_max_length=self.max_length)
if config["transformer_type"] in ["gpt2"]:
self.tokenizer.pad_token = self.tokenizer.eos_token
self.encoder = name2model[config["transformer_type"]].from_pretrained(config["model_name_or_path"])
self.encoder = self.encoder.to(self.device)
def __call__(self, data):
if self.transform is not None:
data = self.transform[data]
data = self.tokenizer(data, truncation=True, padding=True, return_tensors='pt')
data = ToDevice(data, self.device)
return self.encoder(**data)["pooler_output"].detach().cpu()
SUPPORTED_TEXT_FEATURIZER = {
"TransformerTokenizer": TextTransformerTokFeaturizer,
"TransformerSentenceTokenizer": TextTransformerSentFeaturizer,
"TransformerEncoder": TextTransformerEncFeaturizer,
} | OpenBioMed-main | open_biomed/feature/text_featurizer.py |
import logging
logger = logging.getLogger(__name__)
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
import copy
import json
import numpy as np
import pickle
import torch
import rdkit.Chem as Chem
from rdkit.Chem import DataStructs, rdmolops
from rdkit.Chem import AllChem, Descriptors
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
from sklearn.preprocessing import OneHotEncoder
from torch_geometric.data import Data
from transformers import BertTokenizer, T5Tokenizer
from feature.base_featurizer import BaseFeaturizer
from feature.kg_featurizer import SUPPORTED_KG_FEATURIZER
from feature.text_featurizer import SUPPORTED_TEXT_FEATURIZER
from utils import to_clu_sparse, SmilesTokenizer
def one_hot_encoding(x, allowable_set, encode_unknown=False):
"""One-hot encoding.
"""
if encode_unknown and (allowable_set[-1] is not None):
allowable_set.append(None)
if encode_unknown and (x not in allowable_set):
x = None
return list(map(lambda s: x == s, allowable_set))
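# Example: one_hot_encoding('N', ['C', 'N', 'O']) == [False, True, False]; with
# encode_unknown=True, an out-of-set symbol maps to an extra trailing slot, e.g.
# one_hot_encoding('Se', ['C', 'N', 'O'], True) == [False, False, False, True].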
def safe_index(l, e):
"""
Return index of element e in list l. If e is not present, return the last index
"""
try:
return l.index(e)
    except ValueError:
return len(l) - 1
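# Example: safe_index([0, 1, 2, 'misc'], 1) == 1, while an unseen value falls back to
# the last ('misc') slot, e.g. safe_index([0, 1, 2, 'misc'], 42) == 3.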
# Atom featurization: Borrowed from dgllife.utils.featurizers.py
def atom_type_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the type of an atom.
"""
if allowable_set is None:
allowable_set = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca',
'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn',
'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au',
'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb']
return one_hot_encoding(atom.GetSymbol(), allowable_set, encode_unknown)
def atom_degree_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the degree of an atom.
"""
if allowable_set is None:
allowable_set = list(range(11))
return one_hot_encoding(atom.GetDegree(), allowable_set, encode_unknown)
def atom_implicit_valence_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the implicit valence of an atom.
"""
if allowable_set is None:
allowable_set = list(range(7))
return one_hot_encoding(atom.GetImplicitValence(), allowable_set, encode_unknown)
def atom_formal_charge(atom):
"""Get formal charge for an atom.
"""
return [atom.GetFormalCharge()]
def atom_num_radical_electrons(atom):
return [atom.GetNumRadicalElectrons()]
def atom_hybridization_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the hybridization of an atom.
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2]
return one_hot_encoding(atom.GetHybridization(), allowable_set, encode_unknown)
def atom_is_aromatic(atom):
"""Get whether the atom is aromatic.
"""
return [atom.GetIsAromatic()]
def atom_total_num_H_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the total number of Hs of an atom.
"""
if allowable_set is None:
allowable_set = list(range(5))
return one_hot_encoding(atom.GetTotalNumHs(), allowable_set, encode_unknown)
def atom_is_in_ring(atom):
"""Get whether the atom is in ring.
"""
return [atom.IsInRing()]
def atom_chirality_type_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the chirality type of an atom.
"""
if not atom.HasProp('_CIPCode'):
return [False, False]
if allowable_set is None:
allowable_set = ['R', 'S']
return one_hot_encoding(atom.GetProp('_CIPCode'), allowable_set, encode_unknown)
# Atom featurization: Borrowed from dgllife.utils.featurizers.py
def bond_type_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for the type of a bond.
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
return one_hot_encoding(bond.GetBondType(), allowable_set, encode_unknown)
class MolOneHotFeaturizer(BaseFeaturizer):
smiles_char = ['?', '#', '%', ')', '(', '+', '-', '.', '1', '0', '3', '2', '5', '4',
'7', '6', '9', '8', '=', 'A', 'C', 'B', 'E', 'D', 'G', 'F', 'I',
'H', 'K', 'M', 'L', 'O', 'N', 'P', 'S', 'R', 'U', 'T', 'W', 'V',
'Y', '[', 'Z', ']', '_', 'a', 'c', 'b', 'e', 'd', 'g', 'f', 'i',
'h', 'm', 'l', 'o', 'n', 's', 'r', 'u', 't', 'y']
def __init__(self, config):
super(MolOneHotFeaturizer, self).__init__()
self.max_len = config["max_len"]
self.enc = OneHotEncoder().fit(np.array(self.smiles_char).reshape(-1, 1))
def __call__(self, data):
temp = [c if c in self.smiles_char else '?' for c in data]
if len(temp) < self.max_len:
temp = temp + ['?'] * (self.max_len - len(temp))
else:
temp = temp[:self.max_len]
return torch.tensor(self.enc.transform(np.array(temp).reshape(-1, 1)).toarray().T)
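# Illustrative usage sketch (the max_len value is hypothetical):
#   featurizer = MolOneHotFeaturizer({"max_len": 100})
#   feat = featurizer("CCO")
# feat is a (63, 100) tensor: one row per character in smiles_char and one column per
# (padded or truncated) SMILES position.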
class MolTransformerTokFeaturizer(BaseFeaturizer):
name2tokenizer = {
"bert": BertTokenizer,
"t5": T5Tokenizer,
"unimap": SmilesTokenizer
}
def __init__(self, config):
super(MolTransformerTokFeaturizer, self).__init__()
self.max_length = config["max_length"]
self.tokenizer = self.name2tokenizer[config["transformer_type"]].from_pretrained(config["model_name_or_path"], model_max_length=self.max_length)
def __call__(self, data):
result = self.tokenizer(data, max_length=self.max_length, padding=True, truncation=True)
return result
class MolBPEFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(MolBPEFeaturizer, self).__init__()
from subword_nmt.apply_bpe import BPE, read_vocabulary
import codecs
self.bpe = BPE(
codecs.open(config["code_name"], encoding="utf8"),
vocab=read_vocabulary(codecs.open(config["vocabulary"], encoding="utf8"), config["vocabulary_threshold"]),
separator="~~",
)
self.vocabs = {}
lines = open(config["vocabulary"], "r").readlines()
for line in lines:
wd = line.strip('\n').split(' ')
self.vocabs[wd[0]] = len(self.vocabs)
self.max_length = config["max_length"]
def _preprocess_smiles(self, data):
data = data.replace('(', '')
data = data.replace(')', '')
for i in range(10):
item = str(i)
data = data.replace(item, '')
return data
def __call__(self, data):
data = self._preprocess_smiles(data)
bpe_result = self.bpe.process_line(data).split(" ")
result = [self.vocabs[x] if x in self.vocabs else len(self.vocabs) for x in bpe_result]
if len(result) > self.max_length - 2:
result = result[:self.max_length - 2]
        # The constants below are model-specific: the sequence is wrapped with special
        # token ids 102/103 and the BPE ids are shifted by a fixed offset of 30700,
        # presumably so they do not collide with the base vocabulary of the paired encoder.
        input_ids = torch.LongTensor([102] + [i + 30700 for i in result] + [103] + [0] * (self.max_length - 2 - len(result)))
        attn_mask = torch.LongTensor([1] * (len(result) + 2) + [0] * (self.max_length - len(result) - 2))
token_type_ids = torch.zeros_like(attn_mask).long()
return {
"input_ids": input_ids,
"attention_mask": attn_mask,
"token_type_ids": token_type_ids
}
class MolFPFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(MolFPFeaturizer, self).__init__()
self.config = config
def __call__(self, data):
mol = Chem.MolFromSmiles(data)
if mol is not None:
fp = Chem.RDKFingerprint(mol, fpSize=self.config["fpsize"])
np_fp = np.zeros(self.config["fpsize"])
DataStructs.ConvertToNumpyArray(fp, np_fp)
if self.config["return_type"] == "pt":
return torch.tensor(np_fp)
else:
return np_fp
else:
return None
class MolTGSAFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(MolTGSAFeaturizer, self).__init__()
self.config = config
def atom_feature(self, atom):
"""
Converts rdkit atom object to feature list of indices
        :param atom: rdkit atom object
:return: list
8 features are canonical, 2 features are from OGB
"""
featurizer_funcs = [
atom_type_one_hot,
atom_degree_one_hot,
atom_implicit_valence_one_hot,
atom_formal_charge,
atom_num_radical_electrons,
atom_hybridization_one_hot,
atom_is_aromatic,
atom_total_num_H_one_hot,
atom_is_in_ring,
atom_chirality_type_one_hot,
]
atom_feature = np.concatenate([func(atom) for func in featurizer_funcs], axis=0)
return atom_feature
def bond_feature(self, bond):
"""
Converts rdkit bond object to feature list of indices
        :param bond: rdkit bond object
:return: list
"""
featurizer_funcs = [bond_type_one_hot]
bond_feature = np.concatenate([func(bond) for func in featurizer_funcs], axis=0)
return bond_feature
    def __call__(self, data):
        """
        Converts a SMILES string to a graph Data object without removing salts
        :param data: SMILES string (str)
        :return: pyg Data object
        """
        mol = Chem.MolFromSmiles(data)
# atoms
atom_features_list = []
for atom in mol.GetAtoms():
atom_features_list.append(self.atom_feature(atom))
x = np.array(atom_features_list, dtype=np.int64)
# bonds
        num_bond_features = 4  # 4-dimensional one-hot of the bond type (single/double/triple/aromatic)
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = self.bond_feature(bond)
# add edges in both directions
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
edge_index = np.array(edges_list, dtype=np.int64).T
edge_attr = np.array(edge_features_list, dtype=np.int64)
else: # mol has no bonds
edge_index = np.empty((2, 0), dtype=np.int64)
edge_attr = np.empty((0, num_bond_features), dtype=np.int64)
graph = Data(
x=torch.tensor(x, dtype=torch.float),
edge_index=torch.tensor(edge_index, dtype=torch.long),
edge_attr=torch.tensor(edge_attr, dtype=torch.float),
)
return graph
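# Usage sketch: MolTGSAFeaturizer({})("CCO") returns a torch_geometric Data object whose
# node features concatenate the ten atom descriptors above (77 columns with the default
# allowable sets) and whose edge_index lists every bond in both directions.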
class MolGraphFeaturizer(BaseFeaturizer):
allowable_features = {
'possible_atomic_num_list': list(range(1, 119)) + ['misc'],
'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],
'possible_chirality_list': [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER
],
'possible_hybridization_list': [
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2,
Chem.rdchem.HybridizationType.UNSPECIFIED,
'misc'
],
'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6],
'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
'possible_is_aromatic_list': [False, True],
'possible_is_in_ring_list': [False, True],
'possible_bond_type_list': [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC,
'misc'
],
'possible_bond_dirs': [ # only for double bond stereo information
Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT
],
'possible_bond_stereo_list': [
Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE,
Chem.rdchem.BondStereo.STEREOCIS,
Chem.rdchem.BondStereo.STEREOTRANS,
Chem.rdchem.BondStereo.STEREOANY,
],
'possible_is_conjugated_list': [False, True]
}
def __init__(self, config):
super(MolGraphFeaturizer, self).__init__()
self.config = config
if self.config["name"] == "unimap":
self.allowable_features["possible_atomic_num_list"] = self.allowable_features["possible_atomic_num_list"][:-1] + ['[MASK]', 'misc']
self.allowable_features["possible_bond_type_list"] = self.allowable_features["possible_bond_type_list"][:-1] + ['[MASK]', '[SELF]', 'misc']
self.allowable_features["possible_bond_stereo_list"] = self.allowable_features["possible_bond_stereo_list"] + ['[MASK]']
self.allowable_features["possible_hybridization_list"] = self.allowable_features["possible_hybridization_list"][:-2] + ['misc']
def __call__(self, data):
if isinstance(data, str):
mol = Chem.MolFromSmiles(data)
# mol = AllChem.MolFromSmiles(data)
else:
mol = data
# atoms
atom_features_list = []
for atom in mol.GetAtoms():
if self.config["name"] in ["ogb", "unimap"]:
atom_feature = [
safe_index(self.allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
self.allowable_features['possible_chirality_list'].index(atom.GetChiralTag()),
safe_index(self.allowable_features['possible_degree_list'], atom.GetTotalDegree()),
safe_index(self.allowable_features['possible_formal_charge_list'], atom.GetFormalCharge()),
safe_index(self.allowable_features['possible_numH_list'], atom.GetTotalNumHs()),
safe_index(self.allowable_features['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
safe_index(self.allowable_features['possible_hybridization_list'], atom.GetHybridization()),
self.allowable_features['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
self.allowable_features['possible_is_in_ring_list'].index(atom.IsInRing()),
]
else:
atom_feature = [
safe_index(self.allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
self.allowable_features['possible_chirality_list'].index(atom.GetChiralTag())
]
atom_features_list.append(atom_feature)
x = torch.tensor(np.array(atom_features_list), dtype=torch.long)
# bonds
if len(mol.GetBonds()) <= 0: # mol has no bonds
num_bond_features = 3 if self.config["name"] in ["ogb", "unimap"] else 2
edge_index = torch.empty((2, 0), dtype=torch.long)
edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)
else: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
if self.config["name"] in ["ogb", "unimap"]:
edge_feature = [
safe_index(self.allowable_features['possible_bond_type_list'], bond.GetBondType()),
self.allowable_features['possible_bond_stereo_list'].index(bond.GetStereo()),
self.allowable_features['possible_is_conjugated_list'].index(bond.GetIsConjugated()),
]
else:
edge_feature = [
self.allowable_features['possible_bond_type_list'].index(bond.GetBondType()),
self.allowable_features['possible_bond_dirs'].index(bond.GetBondDir())
]
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = torch.tensor(np.array(edge_features_list), dtype=torch.long)
data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
return data
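# Usage sketch (mirroring the unit test at the bottom of this file):
#   data = MolGraphFeaturizer({"name": "ogb"})("CCC=O")
# data.x carries 9 integer features per atom and data.edge_attr carries 3 integer features
# per directed edge; with {"name": "BaseGNN"} only 2 atom and 2 bond features are kept.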
class MolGGNNFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(MolGGNNFeaturizer, self).__init__()
self.max_n_atoms = config["max_n_atoms"]
self.atomic_num_list = config["atomic_num_list"]
self.bond_type_list = [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
'misc'
]
def __call__(self, data):
if isinstance(data, str):
mol = Chem.MolFromSmiles(data)
else:
mol = data
Chem.Kekulize(mol)
x = self._construct_atomic_number_array(mol, self.max_n_atoms)
adj = self._construct_adj_matrix(mol, self.max_n_atoms)
return x, adj, self._rescale_adj(adj)
def _construct_atomic_number_array(self, mol, out_size=-1):
"""Returns atomic numbers of atoms consisting a molecule.
Args:
mol (rdkit.Chem.Mol): Input molecule.
out_size (int): The size of returned array.
If this option is negative, it does not take any effect.
Otherwise, it must be larger than the number of atoms
in the input molecules. In that case, the tail of
the array is padded with zeros.
        Returns:
            torch.Tensor: a one-hot tensor of shape (number of atoms or out_size,
                len(atomic_num_list)); padded rows are marked in the last column.
"""
atom_list = [a.GetAtomicNum() for a in mol.GetAtoms()]
if len(atom_list) > self.max_n_atoms:
atom_list = atom_list[:self.max_n_atoms]
if out_size < 0:
result = torch.zeros(len(atom_list), len(self.atomic_num_list))
else:
result = torch.zeros(out_size, len(self.atomic_num_list))
for i, atom in enumerate(atom_list):
result[i, safe_index(self.atomic_num_list, atom)] = 1
for i in range(len(atom_list), self.max_n_atoms):
result[i, -1] = 1
return result
def _construct_adj_matrix(self, mol, out_size=-1, self_connection=True):
"""Returns the adjacent matrix of the given molecule.
This function returns the adjacent matrix of the given molecule.
Contrary to the specification of
:func:`rdkit.Chem.rdmolops.GetAdjacencyMatrix`,
The diagonal entries of the returned matrix are all-one.
Args:
mol (rdkit.Chem.Mol): Input molecule.
out_size (int): The size of the returned matrix.
If this option is negative, it does not take any effect.
                Otherwise, it must be larger than the number of atoms
                in the input molecule. In that case, the adjacency
                tensor is expanded and zeros are padded to the right
                columns and bottom rows.
self_connection (bool): Add self connection or not.
If True, diagonal element of adjacency matrix is filled with 1.
        Returns:
            adj (torch.Tensor): The adjacency tensor of the input molecule with
                shape (4, atoms, atoms): the first three channels encode
                single/double/triple bonds and the last channel marks unbonded pairs.
                If ``out_size`` is non-negative, the returned size is equal to that
                value; otherwise, it is equal to the number of atoms in the molecule.
"""
if out_size < 0:
adj = torch.zeros(4, mol.GetNumAtoms(), mol.GetNumAtoms())
else:
adj = torch.zeros(4, out_size, out_size)
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
adj[safe_index(self.bond_type_list, bond.GetBondType()), i, j] = 1
adj[safe_index(self.bond_type_list, bond.GetBondType()), j, i] = 1
adj[3] = 1 - torch.sum(adj[:3], dim=0)
return adj
def _rescale_adj(self, adj):
        # The previous paper did not use rescale_adj; in their implementation the
        # normalization sum is num_neighbors = F.sum(adj, axis=(1, 2)), whereas the
        # normalization term used here is different.
        # adj has shape (4, max_n_atoms, max_n_atoms): channels 0-2 are single/double/
        # triple bonds and channel 3 marks atom pairs with no bond (the negative mask
        # of the sum of the previous channels), i.e. 1 - adj[:3].sum(dim=0) == adj[3].
        # The first three channels have an all-zero diagonal; the last one does not
        # (analogous to A_prime = A + sp.eye(A.shape[0])).
num_neighbors = adj.sum(dim=(0, 1)).float()
num_neighbors_inv = num_neighbors.pow(-1)
num_neighbors_inv[num_neighbors_inv == float('inf')] = 0
adj_prime = num_neighbors_inv[None, None, :] * adj
return adj_prime
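# Shape sketch: for a config with max_n_atoms=N and an atomic_num_list of length V,
# __call__ returns (x, adj, rescaled_adj): x is an (N, V) one-hot matrix (virtual
# padding atoms marked in the last column), adj is a (4, N, N) bond-type tensor, and
# rescaled_adj divides each column of adj by that column's sum over channels and rows.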
class MolMGNNFeaturizer(BaseFeaturizer):
allowable_atom_list = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na','Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb','Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H','Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr','Cr', 'Pt', 'Hg', 'Pb', 'Unknown']
allowable_degree_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
allowable_num_hs_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
allowable_implicit_valence_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
allowable_hybridization_list = [
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2,
'other'
]
allowable_cip_code_list = ['R', 'S']
def __init__(self, config):
super(MolMGNNFeaturizer, self).__init__()
self.config = config
def __call__(self, data):
if isinstance(data, str):
mol = Chem.MolFromSmiles(data)
else:
mol = data
atom_features_list = []
for atom in mol.GetAtoms():
encoding = self.one_of_k_encoding_unk(atom.GetSymbol(), self.allowable_atom_list)
encoding += self.one_of_k_encoding(atom.GetDegree(), self.allowable_degree_list)
encoding += self.one_of_k_encoding_unk(atom.GetTotalNumHs(), self.allowable_num_hs_list)
encoding += self.one_of_k_encoding_unk(atom.GetImplicitValence(), self.allowable_implicit_valence_list)
encoding += self.one_of_k_encoding_unk(atom.GetHybridization(), self.allowable_hybridization_list)
encoding += [atom.GetIsAromatic()]
try:
                encoding += self.one_of_k_encoding_unk(atom.GetProp("_CIPCode"), self.allowable_cip_code_list)  # RDKit stores the CIP label under "_CIPCode"
except:
encoding += [0, 0]
encoding += [atom.HasProp("_ChiralityPossible")]
encoding /= np.sum(encoding)
atom_features_list.append(encoding)
x = torch.tensor(np.array(atom_features_list), dtype=torch.float)
if len(mol.GetBonds()) <= 0:
edge_index = torch.empty((2, 0), dtype=torch.long)
else:
edges_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edges_list.append((i, j))
edges_list.append((j, i))
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
return Data(x=x, edge_index=edge_index)
def one_of_k_encoding(self, x, allowable_set):
if x not in allowable_set:
raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
return list(map(lambda s: x == s, allowable_set))
def one_of_k_encoding_unk(self, x, allowable_set):
"""Maps inputs not in the allowable set to the last element."""
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
class MolMultiScaleFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(MolMultiScaleFeaturizer, self).__init__()
self.scales = config["scales"]
self.featurizers = {}
for scale in config["scales"]:
conf = config[scale]
self.featurizers[scale] = SUPPORTED_SINGLE_SCALE_MOL_FEATURIZER[conf["name"]](conf)
def __call__(self, data):
feat = {}
for scale in self.scales:
feat[scale] = self.featurizers[scale](data)
return feat
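# Illustrative config sketch (the scale keys and sub-configs are hypothetical): a config
# such as
#   {"scales": ["smiles", "graph"],
#    "smiles": {"name": "transformer", ...}, "graph": {"name": "ogb", ...}}
# yields a dict with one featurized view per scale, e.g. {"smiles": ..., "graph": ...}.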
# same with graphmvp
class MolGraphFeaturizerV2(BaseFeaturizer):
allowable_features = {
'possible_atomic_num_list': list(range(1, 119)),
'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
'possible_chirality_list': [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER
],
'possible_hybridization_list': [
Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2,
Chem.rdchem.HybridizationType.UNSPECIFIED
],
'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8],
'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6],
'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
'possible_is_aromatic_list': [False, True],
'possible_is_in_ring_list': [False, True],
'possible_bond_type_list': [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC
],
'possible_bond_dirs': [ # only for double bond stereo information
Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT
],
'possible_bond_stereo_list': [
Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE,
Chem.rdchem.BondStereo.STEREOCIS,
Chem.rdchem.BondStereo.STEREOTRANS,
Chem.rdchem.BondStereo.STEREOANY,
],
'possible_is_conjugated_list': [False, True]
}
def __init__(self, config):
super(MolGraphFeaturizerV2, self).__init__()
self.config = config
def __call__(self, data):
# mol = Chem.MolFromSmiles(data)
mol = AllChem.MolFromSmiles(data)
# atoms
atom_features_list = []
for atom in mol.GetAtoms():
if self.config["name"] == "ogb":
atom_feature = [
safe_index(self.allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
self.allowable_features['possible_chirality_list'].index(atom.GetChiralTag()),
safe_index(self.allowable_features['possible_degree_list'], atom.GetTotalDegree()),
safe_index(self.allowable_features['possible_formal_charge_list'], atom.GetFormalCharge()),
safe_index(self.allowable_features['possible_numH_list'], atom.GetTotalNumHs()),
safe_index(self.allowable_features['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
safe_index(self.allowable_features['possible_hybridization_list'], atom.GetHybridization()),
self.allowable_features['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
self.allowable_features['possible_is_in_ring_list'].index(atom.IsInRing()),
]
else:
"""
atom_feature = [
safe_index(self.allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
self.allowable_features['possible_chirality_list'].index(atom.GetChiralTag())
]
"""
atom_feature = [self.allowable_features['possible_atomic_num_list'].index(atom.GetAtomicNum())] + \
[self.allowable_features['possible_chirality_list'].index(atom.GetChiralTag())]
atom_features_list.append(atom_feature)
x = torch.tensor(np.array(atom_features_list), dtype=torch.long)
# bonds
if len(mol.GetBonds()) <= 0: # mol has no bonds
            num_bond_features = 3 if self.config["name"] == "ogb" else 2
edge_index = torch.empty((2, 0), dtype=torch.long)
edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)
else: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
if self.config["name"] == "ogb":
edge_feature = [
safe_index(self.allowable_features['possible_bond_type_list'], bond.GetBondType()),
self.allowable_features['possible_bond_stereo_list'].index(bond.GetStereo()),
self.allowable_features['possible_is_conjugated_list'].index(bond.GetIsConjugated()),
]
else:
"""
edge_feature = [
self.allowable_features['possible_bond_type_list'].index(bond.GetBondType()),
self.allowable_features['possible_bond_dirs'].index(bond.GetBondDir())
]
"""
edge_feature = [self.allowable_features['possible_bond_type_list'].index(bond.GetBondType())] + \
[self.allowable_features['possible_bond_dirs'].index(bond.GetBondDir())]
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = torch.tensor(np.array(edge_features_list), dtype=torch.long)
data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
return data
class MolMultiModalFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(MolMultiModalFeaturizer, self).__init__()
self.modality = config["modality"]
self.featurizers = {}
if "structure" in config["modality"]:
conf = config["featurizer"]["structure"]
self.featurizers["structure"] = SUPPORTED_SINGLE_MODAL_MOL_FEATURIZER[conf["name"]](conf)
if "kg" in config["modality"]:
conf = config["featurizer"]["kg"]
self.featurizers["kg"] = SUPPORTED_KG_FEATURIZER[conf["name"]](conf)
if "text" in config["modality"]:
conf = config["featurizer"]["text"]
self.featurizers["text"] = SUPPORTED_TEXT_FEATURIZER[conf["name"]](conf)
def set_mol2kgid_dict(self, mol2kgid):
self.featurizers["kg"].set_transform(mol2kgid)
def set_mol2text_dict(self, mol2text):
self.featurizers["text"].set_transform(mol2text)
def __call__(self, data, skip=[]):
feat = {}
for modality in self.featurizers.keys():
if modality not in skip:
feat[modality] = self.featurizers[modality](data)
return feat
def __getitem__(self, index):
if index not in self.modality:
logger.error("%s is not a valid modality!" % (index))
return None
return self.featurizers[index]
SUPPORTED_SINGLE_SCALE_MOL_FEATURIZER = {
"OneHot": MolOneHotFeaturizer,
"KV-PLM*": MolBPEFeaturizer,
"transformer": MolTransformerTokFeaturizer,
"fp": MolFPFeaturizer,
"TGSA": MolTGSAFeaturizer,
"ogb": MolGraphFeaturizer,
"unimap": MolGraphFeaturizer,
"MGNN": MolMGNNFeaturizer,
"BaseGNN": MolGraphFeaturizer,
}
SUPPORTED_SINGLE_MODAL_MOL_FEATURIZER = copy.deepcopy(SUPPORTED_SINGLE_SCALE_MOL_FEATURIZER)
SUPPORTED_SINGLE_MODAL_MOL_FEATURIZER["MultiScale"] = MolMultiScaleFeaturizer
SUPPORTED_MOL_FEATURIZER = copy.deepcopy(SUPPORTED_SINGLE_MODAL_MOL_FEATURIZER)
SUPPORTED_MOL_FEATURIZER["MultiModal"] = MolMultiModalFeaturizer
def add_arguments(parser):
parser.add_argument("--mode", type=str, choices=["unit_test", "interactive", "file"])
parser.add_argument("--featurizer", type=str, default="fp")
parser.add_argument("--config_file", type=str, default="")
parser.add_argument("--smiles_file", type=str, default="")
parser.add_argument("--output_file", type=str, default="")
parser.add_argument("--post_transform", type=str, default="")
return parser
def unit_test():
smi = "CCC=O"
data = MolGraphFeaturizer({"name": "ogb"})(smi)
print(data.x, data.edge_index, data.edge_attr)
smi = "C(C(C(=O)O)N)C(C(=O)O)O"
data = MolBPEFeaturizer({
"name": "KV-PLM*",
"code_name": "../assets/KV-PLM*/bpe_coding.txt",
"vocabulary": "../assets/KV-PLM*/bpe_vocab.txt",
"vocabulary_threshold": 80,
"max_length": 32
})(smi)
print(data)
smi = "OC(=O)C1=CC(=CC=C1O)\\N=N\\C1=CC=C(C=C1)S(=O)(=O)NC1=NC=CC=C1"
data = MolMGNNFeaturizer({})(smi)
print(data.x, data.edge_index)
def featurize_file(args):
with open(args.smiles_file, "r") as f:
smis = [line.rstrip("\n") for line in f.readlines()]
config = json.load(open(args.config_file, "r"))
featurizer = SUPPORTED_MOL_FEATURIZER[args.featurizer](config)
result = [featurizer(smi) for smi in smis]
if args.post_transform == "to_clu":
result = to_clu_sparse(np.array(result))
with open(args.output_file, "w") as f:
f.write(result)
else:
pickle.dump(result, open(args.output_file, "wb"))
def run_featurize(args):
# TODO: implement command line tool for featurizing SMILES
raise NotImplementedError
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = add_arguments(parser)
args = parser.parse_args()
if args.mode == "unit_test":
unit_test()
elif args.mode == "file":
featurize_file(args)
elif args.mode == "interactive":
run_featurize(args) | OpenBioMed-main | open_biomed/feature/mol_featurizer.py |
from abc import ABC, abstractmethod
class BaseFeaturizer(ABC):
def __init__(self):
super(BaseFeaturizer, self).__init__()
@abstractmethod
def __call__(self, data):
raise NotImplementedError | OpenBioMed-main | open_biomed/feature/base_featurizer.py |
from abc import ABC, abstractmethod
import torch
from feature.base_featurizer import BaseFeaturizer
from utils.kg_utils import SUPPORTED_KG, embed
class KGFeaturizer(BaseFeaturizer, ABC):
def __init__(self, config):
super().__init__()
self.config = config
        # TODO: self.kg is currently unused
# self.kg = SUPPORTED_KG[self.config["kg_name"]](self.config["kg_path"])
self.transform = None
def set_transform(self, transform):
self.transform = transform
@abstractmethod
def __call__(self, data):
raise NotImplementedError
class KGIDFeaturizer(KGFeaturizer):
def __init__(self, config):
super().__init__(config)
self.embed_dim = config["embed_dim"]
        # TODO: hard-coded fallback index for entities that are missing from the KG
self.max_index = 49111
# data: SMILES
def __call__(self, data):
if self.transform is not None:
index = self.transform[data]
if index == -1 or index is None:
index = self.max_index
return index
else:
return None
# ugly, redesign later
class KGEFeaturizer(KGFeaturizer):
def __init__(self, config):
super().__init__(config)
self.kge = config["kge"]
self.embed_dim = config["embed_dim"]
def __call__(self, data):
if self.transform is not None:
data = self.transform[data]
if data is None or data not in self.kge:
return torch.zeros(self.embed_dim)
else:
return torch.FloatTensor(self.kge[data])
SUPPORTED_KG_FEATURIZER = {
"id": KGIDFeaturizer,
"KGE": KGEFeaturizer
} | OpenBioMed-main | open_biomed/feature/kg_featurizer.py |
import copy
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import torch
from feature.base_featurizer import BaseFeaturizer
from feature.kg_featurizer import SUPPORTED_KG_FEATURIZER
from feature.text_featurizer import SUPPORTED_TEXT_FEATURIZER
from utils import ToDevice
from transformers import AutoTokenizer, AutoModel
class ProteinIndexFeaturizer(BaseFeaturizer):
VOCAB_PROTEIN = {
"A": 1, "C": 2, "B": 3, "E": 4, "D": 5, "G": 6,
"F": 7, "I": 8, "H": 9, "K": 10, "M": 11, "L": 12,
"O": 13, "N": 14, "Q": 15, "P": 16, "S": 17, "R": 18,
"U": 19, "T": 20, "W": 21, "V": 22, "Y": 23, "X": 24,
"Z": 25
}
def __init__(self, config):
super(ProteinIndexFeaturizer, self).__init__()
self.max_length = config["max_length"]
def __call__(self, data):
temp = [self.VOCAB_PROTEIN[s] for s in data]
if len(temp) < self.max_length:
temp = np.pad(temp, (0, self.max_length - len(temp)))
else:
temp = temp[:self.max_length]
return torch.LongTensor(temp)
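# Example (the max_length value is illustrative):
#   ProteinIndexFeaturizer({"max_length": 8})("MKT")
#   # -> tensor([11, 10, 20, 0, 0, 0, 0, 0]) using the VOCAB_PROTEIN mapping above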
class ProteinOneHotFeaturizer(BaseFeaturizer):
amino_char = [
'?', 'A', 'C', 'B', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M', 'L',
'O', 'N', 'Q', 'P', 'S', 'R', 'U', 'T', 'W', 'V', 'Y', 'X', 'Z'
]
def __init__(self, config):
super(ProteinOneHotFeaturizer, self).__init__()
self.max_length = config["max_length"]
self.enc = OneHotEncoder().fit(np.array(self.amino_char).reshape(-1, 1))
def __call__(self, data):
temp = [i if i in self.amino_char else '?' for i in data]
if len(temp) < self.max_length:
temp = temp + ['?'] * (self.max_length - len(temp))
else:
temp = temp[:self.max_length]
return torch.tensor(self.enc.transform(np.array(temp).reshape(-1, 1)).toarray().T)
class Protein2VecFeaturizer(BaseFeaturizer):
AMINO_VEC = {
"A": [-0.17691335, -0.19057421, 0.045527875, -0.175985, 1.1090639, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"C": [-0.31572455, 0.38517416, 0.17325026, 0.3164464, 1.1512344, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
"B": [0.037789278, -0.1989614, -0.844488, -0.8851388, 0.57501, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"E": [-0.06940994, -0.34011552, -0.17767446, 0.251, 1.0661993, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"D": [0.00600859, -0.1902303, -0.049640052, 0.15067418, 1.0812483, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
"G": [-0.07281224, 0.01804472, 0.22983849, -0.045492448, 1.1139168, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"F": [0.2315121, -0.01626652, 0.25592703, 0.2703909, 1.0793934, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"I": [0.15077977, -0.1881559, 0.33855876, 0.39121667, 1.0793937, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"H": [0.019046513, -0.023256639, -0.06749539, 0.16737276, 1.0796973, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
"K": [0.22048187, -0.34703028, 0.20346786, 0.65077996, 1.0620389, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
"J": [0.06847394, 0.015362699, -0.7120714, -1.054779, 0.49967504, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"M": [0.06302169, -0.10206237, 0.18976009, 0.115588315, 1.0927621, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"L": [0.0075188675, -0.17002057, 0.08902198, 0.066686414, 1.0804346, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"O": [-0.042549122, 0.11453196, 0.3218399, -0.96280265, 0.42855614, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"N": [0.41597384, -0.22671205, 0.31179032, 0.45883527, 1.0529875, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
"Q": [0.25189143, -0.40238172, -0.046555642, 0.22140719, 1.0362468, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"P": [0.017954966, -0.09864355, 0.028460773, -0.12924117, 1.0974121, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"S": [0.17177454, -0.16769698, 0.27776834, 0.10357749, 1.0800852, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"R": [-0.15621762, -0.19172126, -0.209409, 0.026799612, 1.0879921, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
"U": [0.00069698587, -0.40677646, 0.045045465, 0.875985, 0.93636376, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"T": [0.054446213, -0.16771607, 0.22424258, -0.01337227, 1.0967118, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"W": [0.25281385, 0.12420933, 0.0132171605, 0.09199735, 1.0842415, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
"V": [-0.09511698, -0.11654304, 0.1440215, -0.0022315443, 1.1064949, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"Y": [0.27962074, -0.051454283, 0.114876375, 0.3550331, 1.0615551, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"X": [0.5566999, -2.5784554, -4.580289, -0.46196952, 1.4881511, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"Z": [-0.020066334, -0.116254225, -0.69591016, -1.2875729, 0.6376922, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
"?": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
}
def __init__(self, config):
super(Protein2VecFeaturizer, self).__init__()
self.max_length = config["max_length"]
def __call__(self, data):
temp = [self.AMINO_VEC[i] if i in self.AMINO_VEC else [0.0] * 13 for i in data]
if len(temp) < self.max_length:
for i in range(self.max_length - len(temp)):
temp.append([0.0] * 13)
else:
temp = temp[:self.max_length]
return torch.tensor(temp)
class ProteinTransformerTokFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(ProteinTransformerTokFeaturizer, self).__init__()
self.max_length = config["max_length"]
self.tokenizer = AutoTokenizer.from_pretrained(config["model_name_or_path"], model_max_length=self.max_length)
def __call__(self, data):
result = self.tokenizer(data, max_length=self.max_length, padding=True, truncation=True)
return result
class ProteinTransformerEncFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(ProteinTransformerEncFeaturizer, self).__init__()
self.device = config["device"]
self.max_length = config["max_length"]
self.tokenizer = AutoTokenizer.from_pretrained(config["model_name_or_path"], model_max_length=self.max_length)
self.encoder = AutoModel.from_pretrained(config["model_name_or_path"])
if "init_ckpt" in config:
ckpt = torch.load(open(config["init_ckpt"], "rb"))
if "param_key" in config:
ckpt = ckpt[config["param_key"]]
self.encoder.load_state_dict(ckpt)
self.encoder = self.encoder.to(self.device)
def __call__(self, data):
with torch.no_grad():
data = " ".join(list(data))
data = self.tokenizer(data, truncation=True, padding=True, return_tensors='pt')
data = ToDevice(data, self.device)
h = self.encoder(**data)['last_hidden_state'].squeeze()
h = h[1:-1].mean(dim=0)
return h
SUPPORTED_SINGLE_MODAL_PROTEIN_FEATURIZER = {
"index": ProteinIndexFeaturizer,
"OneHot": ProteinOneHotFeaturizer,
"protein2vec": Protein2VecFeaturizer,
"transformertok": ProteinTransformerTokFeaturizer,
"transformerenc": ProteinTransformerEncFeaturizer
}
class ProteinMultiModalFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(ProteinMultiModalFeaturizer, self).__init__()
self.modality = config["modality"]
self.featurizers = {}
if "structure" in config["modality"]:
conf = config["featurizer"]["structure"]
self.featurizers["structure"] = SUPPORTED_SINGLE_MODAL_PROTEIN_FEATURIZER[conf["name"]](conf)
if "kg" in config["modality"]:
conf = config["featurizer"]["kg"]
self.featurizers["kg"] = SUPPORTED_KG_FEATURIZER[conf["name"]](conf)
if "text" in config["modality"]:
conf = config["featurizer"]["text"]
self.featurizers["text"] = SUPPORTED_TEXT_FEATURIZER[conf["name"]](conf)
def set_protein2kgid_dict(self, protein2kgid):
self.featurizers["kg"].set_transform(protein2kgid)
def set_protein2text_dict(self, protein2text):
self.featurizers["text"].set_transform(protein2text)
def __call__(self, data):
feat = {}
for modality in self.featurizers.keys():
feat[modality] = self.featurizers[modality](data)
return feat
SUPPORTED_PROTEIN_FEATURIZER = copy.deepcopy(SUPPORTED_SINGLE_MODAL_PROTEIN_FEATURIZER)
SUPPORTED_PROTEIN_FEATURIZER["MultiModal"] = ProteinMultiModalFeaturizer | OpenBioMed-main | open_biomed/feature/protein_featurizer.py |
import logging
logger = logging.getLogger(__name__)
import os
import pickle
import torch
import numpy as np
from torch_geometric.data import Data, Batch
from torch_geometric.nn import graclus, max_pool
from feature.base_featurizer import BaseFeaturizer
from utils.kg_utils import STRING
class CellTGSAFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(CellTGSAFeaturizer, self).__init__()
save_path = "../assets/drp/gene_graph.pkl"
if not os.path.exists(save_path):
logger.info("Generating gene graph...")
self.selected_index_hugo = []
with open("../assets/drp/selected_genes.txt", "r") as f:
line = f.readline().strip("\n").split(",")
for index in line:
self.selected_index_hugo.append(index.lstrip('(').rstrip(')'))
self.ppi_graph = STRING("../assets/kg/STRING", config["edge_threshold"]).node_subgraph(self.selected_index_hugo)
self.predefined_cluster = self._gen_predefined_cluster()
pickle.dump({
"graph": self.ppi_graph,
"predefined_cluster": self.predefined_cluster
}, open(save_path, "wb"))
else:
logger.info("Loading gene graph from cache...")
data = pickle.load(open(save_path, "rb"))
self.ppi_graph = data["graph"]
self.predefined_cluster = data["predefined_cluster"]
def _gen_predefined_cluster(self):
g = Data(edge_index=torch.tensor(self.ppi_graph, dtype=torch.long), x=torch.zeros(len(self.selected_index_hugo), 1))
g = Batch.from_data_list([g])
cluster_predefine = {}
for i in range(5):
cluster = graclus(g.edge_index, None, g.x.size(0))
logger.info("%d nodes at cluster level #%d" % (len(cluster.unique()), i))
g = max_pool(cluster, g, transform=None)
cluster_predefine[i] = cluster
return cluster_predefine
def __call__(self, data):
feat = {}
for cell_name in data:
feat[cell_name] = Data(
x=torch.tensor(data[cell_name], dtype=torch.float32),
edge_index=torch.tensor(self.ppi_graph)
)
return feat
class CellBarFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(CellBarFeaturizer, self).__init__()
self.n_bars = config["n_bars"] + 2
def __call__(self, data):
data = data.toarray()[0]
data[data > self.n_bars - 2] = self.n_bars - 2
data = torch.from_numpy(data).long()
return torch.cat((data, torch.tensor([0])))
class CellFullseqFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(CellFullseqFeaturizer, self).__init__()
def __call__(self, data):
data = data.toarray()[0]
data = torch.from_numpy(data)
return data
class CellTensorDictFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(CellTensorDictFeaturizer, self).__init__()
def __call__(self, data):
for k in data:
data[k] = torch.from_numpy(data[k])
return data
class CellBarDictFeaturizer(BaseFeaturizer):
def __init__(self, config):
super(CellBarDictFeaturizer, self).__init__()
self.n_bars = config["n_bars"] + 2
def __call__(self, data):
for k in data:
d = data[k]
d[d > self.n_bars - 2] = self.n_bars - 2
d = torch.from_numpy(d).long()
data[k] = torch.cat((d, torch.tensor([0])))
return data
SUPPORTED_CELL_FEATURIZER = {
"Bar": CellBarFeaturizer,
"Fullseq": CellFullseqFeaturizer,
"TGSA": CellTGSAFeaturizer,
"TensorDict": CellTensorDictFeaturizer,
"BarDict": CellBarDictFeaturizer
}
| OpenBioMed-main | open_biomed/feature/cell_featurizer.py |
from pathlib import Path
from setuptools import find_packages, setup
if __name__ == "__main__":
with Path(Path(__file__).parent, "README.md").open(encoding="utf-8") as file:
long_description = file.read()
# TODO: This is a hack to get around the fact that we can't read the requirements.txt file, we should fix this.
# def _read_reqs(relpath):
# fullpath = os.path.join(Path(__file__).parent, relpath)
# with open(fullpath) as f:
# return [
# s.strip()
# for s in f.readlines()
# if (s.strip() and not s.startswith("#"))
# ]
REQUIREMENTS = [
"einops",
"einops-exts",
"transformers",
"torch",
"torchvision",
"pillow",
"more-itertools",
"datasets",
"braceexpand",
"webdataset",
"wandb",
"nltk",
"scipy",
"inflection",
"sentencepiece",
"open_clip_torch",
]
setup(
name="open_flamingo",
packages=find_packages(),
include_package_data=True,
version="0.0.2",
license="MIT",
description="An open-source framework for training large multimodal models",
long_description=long_description,
long_description_content_type="text/markdown",
data_files=[(".", ["README.md"])],
keywords=["machine learning"],
install_requires=REQUIREMENTS,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.9",
],
)
| flamingo-main | setup.py |
from .src.flamingo import Flamingo
from .src.factory import create_model_and_transforms
| flamingo-main | open_flamingo/__init__.py |
import time
from contextlib import suppress
import torch
from tqdm import tqdm
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == "bf16":
cast_dtype = torch.bfloat16
elif precision == "fp16":
cast_dtype = torch.float16
return cast_dtype
def get_autocast(precision):
if precision == "amp":
return torch.cuda.amp.autocast
elif precision == "amp_bfloat16" or precision == "amp_bf16":
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return suppress
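# Example: get_cast_dtype("bf16") returns torch.bfloat16 and get_cast_dtype("fp32")
# returns None (no casting); get_autocast("amp") returns torch.cuda.amp.autocast, while
# any non-amp precision falls back to contextlib.suppress (a no-op context manager).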
def train_one_epoch(
args,
model,
epoch,
laion_loader,
mmc4_loader,
tokenizer,
optimizer,
lr_scheduler,
device_id,
wandb,
):
num_batches_per_epoch_laion = laion_loader.num_batches
num_batches_per_epoch_mmc4 = mmc4_loader.num_batches
assert (
num_batches_per_epoch_laion == num_batches_per_epoch_mmc4
), "Number of batches in laion and mmc4 datasets must be the same"
num_batches_per_epoch = num_batches_per_epoch_mmc4
total_training_steps = num_batches_per_epoch * args.num_epochs
autocast = get_autocast(args.precision)
cast_dtype = get_cast_dtype(args.precision)
media_token_id = tokenizer("<image>", add_special_tokens=False)["input_ids"][-1]
endofchunk_token_id = tokenizer("<|endofchunk|>", add_special_tokens=False)[
"input_ids"
][-1]
model.train()
# setup logging
step_time_m = (
AverageMeter()
) # time for one optimizer step (> 1 batch if using gradient accum)
data_time_m = (
AverageMeter()
) # avg time to load one batch of both C4 AND laion (= 1 batch regardless of gradient accum)
end = time.time()
# loop through dataloader
for num_steps, (batch_laion, batch_mmc4) in tqdm(
enumerate(zip(laion_loader, mmc4_loader)),
disable=args.rank != 0,
total=total_training_steps,
initial=(epoch * num_batches_per_epoch),
):
data_time_m.update(time.time() - end)
global_step = num_steps + epoch * num_batches_per_epoch
#### LAION FORWARD PASS ####
images = (
batch_laion[0]
.to(device_id, dtype=cast_dtype, non_blocking=True)
.unsqueeze(1)
.unsqueeze(1)
)
input_ids = batch_laion[1][0].to(device_id, dtype=cast_dtype, non_blocking=True)
attention_mask = batch_laion[1][1].to(
device_id, dtype=cast_dtype, non_blocking=True
)
labels = input_ids.clone()
labels[labels == tokenizer.pad_token_id] = -100
labels[:, 0] = -100
labels[labels == media_token_id] = -100
        labels = labels.to(device_id)
with autocast():
loss_laion = model(
vision_x=images,
lang_x=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
divided_loss_laion = loss_laion / args.gradient_accumulation_steps
#### C4 FORWARD PASS ####
images = (
batch_mmc4[0]
.to(device_id, dtype=cast_dtype, non_blocking=True)
.unsqueeze(2)
)
        # Token ids and attention masks must live on the same device as the model;
        # keep them as integer tensors (no dtype cast).
        input_ids = torch.stack([x[0] for x in batch_mmc4[1]]).squeeze(1).to(device_id, non_blocking=True)
        attention_mask = torch.stack([x[1] for x in batch_mmc4[1]]).squeeze(1).to(device_id, non_blocking=True)
# NOTE: irena: expected shape of clip_text_input_ids / attention_mask is (N, I, max_seq_len)
labels = input_ids.clone()
labels[labels == tokenizer.pad_token_id] = -100
labels[:, 0] = -100
for i in range(labels.shape[0]):
# remove loss for any token before the first <image> token
label_idx = 0
while (
label_idx < labels.shape[1] and labels[i][label_idx] != media_token_id
):
labels[i][label_idx] = -100
label_idx += 1
# get index of all endofchunk tokens in the sequence
endofchunk_idxs = torch.where(labels[i] == endofchunk_token_id)[0]
for endofchunk_idx in endofchunk_idxs:
token_idx = endofchunk_idx + 1
while (
token_idx < labels.shape[1]
and labels[i][token_idx] != media_token_id
):
labels[i][token_idx] = -100
token_idx += 1
labels[labels == media_token_id] = -100
        labels = labels.to(device_id)
with autocast():
loss_mmc4 = model(
vision_x=images,
lang_x=input_ids,
attention_mask=attention_mask,
labels=labels,
)[0]
# if loss is nan, skip this batch
if torch.isnan(loss_mmc4):
print("loss is nan, skipping this batch")
print("input_ids: ", tokenizer.batch_decode(input_ids))
print("labels: ", labels)
print("images: ", images)
optimizer.zero_grad()
continue
divided_loss_mmc4 = loss_mmc4 / args.gradient_accumulation_steps
#### BACKWARD PASS ####
loss = (
divided_loss_laion * args.loss_multiplier_laion
+ divided_loss_mmc4 * args.loss_multiplier_mmc4
)
loss.backward()
#### MASK GRADIENTS FOR EMBEDDINGS ####
# Note (anas): Do not apply weight decay to embeddings as it will break this function.
def mask_embedding(m):
if isinstance(m, torch.nn.Embedding) and m.weight.requires_grad:
zero_mask = torch.zeros_like(m.weight.grad)
zero_mask[media_token_id] = torch.ones_like(zero_mask[media_token_id])
zero_mask[endofchunk_token_id] = torch.ones_like(
zero_mask[endofchunk_token_id]
)
m.weight.grad = m.weight.grad * zero_mask
model.apply(mask_embedding)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# step optimizer and log
if (((num_steps + 1) % args.gradient_accumulation_steps) == 0) or (
num_steps == num_batches_per_epoch - 1
):
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# step time and reset end outside of rank 0
step_time_m.update(time.time() - end)
end = time.time()
if args.rank == 0 and args.report_to_wandb:
# compute within rank 0
laion_samples_per_second = (
args.gradient_accumulation_steps
* args.batch_size_laion
* args.world_size
/ step_time_m.val
)
laion_samples_per_second_per_gpu = (
args.gradient_accumulation_steps
* args.batch_size_laion
/ step_time_m.val
)
c4_samples_per_second = (
args.gradient_accumulation_steps
* args.batch_size_mmc4
* args.world_size
/ step_time_m.val
)
c4_samples_per_second_per_gpu = (
args.gradient_accumulation_steps
* args.batch_size_mmc4
/ step_time_m.val
)
wandb.log(
{
"data_time": data_time_m.avg,
"step_time": step_time_m.avg,
"laion_samples_per_second": laion_samples_per_second,
"laion_samples_per_second_per_gpu": laion_samples_per_second_per_gpu,
"c4_samples_per_second": c4_samples_per_second,
"c4_samples_per_second_per_gpu": c4_samples_per_second_per_gpu,
"lr": optimizer.param_groups[0]["lr"],
},
commit=False,
)
step_time_m.reset()
data_time_m.reset()
wandb.log(
{
"loss_laion": divided_loss_laion.item(),
"global_step": global_step,
},
commit=False,
)
wandb.log(
{"loss_mmc4": divided_loss_mmc4.item(), "global_step": global_step},
commit=True,
)
# Log loss to console
if ((num_steps + 1) % args.logging_steps == 0) and args.rank == 0:
print(
f"Step {num_steps+1}/{num_batches_per_epoch} of epoch {epoch+1}/{args.num_epochs} complete. Loss LAION: {loss_laion.item():.3f} // Loss MMC4: {loss_mmc4.item():.3f}"
)
def get_checkpoint(model):
state_dict = model.state_dict()
for name, p in model.named_parameters():
if not p.requires_grad:
del state_dict[name]
return state_dict
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
| flamingo-main | open_flamingo/train/train_utils.py |
flamingo-main | open_flamingo/train/__init__.py |
|
import os
import torch
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def is_global_master(args):
return args.rank == 0
def is_local_master(args):
return args.local_rank == 0
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
def is_using_horovod():
# NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
# Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
pmi_vars = ["PMI_RANK", "PMI_SIZE"]
if all([var in os.environ for var in ompi_vars]) or all(
[var in os.environ for var in pmi_vars]
):
return True
else:
return False
def is_using_distributed():
if "WORLD_SIZE" in os.environ:
return int(os.environ["WORLD_SIZE"]) > 1
if "SLURM_NTASKS" in os.environ:
return int(os.environ["SLURM_NTASKS"]) > 1
return False
def world_info_from_env():
local_rank = 0
for v in (
"LOCAL_RANK",
"MPI_LOCALRANKID",
"SLURM_LOCALID",
"OMPI_COMM_WORLD_LOCAL_RANK",
):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ("RANK", "PMI_RANK", "SLURM_PROCID", "OMPI_COMM_WORLD_RANK"):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ("WORLD_SIZE", "PMI_SIZE", "SLURM_NTASKS", "OMPI_COMM_WORLD_SIZE"):
if v in os.environ:
world_size = int(os.environ[v])
break
return local_rank, global_rank, world_size
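# Example: under torchrun with LOCAL_RANK=1, RANK=3 and WORLD_SIZE=8 set in the
# environment, world_info_from_env() returns (1, 3, 8).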
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
if args.horovod:
assert hvd is not None, "Horovod is not installed"
hvd.init()
args.local_rank = int(hvd.local_rank())
args.rank = hvd.rank()
args.world_size = hvd.size()
args.distributed = True
os.environ["LOCAL_RANK"] = str(args.local_rank)
os.environ["RANK"] = str(args.rank)
os.environ["WORLD_SIZE"] = str(args.world_size)
elif is_using_distributed():
if "SLURM_PROCID" in os.environ:
# DDP via SLURM
args.local_rank, args.rank, args.world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ["LOCAL_RANK"] = str(args.local_rank)
os.environ["RANK"] = str(args.rank)
os.environ["WORLD_SIZE"] = str(args.world_size)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
else:
# DDP via torchrun, torch.distributed.launch
args.local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url
)
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
args.distributed = True
else:
# needed to run on single gpu
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=1,
rank=0,
)
if torch.cuda.is_available():
if args.distributed and not args.no_set_device_rank:
device = "cuda:%d" % args.local_rank
else:
device = "cuda:0"
torch.cuda.set_device(device)
else:
device = "cpu"
args.device = device
device = torch.device(device)
return device
| flamingo-main | open_flamingo/train/distributed.py |
""" Main training script """
import argparse
import copy
import glob
import os
import random
import numpy as np
import torch
import wandb
from data import get_data
from distributed import init_distributed_device, world_info_from_env
from torch.nn.parallel import DistributedDataParallel as DDP
from train_utils import get_checkpoint, train_one_epoch
from transformers import (
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
from open_flamingo import create_model_and_transforms
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--vision_encoder_path", default="ViT-L-14", type=str)
parser.add_argument("--vision_encoder_pretrained", default="openai", type=str)
parser.add_argument("--lm_path", default="facebook/opt-1.3b", type=str)
parser.add_argument(
"--tokenizer_path",
default="facebook/opt-30b",
type=str,
help="path to tokenizer",
)
parser.add_argument(
"--cross_attn_every_n_layers",
type=int,
default=1,
help="how often to add a cross-attention layer after each transformer layer",
)
parser.add_argument(
"--run_name",
type=str,
default="openflamingo3B",
help="used to name saving directory and wandb run",
)
parser.add_argument("--use_media_placement_augmentation", action="store_true")
parser.add_argument("--offline", action="store_true")
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument(
"--logging_steps", type=int, default=100, help="log loss every n steps"
)
    # Per-data-source batch sizes for a single forward pass; the effective optimization
    # batch size is batch_size * gradient_accumulation_steps * world_size.
parser.add_argument("--batch_size_mmc4", type=int, default=128)
parser.add_argument("--batch_size_laion", type=int, default=128)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
help="path to checkpoint to resume from, this should contain model, optimizer, and lr_scheduler states",
default=None,
)
parser.add_argument(
"--delete_previous_checkpoint",
action="store_true",
help="delete previous checkpoint when saving new checkpoint",
)
parser.add_argument(
"--laion_shards",
type=str,
help="path to laion shards, this should be a glob pattern such as /path/to/shards/shard-{0000..0999}.tar",
)
parser.add_argument(
"--mmc4_shards",
type=str,
help="path to c4 shards, this should be a glob pattern such as /path/to/shards/shard-{0000..0999}.tar",
)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--learning_rate", default=1e-4, type=float)
parser.add_argument(
"--lr_scheduler",
default="constant",
type=str,
help="constant, linear, or cosine",
)
parser.add_argument("--loss_multiplier_mmc4", type=float, default=1.0)
parser.add_argument("--loss_multiplier_laion", type=float, default=1.0)
parser.add_argument("--warmup_steps", default=5000, type=int)
parser.add_argument("--weight_decay", default=0.1, type=float)
parser.add_argument(
"--precision",
choices=["amp_bf16", "amp_bfloat16", "bf16", "fp16", "fp32"],
default="fp32",
help="Floating point precision.",
)
# data args
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--train_num_samples_mmc4", type=int, default=10000)
parser.add_argument("--train_num_samples_laion", type=int, default=10000)
parser.add_argument("--dataset_resampled", action="store_true")
# distributed training args
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--horovod",
default=False,
action="store_true",
help="Use horovod for distributed training.",
)
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
)
# wandb args
parser.add_argument("--report_to_wandb", default=False, action="store_true")
parser.add_argument(
"--wandb_project",
type=str,
)
parser.add_argument(
"--wandb_entity",
type=str,
)
parser.add_argument(
"--save_checkpoints_to_wandb",
default=False,
action="store_true",
help="save checkpoints to wandb",
)
parser.add_argument(
"--mmc4_textsim_threshold",
default=30,
type=float,
help="threshold for filtering images in mmc4 based on image-text similarity",
)
args = parser.parse_args()
if args.laion_shards.startswith("s3"):
args.laion_shards = f"pipe:aws s3 cp {args.laion_shards} -"
if args.mmc4_shards.startswith("s3"):
args.mmc4_shards = f"pipe:aws s3 cp {args.mmc4_shards} -"
if args.save_checkpoints_to_wandb and not args.report_to_wandb:
raise ValueError("save_checkpoints_to_wandb requires report_to_wandb")
assert (args.train_num_samples_laion // args.batch_size_laion) == (
args.train_num_samples_mmc4 // args.batch_size_mmc4
), "number of samples per epoch must be equal for mmc4 and laion"
if args.offline:
os.environ["WANDB_MODE"] = "offline"
os.environ["TRANSFORMERS_OFFLINE"] = "1"
args.local_rank, args.rank, args.world_size = world_info_from_env()
device_id = init_distributed_device(args)
random_seed(args.seed)
model, image_processor, tokenizer = create_model_and_transforms(
args.vision_encoder_path,
args.vision_encoder_pretrained,
args.lm_path,
args.tokenizer_path if args.tokenizer_path else args.lm_path,
cross_attn_every_n_layers=args.cross_attn_every_n_layers,
use_local_files=args.offline,
use_media_placement_augmentation=args.use_media_placement_augmentation,
)
random_seed(args.seed, args.rank)
print(f"Start running training on rank {args.rank}.")
if args.rank == 0 and args.report_to_wandb:
wandb.init(
project=args.wandb_project,
entity=args.wandb_entity,
name=args.run_name,
config=vars(args),
)
device_id = args.rank % torch.cuda.device_count()
model = model.to(device_id)
ddp_model = DDP(model, device_ids=[device_id])
laion_dataset = get_data(args, image_processor, tokenizer, "image_text")
mmc4_dataset = get_data(args, image_processor, tokenizer, "mmc4")
def get_grouped_params(model):
params_with_wd, params_without_wd = [], []
def apply_decay(x):
return (
"gated_cross_attn_layer" in x
and "ff_gate" not in x
and "attn_gate" not in x
and "norm" not in x
and "bias" not in x
)
for n, p in model.named_parameters():
# if p.requires_grad:
if apply_decay(n):
params_with_wd.append(p)
else:
params_without_wd.append(p)
return [
{"params": params_with_wd, "weight_decay": args.weight_decay},
{"params": params_without_wd, "weight_decay": 0.0},
]
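    # Note on the grouping above: only dense weights inside the gated
    # cross-attention layers get weight decay; their attn/ff gates, norms and
    # biases (and every other parameter) go into the zero-decay group.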
optimizer = torch.optim.AdamW(get_grouped_params(ddp_model), lr=args.learning_rate)
total_training_steps = (
(args.train_num_samples_mmc4) // (args.batch_size_mmc4 * args.world_size)
) * args.num_epochs
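    # The step count is derived from the MMC4 numbers only; the earlier assert
    # on train_num_samples_* / batch_size_* guarantees LAION produces the same
    # number of steps per epoch.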
if args.rank == 0:
print(f"Total training steps: {total_training_steps}")
if args.lr_scheduler == "linear":
lr_scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=total_training_steps,
)
elif args.lr_scheduler == "cosine":
lr_scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=total_training_steps,
)
else:
lr_scheduler = get_constant_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps
)
# check if a checkpoint exists for this run
if os.path.exists(f"{args.run_name}") and args.resume_from_checkpoint is None:
checkpoint_list = glob.glob(f"{args.run_name}/checkpoint_*.pt")
if len(checkpoint_list) == 0:
print(f"Found no checkpoints for run {args.run_name}.")
else:
args.resume_from_checkpoint = sorted(
checkpoint_list, key=lambda x: int(x.split("_")[-1].split(".")[0])
)[-1]
print(
f"Found checkpoint {args.resume_from_checkpoint} for run {args.run_name}."
)
resume_from_epoch = 0
if args.resume_from_checkpoint is not None:
if args.rank == 0:
print(f"Loading checkpoint from {args.resume_from_checkpoint}")
checkpoint = torch.load(args.resume_from_checkpoint, map_location="cpu")
ddp_model.load_state_dict(checkpoint["model_state_dict"], False)
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler_state_dict"])
resume_from_epoch = checkpoint["epoch"] + 1
ddp_model.train()
for epoch in range(resume_from_epoch, args.num_epochs):
laion_dataset.set_epoch(epoch)
laion_loader = laion_dataset.dataloader
mmc4_dataset.set_epoch(epoch)
mmc4_loader = mmc4_dataset.dataloader
train_one_epoch(
args=args,
model=ddp_model,
epoch=epoch,
tokenizer=tokenizer,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
laion_loader=laion_loader,
mmc4_loader=mmc4_loader,
device_id=device_id,
wandb=wandb,
)
if args.rank == 0:
if not os.path.exists(args.run_name):
os.makedirs(args.run_name)
checkpoint_dict = {
"epoch": epoch,
"model_state_dict": get_checkpoint(ddp_model),
"optimizer_state_dict": optimizer.state_dict(),
"lr_scheduler_state_dict": lr_scheduler.state_dict(),
}
print(f"Saving checkpoint to {args.run_name}/checkpoint_{epoch}.pt")
torch.save(checkpoint_dict, f"{args.run_name}/checkpoint_{epoch}.pt")
if args.report_to_wandb and args.save_checkpoints_to_wandb:
wandb.save(f"{args.run_name}/checkpoint_{epoch}.pt")
if args.delete_previous_checkpoint:
if epoch > 0:
os.remove(f"{args.run_name}/checkpoint_{epoch-1}.pt")
if args.rank == 0:
if not os.path.exists(args.run_name):
os.makedirs(args.run_name)
torch.save(get_checkpoint(ddp_model), f"{args.run_name}/final_weights.pt")
if args.report_to_wandb and args.save_checkpoints_to_wandb:
wandb.save(f"{args.run_name}/final_weights.pt")
if __name__ == "__main__":
main()
| flamingo-main | open_flamingo/train/train.py |
import ast
import functools
import io
import json
import logging
import math
import os
import random
import sys
import tarfile
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import torch
import torchvision
import webdataset as wds
from PIL import Image
from torch.utils.data import DataLoader, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import (
base_plus_ext,
tar_file_expander,
url_opener,
valid_sample,
)
Image.MAX_IMAGE_PIXELS = 1000000000
MAX_NUM_TOKENS = 256
MAX_NUM_IMAGES = 5
TINY_IMAGE_SIZE_THRESHOLD = 1
N_CHANNELS = 3
INTERLEAVED_IMAGE_SIZE = 224
try:
import horovod.torch as hvd
except ImportError:
hvd = None
class SharedEpoch:
def __init__(self, epoch: int = 0):
self.shared_epoch = Value("i", epoch)
def set_value(self, epoch):
self.shared_epoch.value = epoch
def get_value(self):
return self.shared_epoch.value
@dataclass
class DataInfo:
dataloader: DataLoader
sampler: DistributedSampler = None
shared_epoch: SharedEpoch = None
def set_epoch(self, epoch):
if self.shared_epoch is not None:
self.shared_epoch.set_value(epoch)
if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
self.sampler.set_epoch(epoch)
def get_dataset_size(shards):
    shards_list = list(braceexpand.braceexpand(shards))
    dir_path = os.path.dirname(shards_list[0])
sizes_filename = os.path.join(dir_path, "sizes.json")
len_filename = os.path.join(dir_path, "__len__")
if os.path.exists(sizes_filename):
sizes = json.load(open(sizes_filename, "r"))
total_size = sum(
[
int(sizes[os.path.basename(shard)])
if os.path.basename(shard) in sizes
else 0
for shard in shards_list
]
)
elif os.path.exists(len_filename):
# FIXME this used to be eval(open(...)) but that seemed rather unsafe
total_size = ast.literal_eval(open(len_filename, "r").read())
else:
total_size = None # num samples undefined
# some common dataset sizes (at time of authors last download)
# CC3M (train): 2905954
# CC12M: 10968539
# LAION-400M: 407332084
# LAION-2B (english): 2170337258
num_shards = len(shards_list)
return total_size, num_shards
def count_samples(dataloader):
os.environ["WDS_EPOCH"] = "0"
n_elements, n_batches = 0, 0
for images, texts in dataloader:
n_batches += 1
n_elements += len(images)
assert len(images) == len(texts)
return n_elements, n_batches
def filter_no_caption_or_no_image(sample):
return ("txt" in sample) and (
"png" in sample or "jpg" in sample or "jpeg" in sample
)
def log_and_continue(exn):
"""Call in an exception handler to ignore any exception, issue a warning, and continue."""
if "No images in sample" in str(exn) or "Only one image in sample" in str(
exn
): # Avoid spamming logs with these
return True
logging.warning(f"Handling webdataset error ({repr(exn)}). Ignoring.")
return True
def group_by_keys_nothrow(
data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None
):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext)
:param lcase: convert suffixes to lower case (Default value = True)
"""
current_sample = None
for filesample in data:
assert isinstance(filesample, dict)
fname, value = filesample["fname"], filesample["data"]
prefix, suffix = keys(fname)
if prefix is None:
continue
if lcase:
suffix = suffix.lower()
            # FIXME the webdataset version throws if suffix is already in current_sample, but we have a potential
            # for this happening in the current LAION-400M dataset if a tar ends with the same prefix as the next
            # begins; rare, but it can happen since prefixes aren't unique across tar files in that dataset
if (
current_sample is None
or prefix != current_sample["__key__"]
or suffix in current_sample
):
if valid_sample(current_sample):
yield current_sample
current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
if valid_sample(current_sample):
yield current_sample
def tarfile_to_samples_nothrow(src, handler=log_and_continue):
# NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw
streams = url_opener(src, handler=handler)
files = tar_file_expander(streams, handler=handler)
samples = group_by_keys_nothrow(files, handler=handler)
return samples
def pytorch_worker_seed(increment=0):
"""get dataloader worker seed from pytorch"""
worker_info = get_worker_info()
if worker_info is not None:
# favour using the seed already created for pytorch dataloader workers if it exists
seed = worker_info.seed
if increment:
# space out seed increments so they can't overlap across workers in different iterations
seed += increment * max(1, worker_info.num_workers)
return seed
# fallback to wds rank based seed
return wds.utils.pytorch_worker_seed()
_SHARD_SHUFFLE_SIZE = 2000
_SHARD_SHUFFLE_INITIAL = 500
_SAMPLE_SHUFFLE_SIZE = 5000
_SAMPLE_SHUFFLE_INITIAL = 1000
class detshuffle2(wds.PipelineStage):
def __init__(
self,
bufsize=1000,
initial=100,
seed=0,
epoch=-1,
):
self.bufsize = bufsize
self.initial = initial
self.seed = seed
self.epoch = epoch
def run(self, src):
if isinstance(self.epoch, SharedEpoch):
epoch = self.epoch.get_value()
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
rng = random.Random()
if self.seed < 0:
# If seed is negative, we use the worker's seed, this will be different across all nodes/workers
seed = pytorch_worker_seed(epoch)
else:
            # This seed is meant to be deterministic AND the same across all nodes/workers in each epoch
seed = self.seed + epoch
rng.seed(seed)
return _shuffle(src, self.bufsize, self.initial, rng)
class ResampledShards2(IterableDataset):
"""An iterable dataset yielding a list of urls."""
def __init__(
self,
urls,
nshards=sys.maxsize,
worker_seed=None,
deterministic=False,
epoch=-1,
):
"""Sample shards from the shard list with replacement.
:param urls: a list of URLs as a Python list or brace notation string
"""
super().__init__()
urls = wds.shardlists.expand_urls(urls)
self.urls = urls
assert isinstance(self.urls[0], str)
self.nshards = nshards
self.rng = random.Random()
self.worker_seed = worker_seed
self.deterministic = deterministic
self.epoch = epoch
def __iter__(self):
"""Return an iterator over the shards."""
if isinstance(self.epoch, SharedEpoch):
epoch = self.epoch.get_value()
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.deterministic:
# reset seed w/ epoch if deterministic
if self.worker_seed is None:
# pytorch worker seed should be deterministic due to being init by arg.seed + rank + worker id
seed = pytorch_worker_seed(epoch)
else:
seed = self.worker_seed() + epoch
self.rng.seed(seed)
for _ in range(self.nshards):
yield dict(url=self.rng.choice(self.urls))
def preprocess_image(sample, image_processor):
image = [image_processor(s).unsqueeze(0) for s in sample]
image = torch.cat(image, dim=0)
# apply random horizontal flip and color jitter
image = torchvision.transforms.RandomHorizontalFlip(p=0.5)(image)
image = torchvision.transforms.ColorJitter(brightness=0.5, hue=0.3)(image)
return image
def preprocess_text(sample, tokenizer):
tokenizer.padding_side = "right"
sample = [
(f"<image>{s.strip()}<|endofchunk|>{tokenizer.eos_token}") for s in sample
]
text = tokenizer(
sample,
max_length=32,
padding="longest",
truncation="only_first",
return_tensors="pt",
)
return text["input_ids"], text["attention_mask"]
MIN_KB = 10
MAX_NUM_IMAGES = 5
def preprocess_interleaved(sample, tokenizer, clip_processor, sim_threshold):
info = json.loads(sample[0])
tar_file_obj = io.BytesIO(sample[1])
image_tar = tarfile.open(fileobj=tar_file_obj)
sentences = info["text_list"]
images, image_idxs = [], []
for image_path, sim in zip(info["image_info"], info["similarity_matrix"]):
# pick one image per sentence
if info["image_info"][image_path]["matched_text_index"] in image_idxs:
continue
rawbytes = image_tar.extractfile(
os.path.join(image_tar.getnames()[0], image_path)
).read()
# filter to images >= 10KB
if len(rawbytes) // 1000 <= MIN_KB:
continue
if sim[info["image_info"][image_path]["matched_text_index"]] < sim_threshold:
continue
image = Image.open(io.BytesIO(rawbytes)).convert("RGB")
images.append(image)
image_idxs.append(info["image_info"][image_path]["matched_text_index"])
if len(images) == 0:
raise ValueError("No images in sample")
    # keep at most MAX_NUM_IMAGES images (any extras beyond the first MAX_NUM_IMAGES are dropped)
images_tensors = preprocess_image(images, clip_processor)
keep_ixs = range(min(len(images_tensors), MAX_NUM_IMAGES))
images_tensors = images_tensors[keep_ixs]
image_idxs = [image_idxs[ix] for ix in keep_ixs]
# pad to 5 images
if len(images_tensors) < MAX_NUM_IMAGES:
zero_padding = torch.zeros(
(MAX_NUM_IMAGES - len(images_tensors), 3, 224, 224), dtype=torch.float
)
images_tensors = torch.cat((images_tensors, zero_padding), dim=0)
# add in <image> and <eoc> tokens
# eoc after sentence = "sentence loss"
for ix in image_idxs:
sentences[ix] = f"<|endofchunk|><image>{sentences[ix]}"
text = " ".join(sentences)
text = text.replace("<|endofchunk|>", "", 1) # but remove first eoc
# whitespace cleanup
text = (
text.replace(" <|endofchunk|>", "<|endofchunk|>")
.replace("<image> ", "<image>")
.replace(" <image>", "<image>")
)
text = f"{text}<|endofchunk|>{tokenizer.eos_token}"
tokenizer.padding_side = "right"
text_tensor = tokenizer(
text, max_length=256, truncation=True, padding="max_length", return_tensors="pt"
)
# reject sequences with too few images (after truncation)
num_images = torch.count_nonzero(
text_tensor["input_ids"]
== tokenizer.additional_special_tokens_ids[
tokenizer.additional_special_tokens.index("<image>")
]
)
if num_images == 0:
raise ValueError("No images in sample")
elif (
num_images == 1 and random.random() <= 0.5
): # 50% chance of keeping single image samples
raise ValueError("Only one image in sample")
return (
images_tensors,
(text_tensor["input_ids"], text_tensor["attention_mask"]),
)
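# preprocess_interleaved therefore returns a fixed-size image tensor of shape
# [MAX_NUM_IMAGES, 3, 224, 224] (zero-padded when fewer images survive the
# filters) together with input_ids / attention_mask of length 256, with
# <image> and <|endofchunk|> markers interleaved into the text.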
def get_mmc4_dataset(args, image_processor, tokenizer, epoch=0, floor=False):
input_shards = args.mmc4_shards
assert input_shards is not None
resampled = getattr(args, "dataset_resampled", False)
num_samples, num_shards = get_dataset_size(input_shards)
num_samples = None
if not num_samples:
num_samples = args.train_num_samples_mmc4
if not num_samples:
raise RuntimeError(
"Currently, number of dataset samples must be specified for training dataset. "
"Please specify via `--train-num-samples` if no dataset length info present."
)
# create a shared epoch store to sync epoch to dataloader worker proc
shared_epoch = SharedEpoch(epoch=epoch)
if resampled:
pipeline = [
ResampledShards2(input_shards, deterministic=True, epoch=shared_epoch)
]
else:
pipeline = [wds.SimpleShardList(input_shards)]
preprocess_fn = functools.partial(
preprocess_interleaved,
clip_processor=image_processor,
tokenizer=tokenizer,
sim_threshold=args.mmc4_textsim_threshold,
)
# at this point we have an iterator over all the shards
if not resampled:
pipeline.extend(
[
detshuffle2(
bufsize=_SHARD_SHUFFLE_SIZE,
initial=_SHARD_SHUFFLE_INITIAL,
seed=args.seed,
epoch=shared_epoch,
),
wds.split_by_node,
wds.split_by_worker,
]
)
pipeline.extend(
[
# at this point, we have an iterator over the shards assigned to each worker at each node
# wds.tarfile_to_samples(handler=log_and_continue),
tarfile_to_samples_nothrow,
wds.shuffle(
bufsize=_SAMPLE_SHUFFLE_SIZE,
initial=_SAMPLE_SHUFFLE_INITIAL,
),
]
)
pipeline.extend(
[
wds.to_tuple("json", "tar", handler=log_and_continue),
wds.map(preprocess_fn, handler=log_and_continue),
wds.batched(args.batch_size_mmc4, partial=False),
]
)
dataset = wds.DataPipeline(*pipeline)
if not resampled:
assert (
num_shards >= args.workers * args.world_size
), "number of shards must be >= total workers"
# roll over and repeat a few samples to get same number of full batches on each node
round_fn = math.floor if floor else math.ceil
global_batch_size = args.batch_size_mmc4 * args.world_size
num_batches = round_fn(num_samples / global_batch_size)
num_workers = max(1, args.workers)
num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker
num_batches = num_worker_batches * num_workers
num_samples = num_batches * global_batch_size
# each worker is iterating over this
dataset = dataset.with_epoch(num_worker_batches)
dataloader = wds.WebLoader(
dataset,
batch_size=None,
shuffle=False,
num_workers=args.workers,
persistent_workers=True,
)
# add meta-data to dataloader instance for convenience
dataloader.num_batches = num_batches
dataloader.num_samples = num_samples
return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch)
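# Both get_mmc4_dataset and get_laion_dataset size their pipelines the same way
# (with_epoch over per-worker batch counts), so the two loaders built in
# train.py yield the same number of batches per epoch, given the
# sample/batch-size assert in train.py.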
def get_laion_dataset(args, image_processor, tokenizer, epoch=0, floor=False):
input_shards = args.laion_shards
assert input_shards is not None
resampled = getattr(args, "dataset_resampled", False)
num_samples, num_shards = get_dataset_size(input_shards)
num_samples = None
if not num_samples:
num_samples = args.train_num_samples_laion
if not num_samples:
raise RuntimeError(
"Currently, number of dataset samples must be specified for training dataset. "
"Please specify via `--train-num-samples` if no dataset length info present."
)
# create a shared epoch store to sync epoch to dataloader worker proc
shared_epoch = SharedEpoch(epoch=epoch)
if resampled:
pipeline = [
ResampledShards2(input_shards, deterministic=True, epoch=shared_epoch)
]
else:
pipeline = [wds.SimpleShardList(input_shards)]
# create two preprocess functions that take in the passed in image_processor and tokenizer
preprocess_image_fn = functools.partial(
preprocess_image, image_processor=image_processor
)
preprocess_text_fn = functools.partial(preprocess_text, tokenizer=tokenizer)
# at this point we have an iterator over all the shards
if not resampled:
pipeline.extend(
[
detshuffle2(
bufsize=_SHARD_SHUFFLE_SIZE,
initial=_SHARD_SHUFFLE_INITIAL,
seed=args.seed,
epoch=shared_epoch,
),
wds.split_by_node,
wds.split_by_worker,
]
)
pipeline.extend(
[
# at this point, we have an iterator over the shards assigned to each worker at each node
# wds.tarfile_to_samples(handler=log_and_continue),
tarfile_to_samples_nothrow,
wds.shuffle(
bufsize=_SAMPLE_SHUFFLE_SIZE,
initial=_SAMPLE_SHUFFLE_INITIAL,
),
]
)
pipeline.extend(
[
wds.select(filter_no_caption_or_no_image),
wds.decode("pilrgb", handler=log_and_continue),
wds.to_tuple("jpg;png;jpeg", "txt", handler=log_and_continue),
wds.batched(args.batch_size_laion, partial=False),
wds.map_tuple(
preprocess_image_fn, preprocess_text_fn, handler=log_and_continue
),
]
)
dataset = wds.DataPipeline(*pipeline)
if not resampled:
assert (
num_shards >= args.workers * args.world_size
), "number of shards must be >= total workers"
# roll over and repeat a few samples to get same number of full batches on each node
round_fn = math.floor if floor else math.ceil
global_batch_size = args.batch_size_laion * args.world_size
num_batches = round_fn(num_samples / global_batch_size)
num_workers = max(1, args.workers)
num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker
num_batches = num_worker_batches * num_workers
num_samples = num_batches * global_batch_size
# each worker is iterating over this
dataset = dataset.with_epoch(num_worker_batches)
dataloader = wds.WebLoader(
dataset,
batch_size=None,
shuffle=False,
num_workers=args.workers,
persistent_workers=True,
)
# add meta-data to dataloader instance for convenience
dataloader.num_batches = num_batches
dataloader.num_samples = num_samples
return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch)
def get_dataset_fn(dataset_type):
if dataset_type == "image_text":
return get_laion_dataset
elif dataset_type == "mmc4":
return get_mmc4_dataset
else:
raise ValueError(f"Unsupported dataset type: {dataset_type}")
def get_data(args, image_processor, tokenizer, dataset_type, epoch=0):
return get_dataset_fn(dataset_type)(
args, image_processor=image_processor, epoch=epoch, tokenizer=tokenizer
)
| flamingo-main | open_flamingo/train/data.py |
from typing import Dict, Sequence, Tuple
import re
import numpy as np
import torch
def postprocess_classification_generation(predictions) -> str:
return re.split("Prompt|Completion", predictions, 1)[0]
def compute_classification_accuracy(predictions: Sequence[Dict[str, str]]) -> float:
"""Compute the accuracy of a sequence of predictions."""
def _preprocess_fn(s):
"""Function to preprocess both targets and predictions."""
return s.lower()
is_correct = [
_preprocess_fn(x["prediction"]) == _preprocess_fn(x["class_label"])
for x in predictions
]
return np.mean(is_correct).item()
def compute_shifted_logits_and_labels(
logits: torch.Tensor, encodings, tokenizer, eoc_token_id
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Helper function to compute shifted logits and labels.
This allows for straightforward computation of the loss on shift_logits
    and shift_labels: after shifting, the nth element of shift_logits is the
    prediction for the nth element of shift_labels, which is the (n+1)th token
    of the original sequence (i.e. tokens < n are used to predict token n).
Elements in shift_labels that correspond to inputs are masked with values
of -100 (by default in hf, loss is only computed on token IDs >= 0).
Returns: tuple containing two elements:
        shift_logits: a float Tensor of shape [batch_size, seq_len - 1, vocab_size].
shift_labels: an integer Tensor of shape [batch_size, seq_len - 1]
"""
labels = encodings["input_ids"].clone()
# convert padding and EOC tokens to -100 so they are ignored in loss
labels[labels == tokenizer.pad_token_id] = -100
labels[labels == eoc_token_id] = -100
# Convert all tokens in prefix until separator to -100 so they are
# ignored in loss
for idx in range(len(labels)):
# Find the location of the last token of prefix *from right*,
# since the first non-padding token of the sequence will also be
# eos_token (because bos_token and eos_token are the same for
# the tokenizer).
end_of_prefix = -labels[idx].tolist()[::-1].index(tokenizer.eos_token_id) - 1
labels[idx, : end_of_prefix + 1] = -100
# Shift so that tokens < n predict n. The shifted tensors both have
# shape [batch_size, seq_len - 1].
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
return shift_logits, shift_labels
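# Worked example (a sketch, assuming pad_token_id=1, eos_token_id=2, and no
# <|endofchunk|> tokens in the sequence):
#   input_ids = [2, 5, 6, 2, 7, 8, 1, 1]   (eos, prefix, eos separator, target, pads)
#   labels    = [-100, -100, -100, -100, 7, 8, -100, -100]
#   shift_logits = logits[:, :-1, :]; shift_labels = labels[:, 1:]
# so only the target tokens 7 and 8 contribute to the loss.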
def compute_per_sample_probs(
encodings, tokenizer, logits: torch.Tensor, eoc_token_id
) -> torch.Tensor:
"""Helper function to compute per-sample probability of the input sequence.
Assumes <eos token> is used to separate inputs from targets in the
prompt text
"""
shift_logits, shift_labels = compute_shifted_logits_and_labels(
logits, encodings, tokenizer, eoc_token_id
)
# Tuple of tensors for unmasked label tokens. The first element of the
# tuple contains the batch indices; the second element contains the
# sequence indices.
unmasked_indices = torch.nonzero(shift_labels != -100, as_tuple=True)
# Tensor where the i^th element is the token_id corresponding to the i^th
# element of unmasked_indices
unmasked_token_ids = shift_labels[unmasked_indices]
# 3d tensor of [batch_idx, sequence_position, token_id] for unmasked tokens.
target_idxs = torch.column_stack([*unmasked_indices, unmasked_token_ids])
target_idxs = target_idxs.to(shift_logits.device)
# Sanity check that every element in batch has at least one unmasked
# target token
assert torch.all(
torch.bincount(target_idxs[:, 0]) != 0
), "At least one element in batch has no unmasked target tokens."
# Renormalize over tokens to make sure they are proper probabilities via
# softmax over the token dimension.
shift_probs = torch.nn.functional.softmax(shift_logits, 2)
# Compute the probability of the target sequence (as the product of the
# probability of the individual tokens in the sequence).
target_probs = torch.ones(len(shift_labels), device=shift_logits.device)
for i, j, k in target_idxs:
target_probs[i] *= shift_probs[i, j, k]
return target_probs
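# Note: the per-sample probability above is a product over target tokens and
# can underflow for long targets; compute_per_sample_loss below works in
# log-space, which is the numerically safer way to score candidates.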
def compute_per_sample_loss(encodings, tokenizer, logits, eoc_token_id) -> torch.Tensor:
"""Helper function to compute per-sample classification loss.
Assumes <eos token> is used to separate inputs from targets in the
prompt text
"""
shift_logits, shift_labels = compute_shifted_logits_and_labels(
logits, encodings, tokenizer, eoc_token_id
)
device = shift_logits.device
# Loss is computed token-wise, on Tensors of shape
# [batch_size * (seq_len - 1), vocab_size]
# and returns a loss tensor of shape
# [batch_size * (seq_len - 1)]. Most of the tokens will be masked
# in this computation.
loss = torch.nn.functional.cross_entropy(
shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1).to(device),
reduction="none",
)
# Reshape to [batch_size, seq_len - 1]
loss = loss.view(shift_logits.size(0), shift_logits.size(1)).cpu()
# loss_mask is 1 for tokens we want included in the loss, and 0 for tokens
# that should be ignored in the loss.
loss_mask = (shift_labels != -100).int().cpu()
loss *= loss_mask
# Compute per-element loss : sum loss over all (unmasked) tokens and
# divide by number of variable tokens to obtain tensor of
# shape [batch_size,]
loss = loss.sum(dim=1) / (shift_labels != -100).sum(dim=1).float()
return loss
| flamingo-main | open_flamingo/eval/classification.py |
# These are manual mappings that are not caught by our stemming rules or that
# would be handled incorrectly by our automatic stemming rule. In detail,
# the keys of the _MANUAL_MATCHES dict contain the original word and the values
# contain the transformation of the word expected by the OKVQA stemming rule.
# These manual rules were found by checking the `raw_answers` and the `answers`
# fields of the released OKVQA dataset and checking all entries that were not
# properly mapped by our automatic rules. In particular, some of the mappings
# are identity mappings, e.g. christmas -> christmas, which would otherwise be
# incorrectly singularized by inflection.singularize.
import re
import nltk
from nltk.corpus.reader import VERB
import inflection
_MANUAL_MATCHES = {
"police": "police",
"las": "las",
"vegas": "vegas",
"yes": "yes",
"jeans": "jean",
"hell's": "hell",
"domino's": "domino",
"morning": "morn",
"clothes": "cloth",
"are": "are",
"riding": "ride",
"leaves": "leaf",
"dangerous": "danger",
"clothing": "cloth",
"texting": "text",
"kiting": "kite",
"firefighters": "firefight",
"ties": "tie",
"married": "married",
"teething": "teeth",
"gloves": "glove",
"tennis": "tennis",
"dining": "dine",
"directions": "direct",
"waves": "wave",
"christmas": "christmas",
"drives": "drive",
"pudding": "pud",
"coding": "code",
"plating": "plate",
"quantas": "quanta",
"hornes": "horn",
"graves": "grave",
"mating": "mate",
"paned": "pane",
"alertness": "alert",
"sunbathing": "sunbath",
"tenning": "ten",
"wetness": "wet",
"urinating": "urine",
"sickness": "sick",
"braves": "brave",
"firefighting": "firefight",
"lenses": "lens",
"reflections": "reflect",
"backpackers": "backpack",
"eatting": "eat",
"designers": "design",
"curiousity": "curious",
"playfulness": "play",
"blindness": "blind",
"hawke": "hawk",
"tomatoe": "tomato",
"rodeoing": "rodeo",
"brightness": "bright",
"circuses": "circus",
"skateboarders": "skateboard",
"staring": "stare",
"electronics": "electron",
"electicity": "elect",
"mountainous": "mountain",
"socializing": "social",
"hamburgers": "hamburg",
"caves": "cave",
"transitions": "transit",
"wading": "wade",
"creame": "cream",
"toileting": "toilet",
"sautee": "saute",
"buildings": "build",
"belongings": "belong",
"stockings": "stock",
"walle": "wall",
"cumulis": "cumuli",
"travelers": "travel",
"conducter": "conduct",
"browsing": "brows",
"pooping": "poop",
"haircutting": "haircut",
"toppings": "top",
"hearding": "heard",
"sunblocker": "sunblock",
"bases": "base",
"markings": "mark",
"mopeds": "mope",
"kindergartener": "kindergarten",
"pies": "pie",
"scrapbooking": "scrapbook",
"couponing": "coupon",
"meetings": "meet",
"elevators": "elev",
"lowes": "low",
"men's": "men",
"childrens": "children",
"shelves": "shelve",
"paintings": "paint",
"raines": "rain",
"paring": "pare",
"expressions": "express",
"routes": "rout",
"pease": "peas",
"vastness": "vast",
"awning": "awn",
"boy's": "boy",
"drunkenness": "drunken",
"teasing": "teas",
"conferences": "confer",
"ripeness": "ripe",
"suspenders": "suspend",
"earnings": "earn",
"reporters": "report",
"kid's": "kid",
"containers": "contain",
"corgie": "corgi",
"porche": "porch",
"microwaves": "microwave",
"batter's": "batter",
"sadness": "sad",
"apartments": "apart",
"oxygenize": "oxygen",
"striping": "stripe",
"purring": "pure",
"professionals": "profession",
"piping": "pipe",
"farmer's": "farmer",
"potatoe": "potato",
"emirates": "emir",
"womens": "women",
"veteran's": "veteran",
"wilderness": "wilder",
"propellers": "propel",
"alpes": "alp",
"charioteering": "chariot",
"swining": "swine",
"illness": "ill",
"crepte": "crept",
"adhesives": "adhesive",
"regent's": "regent",
"decorations": "decor",
"rabbies": "rabbi",
"overseas": "oversea",
"travellers": "travel",
"casings": "case",
"smugness": "smug",
"doves": "dove",
"nationals": "nation",
"mustange": "mustang",
"ringe": "ring",
"gondoliere": "gondolier",
"vacationing": "vacate",
"reminders": "remind",
"baldness": "bald",
"settings": "set",
"glaced": "glace",
"coniferous": "conifer",
"revelations": "revel",
"personals": "person",
"daughter's": "daughter",
"badness": "bad",
"projections": "project",
"polarizing": "polar",
"vandalizers": "vandal",
"minerals": "miner",
"protesters": "protest",
"controllers": "control",
"weddings": "wed",
"sometimes": "sometime",
"earing": "ear",
}
class OKVQAStemmer:
"""Stemmer to match OKVQA v1.1 procedure."""
def __init__(self):
self._wordnet_lemmatizer = nltk.stem.WordNetLemmatizer()
def stem(self, input_string):
"""Apply stemming."""
word_and_pos = nltk.pos_tag(nltk.tokenize.word_tokenize(input_string))
stemmed_words = []
for w, p in word_and_pos:
if w in _MANUAL_MATCHES:
w = _MANUAL_MATCHES[w]
elif w.endswith("ing"):
w = self._wordnet_lemmatizer.lemmatize(w, VERB)
elif p.startswith("NNS") or p.startswith("NNPS"):
w = inflection.singularize(w)
stemmed_words.append(w)
return " ".join(stemmed_words)
stemmer = OKVQAStemmer()
def postprocess_ok_vqa_generation(predictions) -> str:
prediction = re.split("Question|Answer", predictions, 1)[0]
prediction_stem = stemmer.stem(prediction)
return prediction_stem
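# Example (a sketch; requires the NLTK punkt, averaged_perceptron_tagger and
# wordnet data to be available): stemmer.stem("riding bikes") -> "ride bike",
# via the manual match for "riding" and inflection.singularize for "bikes".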
| flamingo-main | open_flamingo/eval/ok_vqa_utils.py |
# classnames via https://github.com/mlfoundations/wise-ft/blob/master/src/datasets/imagenet_classnames.py#L1
openai_imagenet_classnames = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead shark",
"electric ray",
"stingray",
"rooster",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"American robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"American dipper",
"kite (bird of prey)",
"bald eagle",
"vulture",
"great grey owl",
"fire salamander",
"smooth newt",
"newt",
"spotted salamander",
"axolotl",
"American bullfrog",
"tree frog",
"tailed frog",
"loggerhead sea turtle",
"leatherback sea turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"green iguana",
"Carolina anole",
"desert grassland whiptail lizard",
"agama",
"frilled-necked lizard",
"alligator lizard",
"Gila monster",
"European green lizard",
"chameleon",
"Komodo dragon",
"Nile crocodile",
"American alligator",
"triceratops",
"worm snake",
"ring-necked snake",
"eastern hog-nosed snake",
"smooth green snake",
"kingsnake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"African rock python",
"Indian cobra",
"green mamba",
"sea snake",
"Saharan horned viper",
"eastern diamondback rattlesnake",
"sidewinder rattlesnake",
"trilobite",
"harvestman",
"scorpion",
"yellow garden spider",
"barn spider",
"European garden spider",
"southern black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie grouse",
"peafowl",
"quail",
"partridge",
"african grey parrot",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"duck",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"red king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"great egret",
"bittern bird",
"crane bird",
"limpkin",
"common gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"dunlin",
"common redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese Chin",
"Maltese",
"Pekingese",
"Shih Tzu",
"King Charles Spaniel",
"Papillon",
"toy terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick Coonhound",
"Black and Tan Coonhound",
"Treeing Walker Coonhound",
"English foxhound",
"Redbone Coonhound",
"borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizan Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bull Terrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wire Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale Terrier",
"Cairn Terrier",
"Australian Terrier",
"Dandie Dinmont Terrier",
"Boston Terrier",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scottish Terrier",
"Tibetan Terrier",
"Australian Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa Apso",
"Flat-Coated Retriever",
"Curly-coated Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Shorthaired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany dog",
"Clumber Spaniel",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniel",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael dog",
"Malinois",
"Briard",
"Australian Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"collie",
"Border Collie",
"Bouvier des Flandres dog",
"Rottweiler",
"German Shepherd Dog",
"Dobermann",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller Sennenhund",
"Entlebucher Sennenhund",
"Boxer",
"Bullmastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"St. Bernard",
"husky",
"Alaskan Malamute",
"Siberian Husky",
"Dalmatian",
"Affenpinscher",
"Basenji",
"pug",
"Leonberger",
"Newfoundland dog",
"Great Pyrenees dog",
"Samoyed",
"Pomeranian",
"Chow Chow",
"Keeshond",
"brussels griffon",
"Pembroke Welsh Corgi",
"Cardigan Welsh Corgi",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican hairless dog (xoloitzcuintli)",
"grey wolf",
"Alaskan tundra wolf",
"red wolf or maned wolf",
"coyote",
"dingo",
"dhole",
"African wild dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian Mau",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"polar bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"longhorn beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket insect",
"stick insect",
"cockroach",
"praying mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"red admiral butterfly",
"ringlet butterfly",
"monarch butterfly",
"small white butterfly",
"sulphur butterfly",
"gossamer-winged butterfly",
"starfish",
"sea urchin",
"sea cucumber",
"cottontail rabbit",
"hare",
"Angora rabbit",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"common sorrel horse",
"zebra",
"pig",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram (adult male sheep)",
"bighorn sheep",
"Alpine ibex",
"hartebeest",
"impala (antelope)",
"gazelle",
"arabian camel",
"llama",
"weasel",
"mink",
"European polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas monkey",
"baboon",
"macaque",
"langur",
"black-and-white colobus",
"proboscis monkey",
"marmoset",
"white-headed capuchin",
"howler monkey",
"titi monkey",
"Geoffroy's spider monkey",
"common squirrel monkey",
"ring-tailed lemur",
"indri",
"Asian elephant",
"African bush elephant",
"red panda",
"giant panda",
"snoek fish",
"eel",
"silver salmon",
"rock beauty fish",
"clownfish",
"sturgeon",
"gar fish",
"lionfish",
"pufferfish",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibious vehicle",
"analog clock",
"apiary",
"apron",
"trash can",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint pen",
"Band-Aid",
"banjo",
"baluster / handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"swimming cap",
"bath towel",
"bathtub",
"station wagon",
"lighthouse",
"beaker",
"military hat (bearskin or shako)",
"beer bottle",
"beer glass",
"bell tower",
"baby bib",
"tandem bicycle",
"bikini",
"ring binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsleigh",
"bolo tie",
"poke bonnet",
"bookcase",
"bookstore",
"bottle cap",
"hunting bow",
"bow tie",
"brass memorial plaque",
"bra",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"high-speed train",
"butcher shop",
"taxicab",
"cauldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"tool kit",
"cardboard box / carton",
"car wheel",
"automated teller machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"mobile phone",
"chain",
"chain-link fence",
"chain mail",
"chainsaw",
"storage chest",
"chiffonier",
"bell or wind chime",
"china cabinet",
"Christmas stocking",
"church",
"movie theater",
"cleaver",
"cliff dwelling",
"cloak",
"clogs",
"cocktail shaker",
"coffee mug",
"coffeemaker",
"spiral or coil",
"combination lock",
"computer keyboard",
"candy store",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"construction crane",
"crash helmet",
"crate",
"infant bed",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"rotary dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishcloth",
"dishwasher",
"disc brake",
"dock",
"dog sled",
"dome",
"doormat",
"drilling rig",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso machine",
"face powder",
"feather boa",
"filing cabinet",
"fireboat",
"fire truck",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster bed",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gas mask or respirator",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golf cart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"radiator grille",
"grocery store",
"guillotine",
"hair clip",
"hair spray",
"half-track",
"hammer",
"hamper",
"hair dryer",
"hand-held computer",
"handkerchief",
"hard disk drive",
"harmonica",
"harp",
"combine harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoop skirt",
"gymnastic horizontal bar",
"horse-drawn vehicle",
"hourglass",
"iPod",
"clothes iron",
"carved pumpkin",
"jeans",
"jeep",
"T-shirt",
"jigsaw puzzle",
"rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop computer",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"ocean liner",
"lipstick",
"slip-on shoe",
"lotion",
"music speaker",
"loupe magnifying glass",
"sawmill",
"magnetic compass",
"messenger bag",
"mailbox",
"tights",
"one-piece bathing suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine cabinet",
"megalith",
"microphone",
"microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"ford model t",
"modem",
"monastery",
"monitor",
"moped",
"mortar and pestle",
"graduation cap",
"mosque",
"mosquito net",
"vespa",
"mountain bike",
"tent",
"computer mouse",
"mousetrap",
"moving van",
"muzzle",
"metal nail",
"neck brace",
"necklace",
"baby pacifier",
"notebook computer",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"pipe organ",
"oscilloscope",
"overskirt",
"bullock cart",
"oxygen mask",
"product packet / packaging",
"paddle",
"paddle wheel",
"padlock",
"paintbrush",
"pajamas",
"palace",
"pan flute",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"railroad car",
"patio",
"payphone",
"pedestal",
"pencil case",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"plectrum",
"Pickelhaube",
"picket fence",
"pickup truck",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate ship",
"drink pitcher",
"block plane",
"planetarium",
"plastic bag",
"plate rack",
"farm plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"soda bottle",
"plant pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"missile",
"projector",
"hockey puck",
"punching bag",
"purse",
"quill",
"quilt",
"race car",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"fishing casting reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"eraser",
"rugby ball",
"ruler measuring stick",
"sneaker",
"safe",
"safety pin",
"salt shaker",
"sandal",
"sarong",
"saxophone",
"scabbard",
"weighing scale",
"school bus",
"schooner",
"scoreboard",
"CRT monitor",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe store",
"shoji screen / room divider",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"balaclava ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot machine",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar thermal collector",
"sombrero",
"soup bowl",
"keyboard space bar",
"space heater",
"space shuttle",
"spatula",
"motorboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"through arch bridge",
"steel drum",
"stethoscope",
"scarf",
"stone wall",
"stopwatch",
"stove",
"strainer",
"tram",
"stretcher",
"couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglasses",
"sunglasses",
"sunscreen",
"suspension bridge",
"mop",
"sweatshirt",
"swim trunks / shorts",
"swing",
"electrical switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy bear",
"television",
"tennis ball",
"thatched roof",
"front curtain",
"thimble",
"threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toy store",
"tractor",
"semi-trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"hot tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright piano",
"vacuum cleaner",
"vase",
"vaulted or arched ceiling",
"velvet fabric",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"military aircraft",
"sink",
"washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"hair wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"airplane wing",
"wok",
"wooden spoon",
"wool",
"split-rail fence",
"shipwreck",
"sailboat",
"yurt",
"website",
"comic book",
"crossword",
"traffic or street sign",
"traffic light",
"dust jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"popsicle",
"baguette",
"bagel",
"pretzel",
"cheeseburger",
"hot dog",
"mashed potatoes",
"cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith apple",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"cherimoya (custard apple)",
"pomegranate",
"hay",
"carbonara",
"chocolate syrup",
"dough",
"meatloaf",
"pizza",
"pot pie",
"burrito",
"red wine",
"espresso",
"tea cup",
"eggnog",
"mountain",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeshore",
"promontory",
"sandbar",
"beach",
"valley",
"volcano",
"baseball player",
"bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"rose hip",
"horse chestnut seed",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn mushroom",
"earth star fungus",
"hen of the woods mushroom",
"bolete",
"corn cob",
"toilet paper",
]
# Maps numeric class ids to labels
IMAGENET_1K_CLASS_ID_TO_LABEL = dict(
zip(range(len(openai_imagenet_classnames)), openai_imagenet_classnames)
)
| flamingo-main | open_flamingo/eval/imagenet_utils.py |
| flamingo-main | open_flamingo/eval/__init__.py |
import argparse
import json
from math import ceil
import os
import random
import uuid
from collections import defaultdict
from typing import Callable
import more_itertools
import numpy as np
import torch
from coco_metric import compute_cider, postprocess_captioning_generation
from eval_datasets import COCOFlickrDataset, VQADataset, ImageNetDataset
from tqdm import tqdm
from open_flamingo.eval.ok_vqa_utils import postprocess_ok_vqa_generation
from vqa_metric import compute_vqa_accuracy, postprocess_vqa_generation
from open_flamingo.eval.classification import (
compute_per_sample_probs,
compute_per_sample_loss,
)
from open_flamingo.eval.imagenet_utils import (
openai_imagenet_classnames,
IMAGENET_1K_CLASS_ID_TO_LABEL,
)
from open_flamingo.src.factory import create_model_and_transforms
parser = argparse.ArgumentParser()
parser.add_argument("--lm_path", type=str, default="facebook/opt-1.3b")
parser.add_argument("--lm_tokenizer_path", type=str, default="facebook/opt-30b")
parser.add_argument("--vision_encoder_path", default="ViT-L-14", type=str)
parser.add_argument("--vision_encoder_pretrained", default="openai", type=str)
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument(
"--cross_attn_every_n_layers",
type=int,
default=1,
help="how often to add a cross-attention layer after each transformer layer",
)
parser.add_argument(
"--results_file", type=str, default=None, help="JSON file to save results"
)
# Trial arguments
parser.add_argument("--shots", nargs="+", default=[0, 4, 8, 16, 32], type=int)
parser.add_argument(
"--num_trials",
type=int,
default=1,
help="Number of trials to run for each shot using different demonstrations",
)
parser.add_argument(
"--trial_seeds",
nargs="+",
    type=int,
    default=[0],
help="Seeds to use for each trial for picking demonstrations and eval sets",
)
parser.add_argument(
"--num_samples", type=int, default=5000, help="Number of samples to evaluate on"
)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--device", type=int, default=0)
# Per-dataset evaluation flags
parser.add_argument(
"--eval_coco",
action="store_true",
default=False,
help="Whether to evaluate on COCO.",
)
parser.add_argument(
"--eval_vqav2",
action="store_true",
default=False,
help="Whether to evaluate on VQAV2.",
)
parser.add_argument(
"--eval_ok_vqa",
action="store_true",
default=False,
help="Whether to evaluate on OK-VQA.",
)
parser.add_argument(
"--eval_imagenet",
action="store_true",
default=False,
help="Whether to evaluate on ImageNet.",
)
parser.add_argument(
"--eval_flickr30",
action="store_true",
default=False,
help="Whether to evaluate on Flickr30.",
)
# Dataset arguments
## Flickr30 Dataset
parser.add_argument(
"--flickr_image_dir_path",
type=str,
help="Path to the flickr30/flickr30k_images directory.",
default=None,
)
parser.add_argument(
"--flickr_annotations_json_path",
type=str,
help="Path to the dataset_flickr30k_coco_style.json file.",
default=None,
)
## COCO Dataset
parser.add_argument(
"--coco_image_dir_path",
type=str,
help="Path to the flickr30/flickr30k_images directory.",
default=None,
)
parser.add_argument(
"--coco_annotations_json_path",
type=str,
default=None,
)
## VQAV2 Dataset
parser.add_argument(
"--vqav2_image_dir_path",
type=str,
default=None,
)
parser.add_argument(
"--vqav2_questions_json_path",
type=str,
default=None,
)
parser.add_argument(
"--vqav2_annotations_json_path",
type=str,
default=None,
)
## OK-VQA Dataset
parser.add_argument(
"--ok_vqa_image_dir_path",
type=str,
help="Path to the vqav2/train2014 directory.",
default=None,
)
parser.add_argument(
"--ok_vqa_questions_json_path",
type=str,
help="Path to the v2_OpenEnded_mscoco_train2014_questions.json file.",
default=None,
)
parser.add_argument(
"--ok_vqa_annotations_json_path",
type=str,
help="Path to the v2_mscoco_train2014_annotations.json file.",
default=None,
)
## Imagenet dataset
parser.add_argument("--imagenet_root", type=str, default="/tmp")
def main():
args = parser.parse_args()
# load model
flamingo, image_processor, tokenizer = create_model_and_transforms(
args.vision_encoder_path,
args.vision_encoder_pretrained,
args.lm_path,
args.lm_tokenizer_path,
cross_attn_every_n_layers=args.cross_attn_every_n_layers,
)
checkpoint = torch.load(args.checkpoint_path, map_location="cpu")
flamingo.load_state_dict(checkpoint, strict=False)
flamingo.to(args.device if args.device >= 0 else "cpu")
results = defaultdict(list)
if args.eval_flickr30:
print("Evaluating on Flickr30...")
for shot in args.shots:
scores = []
for seed, trial in zip(args.trial_seeds, range(args.num_trials)):
cider_score = evaluate_coco_flickr(
model=flamingo,
tokenizer=tokenizer,
image_processor=image_processor,
batch_size=args.batch_size,
image_dir_path=args.flickr_image_dir_path,
annotations_json_path=args.flickr_annotations_json_path,
num_samples=args.num_samples,
num_shots=shot,
device=args.device,
seed=seed,
is_flickr=True,
)
print(f"Shots {shot} Trial {trial} CIDEr score: {cider_score}")
scores.append(cider_score)
print(f"Shots {shot} Mean CIDEr score: {np.mean(scores)}")
results["flickr30"].append(
{"shots": shot, "trials": scores, "mean": np.mean(scores)}
)
if args.eval_coco:
print("Evaluating on COCO...")
for shot in args.shots:
scores = []
for seed, trial in zip(args.trial_seeds, range(args.num_trials)):
cider_score = evaluate_coco_flickr(
model=flamingo,
tokenizer=tokenizer,
image_processor=image_processor,
batch_size=args.batch_size,
image_dir_path=args.coco_image_dir_path,
annotations_json_path=args.coco_annotations_json_path,
num_samples=args.num_samples,
num_shots=shot,
device=args.device,
seed=seed,
)
print(f"Shots {shot} Trial {trial} CIDEr score: {cider_score}")
scores.append(cider_score)
print(f"Shots {shot} Mean CIDEr score: {np.mean(scores)}")
results["coco"].append(
{"shots": shot, "trials": scores, "mean": np.mean(scores)}
)
if args.eval_ok_vqa:
print("Evaluating on OK-VQA...")
for shot in args.shots:
scores = []
for seed, trial in zip(args.trial_seeds, range(args.num_trials)):
ok_vqa_score = evaluate_vqa(
model=flamingo,
tokenizer=tokenizer,
image_processor=image_processor,
batch_size=args.batch_size,
num_samples=args.num_samples,
num_shots=shot,
device=args.device,
seed=seed,
image_dir_path=args.ok_vqa_image_dir_path,
questions_json_path=args.ok_vqa_questions_json_path,
annotations_json_path=args.ok_vqa_annotations_json_path,
vqa_dataset="ok_vqa",
)
print(f"Shots {shot} Trial {trial} OK-VQA score: {ok_vqa_score}")
scores.append(ok_vqa_score)
print(f"Shots {shot} Mean OK-VQA score: {np.mean(scores)}")
results["ok_vqa"].append(
{"shots": shot, "trials": scores, "mean": np.mean(scores)}
)
if args.eval_vqav2:
print("Evaluating on VQAv2...")
for shot in args.shots:
scores = []
for seed, trial in zip(args.trial_seeds, range(args.num_trials)):
vqa_score = evaluate_vqa(
model=flamingo,
tokenizer=tokenizer,
image_processor=image_processor,
batch_size=args.batch_size,
num_samples=args.num_samples,
num_shots=shot,
device=args.device,
seed=seed,
image_dir_path=args.vqav2_image_dir_path,
questions_json_path=args.vqav2_questions_json_path,
annotations_json_path=args.vqav2_annotations_json_path,
vqa_dataset="vqa",
)
print(f"Shots {shot} Trial {trial} VQA score: {vqa_score}")
scores.append(vqa_score)
print(f"Shots {shot} Mean VQA score: {np.mean(scores)}")
results["vqav2"].append(
{"shots": shot, "trials": scores, "mean": np.mean(scores)}
)
if args.eval_imagenet:
print("Evaluating on ImageNet...")
for shot in args.shots:
scores = []
for seed, trial in zip(args.trial_seeds, range(args.num_trials)):
imagenet_score = evaluate_imagenet(
model=flamingo,
tokenizer=tokenizer,
image_processor=image_processor,
batch_size=args.batch_size,
num_samples=args.num_samples,
num_shots=shot,
device=args.device,
seed=seed,
imagenet_root=args.imagenet_root,
)
print(
f"Shots {shot} Trial {trial} " f"ImageNet score: {imagenet_score}"
)
scores.append(imagenet_score)
print(f"Shots {shot} Mean ImageNet score: {np.mean(scores)}")
results["imagenet"].append(
{"shots": shot, "trials": scores, "mean": np.mean(scores)}
)
if args.results_file is not None:
with open(args.results_file, "w") as f:
json.dump(results, f)
def get_random_indices(num_samples, query_set_size, full_dataset, seed):
if num_samples + query_set_size > len(full_dataset):
raise ValueError(
f"num_samples + num_shots must be less than {len(full_dataset)}"
)
# get a random subset of the dataset
np.random.seed(seed)
random_indices = np.random.choice(
len(full_dataset), num_samples + query_set_size, replace=False
)
return random_indices
def prepare_eval_samples_and_dataset(full_dataset, random_indices, query_set_size):
# get in context samples
in_context_samples = [full_dataset[i] for i in random_indices[:query_set_size]]
eval_dataset = torch.utils.data.Subset(
full_dataset, random_indices[query_set_size:]
)
return in_context_samples, eval_dataset
def get_context_images(image_processor, in_context_samples, num_shots):
if num_shots > 0:
context_images = [
image_processor(s["image"]).unsqueeze(0) for s in in_context_samples
]
context_images = torch.cat(context_images, dim=0)
context_images = context_images.unsqueeze(1).unsqueeze(0)
else:
context_images = None
return context_images
def get_context_text(
get_prompt: Callable[[dict], str],
in_context_samples,
effective_num_shots,
num_shots,
) -> str:
context_text = (
"".join([get_prompt(s) for s in in_context_samples])
if effective_num_shots > 0
else ""
)
if num_shots == 0:
context_text = context_text.replace("<image>", "")
return context_text
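# Illustrative sketch (added comment, not from the original source): in the zero-shot
# setting the callers pass num_shots=0 but effective_num_shots=2, so two text-only demos
# are kept while every "<image>" tag is stripped. With a hypothetical caption-style
# prompt builder, the returned context might look like
#   "Output:A dog on a couch.<|endofchunk|>Output:Two birds on a wire.<|endofchunk|>"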
def prepare_batch_images(batch, image_processor, context_images, num_shots):
batch_images = None
for b, sample_imgs in zip(batch, context_images):
b_image = image_processor(b["image"]).unsqueeze(0).unsqueeze(1).unsqueeze(0)
b_image = torch.cat([sample_imgs, b_image], dim=1) if num_shots > 0 else b_image
if batch_images is None:
batch_images = b_image
else:
batch_images = torch.cat([batch_images, b_image], dim=0)
return batch_images
def sample_batch_demos_from_query_set(query_set, num_samples, batch_size):
return [random.sample(query_set, num_samples) for _ in range(batch_size)]
def get_outputs(
model,
batch_images,
device,
attention_mask,
max_generation_length,
num_beams,
length_penalty,
input_ids,
):
with torch.inference_mode():
outputs = model.generate(
batch_images.to(device if device >= 0 else "cpu"),
input_ids.to(device if device >= 0 else "cpu"),
attention_mask=attention_mask.to(device if device >= 0 else "cpu"),
max_new_tokens=max_generation_length,
num_beams=num_beams,
length_penalty=length_penalty,
)
outputs = outputs[:, len(input_ids[0]) :]
return outputs
def evaluate_coco_flickr(
model,
tokenizer,
image_processor,
batch_size,
image_dir_path,
annotations_json_path,
seed=42,
max_generation_length=20,
num_beams=3,
length_penalty=-2.0,
num_samples=5000,
query_set_size=2048,
num_shots=8,
device=-1,
is_flickr=False,
):
"""Evaluate a model on COCO dataset.
Args:
model (nn.Module): model to evaluate
tokenizer (transformers.PreTrainedTokenizer): tokenizer for the model
image_processor : image processor for the model
batch_size (int): batch size
image_dir_path (str, optional): path to the directory containing the images.
annotations_json_path (str, optional): path to the json file containing the annotations.
seed (int, optional): seed for random number generator. Defaults to 42.
        max_generation_length (int, optional): maximum length of the generated caption. Defaults to 20.
num_beams (int, optional): number of beams to use for beam search. Defaults to 3.
length_penalty (float, optional): length penalty for beam search. Defaults to -2.0.
num_samples (int, optional): number of samples to evaluate on. Defaults to 5000.
query_set_size (int, optional): number of samples to use for query set. Defaults to 2048.
num_shots (int, optional): number of in-context samples to use. Defaults to 8.
device (int, optional): device to use. Defaults to -1.
        is_flickr (bool): whether the data is Flickr30k rather than COCO. Defaults to False (COCO).
Returns:
float: CIDEr score
"""
full_dataset = COCOFlickrDataset(
image_dir_path=image_dir_path,
annotations_path=annotations_json_path,
is_flickr=is_flickr,
)
effective_num_shots = num_shots if num_shots > 0 else 2
random_indices = get_random_indices(num_samples, query_set_size, full_dataset, seed)
in_context_samples, eval_dataset = prepare_eval_samples_and_dataset(
full_dataset=full_dataset,
random_indices=random_indices,
query_set_size=query_set_size,
)
model.eval()
def get_prompt(sample):
return f"<image>Output:{sample['caption'].strip()}<|endofchunk|>"
predictions = defaultdict()
desc = "Running inference Flickr30" if is_flickr else "Running inference COCO"
for batch in more_itertools.chunked(tqdm(eval_dataset, desc=desc), batch_size):
batch_demo_samples = sample_batch_demos_from_query_set(
in_context_samples, effective_num_shots, len(batch)
)
context_images = [
get_context_images(
image_processor=image_processor,
in_context_samples=batch_demo_samples[i],
num_shots=num_shots,
)
for i in range(len(batch))
]
context_text = [
get_context_text(
get_prompt,
in_context_samples=batch_demo_samples[i],
effective_num_shots=effective_num_shots,
num_shots=num_shots,
)
for i in range(len(batch))
]
batch_images = prepare_batch_images(
batch=batch,
image_processor=image_processor,
context_images=context_images,
num_shots=num_shots,
)
batch_text = [f"{context_text[i]}<image>Output:" for i in range(len(batch))]
tokenizer.padding_side = "left"
encodings = tokenizer(
batch_text,
padding="longest",
truncation=True,
return_tensors="pt",
max_length=2000,
)
input_ids = encodings["input_ids"]
attention_mask = encodings["attention_mask"]
outputs = get_outputs(
model=model,
batch_images=batch_images,
device=device,
attention_mask=attention_mask,
max_generation_length=max_generation_length,
num_beams=num_beams,
length_penalty=length_penalty,
input_ids=input_ids,
)
new_predictions = [
postprocess_captioning_generation(out).replace('"', "")
for out in tokenizer.batch_decode(outputs, skip_special_tokens=True)
]
for i, sample in enumerate(batch):
predictions[sample["image_id"]] = {
"caption": new_predictions[i],
}
# save the predictions to a temporary file
random_uuid = str(uuid.uuid4())
results_path = (
f"flickrresults_{random_uuid}.json"
if is_flickr
else f"cocoresults_{random_uuid}.json"
)
with open(results_path, "w") as f:
f.write(
json.dumps(
[
{"image_id": k, "caption": predictions[k]["caption"]}
for k in predictions
],
indent=4,
)
)
metrics = compute_cider(
result_path=results_path,
annotations_path=annotations_json_path,
)
# delete the temporary file
os.remove(results_path)
return metrics["CIDEr"] * 100.0
def evaluate_vqa(
model,
tokenizer,
image_processor,
batch_size,
image_dir_path,
questions_json_path,
annotations_json_path,
seed=42,
max_generation_length=5,
num_beams=3,
length_penalty=-2.0,
num_samples=5000,
query_set_size=2048,
num_shots=8,
device=-1,
vqa_dataset="vqa",
):
"""
    Evaluate a model on VQA datasets. Currently supports VQA v2.0 and OK-VQA.
Args:
model (nn.Module): model to evaluate
tokenizer (transformers.PreTrainedTokenizer): tokenizer for the model
image_processor : image processor for the model
batch_size (int): batch size
image_dir_path (str): path to image directory
questions_json_path (str): path to questions json file
annotations_json_path (str): path to annotations json file
seed (int, optional): random seed. Defaults to 42.
max_generation_length (int, optional): max generation length. Defaults to 5.
num_beams (int, optional): number of beams to use for beam search. Defaults to 3.
length_penalty (float, optional): length penalty for beam search. Defaults to -2.0.
num_samples (int, optional): number of samples to evaluate on. Defaults to 5000 samples.
query_set_size (int, optional): size of the query set. Defaults to 2048.
num_shots (int, optional): number of shots to use. Defaults to 8.
device (int, optional): device to use. Defaults to -1 (cpu).
        vqa_dataset (str): type of VQA dataset; currently supports "vqa" and "ok_vqa". Defaults to "vqa".
Returns:
float: accuracy score
"""
full_dataset = VQADataset(
image_dir_path=image_dir_path,
question_path=questions_json_path,
annotations_path=annotations_json_path,
vqa_dataset=vqa_dataset,
)
effective_num_shots = num_shots if num_shots > 0 else 2
if num_samples + effective_num_shots > len(full_dataset):
raise ValueError(
f"num_samples + num_shots must be less than or equal to {len(full_dataset)}"
)
random_indices = get_random_indices(num_samples, query_set_size, full_dataset, seed)
def get_prompt(sample, train=True):
return f"<image>Question:{sample['question'].strip()} Short Answer:{sample['answers'][0].strip() if train else ''}{'<|endofchunk|>' if train else ''}"
in_context_samples, eval_dataset = prepare_eval_samples_and_dataset(
full_dataset=full_dataset,
random_indices=random_indices,
query_set_size=query_set_size,
)
model.eval()
predictions = []
for batch in more_itertools.chunked(
tqdm(eval_dataset, desc="Running inference"), batch_size
):
batch_demo_samples = sample_batch_demos_from_query_set(
in_context_samples, effective_num_shots, len(batch)
)
context_images = [
get_context_images(
image_processor=image_processor,
in_context_samples=batch_demo_samples[i],
num_shots=num_shots,
)
for i in range(len(batch))
]
context_text = [
get_context_text(
get_prompt,
in_context_samples=batch_demo_samples[i],
effective_num_shots=effective_num_shots,
num_shots=num_shots,
)
for i in range(len(batch))
]
batch_images = prepare_batch_images(
batch=batch,
image_processor=image_processor,
context_images=context_images,
num_shots=num_shots,
)
batch_text = [
context_text[i] + get_prompt(s, train=False) for i, s in enumerate(batch)
]
tokenizer.padding_side = "left"
encodings = tokenizer(
batch_text,
return_tensors="pt",
padding="longest",
truncation=True,
max_length=2000,
)
input_ids = encodings["input_ids"].to(device if device >= 0 else "cpu")
attention_mask = encodings["attention_mask"].to(
device if device >= 0 else "cpu"
)
outputs = get_outputs(
model=model,
batch_images=batch_images,
device=device,
attention_mask=attention_mask,
max_generation_length=max_generation_length,
num_beams=num_beams,
length_penalty=length_penalty,
input_ids=input_ids,
)
process_function = (
postprocess_vqa_generation
if vqa_dataset == "vqa"
else postprocess_ok_vqa_generation
)
new_predictions = [
process_function(out)
for out in tokenizer.batch_decode(outputs, skip_special_tokens=True)
]
predictions.extend(
[
{"answer": p, "question_id": sample["question_id"]}
for p, sample in zip(new_predictions, batch)
]
)
# save the predictions to a temporary file
random_uuid = str(uuid.uuid4())
with open(f"{vqa_dataset}results_{random_uuid}.json", "w") as f:
f.write(json.dumps(predictions, indent=4))
acc = compute_vqa_accuracy(
f"{vqa_dataset}results_{random_uuid}.json",
questions_json_path,
annotations_json_path,
)
# delete the temporary file
os.remove(f"{vqa_dataset}results_{random_uuid}.json")
return acc
def evaluate_imagenet(
model,
tokenizer,
image_processor,
batch_size: int,
imagenet_root: str,
seed: int = 42,
num_samples: int = 5000,
num_shots: int = 8,
device: int = -1,
):
"""
Evaluate a model on ImageNet dataset.
Args:
model: model to evaluate
tokenizer (transformers.PreTrainedTokenizer): tokenizer for the model
image_processor : image processor for the model
batch_size (int): batch size
imagenet_root (str): path to imagenet root for the specified split.
seed (int, optional): random seed. Defaults to 42.
num_samples (int, optional): number of samples to evaluate on. Defaults to 5000 samples.
num_shots (int, optional): number of shots to use. Defaults to 8.
device (int, optional): device to use. Defaults to -1 (cpu).
Returns:
float: accuracy score
"""
full_dataset = ImageNetDataset(root=imagenet_root)
effective_num_shots = num_shots if num_shots > 0 else 2
if num_samples + effective_num_shots > len(full_dataset):
raise ValueError(
f"num_samples + num_shots must be less than or equal to "
f"{len(full_dataset)} "
)
random_indices = get_random_indices(
num_samples, effective_num_shots, full_dataset, seed
)
eoc_token = "<|endofchunk|>"
eoc_token_id = tokenizer.additional_special_tokens_ids[
tokenizer.additional_special_tokens.index(eoc_token)
]
# Padding from right allows efficient precomputing of context activations.
tokenizer.padding_side = "right"
def _imagenet_prompt(class_name, is_context: bool = True):
"""Construct an imagenet prompt for a given label."""
prefix = "<image>A photo of a "
if is_context:
return prefix + class_name.strip()
else:
# Not a context example; insert EOS token before the class name
# so that we can compute the loss on the class name tokens only.
return prefix + tokenizer.eos_token + class_name.strip()
def get_imagenet_prompt(x: dict, is_context: bool = True) -> str:
"""Construct an ImageNet prompt for an example, using its label."""
return _imagenet_prompt(x["class_name"], is_context=is_context)
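    # e.g. (illustrative) _imagenet_prompt("tabby cat") -> "<image>A photo of a tabby cat",
    # while _imagenet_prompt("tabby cat", is_context=False) inserts tokenizer.eos_token
    # between the prefix and the class name so that only the class-name tokens are scored.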
in_context_samples, eval_dataset = prepare_eval_samples_and_dataset(
full_dataset=full_dataset,
random_indices=random_indices,
query_set_size=effective_num_shots, # NOTE: here we replace query_set_size with effective_num_shots but this is not the ideal evaluation setting.
# TODO: We should add a query_set_size argument to the function and use it to randomly sample the context for each example.
# This will be more consistent with the evaluation setting in the paper but will require some reworking of the caching.
)
device = device if device >= 0 else "cpu"
model.eval()
# Predictions based on the class target sequence with the maximal
# predicted probability
predictions_max_prob = []
# Predictions based on the class target sequence with the minimal loss on
# the model logits
predictions_min_loss = []
labels = []
context_images = [
get_context_images(
image_processor=image_processor,
in_context_samples=in_context_samples,
num_shots=num_shots,
)
for _ in range(batch_size)
]
context_text = get_context_text(
get_imagenet_prompt,
in_context_samples=in_context_samples,
effective_num_shots=effective_num_shots,
num_shots=num_shots,
)
# kwargs to use when calling tokenizer
tokenizer_kwargs = {
"return_tensors": "pt",
"padding": True,
"truncation": True,
"max_length": 256,
}
for i, batch in enumerate(more_itertools.chunked(eval_dataset, batch_size)):
print(f"processing batch {i} of {ceil(len(eval_dataset) / batch_size)}")
batch_per_class_probs = []
batch_per_class_losses = []
batch_images = prepare_batch_images(
batch=batch,
image_processor=image_processor,
context_images=context_images,
num_shots=num_shots,
)
# Process the images only once.
batch_images = batch_images.to(device)
model._encode_vision_x(vision_x=batch_images)
# Process the context text only once.
context_encodings = tokenizer([context_text] * batch_size, **tokenizer_kwargs)
context_ids = context_encodings["input_ids"].to(device)
context_len = context_ids.shape[-1]
context_precomputed = model(
None,
context_ids,
use_cached_vision_x=True,
clear_conditioned_layers=False,
use_cache=True,
)
# For each ImageNet class, construct the output prompt, compute a
# forward pass, and store the results.
for imagenet_class_name in tqdm(openai_imagenet_classnames):
batch_text = [
context_text + _imagenet_prompt(imagenet_class_name, False) + eoc_token
] * batch_size
full_batch_encodings = tokenizer(batch_text, **tokenizer_kwargs)
# full_batch_input_ids has shape [batch_size, seq_len], but we
# only need to run inference on the [batch_size,
# context_len:] inputs that have not been precomputed and
# vary per class.
full_batch_input_ids = full_batch_encodings["input_ids"].to(device)
full_batch_attention_mask = full_batch_encodings["attention_mask"].to(
device
)
# Sanity check that the encoded inputs with context are the same
# as the encoded context alone, for every example in the batch
assert torch.all(
context_ids[0, :] == full_batch_input_ids[:, :context_len]
).item()
# Clone the nested structure of the past key values
past_key_values = tuple(
[
tuple([x.clone() for x in inner])
for inner in context_precomputed.past_key_values
]
)
# Compute the outputs without recomputing context representations.
outputs = model(
vision_x=None,
lang_x=full_batch_input_ids[:, context_len:],
attention_mask=full_batch_attention_mask,
use_cached_vision_x=True,
clear_conditioned_layers=False,
past_key_values=past_key_values,
use_cache=True,
)
logits = torch.concat((context_precomputed.logits, outputs.logits), 1)
per_sample_probs = compute_per_sample_probs(
encodings=full_batch_encodings,
tokenizer=tokenizer,
logits=logits,
eoc_token_id=eoc_token_id,
)
per_sample_loss = compute_per_sample_loss(
encodings=full_batch_encodings,
tokenizer=tokenizer,
logits=logits,
eoc_token_id=eoc_token_id,
)
batch_per_class_probs.append(per_sample_probs.detach())
batch_per_class_losses.append(per_sample_loss.detach())
# Tensor of shape [batch_size, 1000] where the [i,j]th element is
# the (probability or loss) for batch element i on imagenet class j.
batch_probs = torch.stack(batch_per_class_probs, 1)
batch_losses = torch.stack(batch_per_class_losses, 1)
predictions_max_prob.extend(torch.argmax(batch_probs, 1).detach().tolist())
predictions_min_loss.extend(torch.argmin(batch_losses, 1).detach().tolist())
labels.extend(x["class_id"] for x in batch)
acc_max_prob = (np.array(predictions_max_prob) == np.array(labels)).mean()
acc_min_loss = (np.array(predictions_min_loss) == np.array(labels)).mean()
print(f"[DEBUG] ImageNet accuracy with max prob method is {acc_max_prob}")
print(f"[DEBUG] ImageNet accuracy with min loss method is {acc_min_loss}")
print(f"[DEBUG] printing ImageNet predictions and labels:")
for yhat_prob, yhat_loss, y in zip(
predictions_max_prob, predictions_min_loss, labels
):
print(
" " * 30 + f"label: {IMAGENET_1K_CLASS_ID_TO_LABEL[y]}"
f"\nprediction (max prob method): "
f"{IMAGENET_1K_CLASS_ID_TO_LABEL[yhat_prob]}"
f"\nprediction (min loss method): "
f"{IMAGENET_1K_CLASS_ID_TO_LABEL[yhat_loss]}\n"
"#" * 25
)
return acc_max_prob
if __name__ == "__main__":
main()
| flamingo-main | open_flamingo/eval/evaluate.py |
from pycocoevalcap.eval import COCOEvalCap
from pycocotools.coco import COCO
def compute_cider(
result_path,
annotations_path="/data/yfcc-tmp/data/mscoco/annotations/captions_train2017.json",
):
# create coco object and coco_result object
coco = COCO(annotations_path)
coco_result = coco.loadRes(result_path)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
coco_eval.params["image_id"] = coco_result.getImgIds()
coco_eval.evaluate()
return coco_eval.eval
def postprocess_captioning_generation(predictions):
return predictions.split("Output", 1)[0]
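# e.g. (illustrative) postprocess_captioning_generation("A dog on a couch.Output: a cat")
# returns "A dog on a couch." -- everything from the first "Output" onward is dropped.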
| flamingo-main | open_flamingo/eval/coco_metric.py |
import json
import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder
from open_flamingo.eval.imagenet_utils import IMAGENET_1K_CLASS_ID_TO_LABEL
class COCOFlickrDataset(Dataset):
def __init__(
self,
image_dir_path="/mmfs1/gscratch/efml/anasa2/data/coco/train2017/",
annotations_path="/mmfs1/gscratch/efml/anasa2/data/coco/annotations/captions_train2017.json",
is_flickr=False,
):
self.image_dir_path = image_dir_path
self.annotations = json.load(open(annotations_path))["annotations"]
self.is_flickr = is_flickr
def __len__(self):
return len(self.annotations)
def get_img_path(self, idx):
if self.is_flickr:
return f"{self.image_dir_path}/{self.annotations[idx]['image_id']}.jpg"
else:
return f"{self.image_dir_path}/COCO_train2014_{self.annotations[idx]['image_id']:012d}.jpg"
def __getitem__(self, idx):
image = Image.open(self.get_img_path(idx))
caption = self.annotations[idx]["caption"]
return {
"image": image,
"caption": caption,
"image_id": self.annotations[idx]["image_id"],
}
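    # Each item is a dict shaped like (values illustrative):
    #   {"image": <PIL.Image.Image>, "caption": "A man riding a bike.", "image_id": 391895}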
class VQADataset(Dataset):
def __init__(
self,
image_dir_path="/mmfs1/gscratch/efml/anasa2/data/vqav2/train2014/",
question_path="/mmfs1/gscratch/efml/anasa2/data/vqav2/v2_OpenEnded_mscoco_train2014_questions.json",
annotations_path="/mmfs1/gscratch/efml/anasa2/data/vqav2/v2_mscoco_train2014_annotations.json",
vqa_dataset="vqa",
):
self.questions = json.load(open(question_path, "r"))["questions"]
self.answers = json.load(open(annotations_path, "r"))["annotations"]
self.image_dir_path = image_dir_path
self.vqa_dataset = vqa_dataset
def __len__(self):
return len(self.questions)
def get_img_path(self, question):
if self.vqa_dataset == "vqa":
return os.path.join(
self.image_dir_path, f"COCO_train2014_{question['image_id']:012d}.jpg"
)
elif self.vqa_dataset == "ok_vqa":
return os.path.join(
self.image_dir_path, f"COCO_val2014_{question['image_id']:012d}.jpg"
)
else:
raise Exception(f"Unknown VQA dataset {self.vqa_dataset}")
def __getitem__(self, idx):
question = self.questions[idx]
answers = self.answers[idx]
img_path = self.get_img_path(question)
image = Image.open(img_path)
return {
"image": image,
"question": question["question"],
"answers": [a["answer"] for a in answers["answers"]],
"question_id": question["question_id"],
}
class ImageNetDataset(ImageFolder):
"""Class to represent the ImageNet1k dataset."""
def __init__(self, root, **kwargs):
super().__init__(root=root, **kwargs)
def __getitem__(self, idx):
sample, target = super().__getitem__(idx)
target_label = IMAGENET_1K_CLASS_ID_TO_LABEL[target]
return {
"image": sample,
"class_id": target, # numeric ID of the ImageNet class
"class_name": target_label, # human-readable name of ImageNet class
}
| flamingo-main | open_flamingo/eval/eval_datasets.py |
import copy
import datetime
import json
import os
import random
import re
import sys
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(COCO.function)"
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
if not annotation_file == None and not question_file == None:
print("loading VQA annotations and questions into memory...")
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, "r"))
questions = json.load(open(question_file, "r"))
print(datetime.datetime.utcnow() - time_t)
self.dataset = dataset
self.questions = questions
self.createIndex()
def createIndex(self):
# create index
print("creating index...")
imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]}
qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
for ann in self.dataset["annotations"]:
imgToQA[ann["image_id"]] += [ann]
qa[ann["question_id"]] = ann
for ques in self.questions["questions"]:
qqa[ques["question_id"]] = ques
print("index created!")
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
for key, value in self.dataset["info"].items():
print("%s: %s" % (key, value))
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset["annotations"]
else:
if not len(imgIds) == 0:
anns = sum(
[self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],
[],
)
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(quesTypes) == 0
else [ann for ann in anns if ann["question_type"] in quesTypes]
)
anns = (
anns
if len(ansTypes) == 0
else [ann for ann in anns if ann["answer_type"] in ansTypes]
)
ids = [ann["question_id"] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset["annotations"]
else:
if not len(quesIds) == 0:
anns = sum(
[self.qa[quesId] for quesId in quesIds if quesId in self.qa], []
)
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(quesTypes) == 0
else [ann for ann in anns if ann["question_type"] in quesTypes]
)
anns = (
anns
if len(ansTypes) == 0
else [ann for ann in anns if ann["answer_type"] in ansTypes]
)
ids = [ann["image_id"] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann["question_id"]
print("Question: %s" % (self.qqa[quesId]["question"]))
for ans in ann["answers"]:
print("Answer %d: %s" % (ans["answer_id"], ans["answer"]))
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset["info"] = copy.deepcopy(self.questions["info"])
res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"])
res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"])
res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"])
res.dataset["license"] = copy.deepcopy(self.questions["license"])
print("Loading and preparing results... ")
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, "results is not an array of objects"
annsQuesIds = [ann["question_id"] for ann in anns]
# print set of question ids that do not have corresponding annotations
# assert set(annsQuesIds) == set(self.getQuesIds()), \
# 'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.'
for ann in anns:
quesId = ann["question_id"]
if res.dataset["task_type"] == "Multiple Choice":
assert (
ann["answer"] in self.qqa[quesId]["multiple_choices"]
), "predicted answer is not one of the multiple choices"
qaAnn = self.qa[quesId]
ann["image_id"] = qaAnn["image_id"]
ann["question_type"] = qaAnn["question_type"]
ann["answer_type"] = qaAnn["answer_type"]
print(
"DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds())
)
res.dataset["annotations"] = anns
res.createIndex()
return res
class VQAEval:
def __init__(self, vqa, vqaRes, n=2):
self.n = n
self.accuracy = {}
self.evalQA = {}
self.evalQuesType = {}
self.evalAnsType = {}
self.vqa = vqa
self.vqaRes = vqaRes
self.params = {"question_id": vqaRes.getQuesIds()}
self.contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
self.manualMap = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
self.articles = ["a", "an", "the"]
        # raw strings so that "\d" and "\," are not treated as (invalid) string escapes
        self.periodStrip = re.compile(r"(?!<=\d)(\.)(?!\d)")
        self.commaStrip = re.compile(r"(\d)(\,)(\d)")
self.punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def evaluate(self, quesIds=None):
if quesIds == None:
quesIds = [quesId for quesId in self.params["question_id"]]
gts = {}
res = {}
for quesId in quesIds:
gts[quesId] = self.vqa.qa[quesId]
res[quesId] = self.vqaRes.qa[quesId]
# =================================================
# Compute accuracy
# =================================================
accQA = []
accQuesType = {}
accAnsType = {}
print("computing accuracy")
step = 0
for quesId in quesIds:
for ansDic in gts[quesId]["answers"]:
ansDic["answer"] = ansDic["answer"].replace("\n", " ")
ansDic["answer"] = ansDic["answer"].replace("\t", " ")
ansDic["answer"] = ansDic["answer"].strip()
resAns = res[quesId]["answer"]
resAns = resAns.replace("\n", " ")
resAns = resAns.replace("\t", " ")
resAns = resAns.strip()
gtAcc = []
gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]]
if len(set(gtAnswers)) > 1:
for ansDic in gts[quesId]["answers"]:
ansDic["answer"] = self.processPunctuation(ansDic["answer"])
ansDic["answer"] = self.processDigitArticle(ansDic["answer"])
resAns = self.processPunctuation(resAns)
resAns = self.processDigitArticle(resAns)
for gtAnsDatum in gts[quesId]["answers"]:
otherGTAns = [
item for item in gts[quesId]["answers"] if item != gtAnsDatum
]
matchingAns = [item for item in otherGTAns if item["answer"] == resAns]
acc = min(1, float(len(matchingAns)) / 3)
gtAcc.append(acc)
quesType = gts[quesId]["question_type"]
ansType = gts[quesId]["answer_type"]
avgGTAcc = float(sum(gtAcc)) / len(gtAcc)
accQA.append(avgGTAcc)
if quesType not in accQuesType:
accQuesType[quesType] = []
accQuesType[quesType].append(avgGTAcc)
if ansType not in accAnsType:
accAnsType[ansType] = []
accAnsType[ansType].append(avgGTAcc)
self.setEvalQA(quesId, avgGTAcc)
self.setEvalQuesType(quesId, quesType, avgGTAcc)
self.setEvalAnsType(quesId, ansType, avgGTAcc)
if step % 100 == 0:
self.updateProgress(step / float(len(quesIds)))
step = step + 1
self.setAccuracy(accQA, accQuesType, accAnsType)
print("Done computing accuracy")
def processPunctuation(self, inText):
outText = inText
for p in self.punct:
if (p + " " in inText or " " + p in inText) or (
re.search(self.commaStrip, inText) != None
):
outText = outText.replace(p, "")
else:
outText = outText.replace(p, " ")
outText = self.periodStrip.sub("", outText, re.UNICODE)
return outText
def processDigitArticle(self, inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = self.manualMap.setdefault(word, word)
if word not in self.articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in self.contractions:
outText[wordId] = self.contractions[word]
outText = " ".join(outText)
return outText
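        # e.g. (illustrative) processDigitArticle("a two dogs") -> "2 dogs": articles are
        # dropped, number words are mapped to digits, and bare contraction variants
        # (e.g. "dont") are rewritten to their canonical forms ("don't").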
def setAccuracy(self, accQA, accQuesType, accAnsType):
self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n)
self.accuracy["perQuestionType"] = {
quesType: round(
100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]),
self.n,
)
for quesType in accQuesType
}
self.accuracy["perAnswerType"] = {
ansType: round(
100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n
)
for ansType in accAnsType
}
def setEvalQA(self, quesId, acc):
self.evalQA[quesId] = round(100 * acc, self.n)
def setEvalQuesType(self, quesId, quesType, acc):
if quesType not in self.evalQuesType:
self.evalQuesType[quesType] = {}
self.evalQuesType[quesType][quesId] = round(100 * acc, self.n)
def setEvalAnsType(self, quesId, ansType, acc):
if ansType not in self.evalAnsType:
self.evalAnsType[ansType] = {}
self.evalAnsType[ansType][quesId] = round(100 * acc, self.n)
def updateProgress(self, progress):
barLength = 20
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength * progress))
text = "\rFinshed Percent: [{0}] {1}% {2}".format(
"#" * block + "-" * (barLength - block), int(progress * 100), status
)
sys.stdout.write(text)
sys.stdout.flush()
def compute_vqa_accuracy(result_json_path, question_json_path, annotation_json_path):
"""Compute the VQA accuracy metric.
Args:
        result_json_path (str): path to the json file containing the model's predicted answers
        question_json_path (str): path to the VQA questions json file
        annotation_json_path (str): path to the VQA annotations json file
Returns:
float: VQA accuracy
"""
# coding: utf-8
# dataDir = data_dir
# set up file names and paths
# versionType = 'v2_' # this should be '' when using VQA v2.0 dataset
# 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
# taskType = 'OpenEnded'
# 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
# dataType = 'mscoco'
# dataSubType = 'train2014'
# annFile = '%s/%s%s_%s_annotations.json' % (
# dataDir, versionType, dataType, dataSubType)
# quesFile = '%s/%s%s_%s_%s_questions.json' % (
# dataDir, versionType, taskType, dataType, dataSubType)
# imgDir = '%s/%s/%s/' % (dataDir, dataType, dataSubType)
# resultType = res_file_name
# fileTypes = ['results', 'accuracy',
# 'evalQA', 'evalQuesType', 'evalAnsType']
# An example result json file has been provided in './Results' folder.
# [resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s/%s%s_%s_%s_%s_%s.json' % (dataDir, versionType, taskType, dataType, dataSubType,
# resultType, fileType) for fileType in fileTypes]
# create vqa object and vqaRes object
vqa = VQA(annotation_json_path, question_json_path)
vqaRes = vqa.loadRes(result_json_path, question_json_path)
# create vqaEval object by taking vqa and vqaRes
# n is precision of accuracy (number of places after decimal), default is 2
vqaEval = VQAEval(vqa, vqaRes, n=2)
# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""
vqaEval.evaluate()
return vqaEval.accuracy["overall"]
def postprocess_vqa_generation(predictions):
return re.split("Question|Answer", predictions, 1)[0]
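# e.g. (illustrative) postprocess_vqa_generation("blue Question:what else is shown?")
# returns "blue " -- the generation is truncated at the first "Question" or "Answer" marker.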
| flamingo-main | open_flamingo/eval/vqa_metric.py |
 | flamingo-main | open_flamingo/src/__init__.py |
from transformers import AutoModelForCausalLM, AutoTokenizer
import open_clip
from .flamingo import Flamingo
from .flamingo_lm import FlamingoLMMixin
from .utils import extend_instance
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: str = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path, local_files_only=use_local_files
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path, local_files_only=use_local_files
)
extend_instance(lang_encoder, FlamingoLMMixin)
if decoder_layers_attr_name is None:
decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
lang_encoder.resize_token_embeddings(len(text_tokenizer))
model = Flamingo(
vision_encoder,
lang_encoder,
text_tokenizer.encode("<|endofchunk|>")[-1],
text_tokenizer.encode("<image>")[-1],
vis_dim=open_clip.get_model_config(clip_vision_encoder_path)["vision_cfg"][
"width"
],
cross_attn_every_n_layers=cross_attn_every_n_layers,
**flamingo_kwargs,
)
# Freeze all parameters
model.requires_grad_(False)
assert sum(p.numel() for p in model.parameters() if p.requires_grad) == 0
# Unfreeze perceiver, gated_cross_attn_layers, and LM input embeddings
model.perceiver.requires_grad_(True)
model.lang_encoder.gated_cross_attn_layers.requires_grad_(True)
model.lang_encoder.get_input_embeddings().requires_grad_(True)
print(
f"Flamingo model initialized with {sum(p.numel() for p in model.parameters() if p.requires_grad)} trainable parameters"
)
return model, image_processor, text_tokenizer
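# Example usage (illustrative; the encoder and LM names below follow open_clip / Hugging Face
# conventions and are placeholders, not values fixed by this file):
#   model, image_processor, tokenizer = create_model_and_transforms(
#       clip_vision_encoder_path="ViT-L-14",
#       clip_vision_encoder_pretrained="openai",
#       lang_encoder_path="facebook/opt-1.3b",
#       tokenizer_path="facebook/opt-1.3b",
#       cross_attn_every_n_layers=4,
#   )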
def _infer_decoder_layers_attr_name(model):
for k in __KNOWN_DECODER_LAYERS_ATTR_NAMES:
if k.lower() in model.__class__.__name__.lower():
return __KNOWN_DECODER_LAYERS_ATTR_NAMES[k]
raise ValueError(
f"We require the attribute name for the nn.ModuleList in the decoder storing the transformer block layers. Please supply this string manually."
)
__KNOWN_DECODER_LAYERS_ATTR_NAMES = {
"opt": "model.decoder.layers",
"gptneo": "transformer.h",
"gptj": "transformer.h",
"gpt-j": "transformer.h",
"pythia": "gpt_neox.layers",
"llama": "model.layers",
}
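# e.g. (illustrative) for a model class named OPTForCausalLM, the key "opt" matches and
# _infer_decoder_layers_attr_name returns "model.decoder.layers"; an unrecognized
# architecture raises ValueError and requires decoder_layers_attr_name to be set manually.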
| flamingo-main | open_flamingo/src/factory.py |
import random
import torch.nn as nn
from .helpers import GatedCrossAttentionBlock
from .utils import getattr_recursive, setattr_recursive
class FlamingoLayer(nn.Module):
def __init__(self, gated_cross_attn_layer, decoder_layer):
super().__init__()
self.gated_cross_attn_layer = gated_cross_attn_layer
self.decoder_layer = decoder_layer
self.vis_x = None
self.media_locations = None
def is_conditioned(self) -> bool:
"""Check whether the layer is conditioned."""
return self.vis_x is not None
# Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
def condition_vis_x(self, vis_x):
self.vis_x = vis_x
def condition_media_locations(self, media_locations):
self.media_locations = media_locations
def condition_attend_previous(self, attend_previous):
self.attend_previous = attend_previous
def forward(
self,
lang_x,
attention_mask=None,
**decoder_layer_kwargs,
):
if self.gated_cross_attn_layer is None:
return self.decoder_layer(
lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
)
if self.vis_x is None:
raise ValueError("vis_x must be conditioned before forward pass")
if self.media_locations is None:
raise ValueError("media_locations must be conditioned before forward pass")
lang_x = self.gated_cross_attn_layer(
lang_x,
self.vis_x,
media_locations=self.media_locations,
attend_previous=self.attend_previous,
)
lang_x = self.decoder_layer(
lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
)
return lang_x
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self):
return getattr_recursive(self, self.decoder_layers_attr_name)
def _set_decoder_layers(self, value):
setattr_recursive(self, self.decoder_layers_attr_name, value)
def init_flamingo(
self,
media_token_id,
vis_hidden_size,
cross_attn_every_n_layers,
use_media_placement_augmentation,
):
"""
Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.
"""
self.gated_cross_attn_layers = nn.ModuleList(
[
GatedCrossAttentionBlock(
dim=self.config.hidden_size, dim_visual=vis_hidden_size
)
if (layer_idx + 1) % cross_attn_every_n_layers == 0
else None
for layer_idx, _ in enumerate(self._get_decoder_layers())
]
)
self._set_decoder_layers(
nn.ModuleList(
[
FlamingoLayer(gated_cross_attn_layer, decoder_layer)
for gated_cross_attn_layer, decoder_layer in zip(
self.gated_cross_attn_layers, self._get_decoder_layers()
)
]
)
)
self.media_token_id = media_token_id
self.use_media_placement_augmentation = use_media_placement_augmentation
self.initialized_flamingo = True
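        # Illustrative note (added): with cross_attn_every_n_layers=4 and a 32-layer
        # decoder, gated_cross_attn_layers has 32 entries, of which every 4th is a
        # GatedCrossAttentionBlock and the rest are None; each decoder layer is then
        # wrapped in a FlamingoLayer together with its (possibly None) cross-attn block.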
def forward(self, *input, **kwargs):
"""Condition the Flamingo layers on the media locations before forward()"""
if not self.initialized_flamingo:
raise ValueError(
"Flamingo layers are not initialized. Please call `init_flamingo` first."
)
input_ids = kwargs["input_ids"] if "input_ids" in kwargs else input[0]
media_locations = input_ids == self.media_token_id
attend_previous = (
(random.random() < 0.5) if self.use_media_placement_augmentation else False
)
for layer in self.get_decoder().layers:
layer.condition_media_locations(media_locations)
layer.condition_attend_previous(attend_previous)
return super().forward(
*input, **kwargs
) # Call the other parent's forward method
def is_conditioned(self) -> bool:
"""Check whether all decoder layers are already conditioned."""
return all(l.is_conditioned() for l in self._get_decoder_layers())
def clear_conditioned_layers(self):
for layer in self._get_decoder_layers():
layer.condition_vis_x(None)
layer.condition_media_locations(None)
layer.condition_attend_previous(None)
| flamingo-main | open_flamingo/src/flamingo_lm.py |
import torch
from einops import rearrange
from torch import nn
from .helpers import PerceiverResampler
class Flamingo(nn.Module):
def __init__(
self,
vision_encoder: nn.Module,
lang_encoder: nn.Module,
eoc_token_id: int,
media_token_id: int,
vis_dim: int,
cross_attn_every_n_layers: int = 1,
use_media_placement_augmentation: bool = False,
):
"""
Args:
vision_encoder (nn.Module): HF CLIPModel
lang_encoder (nn.Module): HF causal language model
eoc_token_id (int): Token id for <|endofchunk|>
media_token_id (int): Token id for <image>
vis_dim (int): Dimension of the visual features.
Visual features are projected to match this shape along the last dimension.
cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.
use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.
"""
super().__init__()
self.eoc_token_id = eoc_token_id
self.media_token_id = media_token_id
self.use_media_placement_augmentation = use_media_placement_augmentation
self.vis_dim = vis_dim
self.vision_encoder = vision_encoder
self.perceiver = PerceiverResampler(dim=self.vis_dim)
self.lang_encoder = lang_encoder
self.lang_encoder.init_flamingo(
media_token_id=media_token_id,
vis_hidden_size=self.vis_dim,
cross_attn_every_n_layers=cross_attn_every_n_layers,
use_media_placement_augmentation=self.use_media_placement_augmentation,
)
def forward(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: torch.Tensor = None,
labels: torch.Tensor = None,
use_cached_vision_x: bool = False,
clear_conditioned_layers: bool = True,
past_key_values=None,
use_cache: bool = False,
):
"""
Forward pass of Flamingo.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W) with F=1
lang_x (torch.Tensor): Language input ids
shape (B, T_txt)
attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
labels (torch.Tensor, optional): Labels. Defaults to None.
clear_conditioned_layers: if True, clear the conditioned layers
                once the forward pass is completed. Set this to false if the
same set of images will be reused in another subsequent
forward pass.
past_key_values: pre-computed values to pass to language model.
See past_key_values documentation in Hugging Face
CausalLM models.
use_cache: whether to use cached key values. See use_cache
documentation in Hugging Face CausalLM models.
"""
assert (
vision_x is not None
) or use_cached_vision_x, (
"Must provide either vision_x or use_cached_vision_x to True."
)
if use_cached_vision_x:
# Case: use cached; vision_x should be cached and other
# vision-related inputs should not be provided.
assert (
vision_x is None
), "Expect vision_x to be None when use_cached_vision_x is True."
assert self.lang_encoder.is_conditioned()
else:
# Case: do not use caching (i.e. this is a standard forward pass);
self._encode_vision_x(vision_x=vision_x)
output = self.lang_encoder(
input_ids=lang_x,
attention_mask=attention_mask,
labels=labels,
past_key_values=past_key_values,
use_cache=use_cache,
)
if clear_conditioned_layers:
self.lang_encoder.clear_conditioned_layers()
return output
def generate(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: torch.Tensor = None,
num_beams=1,
max_new_tokens=None,
temperature=1.0,
top_k=0,
top_p=1.0,
no_repeat_ngram_size=0,
prefix_allowed_tokens_fn=None,
length_penalty=1.0,
num_return_sequences=1,
do_sample=False,
early_stopping=False,
):
"""
Generate text conditioned on vision and language inputs.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W)
images in the same chunk are collated along T_img, and frames are collated along F
currently only F=1 is supported (single-frame videos)
lang_x (torch.Tensor): Language input
shape (B, T_txt)
attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
num_beams (int, optional): Number of beams. Defaults to 1.
max_new_tokens (int, optional): Maximum new tokens. Defaults to None.
temperature (float, optional): Temperature. Defaults to 1.0.
top_k (int, optional): Top k. Defaults to 0.
top_p (float, optional): Top p. Defaults to 1.0.
no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.
length_penalty (float, optional): Length penalty. Defaults to 1.0.
num_return_sequences (int, optional): Number of return sequences. Defaults to 1.
do_sample (bool, optional): Do sample. Defaults to False.
early_stopping (bool, optional): Early stopping. Defaults to False.
Returns:
torch.Tensor: lang_x with generated tokens appended to it
"""
if num_beams > 1:
vision_x = vision_x.repeat_interleave(num_beams, dim=0)
self._encode_vision_x(vision_x=vision_x)
output = self.lang_encoder.generate(
lang_x,
attention_mask=attention_mask,
eos_token_id=self.eoc_token_id,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_k=top_k,
top_p=top_p,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
no_repeat_ngram_size=no_repeat_ngram_size,
length_penalty=length_penalty,
num_return_sequences=num_return_sequences,
do_sample=do_sample,
early_stopping=early_stopping,
)
self.lang_encoder.clear_conditioned_layers()
return output
def _encode_vision_x(self, vision_x: torch.Tensor):
"""
Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W)
Images in the same chunk are collated along T_img, and frames are collated along F
Currently only F=1 is supported (single-frame videos)
rearrange code based on https://github.com/dhansmair/flamingo-mini
"""
assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
b, T, F = vision_x.shape[:3]
assert F == 1, "Only single frame supported"
vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
with torch.no_grad():
vision_x = self.vision_encoder.visual(vision_x)[1]
vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)
vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)
for layer in self.lang_encoder._get_decoder_layers():
layer.condition_vis_x(vision_x)
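        # Illustrative shapes (added comment): for one query image plus two in-context
        # images at 224x224 resolution, vision_x is (1, 3, 1, 3, 224, 224); after the
        # vision encoder and the PerceiverResampler (64 latents by default) it becomes
        # (1, 3, 64, vis_dim) and is attached to every FlamingoLayer via condition_vis_x.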
| flamingo-main | open_flamingo/src/flamingo.py |
def extend_instance(obj, mixin):
"""Apply mixins to a class instance after creation"""
base_cls = obj.__class__
base_cls_name = obj.__class__.__name__
obj.__class__ = type(
base_cls_name, (mixin, base_cls), {}
) # mixin needs to go first for our forward() logic to work
def getattr_recursive(obj, att):
"""
Return nested attribute of obj
Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
"""
if att == "":
return obj
i = att.find(".")
if i < 0:
return getattr(obj, att)
else:
return getattr_recursive(getattr(obj, att[:i]), att[i + 1 :])
def setattr_recursive(obj, att, val):
"""
Set nested attribute of obj
Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
"""
if "." in att:
obj = getattr_recursive(obj, ".".join(att.split(".")[:-1]))
setattr(obj, att.split(".")[-1], val)
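# e.g. (illustrative) getattr_recursive(lang_encoder, "transformer.h") returns
# lang_encoder.transformer.h, and setattr_recursive(lang_encoder, "transformer.h", layers)
# assigns lang_encoder.transformer.h = layers; the Flamingo mixin uses these helpers to
# swap the decoder blocks for FlamingoLayer wrappers.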
| flamingo-main | open_flamingo/src/utils.py |
"""
Taken from https://github.com/lucidrains/flamingo-pytorch
"""
import torch
from einops import rearrange, repeat
from einops_exts import rearrange_many
from torch import einsum, nn
def exists(val):
return val is not None
def FeedForward(dim, mult=4):
inner_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim, bias=False),
nn.GELU(),
nn.Linear(inner_dim, dim, bias=False),
)
class PerceiverAttention(nn.Module):
def __init__(self, *, dim, dim_head=64, heads=8):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm_media = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
def forward(self, x, latents):
"""
Args:
x (torch.Tensor): image features
shape (b, T, n1, D)
latent (torch.Tensor): latent features
shape (b, T, n2, D)
"""
x = self.norm_media(x)
latents = self.norm_latents(latents)
h = self.heads
q = self.to_q(latents)
kv_input = torch.cat((x, latents), dim=-2)
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
q, k, v = rearrange_many((q, k, v), "b t n (h d) -> b h t n d", h=h)
q = q * self.scale
# attention
sim = einsum("... i d, ... j d -> ... i j", q, k)
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
out = einsum("... i j, ... j d -> ... i d", attn, v)
out = rearrange(out, "b h t n d -> b t n (h d)", h=h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth=6,
dim_head=64,
heads=8,
num_latents=64,
max_num_media=None,
max_num_frames=None,
ff_mult=4,
):
super().__init__()
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.frame_embs = (
nn.Parameter(torch.randn(max_num_frames, dim))
if exists(max_num_frames)
else None
)
self.media_time_embs = (
nn.Parameter(torch.randn(max_num_media, 1, dim))
if exists(max_num_media)
else None
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
FeedForward(dim=dim, mult=ff_mult),
]
)
)
self.norm = nn.LayerNorm(dim)
def forward(self, x):
"""
Args:
x (torch.Tensor): image features
shape (b, T, F, v, D)
Returns:
shape (b, T, n, D) where n is self.num_latents
"""
b, T, F, v = x.shape[:4]
# frame and media time embeddings
if exists(self.frame_embs):
frame_embs = repeat(self.frame_embs[:F], "F d -> b T F v d", b=b, T=T, v=v)
x = x + frame_embs
x = rearrange(
x, "b T F v d -> b T (F v) d"
) # flatten the frame and spatial dimensions
if exists(self.media_time_embs):
x = x + self.media_time_embs[:T]
# blocks
latents = repeat(self.latents, "n d -> b T n d", b=b, T=T)
for attn, ff in self.layers:
latents = attn(x, latents) + latents
latents = ff(latents) + latents
return self.norm(latents)
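        # Illustrative example (added): with dim=1024, the default 64 latents, and input
        # of shape (b=2, T=3, F=1, v=257, 1024), the frame/spatial axes are flattened to
        # (2, 3, 257, 1024) and the returned latents have shape (2, 3, 64, 1024).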
# gated cross attention
class MaskedCrossAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
only_attend_immediate_media=True,
):
super().__init__()
self.scale = dim_head**-0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim_visual, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether for text to only attend to immediate preceding image, or all previous images
self.only_attend_immediate_media = only_attend_immediate_media
def forward(self, x, media, media_locations=None, attend_previous=True):
"""
Args:
x (torch.Tensor): text features
shape (B, T_txt, D_txt)
media (torch.Tensor): image features
shape (B, T_img, n, D_img) where n is the dim of the latents
media_locations: boolean mask identifying the media tokens in x
shape (B, T_txt)
attend_previous: bool
If false, ignores immediately preceding image and starts attending when following image
"""
_, T_img, n = media.shape[:3]
h = self.heads
x = self.norm(x)
q = self.to_q(x)
media = rearrange(media, "b t n d -> b (t n) d")
k, v = self.to_kv(media).chunk(2, dim=-1)
q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=h)
q = q * self.scale
sim = einsum("... i d, ... j d -> ... i j", q, k)
if exists(media_locations):
# at each boolean of True, increment the time counter (relative to media time)
text_time = media_locations.cumsum(dim=-1)
media_time = torch.arange(T_img, device=x.device) + 1
if not attend_previous:
text_time[~media_locations] += 1
# make sure max is still the number of images in the sequence
text_time[
text_time
> repeat(
torch.count_nonzero(media_locations, dim=1),
"b -> b i",
i=text_time.shape[1],
)
] = 0
# text time must equal media time if only attending to most immediate image
# otherwise, as long as text time is greater than media time (if attending to all previous images / media)
mask_op = torch.eq if self.only_attend_immediate_media else torch.ge
text_to_media_mask = mask_op(
rearrange(text_time, "b i -> b 1 i 1"),
repeat(media_time, "j -> 1 1 1 (j n)", n=n),
)
sim = sim.masked_fill(~text_to_media_mask, -torch.finfo(sim.dtype).max)
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
if exists(media_locations) and self.only_attend_immediate_media:
# any text without a preceding media needs to have attention zeroed out
text_without_media_mask = text_time == 0
text_without_media_mask = rearrange(
text_without_media_mask, "b i -> b 1 i 1"
)
attn = attn.masked_fill(text_without_media_mask, 0.0)
out = einsum("... i j, ... j d -> ... i d", attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class GatedCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_visual,
dim_head=64,
heads=8,
ff_mult=4,
only_attend_immediate_media=True,
):
super().__init__()
self.attn = MaskedCrossAttention(
dim=dim,
dim_visual=dim_visual,
dim_head=dim_head,
heads=heads,
only_attend_immediate_media=only_attend_immediate_media,
)
self.attn_gate = nn.Parameter(torch.tensor([0.0]))
self.ff = FeedForward(dim, mult=ff_mult)
self.ff_gate = nn.Parameter(torch.tensor([0.0]))
def forward(
self,
x,
media,
media_locations=None,
attend_previous=True,
):
x = (
self.attn(
x,
media,
media_locations=media_locations,
attend_previous=attend_previous,
)
* self.attn_gate.tanh()
+ x
)
x = self.ff(x) * self.ff_gate.tanh() + x
return x
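# Illustrative sketch, not part of the original file: driving a gated cross-attention block.
# The shapes and the media_locations mask below are made-up example values.
def _example_gated_cross_attention():
    block = GatedCrossAttentionBlock(dim=512, dim_visual=512)
    text = torch.randn(2, 10, 512)  # (B, T_txt, D_txt)
    media = torch.randn(2, 3, 64, 512)  # (B, T_img, n, D_img)
    media_locations = torch.zeros(2, 10, dtype=torch.bool)
    media_locations[:, 0] = True  # pretend the first text token is an image token
    # both gates start at tanh(0) = 0, so an untrained block is an identity over the text stream
    out = block(text, media, media_locations=media_locations)
    return out.shape  # (2, 10, 512)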
| flamingo-main | open_flamingo/src/helpers.py |
# import unittest
# import requests
# from PIL import Image
# from open_flamingo import create_model_and_transforms
# class TestFlamingoModel(unittest.TestCase):
# def test_forward_pass(self):
# model, image_processor, tokenizer = create_model_and_transforms(
# clip_vision_encoder_path="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
# clip_processor_path="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
# lang_encoder_path="hf-internal-testing/tiny-random-OPTModel",
# tokenizer_path="hf-internal-testing/tiny-random-OPTModel",
# )
# image = Image.open(
# requests.get(
# "http://images.cocodataset.org/val2017/000000039769.jpg", stream=True
# ).raw
# )
# vis_x = image_processor(images=[image, image], return_tensors="pt")[
# "pixel_values"
# ]
# vis_x = vis_x.unsqueeze(1).unsqueeze(1)
# lang_x = tokenizer(
# ["<image> A dog", "<image> A cat"],
# max_length=10,
# padding=True,
# truncation=True,
# return_tensors="pt",
# )
# # try batched forward pass
# model(vis_x, lang_x["input_ids"], attention_mask=lang_x["attention_mask"])
# def test_generate(self):
# model, image_processor, tokenizer = create_model_and_transforms(
# clip_vision_encoder_path="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
# clip_processor_path="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
# lang_encoder_path="hf-internal-testing/tiny-random-OPTModel",
# tokenizer_path="hf-internal-testing/tiny-random-OPTModel",
# )
# tokenizer.padding_side = (
# "left" # we want to pad on the left side for generation
# )
# image = Image.open(
# requests.get(
# "http://images.cocodataset.org/val2017/000000039769.jpg", stream=True
# ).raw
# )
# vis_x = image_processor(images=[image, image], return_tensors="pt")[
# "pixel_values"
# ]
# vis_x = vis_x.unsqueeze(1).unsqueeze(1)
# lang_x = tokenizer(
# ["<image> A dog", "<image> A cat <|endofchunk|>"],
# max_length=10,
# padding=True,
# truncation=True,
# return_tensors="pt",
# )
# # try batched generation
# model.generate(
# vis_x,
# lang_x["input_ids"],
# attention_mask=lang_x["attention_mask"],
# max_new_tokens=20,
# )
# if __name__ == "__main__":
# unittest.main()
| flamingo-main | tests/test_flamingo_model.py |
from setuptools import setup, find_packages
setup(
name = 'primus',
packages = find_packages(exclude=[]),
version = '0.0.2',
license='MIT',
description = 'cybertron- Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/cybertron',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'robotics'
],
install_requires=[
'classifier-free-guidance-pytorch>=0.1.4',
'einops>=0.6',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | CyberTron-master | setup.py |
#builds dataset automatically => adds to your hf account
import multiprocessing
import argparse
from itertools import chain
from datasets import load_dataset
from transformers import AutoTokenizer
class CFG:
SEED: int = 42
SEQ_LEN: int = 8192 # context length make it larger or smaller depending on your task
NUM_CPU: int = multiprocessing.cpu_count()
HF_ACCOUNT_REPO: str = "YOUR HF ACCOUNT"
#ADD YOUR OWN TOKENIZER
TOKENIZER: str = "EleutherAI/gpt-neox-20b"
#ADD YOUR OWN DATASET
DATASET_NAME: str = "EleutherAI/the_pile_deduplicated"
def main(args):
tokenizer = AutoTokenizer.from_pretrained(CFG.TOKENIZER)
train_dataset = load_dataset(CFG.DATASET_NAME, split="train")
#ADD YOUR OWN TOKENIZE LOGIC
def tokenize_function(example):
return tokenizer([t + tokenizer.eos_token for t in example["text"]])
tokenized_dataset = train_dataset.map(
tokenize_function,
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
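    # Worked example of the chunking above (illustrative numbers only): with block_size = 8192
    # and a concatenated stream of 20,000 tokens, total_length is floored to 16,384, producing
    # two training rows of exactly 8,192 tokens; the trailing 3,616 tokens are dropped.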
train_tokenized_dataset = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=CFG.NUM_CPU,
)
train_tokenized_dataset.push_to_hub(CFG.HF_ACCOUNT_REPO)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process and push dataset to Hugging Face Hub")
parser.add_argument("--seed", type=int, default=CFG.SEED, help="Random seed")
parser.add_argument("--seq_len", type=int, default=CFG.SEQ_LEN, help="Sequence length for processing")
parser.add_argument("--hf_account", type=str, default=CFG.HF_ACCOUNT_REPO, help="Hugging Face account name and repo")
parser.add_argument("--tokenizer", type=str, default=CFG.TOKENIZER, help="Tokenizer model to use")
parser.add_argument("--dataset_name", type=str, default=CFG.DATASET_NAME, help="Name of the dataset to process")
args = parser.parse_args()
main(args) | CyberTron-master | build_dataset.py |
CyberTron-master | cybertron/models/__init__.py |
|
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
causal = False,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, i, j, device):
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
def flash_attn(self, q, k, v, mask = None, attn_bias = None):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# single headed key / values
if k.ndim == 3:
k = rearrange(k, 'b n d -> b 1 n d')
if v.ndim == 3:
v = rearrange(v, 'b n d -> b 1 n d')
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# causal mask
if self.causal:
causal_mask = self.get_mask(q_len, k_len, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out | CyberTron-master | cybertron/models/rt1/flash_attn.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from typing import List, Optional, Callable, Tuple
from beartype import beartype
from einops import pack, unpack, repeat, reduce, rearrange
from einops.layers.torch import Rearrange, Reduce
from classifier_free_guidance_pytorch import TextConditioner, AttentionTextConditioner, classifier_free_guidance
from dataclasses import dataclass
from rt1.flash_attn import Attend
@dataclass
class Intermediates:
qk_similarities: Tensor = None
pre_softmax_attn: Tensor = None
post_softmax_attn: Tensor = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def pack_one(x, pattern):
return pack([x], pattern)
def unpack_one(x, ps, pattern):
return unpack(x, ps, pattern)[0]
# sinusoidal positions
def posemb_sincos_1d(seq, dim, temperature = 10000, device = None, dtype = torch.float32):
n = torch.arange(seq, device = device)
omega = torch.arange(dim // 2, device = device) / (dim // 2 - 1)
omega = 1. / (temperature ** omega)
n = n[:, None] * omega[None, :]
pos_emb = torch.cat((n.sin(), n.cos()), dim = 1)
return pos_emb.type(dtype)
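# Worked example, not part of the original source: posemb_sincos_1d(4, 8) returns a (4, 8) table
# whose first four columns are sin(n * omega_k) and last four are cos(n * omega_k), with
# omega_k = 1 / temperature ** (k / 3) for k = 0..3; row n = 0 is therefore [0, 0, 0, 0, 1, 1, 1, 1].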
# helper classes
# flash attention
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = torch.ones((q_len, k_len), dtype = torch.bool, device = device).triu(k_len - q_len + 1)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, -1, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = torch.ones((q_len, k_len), dtype = torch.bool, device = device).triu(k_len - q_len + 1)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
inner_dim = int(dim * mult)
self.norm = LayerNorm(dim)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, cond_fn = None):
x = self.norm(x)
if exists(cond_fn):
# adaptive layernorm
x= cond_fn(x)
return self.net(x)
# MBConv
class SqueezeExcitation(nn.Module):
def __init__(self, dim, shrinkage_rate = 0.25):
super().__init__()
hidden_dim = int(dim * shrinkage_rate)
self.gate = nn.Sequential(
Reduce('b c h w -> b c', 'mean'),
nn.Linear(dim, hidden_dim, bias = False),
nn.SiLU(),
nn.Linear(hidden_dim, dim, bias = False),
nn.Sigmoid(),
Rearrange('b c -> b c 1 1')
)
def forward(self, x):
return x * self.gate(x)
class MBConvResidual(nn.Module):
def __init__(self, fn, dropout = 0.):
super().__init__()
self.fn = fn
self.dropsample = Dropsample(dropout)
def forward(self, x):
out = self.fn(x)
out = self.dropsample(out)
return out + x
class Dropsample(nn.Module):
def __init__(self, prob = 0):
super().__init__()
self.prob = prob
def forward(self, x):
device = x.device
if self.prob == 0. or (not self.training):
return x
        keep_mask = torch.empty((x.shape[0], 1, 1, 1), device = device).uniform_() > self.prob
return x * keep_mask / (1 - self.prob)
def MBConv(
dim_in,
dim_out,
*,
downsample,
expansion_rate = 4,
shrinkage_rate = 0.25,
dropout = 0.
):
hidden_dim = int(expansion_rate * dim_out)
stride = 2 if downsample else 1
net = nn.Sequential(
nn.Conv2d(dim_in, hidden_dim, 1),
nn.BatchNorm2d(hidden_dim),
nn.GELU(),
nn.Conv2d(hidden_dim, hidden_dim, 3, stride = stride, padding = 1, groups = hidden_dim),
nn.BatchNorm2d(hidden_dim),
nn.GELU(),
SqueezeExcitation(hidden_dim, shrinkage_rate = shrinkage_rate),
nn.Conv2d(hidden_dim, dim_out, 1),
nn.BatchNorm2d(dim_out)
)
if dim_in == dim_out and not downsample:
net = MBConvResidual(net, dropout = dropout)
return net
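# Illustrative shape note, not part of the original source: MBConv(64, 96, downsample=True) maps a
# (b, 64, 56, 56) feature map to (b, 96, 28, 28) via the strided depthwise conv; only when
# dim_in == dim_out and downsample is False does the block gain the drop-sample residual branch.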
# attention related classes
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
dropout = 0.,
window_size = 7,
):
super().__init__()
assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head'
self.norm = LayerNorm(dim)
self.heads = dim // dim_head
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, dim * 3, bias = False)
self.attend = nn.Sequential(
nn.Softmax(dim = -1),
nn.Dropout(dropout)
)
self.to_out = nn.Sequential(
nn.Linear(dim, dim, bias = False),
nn.Dropout(dropout)
)
# relative positional bias
self.rel_pos_bias = nn.Embedding((2 * window_size - 1) ** 2, self.heads)
pos = torch.arange(window_size)
grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij'))
grid = rearrange(grid, 'c i j -> (i j) c')
rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...')
rel_pos += window_size - 1
rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1)
self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False)
def forward(self, x):
batch, height, width, window_height, window_width, _, device, h = *x.shape, x.device, self.heads
x = self.norm(x)
# flatten
x = rearrange(x, 'b x y w1 w2 d -> (b x y) (w1 w2) d')
# project for queries, keys, values
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
# split heads
q, k, v = map(lambda t: rearrange(t, 'b n (h d ) -> b h n d', h = h), (q, k, v))
# scale
q = q * self.scale
# sim
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# add positional bias
bias = self.rel_pos_bias(self.rel_pos_indices)
sim = sim + rearrange(bias, 'i j h -> h i j')
# attention
attn = self.attend(sim)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h (w1 w2) d -> b w1 w2 (h d)', w1 = window_height, w2 = window_width)
# combine heads out
out = self.to_out(out)
return rearrange(out, '(b x y) ... -> b x y ...', x = height, y = width)
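# Illustrative sketch, not part of the original file: the window attention above operates on
# feature maps that were already split into windows. Example dimensions are arbitrary.
def _example_window_attention_shapes():
    attn = Attention(dim=96, dim_head=32, window_size=7)
    windows = torch.randn(1, 4, 4, 7, 7, 96)  # (b, x, y, window_h, window_w, d)
    return attn(windows).shape  # -> (1, 4, 4, 7, 7, 96), attention applied within each 7x7 window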
class MaxViT(nn.Module):
def __init__(
self,
*,
num_classes,
dim,
depth,
dim_head = 32,
dim_conv_stem = None,
window_size = 7,
mbconv_expansion_rate = 4,
mbconv_shrinkage_rate = 0.25,
dropout = 0.1,
channels = 3
):
super().__init__()
assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage'
# convolutional stem
dim_conv_stem = default(dim_conv_stem, dim)
self.conv_stem = nn.Sequential(
nn.Conv2d(channels, dim_conv_stem, 3, stride = 2, padding = 1),
nn.Conv2d(dim_conv_stem, dim_conv_stem, 3, padding = 1)
)
# variables
num_stages = len(depth)
dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages)))
dims = (dim_conv_stem, *dims)
dim_pairs = tuple(zip(dims[:-1], dims[1:]))
self.layers = nn.ModuleList([])
# shorthand for window size for efficient block - grid like attention
w = window_size
# iterate through stages
cond_hidden_dims = []
for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(dim_pairs, depth)):
for stage_ind in range(layer_depth):
is_first = stage_ind == 0
stage_dim_in = layer_dim_in if is_first else layer_dim
cond_hidden_dims.append(stage_dim_in)
block = nn.Sequential(
MBConv(
stage_dim_in,
layer_dim,
downsample = is_first,
expansion_rate = mbconv_expansion_rate,
shrinkage_rate = mbconv_shrinkage_rate
),
Rearrange('b d (x w1) (y w2) -> b x y w1 w2 d', w1 = w, w2 = w), # block-like attention
Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)),
# Residual(flash_attn(dim = layer_dim, dim_head = dim_head, dropout=dropout, window_size = w )),
Residual(FeedForward(dim = layer_dim, dropout = dropout)),
Rearrange('b x y w1 w2 d -> b d (x w1) (y w2)'),
Rearrange('b d (w1 x) (w2 y) -> b x y w1 w2 d', w1 = w, w2 = w), # grid-like attention
#
Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)),
# Residual(flash_attn(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w )),
Residual(FeedForward(dim = layer_dim, dropout = dropout)),
Rearrange('b x y w1 w2 d -> b d (w1 x) (w2 y)'),
)
self.layers.append(block)
embed_dim = dims[-1]
self.embed_dim = dims[-1]
self.cond_hidden_dims = cond_hidden_dims
# mlp head out
self.mlp_head = nn.Sequential(
Reduce('b d h w -> b d', 'mean'),
LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
@beartype
def forward(
self,
x,
texts: Optional[List[str]] = None,
cond_fns: Optional[Tuple[Callable, ...]] = None,
cond_drop_prob = 0.,
return_embeddings = False
):
x = self.conv_stem(x)
if not exists(cond_fns):
cond_fns = (None,) * len(self.layers)
for stage, cond_fn in zip(self.layers, cond_fns):
if exists(cond_fn):
x = cond_fn(x)
x = stage(x)
if return_embeddings:
return x
return self.mlp_head(x)
# attention
class TransformerAttention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False,
dropout = 0.1,
flash_attn = False,
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
self.causal = causal
inner_dim = dim_head * heads
################# => Flash attention
self.attend = Attend(
            causal = True,
flash = flash_attn,
dropout = dropout,
)
################# => Flash attention
dim_context = default(dim_context, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity()
self.attn_dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
# def forward(
# self,
# x,
# context = None,
# mask = None,
# attn_bias = None,
# attn_mask = None,
# cond_fn: Optional[Callable] = None
# ):
# b = x.shape[0]
# if exists(context):
# context = self.context_norm(context)
# kv_input = default(context, x)
# x = self.norm(x)
# if exists(cond_fn):
# # adaptive layer-norm
# x= cond_fn(x)
# q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
# q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# q = q * self.scale
# sim = einsum('b h i d, b j d -> b h i j', q, k)
# if exists(attn_bias):
# sim = sim + attn_bias
# if exists(attn_mask):
# sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# if exists(mask):
# mask = rearrange(mask, 'b j -> b 1 1 j')
# sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# if self.causal:
# i, j = sim.shape[-2:]
# causal_mask = torch.ones((i, j), dtype = torch.bool, device = x.device).triu(j - i + 1)
# sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attn = sim.softmax(dim = -1)
# attn = self.attn_dropout(attn)
# out = einsum('b h i j, b j d -> b h i d', attn, v)
# out = rearrange(out, 'b h n d -> b n (h d)')
# return self.to_out(out)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
attn_mask = None,
cond_fn: Optional[Callable] = None
):
b, n, _, h = *x.shape, self.heads
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
x = self.norm(x)
if exists(cond_fn):
# adaptive layer-norm
x= cond_fn(x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
        q = rearrange(q, 'b n (h d) -> b h n d', h = h)
        # k and v stay single-headed with shape (b, n, dim_head); the Attend module broadcasts
        # them across the query heads and applies the 1 / sqrt(dim_head) scaling itself
# Calculate attention using the Attend module
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
@beartype
class Transformer(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
depth = 6,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
TransformerAttention(dim = dim, heads = heads, dropout = attn_dropout),
# flash_attn(dim = dim, heads = heads, dropout = attn_dropout),
FeedForward(dim = dim, dropout = ff_dropout)
]))
def forward(
self,
x,
cond_fns: Optional[Tuple[Callable, ...]] = None,
attn_mask = None
):
if not exists(cond_fns):
            cond_fns = (None,) * (len(self.layers) * 2)
cond_fns = iter(cond_fns)
for attn, ff in self.layers:
x = attn(x, attn_mask = attn_mask, cond_fn = next(cond_fns)) + x
x = ff(x, cond_fn = next(cond_fns)) + x
return x
# token learner module
class TokenLearner(nn.Module):
"""
https://arxiv.org/abs/2106.11297
using the 1.1 version with the MLP (2 dense layers with gelu) for generating attention map
"""
def __init__(
self,
*,
dim,
ff_mult = 2,
num_output_tokens = 8,
num_layers = 2
):
super().__init__()
inner_dim = dim * ff_mult * num_output_tokens
self.num_output_tokens = num_output_tokens
self.net = nn.Sequential(
nn.Conv2d(dim * num_output_tokens, inner_dim, 1, groups = num_output_tokens),
nn.GELU(),
nn.Conv2d(inner_dim, num_output_tokens, 1, groups = num_output_tokens),
)
def forward(self, x):
x, ps = pack_one(x, '* c h w')
x = repeat(x, 'b c h w -> b (g c) h w', g = self.num_output_tokens)
attn = self.net(x)
attn = rearrange(attn, 'b g h w -> b 1 g h w')
x = rearrange(x, 'b (g c) h w -> b c g h w', g = self.num_output_tokens)
x = reduce(x * attn, 'b c g h w -> b c g', 'mean')
x = unpack_one(x, ps, '* c n')
return x
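# Illustrative sketch, not part of the original file: the token learner compresses each frame's
# spatial grid into a fixed number of tokens. Example dimensions below are arbitrary.
def _example_token_learner_shapes():
    learner = TokenLearner(dim=512, num_output_tokens=8)
    frame_features = torch.randn(2, 6, 512, 7, 7)  # (batch, frames, channels, height, width)
    tokens = learner(frame_features)  # -> (2, 6, 512, 8): 49 spatial positions pooled into 8 tokens
    return tokens.shape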
# Robotic Transformer
@beartype
class RT1(nn.Module):
def __init__(
self,
*,
vit: MaxViT,
num_actions = 11,
action_bins = 256,
depth = 6,
heads = 8,
dim_head = 64,
token_learner_ff_mult = 2,
token_learner_num_layers = 2,
token_learner_num_output_tokens = 8,
cond_drop_prob = 0.2,
use_attn_conditioner = False,
conditioner_kwargs: dict = dict()
):
super().__init__()
self.vit = vit
self.num_vit_stages = len(vit.cond_hidden_dims)
conditioner_klass = AttentionTextConditioner if use_attn_conditioner else TextConditioner
self.conditioner = conditioner_klass(
hidden_dims = (*tuple(vit.cond_hidden_dims), *((vit.embed_dim,) * depth * 2)),
hiddens_channel_first = (*((True,) * self.num_vit_stages), *((False,) * depth * 2)),
cond_drop_prob = cond_drop_prob,
**conditioner_kwargs
)
self.token_learner = TokenLearner(
dim = vit.embed_dim,
ff_mult = token_learner_ff_mult,
num_output_tokens = token_learner_num_output_tokens,
num_layers = token_learner_num_layers
)
self.num_learned_tokens = token_learner_num_output_tokens
self.transformer_depth = depth
self.transformer = Transformer(
dim = vit.embed_dim,
dim_head = dim_head,
heads = heads,
depth = depth
)
self.cond_drop_prob = cond_drop_prob
self.to_logits = nn.Sequential(
LayerNorm(vit.embed_dim),
nn.Linear(vit.embed_dim, num_actions * action_bins),
Rearrange('... (a b) -> ... a b', b = action_bins)
)
@classifier_free_guidance
def forward(
self,
video,
texts: Optional[List[str]] = None,
cond_drop_prob = 0.
):
depth = self.transformer_depth
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
frames, device = video.shape[2], video.device
cond_fns = self.conditioner(
texts,
cond_drop_prob = cond_drop_prob,
repeat_batch = (*((frames,) * self.num_vit_stages), *((1,) * self.transformer_depth * 2))
)
vit_cond_fns, transformer_cond_fns = cond_fns[:-(depth * 2)], cond_fns[-(depth * 2):]
video = rearrange(video, 'b c f h w -> b f c h w')
images, packed_shape = pack_one(video, '* c h w')
tokens = self.vit(
images,
texts = texts,
cond_fns = vit_cond_fns,
cond_drop_prob = cond_drop_prob,
return_embeddings = True
)
tokens = unpack_one(tokens, packed_shape, '* c h w')
learned_tokens = self.token_learner(tokens)
learned_tokens = rearrange(learned_tokens, 'b f c n -> b (f n) c')
# causal attention mask
attn_mask = torch.ones((frames, frames), dtype = torch.bool, device = device).triu(1)
attn_mask = repeat(attn_mask, 'i j -> (i r1) (j r2)', r1 = self.num_learned_tokens, r2 = self.num_learned_tokens)
# sinusoidal positional embedding
pos_emb = posemb_sincos_1d(frames, learned_tokens.shape[-1], dtype = learned_tokens.dtype, device = learned_tokens.device)
learned_tokens = learned_tokens + repeat(pos_emb, 'n d -> (n r) d', r = self.num_learned_tokens)
# attention
attended_tokens = self.transformer(learned_tokens, cond_fns = transformer_cond_fns, attn_mask = ~attn_mask)
pooled = reduce(attended_tokens, 'b (f n) d -> b f d', 'mean', f = frames)
logits = self.to_logits(pooled)
return logits | CyberTron-master | cybertron/models/rt1/robotic_transformer.py |
#builds dataset automatically => adds to your hf account
import multiprocessing
import argparse
from itertools import chain
from datasets import load_dataset
from transformers import AutoTokenizer
class CFG:
SEED: int = 42
SEQ_LEN: int = 8192 # context length make it larger or smaller depending on your task
NUM_CPU: int = multiprocessing.cpu_count()
HF_ACCOUNT_REPO: str = "YOUR HF ACCOUNT"
#ADD YOUR OWN TOKENIZER
TOKENIZER: str = "EleutherAI/gpt-neox-20b"
#ADD YOUR OWN DATASET
DATASET_NAME: str = "EleutherAI/the_pile_deduplicated"
def main(args):
tokenizer = AutoTokenizer.from_pretrained(CFG.TOKENIZER)
train_dataset = load_dataset(CFG.DATASET_NAME, split="train")
#ADD YOUR OWN TOKENIZE LOGIC
def tokenize_function(example):
return tokenizer([t + tokenizer.eos_token for t in example["text"]])
tokenized_dataset = train_dataset.map(
tokenize_function,
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_tokenized_dataset = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=CFG.NUM_CPU,
)
train_tokenized_dataset.push_to_hub(CFG.HF_ACCOUNT_REPO)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process and push dataset to Hugging Face Hub")
parser.add_argument("--seed", type=int, default=CFG.SEED, help="Random seed")
parser.add_argument("--seq_len", type=int, default=CFG.SEQ_LEN, help="Sequence length for processing")
parser.add_argument("--hf_account", type=str, default=CFG.HF_ACCOUNT_REPO, help="Hugging Face account name and repo")
parser.add_argument("--tokenizer", type=str, default=CFG.TOKENIZER, help="Tokenizer model to use")
parser.add_argument("--dataset_name", type=str, default=CFG.DATASET_NAME, help="Name of the dataset to process")
args = parser.parse_args()
main(args) | CyberTron-master | cybertron/models/rt1/tokenize_dataset.py |
import torch
from robotic_transformer import MaxViT, RT1
vit = MaxViT(
num_classes = 1000,
dim_conv_stem = 64,
dim = 96,
dim_head = 36,
depth = (2, 2, 5, 2),
window_size = 7,
    mbconv_expansion_rate = 4,
mbconv_shrinkage_rate = 0.25,
dropout = 0.1
)
model = RT1(
vit = vit,
num_actions = 11,
depth = 6,
heads = 8,
dim_head = 64,
cond_drop_prob = 0.2
)
video = torch.randn(2, 3, 6, 224, 224)
instructions = [
'bring me that pizza on that table'
]
train_logits = model(video, instructions) # (2, 6, 11, 256) # (batch, frames, actions, bins)
#after training
model.eval()
eval_logits = model(video, instructions, cond_scale = 3.) # classifier free guidance with conditional scale of 3 | CyberTron-master | cybertron/models/rt1/model.py |
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from accelerate import Accelerator
from accelerate.utils import (DummyOptim, DummyScheduler,
InitProcessGroupKwargs)
from datasets import concatenate_datasets, load_dataset
from lion_pytorch import Lion
from torch.nn import LayerNorm
from torch.nn import LayerNorm
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
)
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (AutoTokenizer, default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup, set_seed)
from models.rt1.model import model
from utils.stable_adam import StableAdamWUnfused
############ SETUP CONFIG
# import torch.distributed as dist
# dist.init_process_group(backend='nccl', init_method="env://")
################
class CFG:
BATCH_SIZE: int = 3
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 3e-4
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = False
USE_PRETOKENIZED: bool = False
USE_ACTIVATION_CHECKPOINTING: bool = False
RESUME_FROM_CHECKPOINT: str = None
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = "YOUR_OUTPUT_DIR"
ENTITY_NAME: str = "YOUR_ENTITY_NAME" #wandb
# helpers
def print_num_params(model, accelerator: Accelerator):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
#maybe error here in decoder, use parallel transformer block
def check_fn(submodule):
return isinstance(submodule, Decoder)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
kosmos_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Decoder,
},
)
else:
kosmos_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=kosmos_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
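# Illustrative usage sketch, not part of the original script; the flags shown are hypothetical:
#
#   model = fsdp(model, auto_wrap=False, mp="bf16", shard_strat="FULL_SHARD")
#
# "NO_SHARD" keeps a full replica per rank, "SHARD_GRAD" shards gradients and optimizer state,
# and "FULL_SHARD" additionally shards the parameters themselves, all cast to the chosen precision.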
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
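# Illustrative usage sketch with placeholder numbers (main() below derives its warmup as 1% of
# max_train_steps):
#
#   lr_scheduler = get_lr_scheduler_with_warmup(
#       optimizer=optim, scheduler_type="cosine",
#       num_warmup_steps=100, max_train_steps=10_000,
#   )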
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
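# Illustrative usage sketch, not the script's actual settings (main() below passes its own values):
#
#   optim = decoupled_optimizer(
#       model=model, learning_rate=3e-4, weight_decay=0.1, beta_1=0.9, beta_2=0.95,
#       optimizer_type="adamw", use_fsdp=False, accelerator=accelerator,
#   )
#
# As written, the function sorts LayerNorm and token-embedding parameters into a no-weight-decay
# group, leaves the to_logits head weight out of both groups, and applies the configured weight
# decay to the remaining Linear weights.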
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
#switch to falconwebdataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train")
d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return train_dataset
def main():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 4 #??????
accelerator.init_trackers(
project_name="Kosmos",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
# model = Kosmos.to(accelerator.device)
# model = AutoModelForCausalLM.from_pretrained("YOUR MODEL", load_in_4bit=True, device_map="auto").to(accelerator.device)
model.to(accelerator.device)
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='deepspeed',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
if CFG.USE_DEEPSPEED:
lr_scheduler = DummyScheduler(
optim,
total_num_steps=max_train_steps * accelerator.num_processes,
warmup_num_steps=NUM_WARMUP_STEPS
)
else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# I do not know why Huggingface recommends recalculation of max_train_steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
if CFG.RESUME_FROM_CHECKPOINT is not None or CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
if __name__ == "__main__":
main() | CyberTron-master | cybertron/models/rt1/train.py |
import torch
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1 | CyberTron-master | cybertron/models/rt1/utils/stable_adam.py |
from setuptools import find_packages, setup
setup(
name='gato',
version='0.0.1',
description='Gato: A Generalist Agent',
url='https://github.com/kyegomez/GATO',
author='Kye Gomez',
author_email='[email protected]',
long_description=open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type='text/markdown',
license='MIT',
packages=find_packages(exclude=[]),
install_requires=[
'tensorflow>=2.11',
'flowchain>=0.0.4'
],
python_requires='>=3.10.0',
keywords=[
'deep learning',
'gato',
'tensorflow',
'generalist agent'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.10'
]
)
| CyberTron-master | cybertron/models/GATO/setup.py |
import torch
from gato import Gato, GatoConfig
# Create model instance
config = GatoConfig.small()
gato = Gato(config)
# Fake inputs for Gato
input_dim = config.input_dim
input_ids = torch.cat([
torch.rand((1, 1, input_dim)) for _ in range(20)] + # 20 image patches
[torch.full((1, 1, input_dim), 0.25), # continuous value
torch.full((1, 1, input_dim), 624.0)] + # discrete (actions, texts)
[torch.rand((1, 1, input_dim)) for _ in range(20)] + # 20 image patches
[torch.full((1, 1, input_dim), 0.12), # continuous value
torch.full((1, 1, input_dim), 295.0)], # discrete (actions, texts)
dim=1)
encoding = torch.tensor([
[0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 2]
])
row_pos = (
torch.tensor([[0.00, 0.25, 0.50, 0.75, 0, 0, 0.00, 0.25, 0.50, 0.75, 0, 0]]), # pos_from
torch.tensor([[0.25, 0.50, 0.75, 1.00, 0, 0, 0.25, 0.50, 0.75, 1.00, 0, 0]]) # pos_to
)
col_pos = (
torch.tensor([[0.00, 0.00, 0.00, 0.80, 0, 0, 0.00, 0.00, 0.00, 0.80, 0, 0]]), # pos_from
torch.tensor([[0.20, 0.20, 0.20, 1.00, 0, 0, 0.20, 0.20, 0.20, 1.00, 0, 0]]) # pos_to
)
obs = (
torch.tensor([[ 0, 1, 2, 19, 20, 21, 0, 1, 2, 19, 20, 21]]), # obs token
torch.tensor([[ 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0]]) # obs token masking (for action tokens)
)
hidden_states = gato((input_ids, (encoding, row_pos, col_pos), obs))
| CyberTron-master | cybertron/models/GATO/example.py |
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from accelerate import Accelerator
from accelerate.utils import (DummyOptim, DummyScheduler,
InitProcessGroupKwargs)
from datasets import concatenate_datasets, load_dataset
from lion_pytorch import Lion
from torch.nn import LayerNorm
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
)
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (AutoTokenizer, default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup, set_seed)
from gato import Gato, GatoConfig
############ SETUP CONFIG
# import torch.distributed as dist
# dist.init_process_group(backend='nccl', init_method="env://")
################
class CFG:
BATCH_SIZE: int = 3
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 3e-4
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = False
USE_PRETOKENIZED: bool = False
USE_ACTIVATION_CHECKPOINTING: bool = False
RESUME_FROM_CHECKPOINT: str = None
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = "YOUR_OUTPUT_DIR"
ENTITY_NAME: str = "YOUR_ENTITY_NAME" #wandb
# helpers
def print_num_params(model, accelerator: Accelerator):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
    # NOTE: `Decoder` below is not imported in this file; it is assumed to be the
    # transformer block class to checkpoint (e.g. a parallel transformer block).
def check_fn(submodule):
return isinstance(submodule, Decoder)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
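        # NOTE: `Decoder` is not imported in this file; it is a placeholder for the
        # transformer layer class to auto-wrap (assumption: Gato's TransformerBlock
        # or an equivalent decoder layer).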
kosmos_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Decoder,
},
)
else:
kosmos_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=kosmos_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Decouples the optimizer from the training process.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
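    # Illustrative sketch (assumed toy values): with block_size = 4 and a tokenized
    # batch {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}, group_texts concatenates the
    # ids into [1, 2, 3, 4, 5, 6, 7, 8] and splits them into chunks
    # [[1, 2, 3, 4], [5, 6, 7, 8]]; any remainder shorter than block_size is dropped.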
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
#switch to falconwebdataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train")
d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return train_dataset
def main():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 4 #??????
accelerator.init_trackers(
project_name="Kosmos",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
#create model instance
config = GatoConfig.small()
model = Gato(config)
model.to(accelerator.device)
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='deepspeed',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
if CFG.USE_DEEPSPEED:
lr_scheduler = DummyScheduler(
optim,
total_num_steps=max_train_steps * accelerator.num_processes,
warmup_num_steps=NUM_WARMUP_STEPS
)
else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
    # Recalculate max_train_steps after accelerator.prepare: the dataloader is sharded
    # across processes, so its length (and therefore the step count) can change.
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
        if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
if __name__ == "__main__":
main() | CyberTron-master | cybertron/models/GATO/training.py |
import copy
from typing import Dict, Any
class GatoConfig:
@staticmethod
def large():
return GatoConfig(num_transformer_blocks=24,
num_attention_heads=16,
layer_width=2048,
feedforward_hidden_size=8192,
key_value_size=128)
@staticmethod
def baseline():
return GatoConfig(num_transformer_blocks=12,
num_attention_heads=12,
layer_width=1536,
feedforward_hidden_size=6144,
key_value_size=128)
@staticmethod
def small():
return GatoConfig(num_transformer_blocks=8,
num_attention_heads=24,
layer_width=768,
feedforward_hidden_size=3072,
key_value_size=32)
def __init__(self, **kwargs):
self.input_dim = kwargs.pop('input_dim', 768)
self.img_patch_size = kwargs.pop('img_patch_size', 16)
# Section 2.3. Training
self.token_sequence_length = kwargs.pop('token_sequence_length', 1024)
# Section 2.1. Tokenization
# Text - SentencePiece
self.vocabulary_size = kwargs.pop('vocabulary_size', 32000)
# Discrete values
self.actions_size = kwargs.pop('actions_size', 1024)
# Continuous values
self.continuous_values_size = kwargs.pop('continuous_values_size', 1024)
# Appendix C.1. Transformer Hyperparameters
self.num_transformer_blocks = kwargs.pop('num_transformer_blocks', 8)
self.num_attention_heads = kwargs.pop('num_attention_heads', 24)
self.layer_width = kwargs.pop('layer_width', 768)
self.feedforward_hidden_size = kwargs.pop('feedforward_hidden_size', 3072)
self.key_value_size = kwargs.pop('key_value_size', 32)
# Appendix E. Regularization
self.dropout_rate = kwargs.pop('dropout_rate', 0.1)
# Appendix C.2. Embedding Function
self.num_group_norm_groups = kwargs.pop('num_group_norm_groups', 32)
# Appendix C.3. Position Encodings > Patch Position Encodings
self.discretize_depth = kwargs.pop('discretize_depth', 128)
# Appendix C.3. Position Encodings > Local Observation Position Encodings
self.local_position_encoding_size = kwargs.pop('local_position_encoding_size', 512)
self.max_seq_len = kwargs.pop('max_seq_len', 8192)
@property
def embedding_input_size(self):
return self.vocabulary_size + self.continuous_values_size + self.actions_size + 1
@property
def output_target_size(self):
return self.vocabulary_size + self.actions_size
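    # With the default sizes above (vocabulary_size=32000, continuous_values_size=1024,
    # actions_size=1024), embedding_input_size works out to 34049 and
    # output_target_size to 33024.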
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
return output
@classmethod
def from_dict(cls, config_dict: Dict[str, Any]) -> "GatoConfig":
config = cls(**config_dict)
return config
| CyberTron-master | cybertron/models/GATO/gato/config.py |
from flowchain import enable_tensor_chaining
enable_tensor_chaining()
| CyberTron-master | cybertron/models/GATO/gato/__init__.py |
from typing import Dict, Any, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from gato import GatoConfig
def _randomized_positions(from_v, to_v):
pos = torch.rand_like(from_v) * (to_v - from_v)
return pos.int()
def _rounded_mean_positions(from_v, to_v):
pos = (from_v + to_v).float() / 2
return pos.round()
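# Patch position encodings (cf. the "Appendix C.3" comments in config.py): during
# training the row/column position of each image patch is sampled uniformly from its
# discretized interval (_randomized_positions), while at evaluation time the rounded
# mean of the interval is used instead (_rounded_mean_positions).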
class PatchPositionEncoding(nn.Module):
def __init__(self, config):
super().__init__()
self.embedding_dim = config.layer_width
self.discretize_depth = config.discretize_depth
self.patch_size = config.img_patch_size
self.row_embedding = nn.Embedding(self.discretize_depth, self.embedding_dim)
self.col_embedding = nn.Embedding(self.discretize_depth, self.embedding_dim)
def _discretize(self, pos):
return (pos * self.discretize_depth).round()
def _discretize_interval(self, interval):
pos_from, pos_to = interval
return self._discretize(pos_from), self._discretize(pos_to)
def forward(self, input_ids, pos):
row_pos, col_pos = pos
row_pos_from, row_pos_to = self._discretize_interval(row_pos)
col_pos_from, col_pos_to = self._discretize_interval(col_pos)
if self.training:
row_pos = row_pos_from + _randomized_positions(row_pos_from, row_pos_to)
col_pos = col_pos_from + _randomized_positions(col_pos_from, col_pos_to)
else:
row_pos = _rounded_mean_positions(row_pos_from, row_pos_to)
col_pos = _rounded_mean_positions(col_pos_from, col_pos_to)
return input_ids + self.row_embedding(row_pos.long()) + self.col_embedding(col_pos.long())
def get_config(self):
config = super(PatchPositionEncoding, self).get_config()
config.update({
'config': self.config.to_dict(),
})
return config
class ResidualUnit(nn.Module):
def __init__(self, num_groups: int, filters: int):
super().__init__()
self.num_groups = num_groups
self.filters = filters
self.conv1 = nn.Conv2d(in_channels=filters, out_channels=filters//2, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=filters//2, out_channels=filters, kernel_size=3, stride=2, padding=1)
self.conv_proj = nn.Conv2d(in_channels=filters, out_channels=filters, kernel_size=1, stride=2, padding=0)
self.gn1 = nn.GroupNorm(num_groups=self.num_groups, num_channels=filters)
self.gn2 = nn.GroupNorm(num_groups=self.num_groups, num_channels=filters//2)
self.gn_proj = nn.GroupNorm(num_groups=self.num_groups, num_channels=filters)
def forward(self, x):
        residual = self.conv_proj(self.gn_proj(x))
x = F.gelu(self.gn1(x))
x = self.conv1(x)
x = F.gelu(self.gn2(x))
x = self.conv2(x)
return x + residual
class ResidualEmbedding(nn.Module):
def __init__(self, config):
        super().__init__()
        if isinstance(config, dict):
            config = GatoConfig(**config)
        self.config = config
self.root_conv = nn.Sequential(
nn.Conv2d(in_channels=config.input_dim, out_channels=96, kernel_size=7, stride=2, padding=3),
nn.GroupNorm(num_channels=96, num_groups=config.num_group_norm_groups),
nn.GELU()
)
self.residual_units = nn.ModuleList([ResidualUnit(num_groups=config.num_group_norm_groups,
filters=96 * 2 ** (i + 1))
for i in range(3)])
if config.input_dim != config.layer_width:
self.conv_proj = nn.Conv2d(in_channels=96 * 2 ** 3, out_channels=config.layer_width, kernel_size=1, stride=1, padding=0)
def forward(self, images):
x = self.root_conv(images)
for unit in self.residual_units:
x = unit(x)
if self.config.input_dim != self.config.layer_width:
x = self.conv_proj(x)
return x
def get_config(self):
config = super(ResidualEmbedding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
class LocalPositionEncoding(nn.Module):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
"""
Appendix C.3. Position Encodings > Local Observation Position Encodings
"""
        super(LocalPositionEncoding, self).__init__()  # nn.Module.__init__ accepts no trainable/name kwargs
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding = nn.Embedding(self.config.token_sequence_length, self.config.layer_width)
def forward(self, inputs):
obs_pos, obs_mask = inputs
embed = self.embedding(obs_pos)
ones = torch.ones((embed.shape[0], 1, self.config.layer_width)).to(embed.device)
obs_mask = obs_mask.float().transpose(-1, -2).matmul(ones)
return embed * obs_mask
def get_config(self):
config = super(LocalPositionEncoding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
class DiscreteEmbedding(nn.Module):
def __init__(self, config):
super(DiscreteEmbedding, self).__init__()
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
        self.embedding = nn.Embedding(self.config.embedding_input_size, self.config.layer_width)
def forward(self, inputs):
return self.embedding(inputs)
def get_config(self):
config = super(DiscreteEmbedding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
| CyberTron-master | cybertron/models/GATO/gato/models/embedding.py |
from gato.models.transformer import TransformerBlock
from gato.models.embedding import PatchPositionEncoding, ResidualEmbedding, LocalPositionEncoding, DiscreteEmbedding
from gato.models.tokenizers import ContinousValueTokenizer
from gato import GatoConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
class Gato(nn.Module):
def __init__(self, config):
super(Gato, self).__init__()
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.image_embedding = PatchEmbedding(config)
self.discrete_embedding = DiscreteEmbedding(config)
self.continuous_encoding = ContinousValueTokenizer(config)
self.transformer = Transformer(config)
self.local_pos_encoding = LocalPositionEncoding(config)
def forward(self, inputs):
input_ids, (encoding, row_pos, col_pos), (obs_pos, obs_mask) = inputs
encoding = F.one_hot(encoding, num_classes=3).float()
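        # `encoding` marks each position as 0 = image patch, 1 = continuous value,
        # 2 = discrete token (see example.py); the three one-hot channels below act as
        # masks selecting which embedding pathway contributes at each position.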
ones = torch.ones((input_ids.size(0), 1, self.config.layer_width))
image_embed = self.image_embedding((input_ids, (row_pos, col_pos)))
image_embed *= encoding[..., 0].unsqueeze(-1).matmul(ones)
continuous_embed = self.continuous_encoding(input_ids[..., 0])
continuous_embed = self.discrete_embedding(continuous_embed)
continuous_embed *= encoding[..., 1].unsqueeze(-1).matmul(ones)
discrete_embed = self.discrete_embedding(input_ids[..., 0])
discrete_embed *= encoding[..., 2].unsqueeze(-1).matmul(ones)
embed = image_embed + continuous_embed + discrete_embed
embed += self.local_pos_encoding((obs_pos, obs_mask))
hidden_states = self.transformer(embed)
return hidden_states
def get_config(self):
return super(Gato, self).get_config()
class Transformer(nn.Module):
def __init__(self, config):
super(Transformer, self).__init__()
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.encoders = nn.ModuleList([TransformerBlock(config) for _ in range(config.num_transformer_blocks)])
def forward(self, inputs):
x = inputs
for encoder in self.encoders:
x = encoder(x)
return x
def get_config(self):
return super(Transformer, self).get_config()
class PatchEmbedding(nn.Module):
def __init__(self, config):
super(PatchEmbedding, self).__init__()
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.residual_embedding = ResidualEmbedding(config)
self.pos_encoding = PatchPositionEncoding(config)
def forward(self, inputs):
input_ids, (row_pos, col_pos) = inputs
patch_size = self.config.img_patch_size
depth = self.config.input_dim // (patch_size * patch_size)
x = input_ids.view(-1, input_ids.size(1), patch_size, patch_size, depth)
x = self.residual_embedding(x)
x = self.pos_encoding((x, (row_pos, col_pos)))
return x
def get_config(self):
return super(PatchEmbedding, self).get_config()
| CyberTron-master | cybertron/models/GATO/gato/models/__init__.py |
from gato import GatoConfig
import torch.nn as nn
# TODO: implement ALiBi and other pluggable attention variants on top of the Flash
# block-sparse multi-head attention used below.
from flash_attn.flash_blocksparse_attention import FlashBlocksparseMHA
class TransformerBlock(nn.Module):
def __init__(self, config):
super(TransformerBlock, self).__init__()
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.attention = FlashBlocksparseMHA(embed_dim=config.layer_width,
num_heads=config.num_attention_heads,
dropout=config.dropout_rate,
max_seq_length=config.max_seq_len)
#may be unnecessary
self.dropout = nn.Dropout(config.dropout_rate)
        self.feed_forward = nn.Sequential(
            nn.Linear(in_features=config.layer_width, out_features=config.feedforward_hidden_size),
            nn.GELU(),
            nn.Dropout(config.dropout_rate),
            nn.Linear(in_features=config.feedforward_hidden_size, out_features=config.layer_width),
            nn.Dropout(config.dropout_rate)
        )
self.layer_norm1 = nn.LayerNorm(normalized_shape=config.layer_width, eps=1e-6)
self.layer_norm2 = nn.LayerNorm(normalized_shape=config.layer_width, eps=1e-6)
def forward(self, inputs):
x_norm1 = self.layer_norm1(inputs)
x_attention, _ = self.attention(x_norm1, x_norm1, x_norm1)
x_dropout = self.dropout(x_attention)
x_residual = x_dropout + inputs
x_norm2 = self.layer_norm2(x_residual)
x_ff = self.feed_forward(x_norm2)
x_residual2 = x_ff + x_residual
return x_residual2
def get_config(self):
config = super(TransformerBlock, self).get_config()
config.update({
'config': self.config.to_dict(),
})
return config | CyberTron-master | cybertron/models/GATO/gato/models/transformer.py |
from gato import GatoConfig
import torch
import torch.nn as nn
def mu_law_encode(x, mu=100, m=256):
    numerator = torch.log(x.abs() * mu + 1.0)
    denominator = torch.log(torch.tensor(m * mu + 1.0))
    return (numerator / denominator) * x.sign()
def tokenize_continous_value(x, mu=100, m=256, bins=1024, shift=None):
    # Appendix B. Agent Data Tokenization
    # Continuous values are mu-law encoded, then discretized into bins of uniform
    # width on the domain [-1, 1].
    x = mu_law_encode(x, mu, m)
    # 1024 bins are used, and the resulting integers are shifted so they do not
    # overlap with the ids used for text tokens.
    c = (x + 1) * (bins / 2)
    c = c.int()
    if shift is not None:
        c += shift
    return c
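# Illustrative check of the tokenization above (values worked out by hand, not taken
# from the paper): with the defaults mu=100, m=256, bins=1024 and shift=32000 (the
# text vocabulary size), x = 0.0 mu-law encodes to 0.0 and falls in bin 512, giving
# token id 32512, while x = 1.0 encodes to roughly 0.455 and gives token id ~32744.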
class ContinousValueTokenizer(nn.Module):
def __init__(self, config, mu=100, m=256, bins=1024):
super(ContinousValueTokenizer, self).__init__()
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.mu = mu
self.m = m
self.bins = bins
def forward(self, inputs):
return tokenize_continous_value(inputs, self.mu, self.m, self.bins, shift=self.config.vocabulary_size)
def get_config(self):
return super(ContinousValueTokenizer, self).get_config()
# | CyberTron-master | cybertron/models/GATO/gato/models/tokenizers.py |
from ray.rllib.algorithms.impala import ImpalaConfig
from ray.tune.logger import pretty_print
import datetime
import os
import tempfile
from ray.tune.logger.unified import UnifiedLogger # noqa: E402
def custom_log_creator(custom_path, custom_str):
timestr = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}".format(custom_str, timestr)
def logger_creator(config):
if not os.path.exists(custom_path):
os.makedirs(custom_path)
logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=custom_path)
return UnifiedLogger(config, logdir, loggers=None)
return logger_creator
config = ImpalaConfig()
config = config.training(lr=0.0003, train_batch_size=512)
config = config.resources(num_gpus=0)
config = config.rollouts(num_rollout_workers=8)
config = config.debugging(logger_creator = custom_log_creator(custom_path = 'ray_results', custom_str = 'test'))
config = config.environment(disable_env_checking=True)
#config = config.environment(env_creator=env_creator)
print(config.to_dict())
# Build a Algorithm object from the config and run 1 training iteration.
algo = config.build(env='ALE/Kangaroo-v5')
#algo = config.build()
for i in range(200):
result = algo.train()
print(pretty_print(result)) | CyberTron-master | cybertron/models/GATO/datasets/control_env/ALE_Atari/atari_test_impala.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer."""
from absl.testing import parameterized
from robotics_transformer import transformer
import tensorflow as tf
class TransformerTest(parameterized.TestCase):
def setUp(self):
self._vocab_size = 10
batch_size = 8
sequence_len = 12
self._tokens = tf.random.uniform(
[batch_size, sequence_len, self._vocab_size],
minval=0,
maxval=1,
dtype=tf.dtypes.float32,
)
super(TransformerTest, self).setUp()
@parameterized.parameters(True, False)
def test_transformer_forwardpass(self, return_attention_scores):
network = transformer.Transformer(
num_layers=2,
layer_size=512,
num_heads=4,
feed_forward_size=256,
dropout_rate=0.1,
vocab_size=self._vocab_size,
return_attention_scores=return_attention_scores)
output_tokens, attention_scores = network(self._tokens, attention_mask=None)
self.assertSequenceEqual(self._tokens.shape.as_list(),
output_tokens.shape.as_list())
if return_attention_scores:
self.assertNotEmpty(attention_scores)
else:
self.assertEmpty(attention_scores)
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/transformer_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sequence policy and agent that directly output actions via actor network.
These classes are not intended to change as they are generic enough for any
all-neural actor based agent+policy. All new features are intended to be
implemented in `actor_network` and `loss_fn`.
"""
from typing import Optional, Type
from absl import logging
import tensorflow as tf
from tf_agents.agents import data_converter
from tf_agents.agents import tf_agent
from tf_agents.networks import network
from tf_agents.policies import actor_policy
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import nest_utils
class SequencePolicy(actor_policy.ActorPolicy):
"""A policy that directly outputs actions via an actor network."""
def __init__(self, **kwargs):
self._actions = None
super().__init__(**kwargs)
def set_actions(self, actions):
self._actor_network.set_actions(actions)
def get_actor_loss(self):
return self._actor_network.get_actor_loss()
def get_aux_info(self):
return self._actor_network.get_aux_info()
def set_training(self, training):
self._training = training
def _action(self,
time_step: ts.TimeStep,
policy_state: types.NestedTensor,
seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:
del seed
action, policy_state = self._apply_actor_network(
time_step.observation,
step_type=time_step.step_type,
policy_state=policy_state)
info = ()
return policy_step.PolicyStep(action, policy_state, info)
def _distribution(self, time_step, policy_state):
current_step = super()._distribution(time_step, policy_state)
return current_step
class SequenceAgent(tf_agent.TFAgent):
"""A sequence agent that directly outputs actions via an actor network."""
def __init__(self,
time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec,
actor_network: Type[network.Network],
actor_optimizer: tf.keras.optimizers.Optimizer,
policy_cls: Type[actor_policy.ActorPolicy] = SequencePolicy,
time_sequence_length: int = 6,
debug_summaries: bool = False,
**kwargs):
self._info_spec = ()
self._actor_network = actor_network( # pytype: disable=missing-parameter # dynamic-method-lookup
input_tensor_spec=time_step_spec.observation,
output_tensor_spec=action_spec,
policy_info_spec=self._info_spec,
train_step_counter=kwargs['train_step_counter'],
time_sequence_length=time_sequence_length)
self._actor_optimizer = actor_optimizer
# Train policy is only used for loss and never exported as saved_model.
self._train_policy = policy_cls(
time_step_spec=time_step_spec,
action_spec=action_spec,
info_spec=self._info_spec,
actor_network=self._actor_network,
training=True)
collect_policy = policy_cls(
time_step_spec=time_step_spec,
action_spec=action_spec,
info_spec=self._info_spec,
actor_network=self._actor_network,
training=False)
super(SequenceAgent, self).__init__(
time_step_spec,
action_spec,
collect_policy, # We use the collect_policy as the eval policy.
collect_policy,
train_sequence_length=time_sequence_length,
**kwargs)
self._data_context = data_converter.DataContext(
time_step_spec=time_step_spec,
action_spec=action_spec,
info_spec=collect_policy.info_spec,
use_half_transition=True)
self.as_transition = data_converter.AsHalfTransition(
self._data_context, squeeze_time_dim=False)
self._debug_summaries = debug_summaries
num_params = 0
for weight in self._actor_network.trainable_weights:
weight_params = 1
for dim in weight.shape:
weight_params *= dim
logging.info('%s has %s params.', weight.name, weight_params)
num_params += weight_params
logging.info('Actor network has %sM params.', round(num_params / 1000000.,
2))
def _train(self, experience: types.NestedTensor,
weights: types.Tensor) -> tf_agent.LossInfo:
self.train_step_counter.assign_add(1)
loss_info = self._loss(experience, weights, training=True)
self._apply_gradients(loss_info.loss)
return loss_info
def _apply_gradients(self, loss: types.Tensor):
variables = self._actor_network.trainable_weights
gradients = tf.gradients(loss, variables)
# Skip nan and inf gradients.
new_gradients = []
for g in gradients:
if g is not None:
new_g = tf.where(
tf.math.logical_or(tf.math.is_inf(g), tf.math.is_nan(g)),
tf.zeros_like(g), g)
new_gradients.append(new_g)
else:
new_gradients.append(g)
grads_and_vars = list(zip(new_gradients, variables))
self._actor_optimizer.apply_gradients(grads_and_vars)
def _loss(self, experience: types.NestedTensor, weights: types.Tensor,
training: bool) -> tf_agent.LossInfo:
transition = self.as_transition(experience)
time_steps, policy_steps, _ = transition
batch_size = nest_utils.get_outer_shape(time_steps, self._time_step_spec)[0]
policy = self._train_policy
policy.set_actions(policy_steps.action)
policy.set_training(training=training)
with tf.name_scope('actor_loss'):
policy_state = policy.get_initial_state(batch_size)
policy.action(time_steps, policy_state=policy_state)
valid_mask = tf.cast(~time_steps.is_last(), tf.float32)
loss = valid_mask * policy.get_actor_loss()
loss = tf.reduce_mean(loss)
policy.set_actions(None)
self._actor_network.add_summaries(time_steps.observation,
policy.get_aux_info(),
self._debug_summaries, training)
return tf_agent.LossInfo(loss=loss, extra=loss)
| CyberTron-master | cybertron/models/robotics_transformer/sequence_agent.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequence_agent."""
from typing import Type
import numpy as np
from robotics_transformer import sequence_agent
from tensor2robot.utils import tensorspec_utils
import tensorflow as tf
from tf_agents.networks import network
from tf_agents.policies import policy_saver
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
class DummyActorNet(network.Network):
"""Used for testing SequenceAgent and its subclass."""
def __init__(self,
output_tensor_spec=None,
train_step_counter=None,
policy_info_spec=None,
time_sequence_length=1,
use_tcl=False,
**kwargs):
super().__init__(**kwargs)
@property
def tokens_per_action(self):
return 8
def set_actions(self, actions):
self._actions = actions
def get_actor_loss(self):
return self._actor_loss
def call(self,
observations,
step_type,
network_state,
actions=None,
training=False):
del step_type
image = observations['image']
tf.expand_dims(tf.reduce_mean(image, axis=-1), -1)
actions = tensorspec_utils.TensorSpecStruct(
world_vector=tf.constant(1., shape=[1, 3]),
rotation_delta=tf.constant(1., shape=[1, 3]),
terminate_episode=tf.constant(1, shape=[1, 2]),
gripper_closedness_action=tf.constant(1., shape=[1, 1]),
)
return actions, network_state
@property
def trainable_weights(self):
return [tf.Variable(1.0)]
class SequenceAgentTestSetUp(tf.test.TestCase):
"""Defines spec for testing SequenceAgent and its subclass, tests create."""
def setUp(self):
super().setUp()
self._action_spec = tensorspec_utils.TensorSpecStruct()
self._action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(3,), dtype=tf.float32, minimum=-1., maximum=1., name='world_vector')
self._action_spec.rotation_delta = tensor_spec.BoundedTensorSpec(
(3,),
dtype=tf.float32,
minimum=-np.pi / 2,
maximum=np.pi / 2,
name='rotation_delta')
self._action_spec.gripper_closedness_action = tensor_spec.BoundedTensorSpec(
(1,),
dtype=tf.float32,
minimum=-1.,
maximum=1.,
name='gripper_closedness_action')
self._action_spec.terminate_episode = tensor_spec.BoundedTensorSpec(
(2,), dtype=tf.int32, minimum=0, maximum=1, name='terminate_episode')
state_spec = tensorspec_utils.TensorSpecStruct()
state_spec.image = tensor_spec.BoundedTensorSpec([256, 320, 3],
dtype=tf.float32,
name='image',
minimum=0.,
maximum=1.)
state_spec.natural_language_embedding = tensor_spec.TensorSpec(
shape=[512], dtype=tf.float32, name='natural_language_embedding')
self._time_step_spec = ts.time_step_spec(observation_spec=state_spec)
self.sequence_agent_cls = sequence_agent.SequenceAgent
def create_agent_and_initialize(self,
actor_network: Type[
network.Network] = DummyActorNet,
**kwargs):
"""Creates the agent and initialize it."""
agent = self.sequence_agent_cls(
time_step_spec=self._time_step_spec,
action_spec=self._action_spec,
actor_network=actor_network,
actor_optimizer=tf.keras.optimizers.Adam(),
train_step_counter=tf.compat.v1.train.get_or_create_global_step(),
**kwargs)
agent.initialize()
return agent
def testCreateAgent(self):
"""Creates the Agent and save the agent.policy."""
agent = self.create_agent_and_initialize()
self.assertIsNotNone(agent.policy)
policy_model_saver = policy_saver.PolicySaver(
agent.policy,
train_step=tf.compat.v2.Variable(
0,
trainable=False,
dtype=tf.int64,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
shape=()),
input_fn_and_spec=None)
save_options = tf.saved_model.SaveOptions(
experimental_io_device='/job:localhost',
experimental_custom_gradients=False)
policy_model_saver.save('/tmp/unittest/policy/0', options=save_options)
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/sequence_agent_test_set_up.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for networks."""
import copy
from typing import Optional, Tuple, Union
from absl.testing import parameterized
import numpy as np
from robotics_transformer import sequence_agent
from robotics_transformer import transformer_network
from tensor2robot.utils import tensorspec_utils
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
BATCH_SIZE = 2
TIME_SEQUENCE_LENGTH = 3
HEIGHT = 256
WIDTH = 320
NUM_IMAGE_TOKENS = 2
def spec_names_list() -> list[str]:
"""Lists the different types of specs accepted by the transformer."""
return ['default']
def state_spec_list() -> list[tensorspec_utils.TensorSpecStruct]:
"""Lists the different types of state spec accepted by the transformer."""
state_spec = tensorspec_utils.TensorSpecStruct()
state_spec.image = tensor_spec.BoundedTensorSpec([HEIGHT, WIDTH, 3],
dtype=tf.float32,
name='image',
minimum=0.,
maximum=1.)
state_spec.natural_language_embedding = tensor_spec.TensorSpec(
shape=[512], dtype=tf.float32, name='natural_language_embedding')
state_spec_mask = copy.deepcopy(state_spec)
state_spec_mask.initial_binary_mask = tensor_spec.BoundedTensorSpec(
[HEIGHT, WIDTH, 1],
dtype=tf.int32,
name='initial_binary_mask',
minimum=0,
maximum=255)
state_spec_tcl = copy.deepcopy(state_spec)
state_spec_tcl.original_image = tensor_spec.BoundedTensorSpec(
[HEIGHT, WIDTH, 3],
dtype=tf.float32,
name='original_image',
minimum=0.,
maximum=1.)
return [
state_spec,
state_spec_mask,
state_spec_tcl,
]
def observations_list(training: bool = True) -> list[dict[str, tf.Tensor]]:
"""Lists the different types of observations accepted by the transformer."""
if training:
image_shape = [BATCH_SIZE, TIME_SEQUENCE_LENGTH, HEIGHT, WIDTH, 3]
emb_shape = [BATCH_SIZE, TIME_SEQUENCE_LENGTH, 512]
mask_shape = [BATCH_SIZE, TIME_SEQUENCE_LENGTH, HEIGHT, WIDTH, 1]
else:
# inference currently only support batch size of 1
image_shape = [1, HEIGHT, WIDTH, 3]
emb_shape = [1, 512]
mask_shape = [1, HEIGHT, WIDTH, 1]
return [
{
'image': tf.constant(0.5, shape=image_shape),
'natural_language_embedding': tf.constant(1., shape=emb_shape),
},
{
'image': tf.constant(0.5, shape=image_shape),
'natural_language_embedding': tf.constant(1., shape=emb_shape),
'initial_binary_mask': tf.constant(192, shape=mask_shape),
},
{ # This is used for TCL.
'image': tf.constant(0.5, shape=image_shape),
'original_image': tf.constant(0.4, shape=image_shape),
'natural_language_embedding': tf.constant(1., shape=emb_shape),
},
]
NAME_TO_STATE_SPECS = dict(zip(spec_names_list(), state_spec_list()))
NAME_TO_OBSERVATIONS = dict(zip(spec_names_list(), observations_list()))
NAME_TO_INF_OBSERVATIONS = dict(
zip(spec_names_list(), observations_list(False)))
class FakeImageTokenizer(tf.keras.layers.Layer):
"""Fake Image Tokenizer for testing Transformer."""
def __init__(self,
encoder: ...,
position_embedding: ...,
embedding_output_dim: int,
patch_size: int,
use_token_learner: bool = False,
num_tokens: int = NUM_IMAGE_TOKENS,
use_initial_binary_mask: bool = False,
**kwargs):
del encoder, position_embedding, patch_size, use_token_learner
super().__init__(**kwargs)
self.tokens_per_context_image = num_tokens
if use_initial_binary_mask:
self.tokens_per_context_image += 1
self.embedding_output_dim = embedding_output_dim
self.use_initial_binary_mask = use_initial_binary_mask
def __call__(self,
image: tf.Tensor,
context: Optional[tf.Tensor] = None,
initial_binary_mask: Optional[tf.Tensor] = None,
training: bool = False) -> tf.Tensor:
if self.use_initial_binary_mask:
assert initial_binary_mask is not None
image_shape = tf.shape(image)
seq_size = image_shape[1]
batch_size = image_shape[0]
all_tokens = []
num_tokens = self.tokens_per_context_image
for t in range(seq_size):
tokens = tf.ones([batch_size, 1, num_tokens, self.embedding_output_dim
]) * image[0][t][0][0]
all_tokens.append(tokens)
return tf.concat(all_tokens, axis=1)
class TransformerNetworkTestUtils(tf.test.TestCase, parameterized.TestCase):
"""Defines specs, SequenceAgent, and various other testing utilities."""
def _define_specs(self,
train_batch_size=BATCH_SIZE,
inference_batch_size=1,
time_sequence_length=TIME_SEQUENCE_LENGTH,
inference_sequence_length=TIME_SEQUENCE_LENGTH,
token_embedding_size=512,
image_width=WIDTH,
image_height=HEIGHT):
"""Defines specs and observations (both training and inference)."""
self.train_batch_size = train_batch_size
self.inference_batch_size = inference_batch_size
self.time_sequence_length = time_sequence_length
self.inference_sequence_length = inference_sequence_length
self.token_embedding_size = token_embedding_size
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(3,), dtype=tf.float32, minimum=-1., maximum=1., name='world_vector')
action_spec.rotation_delta = tensor_spec.BoundedTensorSpec(
(3,),
dtype=tf.float32,
minimum=-np.pi / 2,
maximum=np.pi / 2,
name='rotation_delta')
action_spec.gripper_closedness_action = tensor_spec.BoundedTensorSpec(
(1,),
dtype=tf.float32,
minimum=-1.,
maximum=1.,
name='gripper_closedness_action')
action_spec.terminate_episode = tensor_spec.BoundedTensorSpec(
(2,), dtype=tf.int32, minimum=0, maximum=1, name='terminate_episode')
state_spec = tensorspec_utils.TensorSpecStruct()
state_spec.image = tensor_spec.BoundedTensorSpec(
[image_height, image_width, 3],
dtype=tf.float32,
name='image',
minimum=0.,
maximum=1.)
state_spec.natural_language_embedding = tensor_spec.TensorSpec(
shape=[self.token_embedding_size],
dtype=tf.float32,
name='natural_language_embedding')
self._policy_info_spec = {
'return':
tensor_spec.BoundedTensorSpec((),
dtype=tf.float32,
minimum=0.0,
maximum=1.0,
name='return'),
'discounted_return':
tensor_spec.BoundedTensorSpec((),
dtype=tf.float32,
minimum=0.0,
maximum=1.0,
name='discounted_return'),
}
self._state_spec = state_spec
self._action_spec = action_spec
self._inference_observation = {
'image':
tf.constant(
1,
shape=[self.inference_batch_size, image_height, image_width, 3],
dtype=tf.dtypes.float32),
'natural_language_embedding':
tf.constant(
1.,
shape=[self.inference_batch_size, self.token_embedding_size],
dtype=tf.dtypes.float32),
}
self._train_observation = {
'image':
tf.constant(
0.5,
shape=[
self.train_batch_size, self.time_sequence_length,
image_height, image_width, 3
]),
'natural_language_embedding':
tf.constant(
1.,
shape=[
self.train_batch_size, self.time_sequence_length,
self.token_embedding_size
]),
}
self._inference_action = {
'world_vector':
tf.constant(0.5, shape=[self.inference_batch_size, 3]),
'rotation_delta':
tf.constant(0.5, shape=[self.inference_batch_size, 3]),
'terminate_episode':
tf.constant(
[0, 1] * self.inference_batch_size,
shape=[self.inference_batch_size, 2]),
'gripper_closedness_action':
tf.constant(0.5, shape=[self.inference_batch_size, 1]),
}
self._train_action = {
'world_vector':
tf.constant(
0.5,
shape=[self.train_batch_size, self.time_sequence_length, 3]),
'rotation_delta':
tf.constant(
0.5,
shape=[self.train_batch_size, self.time_sequence_length, 3]),
'terminate_episode':
tf.constant(
[0, 1] * self.train_batch_size * self.time_sequence_length,
shape=[self.train_batch_size, self.time_sequence_length, 2]),
'gripper_closedness_action':
tf.constant(
0.5,
shape=[self.train_batch_size, self.time_sequence_length, 1]),
}
def _create_agent(self, actor_network=None):
"""Creates SequenceAgent using custom actor_network."""
time_step_spec = ts.time_step_spec(observation_spec=self._state_spec)
if actor_network is None:
actor_network = transformer_network.TransformerNetwork
self._agent = sequence_agent.SequenceAgent(
time_step_spec=time_step_spec,
action_spec=self._action_spec,
actor_network=actor_network,
actor_optimizer=tf.keras.optimizers.Adam(),
train_step_counter=tf.compat.v1.train.get_or_create_global_step(),
time_sequence_length=TIME_SEQUENCE_LENGTH)
self._num_action_tokens = (
# pylint:disable=protected-access
self._agent._actor_network._action_tokenizer._tokens_per_action)
# pylint:enable=protected-access
def setUp(self):
self._define_specs()
super().setUp()
def get_image_value(self, step_idx: int) -> float:
return float(step_idx) / self.time_sequence_length
def get_action_logits(self, batch_size: int, value: int,
vocab_size: int) -> tf.Tensor:
return tf.broadcast_to(
tf.one_hot(value % vocab_size, vocab_size)[tf.newaxis, tf.newaxis, :],
[batch_size, 1, vocab_size])
def create_obs(self, value) -> dict[str, tf.Tensor]:
observations = {}
observations['image'] = value * self._inference_observation['image']
observations[
'natural_language_embedding'] = value * self._inference_observation[
'natural_language_embedding']
return observations
def fake_action_token_emb(self, action_tokens) -> tf.Tensor:
"""Just pad with zeros."""
shape = action_tokens.shape
assert self.vocab_size > self.token_embedding_size
assert len(shape) == 4
return action_tokens[:, :, :, :self.token_embedding_size]
def fake_transformer(
self, all_tokens, training,
attention_mask) -> Union[tf.Tensor, Tuple[tf.Tensor, list[tf.Tensor]]]:
"""Fakes the call to TransformerNetwork._transformer."""
del training
del attention_mask
# We expect ST00 ST01 A00 A01...
# Where:
# * ST01 is token 1 of state 0.
# * A01 is token 1 of action 0.
shape = all_tokens.shape.as_list()
batch_size = shape[0]
self.assertEqual(batch_size, 1)
emb_size = self.token_embedding_size
# transform to [batch_size, num_tokens, token_size]
all_tokens = tf.reshape(all_tokens, [batch_size, -1, emb_size])
# Pads tokens to be of vocab_size.
self.assertGreater(self.vocab_size, self.token_embedding_size)
all_shape = all_tokens.shape
self.assertLen(all_shape.as_list(), 3)
output_tokens = tf.concat([
all_tokens,
tf.zeros([
all_shape[0], all_shape[1],
self.vocab_size - self.token_embedding_size
])
],
axis=-1)
num_tokens_per_step = NUM_IMAGE_TOKENS + self._num_action_tokens
# Check state/action alignment.
window_range = min(self._step_idx + 1, self.time_sequence_length)
for j in range(window_range):
# The index step that is stored in j = 0.
first_step_idx = max(0, self._step_idx + 1 - self.time_sequence_length)
image_idx = j * num_tokens_per_step
action_start_index = image_idx + NUM_IMAGE_TOKENS
for t in range(NUM_IMAGE_TOKENS):
self.assertAllEqual(
self.get_image_value(first_step_idx + j) *
tf.ones_like(all_tokens[0][image_idx][:self.token_embedding_size]),
all_tokens[0][image_idx + t][:self.token_embedding_size])
# if j is not the current step in the window, all action dimensions
# from previous steps are already infered and thus can be checked.
action_dims_range = self.action_inf_idx if j == window_range - 1 else self._num_action_tokens
for t in range(action_dims_range):
token_idx = action_start_index + t
action_value = (first_step_idx + j) * self._num_action_tokens + t
self.assertAllEqual(
self.get_action_logits(
batch_size=batch_size,
value=action_value,
vocab_size=self.vocab_size)[0][0][:self.token_embedding_size],
all_tokens[0][token_idx][:self.token_embedding_size])
# Output the right action dimension value.
image_token_index = (
min(self._step_idx, self.time_sequence_length - 1) *
num_tokens_per_step)
transformer_shift = -1
action_index = (
image_token_index + NUM_IMAGE_TOKENS + self.action_inf_idx +
transformer_shift)
action_value = self._step_idx * self._num_action_tokens + self.action_inf_idx
action_logits = self.get_action_logits(
batch_size=batch_size, value=action_value, vocab_size=self.vocab_size)
output_tokens = tf.concat([
output_tokens[:, :action_index, :], action_logits[:, :, :],
output_tokens[:, action_index + 1:, :]
],
axis=1)
self.action_inf_idx = (self.action_inf_idx + 1) % self._num_action_tokens
attention_scores = []
return output_tokens, attention_scores
| CyberTron-master | cybertron/models/robotics_transformer/transformer_network_test_set_up.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""robotics_transformer API."""
# A new PyPI release will be pushed every time `__version__` is increased.
# When changing this, also update the CHANGELOG.md
__version__ = '0.1.0'
| CyberTron-master | cybertron/models/robotics_transformer/__init__.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for networks."""
from absl.testing import parameterized
from robotics_transformer import transformer_network
from robotics_transformer.transformer_network_test_set_up import BATCH_SIZE
from robotics_transformer.transformer_network_test_set_up import NAME_TO_INF_OBSERVATIONS
from robotics_transformer.transformer_network_test_set_up import NAME_TO_STATE_SPECS
from robotics_transformer.transformer_network_test_set_up import observations_list
from robotics_transformer.transformer_network_test_set_up import spec_names_list
from robotics_transformer.transformer_network_test_set_up import state_spec_list
from robotics_transformer.transformer_network_test_set_up import TIME_SEQUENCE_LENGTH
from robotics_transformer.transformer_network_test_set_up import TransformerNetworkTestUtils
import tensorflow as tf
from tf_agents.specs import tensor_spec
class TransformerNetworkTest(TransformerNetworkTestUtils):
# pylint:disable=g-complex-comprehension
@parameterized.named_parameters([{
'testcase_name': '_' + name,
'state_spec': spec,
'train_observation': obs,
} for (name, spec,
obs) in zip(spec_names_list(), state_spec_list(), observations_list())]
)
# pylint:enable=g-complex-comprehension
def testTransformerTrainLossCall(self, state_spec, train_observation):
network = transformer_network.TransformerNetwork(
input_tensor_spec=state_spec,
output_tensor_spec=self._action_spec,
time_sequence_length=TIME_SEQUENCE_LENGTH)
network.create_variables()
self.assertNotEmpty(network.variables)
network.set_actions(self._train_action)
network_state = tensor_spec.sample_spec_nest(
network.state_spec, outer_dims=[BATCH_SIZE])
output_actions, network_state = network(
train_observation, step_type=None, network_state=network_state)
expected_shape = [2, 3]
self.assertEqual(network.get_actor_loss().shape,
tf.TensorShape(expected_shape))
self.assertCountEqual(self._train_action.keys(), output_actions.keys())
# pylint:disable=g-complex-comprehension
@parameterized.named_parameters([{
'testcase_name': '_' + name,
'spec_name': name,
} for name in spec_names_list()])
# pylint:enable=g-complex-comprehension
def testTransformerInferenceLossCall(self, spec_name):
state_spec = NAME_TO_STATE_SPECS[spec_name]
observation = NAME_TO_INF_OBSERVATIONS[spec_name]
network = transformer_network.TransformerNetwork(
input_tensor_spec=state_spec,
output_tensor_spec=self._action_spec,
time_sequence_length=TIME_SEQUENCE_LENGTH,
action_order=[
'terminate_episode', 'world_vector', 'rotation_delta',
'gripper_closedness_action'
])
network.create_variables()
self.assertNotEmpty(network.variables)
network.set_actions(self._inference_action)
    # Inference currently only supports a batch size of 1.
network_state = tensor_spec.sample_spec_nest(
network.state_spec, outer_dims=[1])
output_actions, network_state = network(
observation, step_type=None, network_state=network_state)
tf.debugging.assert_equal(network.get_actor_loss(), 0.0)
self.assertCountEqual(self._inference_action.keys(), output_actions.keys())
# pylint:disable=g-complex-comprehension
@parameterized.named_parameters([{
'testcase_name': '_' + name,
'state_spec': spec,
'train_observation': obs,
} for name, spec, obs in zip(spec_names_list(), state_spec_list(),
observations_list())])
# pylint:enable=g-complex-comprehension
def testTransformerLogging(self, state_spec, train_observation):
network = transformer_network.TransformerNetwork(
input_tensor_spec=state_spec,
output_tensor_spec=self._action_spec,
time_sequence_length=TIME_SEQUENCE_LENGTH,
action_order=[
'terminate_episode', 'world_vector', 'rotation_delta',
'gripper_closedness_action'
])
network.create_variables()
self.assertNotEmpty(network.variables)
network.set_actions(self._train_action)
network_state = tensor_spec.sample_spec_nest(
network.state_spec, outer_dims=[BATCH_SIZE])
_ = network(train_observation, step_type=None, network_state=network_state)
network.add_summaries(
train_observation,
network.get_aux_info(),
debug_summaries=True,
training=True)
# pylint:disable=g-complex-comprehension
@parameterized.named_parameters([{
'testcase_name': '_' + name,
'state_spec': spec,
} for name, spec in zip(spec_names_list(), state_spec_list())])
# pylint:enable=g-complex-comprehension
def testTransformerCausality(self, state_spec):
"""Tests the causality for the transformer.
Args:
state_spec: Which state spec to test the transformer with
"""
network = transformer_network.TransformerNetwork(
input_tensor_spec=state_spec,
output_tensor_spec=self._action_spec,
time_sequence_length=TIME_SEQUENCE_LENGTH)
network.create_variables()
self.assertNotEmpty(network.variables)
time_sequence_length = network._time_sequence_length
tokens_per_image = network._tokens_per_context_image
tokens_per_action = network._tokens_per_action
def _split_image_and_action_tokens(all_tokens):
image_start_indices = [(tokens_per_image + tokens_per_action) * k
for k in range(time_sequence_length)]
image_tokens = tf.stack(
[all_tokens[i:i + tokens_per_image] for i in image_start_indices],
axis=0)
action_start_indices = [i + tokens_per_image for i in image_start_indices]
action_tokens = [
tf.stack([
all_tokens[i:i + tokens_per_action] for i in action_start_indices
], 0)
]
image_tokens = tf.one_hot(image_tokens, network._token_embedding_size)
# Remove extra dimension before the end once b/254902773 is fixed.
shape = image_tokens.shape
# Add batch dimension.
image_tokens = tf.reshape(image_tokens,
[1] + shape[:-1] + [1] + shape[-1:])
return image_tokens, action_tokens
# Generate some random tokens for image and actions.
all_tokens = tf.random.uniform(
shape=[time_sequence_length * (tokens_per_image + tokens_per_action)],
dtype=tf.int32,
maxval=10,
minval=0)
context_image_tokens, action_tokens = _split_image_and_action_tokens(
all_tokens)
# Get the output tokens without any zeroed out input tokens.
output_tokens = network._transformer_call(
context_image_tokens=context_image_tokens,
action_tokens=action_tokens,
attention_mask=network._default_attention_mask,
batch_size=1,
training=False)[0]
for t in range(time_sequence_length *
(tokens_per_image + tokens_per_action)):
# Zero out future input tokens.
all_tokens_at_t = tf.concat(
[all_tokens[:t + 1],
tf.zeros_like(all_tokens[t + 1:])], 0)
context_image_tokens, action_tokens = _split_image_and_action_tokens(
all_tokens_at_t)
# Get the output tokens with zeroed out input tokens after t.
output_tokens_at_t = network._transformer_call(
context_image_tokens=context_image_tokens,
action_tokens=action_tokens,
attention_mask=network._default_attention_mask,
batch_size=1,
training=False)[0]
# The output token is unchanged if future input tokens are zeroed out.
self.assertAllEqual(output_tokens[:t + 1], output_tokens_at_t[:t + 1])
def testLossMasks(self):
self._define_specs()
self._create_agent()
image_tokens = 3
action_tokens = 2
self._agent._actor_network._time_sequence_length = 2
self._agent._actor_network._tokens_per_context_image = image_tokens
self._agent._actor_network._tokens_per_action = action_tokens
self._agent._actor_network._generate_masks()
self.assertAllEqual(
self._agent._actor_network._action_tokens_mask,
tf.constant([
image_tokens, image_tokens + 1, 2 * image_tokens + action_tokens,
2 * image_tokens + action_tokens + 1
], tf.int32))
self._agent._actor_network._generate_masks()
self.assertAllEqual(
self._agent._actor_network._action_tokens_mask,
tf.constant([
image_tokens, image_tokens + 1, 2 * (image_tokens) + action_tokens,
2 * (image_tokens) + action_tokens + 1
], tf.int32))
if __name__ == '__main__':
# Useful to enable if running with ipdb.
tf.config.run_functions_eagerly(True)
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/transformer_network_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RT1 decoder transformer.
Copied from:
https://www.tensorflow.org/text/tutorials/transformer#decoder
"""
from typing import Tuple, Union
import tensorflow as tf
class _TransformerLayer(tf.keras.layers.Layer):
"""A single transformer block."""
def __init__(self,
layer_size: int = 4096,
num_heads: int = 8,
feed_forward_size: int = 512,
dropout_rate: float = 0.1,
return_attention_scores: bool = False):
"""Creates a Transformer layer.
Args:
layer_size: Size of the multiple head attention layer.
num_heads: Number of heads for the multiple head attention layer.
feed_forward_size: Dimensionality of the feed_forward layer.
dropout_rate: Dropout rate.
return_attention_scores: Return attention scores.
"""
super(_TransformerLayer, self).__init__()
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.mha1 = tf.keras.layers.MultiHeadAttention(
key_dim=layer_size, num_heads=num_heads, dropout=dropout_rate)
self.ff = tf.keras.layers.Dense(feed_forward_size)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout_ff = tf.keras.layers.Dropout(dropout_rate)
self._return_attention_scores = return_attention_scores
def call(self, x: tf.Tensor, attention_mask: tf.Tensor,
training: bool) -> Tuple[tf.Tensor, Union[tf.Tensor, None]]:
"""Calls the layer.
Args:
x: Input Tensor of shape `(B, T, dim)`.
attention_mask: a boolean mask of shape `(B, T, T)`, that prevents
attention to certain positions. The boolean mask specifies which query
elements can attend to which key elements, 1 indicates attention and 0
indicates no attention. Broadcasting can happen for the missing batch
dimensions and the head dimension.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Returns:
y: Output Tensor of shape `(B, T, dim)`. Also return the attention scores
of shape `(B, T, dim)` or None.
"""
x1 = self.layernorm1(x)
mha_results = self.mha1(
query=x1,
key=x1,
value=x1,
attention_mask=attention_mask,
return_attention_scores=self._return_attention_scores,
training=training)
if self._return_attention_scores:
x1, score = mha_results
else:
x1, score = mha_results, None
x = x + x1
y = self.layernorm2(x)
ff_y = self.ff(y)
ff_y = self.dropout_ff(ff_y, training=training)
x = x + ff_y
return x, score
class Transformer(tf.keras.layers.Layer):
"""A decoder only transformer."""
def __init__(self,
num_layers: int = 1,
layer_size: int = 4096,
num_heads: int = 8,
feed_forward_size: int = 512,
dropout_rate: float = 0.1,
vocab_size: int = 256,
return_attention_scores: bool = False):
"""Creates a transformer.
Args:
num_layers: Number of transformer layers.
layer_size: Size of the multiple head attention layer.
num_heads: Number of heads for the multiple head attention layer.
feed_forward_size: Dimensionality of the feed_forward layer.
dropout_rate: Dropout rate.
vocab_size: Dimensionality of tokens from the output layer.
return_attention_scores: Return attention scores.
"""
super(Transformer, self).__init__()
self._layers = [
_TransformerLayer( # pylint: disable=g-complex-comprehension
layer_size=layer_size,
num_heads=num_heads,
feed_forward_size=feed_forward_size,
dropout_rate=dropout_rate,
return_attention_scores=return_attention_scores)
for _ in range(num_layers)
]
self._token_emb = tf.keras.layers.Dense(feed_forward_size)
self._position_emb = tf.keras.layers.Dense(feed_forward_size)
self._output_tokens = tf.keras.layers.Dense(vocab_size)
def call(
self,
x: tf.Tensor,
training: bool,
attention_mask: tf.Tensor,
) -> Union[tf.Tensor, Tuple[tf.Tensor, list[tf.Tensor]]]:
"""Calls the layer.
Args:
x: Input Tensor of shape `(B, T, dim)`.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
attention_mask: a boolean mask of shape `(B, T, T)`, that prevents
attention to certain positions. The boolean mask specifies which query
elements can attend to which key elements, 1 indicates attention and 0
indicates no attention. Broadcasting can happen for the missing batch
dimensions and the head dimension.
Returns:
x: Output Tensor of shape `(B, T, vocab_size)`. If
        `return_attention_scores`, also return the attention scores as a list
        with one element per layer, each of shape `(B, T, dim)`.
"""
seq_len = tf.shape(x)[1]
batch_size = tf.shape(x)[0]
positions = tf.one_hot(
tf.tile(tf.expand_dims(tf.range(0, seq_len, 1), 0), [batch_size, 1]),
seq_len)
x = self._token_emb(x)
x += self._position_emb(positions)
scores = []
for layer in self._layers:
x, score = layer(x, attention_mask=attention_mask, training=training)
if score is not None:
scores.append(score)
x = self._output_tokens(x)
return x, scores
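# Editorial usage sketch (added; not part of the original module, with
# arbitrary small hyperparameters chosen only for illustration): build a tiny
# decoder-only Transformer, feed it random token embeddings with a causal
# lower-triangular attention mask, and check the output logits shape.
if __name__ == '__main__':
  batch, seq_len, dim = 2, 6, 32
  net = Transformer(
      num_layers=2,
      layer_size=64,
      num_heads=2,
      feed_forward_size=32,
      dropout_rate=0.1,
      vocab_size=16)
  tokens = tf.random.normal([batch, seq_len, dim])
  # Causal mask of shape (B, T, T), matching the documented mask shape.
  causal_mask = tf.linalg.band_part(tf.ones((batch, seq_len, seq_len)), -1, 0)
  logits, _ = net(tokens, training=False, attention_mask=causal_mask)
  print(logits.shape)  # Expected: (2, 6, 16)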
| CyberTron-master | cybertron/models/robotics_transformer/transformer.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequence_agent."""
from robotics_transformer.sequence_agent_test_set_up import SequenceAgentTestSetUp
import tensorflow as tf
from tf_agents.agents import data_converter
class SequenceAgentTest(SequenceAgentTestSetUp):
def testAsTransitionType(self):
agent = self.create_agent_and_initialize()
self.assertIsInstance(agent.as_transition, data_converter.AsHalfTransition)
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/sequence_agent_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow based methods for sequence agents."""
from typing import Optional, Tuple, Union, Any
from absl import logging
import numpy as np
from robotics_transformer import transformer
from robotics_transformer.film_efficientnet import preprocessors
from robotics_transformer.tokenizers import action_tokenizer
from robotics_transformer.tokenizers import image_tokenizer
from tensor2robot.utils import tensorspec_utils
import tensorflow as tf
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.utils import nest_utils
class TransformerNetwork(network.Network):
"""A transformer based actor network."""
def __init__(
self,
input_tensor_spec: tensorspec_utils.TensorSpecStruct,
output_tensor_spec: tensorspec_utils.TensorSpecStruct,
train_step_counter: int = 0,
vocab_size: int = 256,
token_embedding_size: int = 512,
num_layers: int = 1,
layer_size: int = 4096,
num_heads: int = 8,
feed_forward_size: int = 512,
dropout_rate: float = 0.1,
time_sequence_length: int = 1,
crop_size: int = 236,
policy_info_spec: Optional[dict[Any,
tensor_spec.BoundedTensorSpec]] = None,
action_order: Optional[list[str]] = None,
use_token_learner: Optional[bool] = True,
return_attention_scores: bool = False,
**kwargs):
"""Creates a transformer network.
Args:
input_tensor_spec: Nested list/tuple/dict of TensorSpecs, describing the
shape of input tensor.
output_tensor_spec: Nested list/tuple/dict of TensorSpecs, describing the
shape of output tensor.
train_step_counter: Counter for number of steps.
vocab_size: Dimensionality of tokens from the output layer.
token_embedding_size: Dimensionality of tokens from the embedding layer.
num_layers: Number of transformer layers.
layer_size: Size of the multiple head attention layer.
num_heads: Number of heads for the multiple head attention layer.
feed_forward_size: Dimensionality of the feed_forward layer.
dropout_rate: Dropout rate.
time_sequence_length: Length of the time sequence.
      crop_size: Height and width of the square crop, where the original image
        will be padded to allow the full field of view to be extracted.
policy_info_spec: Spec on return value given return type of the return
tokenizer.
action_order: Order of actions for the action tokenizer.
use_token_learner: Whether to use token learner. See
https://arxiv.org/abs/2106.11297
      return_attention_scores: Show attention scores in TensorBoard.
**kwargs: Keyword parameter arguments.
"""
self._input_tensor_spec = input_tensor_spec
self._output_tensor_spec = output_tensor_spec
self._train_step_counter = train_step_counter
self._actions = None
self._returns = None
self._vocab_size = vocab_size
self._token_embedding_size = token_embedding_size
self._time_sequence_length = time_sequence_length
self._crop_size = crop_size
self._transformer = transformer.Transformer(
num_layers=num_layers,
layer_size=layer_size,
num_heads=num_heads,
feed_forward_size=feed_forward_size,
dropout_rate=dropout_rate,
vocab_size=self._vocab_size,
return_attention_scores=return_attention_scores)
# create tokenizers
self._image_tokenizer = image_tokenizer.RT1ImageTokenizer(
embedding_output_dim=self._token_embedding_size,
use_token_learner=use_token_learner)
self._action_tokenizer = action_tokenizer.RT1ActionTokenizer(
output_tensor_spec,
vocab_size=self._vocab_size,
action_order=action_order)
self._tokens_per_action = self._action_tokenizer.tokens_per_action
self._tokens_per_context_image = self._image_tokenizer.tokens_per_context_image
# generate loss and attention masks
self._generate_masks()
# define mappings to token embedding size
self._action_token_emb = tf.keras.layers.Dense(self._token_embedding_size)
# define loss function
self._loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
self._attention_scores = []
self._use_token_learner = use_token_learner
super(TransformerNetwork, self).__init__(
input_tensor_spec=input_tensor_spec, **kwargs)
self._state_spec = {
        # Force this to be 4-dimensional due to b/254902773.
        # Otherwise it could be 3-dimensional.
'context_image_tokens':
tensor_spec.TensorSpec(
shape=(time_sequence_length, self._tokens_per_context_image, 1,
token_embedding_size),
dtype=tf.float32,
name='context_image_tokens'),
'action_tokens':
tensor_spec.TensorSpec(
shape=(time_sequence_length, self._tokens_per_action, 1, 1),
dtype=tf.int32,
name='action_tokens'),
# Stores where in the window we are.
# This value is within range [0, time_sequence_length + 1].
# When seq_idx == time_sequence_length, context_image_tokens and
# action_tokens need to be shifted to the left.
'seq_idx':
tensor_spec.TensorSpec(
shape=(1, 1, 1, 1), dtype=tf.int32, name='seq_idx')
}
@property
def attention_scores(self) -> list[tf.Tensor]:
"""Return attention score. This is for debugging/visualization purpose."""
return self._attention_scores
def _get_action_index_for_token(self, k):
"""Returns action associated with the token at given position `k`.
If k is not an action token then it returns -1.
If k is part of the first action in the sequence then returns 0 etc.
Args:
k: an int that represents the position in the sequence.
Returns:
The index of the action that this position belongs to, or if this
position is part of an image token then returns -1.
"""
if (k < 0 or k >= self._all_num_tokens):
return -1
n = k
if n % self._single_time_step_num_tokens < self._tokens_per_context_image:
return -1
return int(n / self._single_time_step_num_tokens)
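  # Editorial worked example (hypothetical sizes, added for illustration): with
  # tokens_per_context_image=8 and tokens_per_action=2, each time step occupies
  # 10 tokens. Position k=3 falls inside the image tokens of step 0 and returns
  # -1; k=8 is the first action token of step 0 and returns 0; k=19 is the last
  # action token of step 1 and returns 1.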
def _generate_masks(self):
"""Generate mask for action prediction loss and attention visualization."""
# each time step = [image, action]
self._single_time_step_num_tokens = (
self._tokens_per_action + self._tokens_per_context_image)
# full sequence = [prefix context + N x timestep + postfix context]
self._all_num_tokens = (
self._time_sequence_length * self._single_time_step_num_tokens)
    # Create the mask for the action prediction loss.
self._action_tokens_mask = []
for n in range(0, self._all_num_tokens, self._single_time_step_num_tokens):
for x in range(0, self._tokens_per_action, 1):
self._action_tokens_mask.append(x + n + self._tokens_per_context_image)
self._action_tokens_mask = tf.constant(
self._action_tokens_mask, dtype=tf.int32)
# The look ahead mask ensures causality.
self._default_attention_mask = tf.linalg.band_part(
tf.ones((self._all_num_tokens, self._all_num_tokens)), -1, 0)
action_mask = np.ndarray(
shape=(self._all_num_tokens, self._all_num_tokens), dtype=int)
for i in range(self._all_num_tokens):
for j in range(self._all_num_tokens):
action_i = self._get_action_index_for_token(i)
action_j = self._get_action_index_for_token(j)
mask = 0
if action_i != -1 and action_j != -1:
# Ignore actions of previous steps.
if action_j < action_i:
mask = 1
          # If we're not auto-regressive, ignore action dimensions of the
          # current step.
if (action_j == action_i and j <= i):
mask = 1
action_mask[i, j] = mask
self._default_attention_mask -= action_mask
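  # Editorial worked example (hypothetical sizes, added for illustration): with
  # time_sequence_length=2, tokens_per_context_image=2 and tokens_per_action=1,
  # each step holds 3 tokens, so the loss mask above selects action-token
  # positions [2, 5]. The attention mask starts as a 6x6 lower-triangular
  # matrix; then, for action-token query positions, attention onto earlier and
  # same-step action tokens is removed.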
def _transformer_call(
self,
context_image_tokens: tf.Tensor,
action_tokens: tf.Tensor,
batch_size: int,
training: bool,
attention_mask: tf.Tensor,
) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
"""Calls the transformer.
Args:
context_image_tokens: Tokenized context and image in Tensor of shape `(B,
T, num token, -1)`.
action_tokens: Discrete action token sequence of size [8, 256].
batch_size: Batch size as when reshaping all tokens.
training: Whether to run the transformer in training mode.
attention_mask: Optional bool tensor for masking transformer's attention.
Returns:
Output tokens in Tensor of shape `(B, T, dim)`. If
return_attention_scores, also return the attention scores of
shape `(B, T, dim)`.
"""
input_token_sequence = self._assemble_input_token_sequence(
context_image_tokens, action_tokens, batch_size)
# run transformer
output_tokens, self._attention_scores = self._transformer(
input_token_sequence, training, attention_mask)
return output_tokens
def _get_tokens_and_mask(self,
observations: dict[str, tf.Tensor],
network_state: dict[str, tf.Tensor],
training: bool = False):
# tokenize all inputs
context_image_tokens, network_state = self._tokenize_images(
observations, network_state, training)
action_tokens = self._tokenize_actions(observations, network_state)
# generate transformer attention mask
attention_mask = self._default_attention_mask
return (context_image_tokens, action_tokens, attention_mask)
def _transformer_call_and_slice(self,
*args,
slice_start: int = 0,
slice_length: int = 1,
**kwargs) -> Tuple[tf.Tensor, tf.Tensor]:
output_tokens = self._transformer_call(*args, **kwargs)
slice_end = slice_start + slice_length
token_logits = output_tokens[:, slice_start:slice_end, :]
token = tf.argmax(token_logits, axis=-1, output_type=tf.int32)
return token, token_logits
def call(self,
observations: dict[str, tf.Tensor],
network_state: dict[str, tf.Tensor],
training: bool = False):
"""Calls the transformer network.
Args:
observations: Observation data including image and natural language
embedding in dict of Tensors.
network_state: Network state data including time step, image, action
tokens, step number in dict of Tensors.
training: Whether to call transformer network in training mode.
Returns:
A tuple `(Detokenized output actions, network state)`.
"""
# used to determine training vs inference call
# outer_rank will be 2 -> [b, t] during training and
# outer_rank will be 1 -> [b] during inference
outer_rank = self._get_outer_rank(observations)
assert outer_rank in (1, 2)
b, t = self._get_batch_size_and_seq_len(network_state)
context_image_tokens, action_tokens, attention_mask = self._get_tokens_and_mask(
observations, network_state, training)
self._aux_info = {'action_labels': action_tokens}
if outer_rank == 1: # This is an inference call
# run transformer in loop to produce action tokens one-by-one
seq_idx = tf.reshape(network_state['seq_idx'], [1])[0]
action_t = tf.minimum(seq_idx, self._time_sequence_length - 1)
# Transformer shifts all to the left by one step by default (it's usually
# predicting the next token as default training task...).
transformer_shift = -1
# We only want to get the action predicted at time_step.
start_index = (
transformer_shift + self._tokens_per_context_image + action_t *
(self._single_time_step_num_tokens))
current_action_tokens = []
action_predictions_logits = []
for k in range(self._tokens_per_action):
action_index = start_index + k
token, token_logits = self._transformer_call_and_slice(
context_image_tokens,
action_tokens,
attention_mask=attention_mask,
batch_size=b,
training=training,
slice_start=action_index # slicing single action dimension
)
action_predictions_logits.append(token_logits)
current_action_tokens.append(token)
# action_tokens is [b, t * self._tokens_per_action]
action_tokens = tf.reshape(action_tokens, [b, -1])
action_start_index = (action_t * self._tokens_per_action) + k
action_tokens = tf.concat([
action_tokens[:, :action_start_index], token,
action_tokens[:, action_start_index + 1:]
],
axis=1)
# action_tokens is [b, t, self._tokens_per_action]
action_tokens = tf.reshape(action_tokens,
[b, t, self._tokens_per_action])
self._aux_info.update({
# action_predictions_logits is
# [b, self._tokens_per_action, self._vocab_size]
'action_predictions_logits': tf.concat(action_predictions_logits, 1)
})
# predicted_tokens_for_output is [b, self._tokens_per_action]
predicted_tokens_for_output = tf.concat(current_action_tokens, 1)
# state_action_tokens is [b, 1, self._tokens_per_action, 1, 1]
one_state_action_tokens = predicted_tokens_for_output[:, tf.newaxis, :,
tf.newaxis,
tf.newaxis]
state_action_tokens = network_state['action_tokens']
network_state['action_tokens'] = tf.concat([
state_action_tokens[:, :action_t, ...], one_state_action_tokens,
state_action_tokens[:, action_t + 1:, ...]
],
axis=1)
# Increment the time_step for the next inference call.
network_state['seq_idx'] = tf.reshape(
tf.minimum(seq_idx + 1, self._time_sequence_length), [-1, 1, 1, 1, 1])
self._loss = tf.constant(0.0)
else:
# training call --> simply run one transformer forward pass
output_tokens = self._transformer_call(
context_image_tokens,
action_tokens,
attention_mask=attention_mask,
batch_size=b,
training=training)
# Gather all predicted actions for the action loss.
action_logits = tf.gather(
output_tokens, self._action_tokens_mask - 1, axis=1)
action_logits_for_training = tf.reshape(
action_logits, [b, t, self._tokens_per_action, -1])
      # Only take the action from the last timestep as the output action.
# action_logits_for_output is [b, self._tokens_per_action, emb]
action_logits_for_output = action_logits_for_training[:, -1]
# predicted_tokens_for_output is [b, self._tokens_per_action]
predicted_tokens_for_output = tf.argmax(
action_logits_for_output, axis=-1, output_type=tf.int32)
num_items = (
tf.cast(b * t, tf.float32) * self._single_time_step_num_tokens)
action_loss = tf.reduce_mean(
self._loss_object(action_tokens, action_logits_for_training) /
num_items,
axis=-1)
self._loss = action_loss
# store action labels and predictions for visualization
self._aux_info.update({
'action_predictions':
tf.argmax(
action_logits_for_training, axis=-1, output_type=tf.int32),
'action_loss':
action_loss,
'actor_loss_mask':
tf.ones([b], dtype=tf.float32)
})
output_actions = self._action_tokenizer.detokenize(
predicted_tokens_for_output)
return output_actions, network_state
def add_summaries(self, observations: dict[str, tf.Tensor],
logging_info: dict[str, tf.Tensor], debug_summaries: bool,
training: bool) -> None:
"""Adds summaries.
Args:
observations: Observation data including image and natural language
instruction in dict of Tensors.
logging_info: Dict with all data stored for logging during training pass.
debug_summaries: Whether to include debug summaries.
training: Whether this function is called during training or inference.
"""
num_params = 0
for weight in self.trainable_weights:
weight_params = 1
for dim in weight.shape:
weight_params *= dim
num_params += weight_params
tf.compat.v2.summary.scalar(name='num_params', data=num_params)
# debug_summaries are for the non-tpu worker, train_summary.
if debug_summaries:
image = observations['image'] # [b, t, h, w, c]
image_h = image.shape[2]
image_w = image.shape[3]
batch_size = image.shape[0]
num_ts = image.shape[1]
logging.info('image shape %s', image.shape)
# Concat images for different timesteps across width.
image = tf.concat(tf.unstack(image, axis=1), 2)
# Concat images for different batches (up to 8) across height.
image = tf.expand_dims(tf.concat(tf.unstack(image, axis=0)[0:8], 0), 0)
tf.summary.image(
'observations/image',
image,
step=self._train_step_counter,
# Single output since we have concatenated images along batch.
max_outputs=1)
# [b, t], strings
if 'natural_language_instruction' in observations:
task = observations['natural_language_instruction'][:, 0]
tf.summary.text(
'natural_language_instruction', task, step=self._train_step_counter)
if self.attention_scores and not self._use_token_learner:
for l_idx, layer_attention_score in enumerate(self.attention_scores):
logging.info('Attention score shape: %s, %s', l_idx,
layer_attention_score.shape)
for head_idx in range(layer_attention_score.shape[1]):
pairwise_attention = tf.expand_dims(
layer_attention_score[:, head_idx], -1)
# pairwise attention shape (16, 552, 552, 1)
# make attention from different time steps comparable
pairwise_attention = pairwise_attention * np.arange(
1, pairwise_attention.shape[1] + 1)[None, :, None, None]
# visualize spatial attention, note this only supports
# mk1_500tasks_transformer pipeline with no token learner
img_tf_ts = tf.reshape(
tf.transpose(
tf.reshape(
tf.reduce_sum(pairwise_attention, axis=1) / np.arange(
pairwise_attention.shape[1], 0, -1)[None, :, None],
[batch_size, num_ts, -1]),
[0, 2, 1])[:, :-self._tokens_per_action, :],
[-1, 9, 9, num_ts])
img_tf_ts = tf.image.resize(
img_tf_ts, [image_h, image_w],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
img_tf_ts_concat = tf.concat(tf.unstack(img_tf_ts, axis=3), 2)
img_tf_ts_concat_min = tf.reduce_min(
img_tf_ts_concat, axis=[1, 2], keepdims=True)
img_tf_ts_concat = (img_tf_ts_concat - img_tf_ts_concat_min) / (
tf.reduce_max(img_tf_ts_concat, axis=[1, 2], keepdims=True) -
img_tf_ts_concat_min)
img_tf_ts_concat = tf.concat(
tf.unstack(img_tf_ts_concat, axis=0)[:8], 0)
img_tf_ts_concat = tf.expand_dims(
tf.expand_dims(img_tf_ts_concat, 0), -1)
tf.summary.image(
'attention/layer_{}/head_{}'.format(l_idx, head_idx),
img_tf_ts_concat,
step=self._train_step_counter,
# Single output since we have concatenated images along batch.
max_outputs=1)
if img_tf_ts_concat.shape[1] == image.shape[
1] and img_tf_ts_concat.shape[2] == image.shape[2]:
# can overlay
overlay_viz = tf.cast(
(tf.cast(image, tf.float32) * (0.2 + img_tf_ts_concat) / 1.2),
tf.uint8)
tf.summary.image(
'overlay_attention/layer_{}/head_{}'.format(l_idx, head_idx),
overlay_viz,
step=self._train_step_counter,
# Single output since we have concatenated images along batch.
max_outputs=1)
# log action info
action_labels = tf.boolean_mask(logging_info['action_labels'],
logging_info['actor_loss_mask'])
action_predictions = tf.boolean_mask(logging_info['action_predictions'],
logging_info['actor_loss_mask'])
with tf.name_scope('ActionTokens'):
token_accuracy = (
tf.cast(tf.equal(action_labels, action_predictions), tf.float32))
accuracy = tf.reduce_mean(token_accuracy)
tf.compat.v2.summary.scalar(
name='accuracy', data=accuracy, step=self._train_step_counter)
# Accuracy across timesteps
for t in range(self._time_sequence_length):
tf.compat.v2.summary.scalar(
name='accuracy/time_step/{}'.format(t),
data=tf.reduce_mean(token_accuracy[:, t, :]),
step=self._train_step_counter)
token_index = 0
for k in self._action_tokenizer.action_order:
spec = self._action_tokenizer.action_spec[k]
if spec.dtype == tf.int32:
n_tokens = 1
else:
n_tokens = spec.shape[0]
action_token_accuracy = tf.reduce_mean(
token_accuracy[:, :, token_index:token_index + n_tokens])
tf.compat.v2.summary.scalar(
name='accuracy/action_type/{}'.format(k),
data=action_token_accuracy,
step=self._train_step_counter)
for n in range(n_tokens):
tf.summary.histogram(
'tokens/{}_{}/labels'.format(k, n + 1),
action_labels[:, :, token_index],
step=self._train_step_counter)
tf.summary.histogram(
'tokens/{}_{}/predictions'.format(k, n + 1),
action_predictions[:, :, token_index],
step=self._train_step_counter)
token_index += 1
# log loss components
with tf.name_scope('TokenLosses'):
tf.compat.v2.summary.scalar(
name='action_loss',
data=tf.reduce_mean(logging_info['action_loss']),
step=self._train_step_counter)
def _tokenize_images(self, observations, network_state, training):
image = observations['image'] # [b, t, h, w, c]
outer_rank = self._get_outer_rank(observations)
if outer_rank == 1: # This is an inference call
seq_idx = tf.reshape(network_state['seq_idx'], [1])[0]
time_step = tf.minimum(seq_idx, self._time_sequence_length - 1)
image = tf.expand_dims(image, 1)
image_shape = tf.shape(image)
b = image_shape[0]
input_t = image_shape[1]
h = image_shape[2]
w = image_shape[3]
c = image_shape[4]
context = self._extract_context_from_observation(observations, input_t)
image = tf.reshape(image, [b * input_t, h, w, c])
seed = tf.random.uniform(shape=(2,), maxval=2**30, dtype=tf.int32)
image = preprocessors.convert_dtype_and_crop_images(
image,
crop_size=self._crop_size,
training=training,
pad_then_crop=True,
convert_dtype=True,
seed=seed)
image = tf.reshape(image, [b, input_t, h, w, c])
context_image_tokens = self._image_tokenizer(
image, context=context, training=training)
num_tokens = tf.shape(context_image_tokens)[2]
context_image_tokens = tf.reshape(context_image_tokens,
[b, input_t, num_tokens, 1, -1])
if outer_rank == 1: # This is an inference call
network_state['context_image_tokens'] = tf.reshape(
network_state['context_image_tokens'], [
b, self._time_sequence_length, self._tokens_per_context_image, 1,
-1
])
state_image_tokens = network_state['context_image_tokens']
# network_state as input for this call is the output from the last call.
# Therefore, we need to shift all images to the left by 1 in the time axis
# to align w/ the time dim in this call.
state_image_tokens = tf.cond(
seq_idx == self._time_sequence_length,
lambda: tf.roll(state_image_tokens, -1, axis=1),
lambda: state_image_tokens)
context_image_tokens = tf.concat([
state_image_tokens[:, :time_step, ...], context_image_tokens,
state_image_tokens[:, time_step + 1:, ...]
],
axis=1)
network_state['context_image_tokens'] = context_image_tokens
return context_image_tokens, network_state
def _tokenize_actions(self, observations, network_state):
outer_rank = self._get_outer_rank(observations)
if outer_rank == 1: # This is an inference call
action_tokens = tf.squeeze(network_state['action_tokens'], [3, 4])
seq_idx = tf.reshape(network_state['seq_idx'], [1])[0]
# network_state as input for this call is the output from the last call.
# Therefore, we need to shift all actions by 1 to the left.
action_tokens = tf.cond(seq_idx == self._time_sequence_length,
lambda: tf.roll(action_tokens, -1, axis=1),
lambda: action_tokens)
else:
assert outer_rank == 2
if self._actions is None:
b, t = self._get_batch_size_and_seq_len(network_state)
action_tokens = tf.zeros(
shape=[b, t, self._tokens_per_action], dtype=tf.int32)
else:
action_tokens = self._action_tokenizer.tokenize(self._actions)
return action_tokens
def _assemble_input_token_sequence(self, context_image_tokens, action_tokens,
batch_size):
# embed action tokens
action_tokens = tf.one_hot(action_tokens, self._vocab_size)
action_tokens = self._action_token_emb(action_tokens)
action_tokens = tf.zeros_like(action_tokens) # b/260260205
# Because of b/254902773, we need to add 1 extra dimension.
action_tokens = tf.expand_dims(action_tokens, axis=-2)
# assemble token sequence
input_token_sequence = tf.concat([context_image_tokens, action_tokens],
axis=2)
input_token_sequence = tf.reshape(
input_token_sequence, [batch_size, -1, self._token_embedding_size])
return input_token_sequence
def _extract_context_from_observation(self, observations, seq_len):
"""Extract context from observation."""
context = None
if 'natural_language_embedding' in observations:
outer_rank = self._get_outer_rank(observations)
context = observations['natural_language_embedding'] # [b, t, emb-size]
if outer_rank == 1:
context = tf.tile(context[:, None], [1, seq_len, 1])
return context
def set_actions(self, actions: tensorspec_utils.TensorSpecStruct):
"""Sets actions that will be tokenized and used in transformer network.
Args:
actions: actions to be tokenized and used in transformer network. example
actions are terminate = [0, 1] world_vector = [0.9, 0.8, -0.3]
rotation_delta = [-0.1, 0.2, .6] gripper_closedness = 0.9
"""
self._actions = actions
def _get_outer_rank(self, observations):
# used to determine training vs inference call
# outer_rank will be 2 -> [b, t] during training and
# outer_rank will be 1 -> [b] during inference
return nest_utils.get_outer_rank(observations, self._input_tensor_spec)
def _get_batch_size_and_seq_len(self, network_state):
image_shape = tf.shape(network_state['context_image_tokens'])
b = image_shape[0]
t = image_shape[1]
return b, t
def get_actor_loss(self) -> tf.Tensor:
return self._loss
def get_aux_info(self) -> dict[str, Any]:
return self._aux_info
| CyberTron-master | cybertron/models/robotics_transformer/transformer_network.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for token_learner."""
from absl.testing import parameterized
from robotics_transformer.tokenizers import token_learner
import tensorflow as tf
class TokenLearnerTest(parameterized.TestCase):
@parameterized.named_parameters(('sample_input', 512, 8))
def testTokenLearner(self, embedding_dim, num_tokens):
batch = 1
seq = 2
token_learner_layer = token_learner.TokenLearnerModule(
num_tokens=num_tokens)
inputvec = tf.random.normal(shape=(batch * seq, 81, embedding_dim))
learnedtokens = token_learner_layer(inputvec)
self.assertEqual(learnedtokens.shape,
[batch * seq, num_tokens, embedding_dim])
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/token_learner_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple action tokenizer used with Robotics Transformer 1.
As an example, if an action is:
terminate = [0, 1]
world_vector = [0.9, 0.8, -0.3]
rotation_delta = [-0.1, 0.2, .6]
gripper_closedness = 0.9
Then we build a sequence of tokens of length 8 [one for each dimension].
The int32 type action dimensions are already assumed discrete and tokenized,
the float dimensions are bucketed according to the spec's min and max. Each
dimension has 'vocab_size' buckets.
Currently, this tokenizer assumes one action spec and it is highly recommended
to specify the 'action_order', eg [terminate, world_vector, rotation_delta,
gripper_closedness]. Since after tokenization you lose that information, this
will be useful for debugging. Actions may also be subselected for prediction,
since not all actions are needed in the action_order.
"""
from typing import Optional
from tensor2robot.utils import tensorspec_utils
import tensorflow as tf
class RT1ActionTokenizer:
"""Tokenizes based on vocab size."""
def __init__(self,
action_spec: tensorspec_utils.TensorSpecStruct,
vocab_size: int,
action_order: Optional[list[str]] = None):
"""Instantiates an RT1ActionTokenizer.
Args:
action_spec: Tensor spec of the expected action tensor.
vocab_size: Number of buckets to discretize action to.
      action_order: Order of the action names, used to determine the order of
        the tokenized actions when detokenizing and assembling them back into
        the action tensor.
"""
self._action_spec = action_spec
self._vocab_size = vocab_size
if action_order is None:
self._action_order = self._action_spec.keys()
else:
for action in action_order:
if action not in self._action_spec.keys():
raise ValueError('actions: %s not found in action_spec: %s' %
(action, action_spec.keys()))
assert action in self._action_spec.keys()
self._action_order = action_order
self._tokens_per_action = 0
for action in self._action_order:
action_shape = self._action_spec[action].shape
if len(action_shape) != 1:
raise ValueError(
'Only action shapes with single dimension supported, got %s' %
action_shape)
if self._action_spec[action].dtype == tf.int32:
# Int32 actions are already assumed to be tokens.
self._tokens_per_action += 1
else:
self._tokens_per_action += action_shape[0]
    # We measure the number of action tokens in two different ways. One is by
    # checking action_order (above) and the other is by looping through the
    # action spec (below). We assert that the counts computed by these two
    # ways are the same. This ensures action_order is correctly configured;
    # otherwise the assert will throw an error.
num_action_token = 0
for spec in self._action_spec.values():
if spec.dtype == tf.int32:
num_action_token += 1
else:
num_action_token += spec.shape[-1]
tf.debugging.assert_equal(num_action_token, self._tokens_per_action)
@property
def tokens_per_action(self) -> int:
return self._tokens_per_action
@property
def action_spec(self) -> tensorspec_utils.TensorSpecStruct:
return self._action_spec
@property
def action_order(self) -> list[str]:
return self._action_order
def tokenize(self, action: tensorspec_utils.TensorSpecStruct) -> tf.Tensor:
"""Tokenizes an action."""
action_tokens = []
for k in self._action_order:
a = action[k] # a is [batch, actions_size]
spec = self._action_spec[k]
if spec.dtype == tf.int32:
# Int32 actions are already assumed to be tokens, assume it is smaller
# than the vocab size, so all we need to do is pad zeros.
tf.debugging.assert_equal(1, tf.reduce_sum(a, axis=-1))
# extract the token [batch, 1]
token = tf.argmax(a, axis=-1, output_type=tf.int32)
tf.debugging.assert_less(token, self._vocab_size)
# Add a seq dimension [batch, 1]
token = tf.expand_dims(token, axis=-1)
else:
a = tf.clip_by_value(a, spec.minimum, spec.maximum)
# Normalize the action [batch, actions_size]
token = (a - spec.minimum) / (spec.maximum - spec.minimum)
# Bucket and discretize the action to vocab_size, [batch, actions_size]
token = tf.cast(token * (self._vocab_size - 1), tf.int32)
action_tokens.append(token)
# Append all actions, [batch, all_actions_size]
action_tokens = tf.concat(action_tokens, axis=-1)
return action_tokens
def detokenize(self,
action_tokens: tf.Tensor) -> tensorspec_utils.TensorSpecStruct:
"""Detokenizes an action."""
action = tensorspec_utils.TensorSpecStruct()
token_index = 0
for k in self._action_order:
spec = self._action_spec[k]
action_dim = spec.shape[0]
if spec.dtype == tf.int32:
# Int32 actions are already assumed to be tokens.
action[k] = action_tokens[..., token_index]
        # A poor model may output tokens outside the allowed range; in that
        # case, set them to a default value (the 0 token in this case).
outside_range = tf.greater_equal(action[k], action_dim)
action[k] = tf.where(outside_range, tf.zeros_like(action[k]), action[k])
action[k] = tf.one_hot(
action[k], depth=action_dim, axis=-1, dtype=tf.int32)
token_index += 1
else:
actions = []
for _ in range(action_dim):
a = action_tokens[..., token_index:token_index + 1]
a = tf.cast(a, tf.float32)
a = a / (self._vocab_size - 1)
a = (a * (spec.maximum - spec.minimum)) + spec.minimum
actions.append(a)
token_index += 1
action[k] = tf.concat(actions, axis=-1)
return action
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/action_tokenizer.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF implementation of Token Learner(Ryoo et al 2021)."""
import functools
from typing import Optional, Sequence, Union
import numpy as np
import tensorflow as tf
def gelu(x: float) -> float:
return 0.5 * x * (1 +
tf.tanh(tf.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))
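# Editorial note (added; an assumption based on the formula above rather than a
# statement from the original authors): this is the tanh approximation of GELU
# and should closely match tf.nn.gelu(x, approximate=True).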
def _maybe_dropout(rate: float = 0.0, name: str = "dropout"):
"""Helper function to return dropout layer if rate is non zero."""
if rate:
return tf.keras.layers.Dropout(rate, name=name)
return lambda x, *args: x # Does nothing to x.
class MlpBlock(tf.keras.layers.Layer):
"""Transformer MLP / feed-forward block."""
def __init__(self,
*,
mlp_dim: int,
out_dim: Optional[int] = None,
kernel_init: Optional[tf.keras.initializers.Initializer] = tf
.keras.initializers.glorot_uniform(),
bias_init: Optional[tf.keras.initializers.Initializer] = tf.keras
.initializers.RandomNormal(stddev=1e-6),
dropout_rate: float = 0.1,
**kwargs):
"""Initializer for the MLP Block.
This computes outer_dense(gelu(hidden_dense(input))), with dropout
applied as necessary.
Note: Especially outside a keras workflow, make sure to call layer.build
Args:
mlp_dim: The dimension of the inner representation (output of hidden
layer). Usually larger than the input/output dim.
out_dim: The output dimension of the block. If None, the model output dim
is equal to the input dim (usually desired)
kernel_init: Initializer for dense kernels, used for both dense layers.
bias_init: Initializer for dense biases, used for both dense layers.
dropout_rate: Dropout rate to be applied after dense ( & activation)
**kwargs: Other keyword args passed to the tf.keras.layers.Layer
constructor e.g. the name
"""
super().__init__(**kwargs)
self._out_dim = out_dim
self._hidden_dropout = _maybe_dropout(dropout_rate)
self._output_dropout = _maybe_dropout(dropout_rate)
self._hidden_layer = tf.keras.layers.Dense(
mlp_dim,
activation=gelu,
kernel_initializer=kernel_init,
bias_initializer=bias_init,
name="hidden_dense")
# If out_dim is None, infer out_dim = input_dim at self.build()
self._output_layer = functools.partial(
tf.keras.layers.Dense,
kernel_initializer=kernel_init,
bias_initializer=bias_init,
name="final_dense")
def build(self, input_shape: Sequence[int]):
out_dim = self._out_dim or input_shape[-1]
self._output_layer = self._output_layer(units=out_dim)
super().build(input_shape)
def call(self,
inputs: tf.Tensor,
*,
is_training: Union[bool, tf.Tensor] = False) -> tf.Tensor:
"""Applies Transformer MlpBlock module."""
x = self._hidden_layer(inputs)
x = self._hidden_dropout(x, is_training)
x = self._output_layer(x)
x = self._output_dropout(x, is_training)
return x
class TokenLearnerModule(tf.keras.layers.Layer):
"""TokenLearner module V1.1 (https://arxiv.org/abs/2106.11297)."""
def __init__(self,
num_tokens: int,
bottleneck_dim: int = 64,
dropout_rate: float = 0.):
super().__init__()
self.mlp = MlpBlock(
mlp_dim=bottleneck_dim, out_dim=num_tokens, dropout_rate=dropout_rate)
self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
if len(inputs.shape) == 4:
bs, h, w, c = inputs.shape
inputs = tf.reshape(inputs, [bs, h * w, c])
selected = self.layernorm(inputs)
selected = self.mlp(
selected, is_training=training) # Shape: [bs, h*w, n_token].
selected = tf.transpose(selected, [0, 2, 1]) # Shape: [bs, n_token, h*w].
selected = tf.nn.softmax(selected, axis=-1)
feat = tf.einsum("...si,...id->...sd", selected, inputs)
return feat # Shape: [bs, n_token, c]
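# Editorial usage sketch (added; not part of the original module, shapes are
# illustrative assumptions mirroring the unit test): reduce 81 spatial tokens
# per frame to 8 learned tokens.
if __name__ == '__main__':
  layer = TokenLearnerModule(num_tokens=8)
  features = tf.random.normal(shape=(2, 81, 512))  # [batch * seq, h * w, c]
  learned = layer(features, training=False)
  print(learned.shape)  # Expected: (2, 8, 512)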
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/token_learner.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/__init__.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for action_tokenizer."""
import numpy as np
from robotics_transformer.tokenizers import action_tokenizer
from tensor2robot.utils import tensorspec_utils
import tensorflow as tf
from tf_agents.specs import tensor_spec
class ActionTokenizerTest(tf.test.TestCase):
def testTokenize_int32(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.terminate_episode = tensor_spec.BoundedTensorSpec(
(2,), dtype=tf.int32, minimum=0, maximum=1, name='terminate_episode')
tokenizer = action_tokenizer.RT1ActionTokenizer(action_spec, vocab_size=10)
self.assertEqual(1, tokenizer.tokens_per_action)
action = tensorspec_utils.TensorSpecStruct(terminate_episode=[0, 1])
action_tokens = tokenizer.tokenize(action)
self.assertEqual([1], action_tokens.numpy())
def testTokenize_int32_not_one_hot(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.terminate_episode = tensor_spec.BoundedTensorSpec(
(2,), dtype=tf.int32, minimum=0, maximum=1, name='terminate_episode')
tokenizer = action_tokenizer.RT1ActionTokenizer(action_spec, vocab_size=10)
self.assertEqual(1, tokenizer.tokens_per_action)
action = tensorspec_utils.TensorSpecStruct(terminate_episode=[1, 8])
with self.assertRaises(tf.errors.InvalidArgumentError):
tokenizer.tokenize(action)
def testDetokenize_int32(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.terminate_episode = tensor_spec.BoundedTensorSpec(
(2,), dtype=tf.int32, minimum=0, maximum=1, name='terminate_episode')
tokenizer = action_tokenizer.RT1ActionTokenizer(action_spec, vocab_size=10)
# 0 token should become a one hot: [1, 0]
action = tokenizer.detokenize(tf.constant([0], dtype=tf.int32))
self.assertSequenceEqual([1, 0], list(action['terminate_episode'].numpy()))
# 1 token should become a one hot: [0, 1]
action = tokenizer.detokenize(tf.constant([1], dtype=tf.int32))
self.assertSequenceEqual([0, 1], list(action['terminate_episode'].numpy()))
# OOV 3 token should become a default one hot: [1, 0]
action = tokenizer.detokenize(tf.constant([3], dtype=tf.int32))
self.assertSequenceEqual([1, 0], list(action['terminate_episode'].numpy()))
def testTokenize_float(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(3,), dtype=tf.float32, minimum=-1., maximum=1., name='world_vector')
tokenizer = action_tokenizer.RT1ActionTokenizer(action_spec, vocab_size=10)
self.assertEqual(3, tokenizer.tokens_per_action)
action = tensorspec_utils.TensorSpecStruct(world_vector=[0.1, 0.5, -0.8])
action_tokens = tokenizer.tokenize(action)
self.assertSequenceEqual([4, 6, 0], list(action_tokens.numpy()))
def testTokenize_float_with_time_dimension(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(3,), dtype=tf.float32, minimum=-1., maximum=1., name='world_vector')
tokenizer = action_tokenizer.RT1ActionTokenizer(action_spec, vocab_size=10)
self.assertEqual(3, tokenizer.tokens_per_action)
batch_size = 2
time_dimension = 3
action = tensorspec_utils.TensorSpecStruct(
world_vector=tf.constant(
[[0.1, 0.5, -0.8], [0.1, 0.5, -0.8], [0.1, 0.5, -0.8],
[0.1, 0.5, -0.8], [0.1, 0.5, -0.8], [0.1, 0.5, -0.8]],
shape=[batch_size, time_dimension, tokenizer.tokens_per_action]))
action_tokens = tokenizer.tokenize(action)
self.assertSequenceEqual(
[batch_size, time_dimension, tokenizer.tokens_per_action],
action_tokens.shape.as_list())
def testTokenize_float_at_limits(self):
minimum = -1.
maximum = 1.
vocab_size = 10
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(2,),
dtype=tf.float32,
minimum=minimum,
maximum=maximum,
name='world_vector')
tokenizer = action_tokenizer.RT1ActionTokenizer(
action_spec, vocab_size=vocab_size)
self.assertEqual(2, tokenizer.tokens_per_action)
action = tensorspec_utils.TensorSpecStruct(world_vector=[minimum, maximum])
action_tokens = tokenizer.tokenize(action)
    # The minimum value will go to 0.
    # The maximum value will go to vocab_size - 1.
self.assertSequenceEqual([0, vocab_size - 1], list(action_tokens.numpy()))
def testTokenize_invalid_action_spec_shape(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(2, 2), dtype=tf.float32, minimum=1, maximum=-1, name='world_vector')
with self.assertRaises(ValueError):
action_tokenizer.RT1ActionTokenizer(action_spec, vocab_size=10)
def testTokenizeAndDetokenizeIsEqual(self):
action_spec = tensorspec_utils.TensorSpecStruct()
action_spec.world_vector = tensor_spec.BoundedTensorSpec(
(3,), dtype=tf.float32, minimum=-1., maximum=1., name='world_vector')
action_spec.rotation_delta = tensor_spec.BoundedTensorSpec(
(3,),
dtype=tf.float32,
minimum=-np.pi / 2.,
maximum=np.pi / 2.,
name='rotation_delta')
action_spec.gripper_closedness_action = tensor_spec.BoundedTensorSpec(
(1,),
dtype=tf.float32,
minimum=-1.,
maximum=1.,
name='gripper_closedness_action')
num_sub_action_space = 2
action_spec.terminate_episode = tensor_spec.BoundedTensorSpec(
(num_sub_action_space,),
dtype=tf.int32,
minimum=0,
maximum=1,
name='terminate_episode')
tokenizer = action_tokenizer.RT1ActionTokenizer(
action_spec,
vocab_size=1024,
action_order=[
'terminate_episode', 'world_vector', 'rotation_delta',
'gripper_closedness_action'
])
self.assertEqual(8, tokenizer.tokens_per_action)
# Repeat the following test N times with fuzzy inputs.
n_repeat = 10
for _ in range(n_repeat):
action = tensorspec_utils.TensorSpecStruct(
world_vector=np.random.uniform(low=-1., high=1.0, size=3),
rotation_delta=np.random.uniform(
low=-np.pi / 2., high=np.pi / 2., size=3),
gripper_closedness_action=np.random.uniform(low=0., high=1.0, size=1),
terminate_episode=[0, 1])
action_tokens = tokenizer.tokenize(action)
policy_action = tokenizer.detokenize(action_tokens)
for k in action:
self.assertSequenceAlmostEqual(
action[k], policy_action[k].numpy(), places=2)
# Repeat the test with batched actions
batched_action = tensorspec_utils.TensorSpecStruct(
world_vector=[
np.random.uniform(low=-1., high=1.0, size=3),
np.random.uniform(low=-1., high=1.0, size=3)
],
rotation_delta=[
np.random.uniform(low=-np.pi / 2., high=np.pi / 2., size=3),
np.random.uniform(low=-np.pi / 2., high=np.pi / 2., size=3)
],
gripper_closedness_action=[
np.random.uniform(low=0., high=1.0, size=1),
np.random.uniform(low=0., high=1.0, size=1)
],
terminate_episode=[[0, 1], [1, 0]])
action_tokens = tokenizer.tokenize(batched_action)
policy_action = tokenizer.detokenize(action_tokens)
for k in batched_action:
for a, policy_a in zip(batched_action[k], policy_action[k].numpy()):
self.assertSequenceAlmostEqual(a, policy_a, places=2)
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/action_tokenizer_test.py |
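The expected values in testTokenize_float and testTokenize_float_at_limits above follow from uniformly bucketing each float dimension of the action into vocab_size bins. The snippet below is a hypothetical re-derivation of that arithmetic, not the actual action_tokenizer implementation (which lives elsewhere in the repository); it assumes floor discretization, which reproduces the asserted token values.

import numpy as np

def tokenize_float(values, minimum=-1.0, maximum=1.0, vocab_size=10):
    # Clip to the spec bounds, then map [minimum, maximum] onto [0, vocab_size - 1].
    values = np.clip(np.asarray(values, dtype=np.float32), minimum, maximum)
    scaled = (values - minimum) / (maximum - minimum) * (vocab_size - 1)
    return np.floor(scaled).astype(np.int32)

print(tokenize_float([0.1, 0.5, -0.8]))  # [4 6 0], as asserted in testTokenize_float
print(tokenize_float([-1.0, 1.0]))       # [0 9]; the limits map to 0 and vocab_size - 1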
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_tokenizer."""
from absl.testing import parameterized
from robotics_transformer.tokenizers import image_tokenizer
import tensorflow as tf
class ImageTokenizerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('sample_image', 512, 224, False, 8),
('sample_image_token_learner', 512, 224, True, 8))
def testTokenize(self, output_dim, image_resolution, use_token_learner,
num_tokens):
batch = 1
seq = 2
tokenizer = image_tokenizer.RT1ImageTokenizer(
embedding_output_dim=output_dim,
use_token_learner=use_token_learner,
num_tokens=num_tokens)
image = tf.random.normal(
shape=(batch, seq, image_resolution, image_resolution, 3))
image = tf.clip_by_value(image, 0.0, 1.0)
context_vector = tf.random.uniform((batch, seq, 512))
image_tokens = tokenizer(image, context_vector)
if use_token_learner:
self.assertEqual(image_tokens.shape, [batch, seq, num_tokens, 512])
else:
self.assertEqual(image_tokens.shape, [batch, seq, 81, 512])
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/image_tokenizer_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A FiLM Efficientnet contextual image tokenizer used in Robotics Transformer 1.
"""
from typing import Optional
from robotics_transformer.film_efficientnet import pretrained_efficientnet_encoder
from robotics_transformer.tokenizers import token_learner
import tensorflow as tf
class RT1ImageTokenizer(tf.keras.layers.Layer):
"""Tokenizes based on vocab size."""
def __init__(self,
embedding_output_dim: int,
use_token_learner: bool = False,
num_tokens: int = 8,
**kwargs):
"""Instantiates a RT1ImageTokenizer.
Args:
embedding_output_dim: The output size of the tokens.
use_token_learner: Whether to use token learner. See
https://arxiv.org/abs/2106.11297
num_tokens: Relevant only for token learner - the number of learned
tokens.
**kwargs: Keyword arguments to base class.
"""
super().__init__(**kwargs)
self._embedding_output_dim = embedding_output_dim
self._tokenizer = pretrained_efficientnet_encoder.EfficientNetEncoder(
pooling=False, early_film=True)
self._use_token_learner = use_token_learner
if self._use_token_learner:
self._num_tokens = num_tokens
self._token_learner = token_learner.TokenLearnerModule(
num_tokens=self._num_tokens)
@property
def tokens_per_context_image(self) -> int:
if self._use_token_learner:
num_tokens = self._num_tokens
else:
num_tokens = 81
return num_tokens
def __call__(self,
image: tf.Tensor,
context: Optional[tf.Tensor] = None,
training: bool = False) -> tf.Tensor:
"""Gets image tokens.
Args:
image: Images of shape (b, t, h, w, 3) to tokenize.
context: An optional context vector (e.g., a natural language embedding).
Expected to have shape (b, t, embedding_dim).
training: Whether or not we are in training mode.
Returns:
tokens: has shape (batch, t, num_tokens_per_timestep, embedding_dim)
"""
image_shape = tf.shape(image)
b = image_shape[0]
t = image_shape[1]
h = image_shape[2]
w = image_shape[3]
c = image_shape[4]
# Fold the time axis into the batch axis.
image = tf.reshape(image, [b * t, h, w, c])
if context is not None:
context_rank = tf.rank(context)
assertion = tf.Assert(context_rank == 3, data=[context_rank])
with tf.control_dependencies([assertion]):
context = tf.reshape(context, [b * t, tf.shape(context)[-1]])
tokens = self.get_image_embeddings(image, context, training)
if self._use_token_learner:
tokens = self._token_learner(tokens, training)
# Unflatten the time axis, which was previously flattened into the batch.
tokens = tf.reshape(tokens, [b, t, tf.shape(tokens)[1], -1])
return tokens
def get_image_embeddings(self,
image: tf.Tensor,
context: Optional[tf.Tensor],
training: bool = False) -> tf.Tensor:
"""Gets embeddings from image.
Args:
image: Expected to be float32 in range [0, 1] with shape (b, h, w, 3).
context: Expected to be float32 with shape (b, embedding_dim)
training: Whether or not we are in training mode.
Returns:
      tokens of shape (b, num_tokens, embedding_dim)
"""
image_tokens = self._tokenizer(image, context=context, training=training)
image_tokens = tf.reshape(image_tokens, [-1, 81, 512])
return image_tokens
| CyberTron-master | cybertron/models/robotics_transformer/tokenizers/image_tokenizer.py |
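As a usage sketch for the tokenizer above (illustrative only, mirroring the shapes used in the unit test and in the code; it assumes the repository and its pretrained EfficientNet weights are importable):

import tensorflow as tf
from robotics_transformer.tokenizers import image_tokenizer

tokenizer = image_tokenizer.RT1ImageTokenizer(
    embedding_output_dim=512, use_token_learner=True, num_tokens=8)

batch, time = 2, 3
images = tf.random.uniform((batch, time, 224, 224, 3))   # float32 in [0, 1]
context = tf.random.uniform((batch, time, 512))           # e.g. a language embedding

tokens = tokenizer(images, context, training=False)
print(tokens.shape)  # (2, 3, 8, 512) with the token learner enabled

Without the token learner, each frame instead yields the 81 tokens reported by tokens_per_context_image.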
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder based on Efficientnet."""
from typing import Optional
import gin
from robotics_transformer.film_efficientnet import film_conditioning_layer
from robotics_transformer.film_efficientnet import film_efficientnet_encoder
import tensorflow as tf
_MODELS = {
'b3': film_efficientnet_encoder.EfficientNetB3,
}
_SIZES = {
'b3': 300,
}
@gin.configurable
class EfficientNetEncoder(tf.keras.layers.Layer):
"""Applies a pretrained Efficientnet based encoder."""
def __init__(self,
model_variant: str = 'b3',
freeze: bool = False,
early_film: bool = True,
weights: Optional[str] = 'imagenet',
include_top: bool = False,
pooling: bool = True,
**kwargs):
"""Initialize the model.
Args:
model_variant: One of 'b0-b7' of the efficient encoders. See
https://arxiv.org/abs/1905.11946 to understand the variants.
freeze: Whether or not to freeze the pretrained weights (seems to not work
well).
early_film: Whether to inject film layers into the efficientnet encoder
(seems to be essential to getting strong performance).
weights: Which pretrained weights to use. Either 'imagenet', a path to the
pretrained weights, or None for from scratch.
include_top: Whether to add the top fully connected layer. If True, this
will cause encoding to fail and is used only for unit testing purposes.
      pooling: If False, returns the feature map before global average pooling.
**kwargs: Keras specific layer kwargs.
"""
super(EfficientNetEncoder, self).__init__(**kwargs)
if model_variant not in _MODELS:
raise ValueError(f'Unknown variant {model_variant}')
self.model_variant = model_variant
self.early_film = early_film
self.freeze = freeze
self.conv1x1 = tf.keras.layers.Conv2D(
filters=512,
kernel_size=(1, 1),
strides=(1, 1),
padding='SAME',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())
self.net = _MODELS[model_variant](
include_top=include_top,
weights=weights,
include_film=early_film,
)
self.film_layer = film_conditioning_layer.FilmConditioning(num_channels=512)
self._pooling = pooling
def _prepare_image(self, image: tf.Tensor) -> tf.Tensor:
"""Resize the input image and check that the range is correct."""
if len(image.shape) != 4 or image.shape[-1] != 3:
raise ValueError('Provided image should have shape (b, h, w, 3).')
size = _SIZES[self.model_variant]
if image.shape[1] < size / 4 or image.shape[2] < size / 4:
raise ValueError('Provided image is too small.')
if image.shape[1] > size * 4 or image.shape[2] > size * 4:
raise ValueError('Provided image is too large.')
image = tf.image.resize(image, (size, size))
c1 = tf.Assert(tf.reduce_max(image) <= 1, data=[tf.reduce_max(image)])
c2 = tf.Assert(tf.reduce_min(image) >= 0, data=[tf.reduce_min(image)])
with tf.control_dependencies([c1, c2]):
image *= 255 # The image is expected to be in range(0, 255).
image = film_efficientnet_encoder.preprocess_input(image)
return image
def _encode(self, image: tf.Tensor, context: tf.Tensor,
training: bool) -> tf.Tensor:
"""Run the image through the efficientnet encoder."""
image = self._prepare_image(image)
if self.early_film:
return self.net((image, context), training=training)
return self.net(image, training=training)
def call(self,
image: tf.Tensor,
context: Optional[tf.Tensor] = None,
training: bool = True) -> tf.Tensor:
if self.freeze:
features = tf.stop_gradient(self._encode(image, context, training))
else:
features = self._encode(image, context, training)
if context is not None:
features = self.conv1x1(features)
features = self.film_layer(features, context)
if not self._pooling:
return features
# Global average pool.
return tf.reduce_mean(features, [1, 2])
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/pretrained_efficientnet_encoder.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pretrained_efficientnet_encoder."""
import numpy as np
from robotics_transformer.film_efficientnet import film_efficientnet_encoder
from robotics_transformer.film_efficientnet import pretrained_efficientnet_encoder as eff
from skimage import data
import tensorflow as tf
class PretrainedEfficientnetEncoderTest(tf.test.TestCase):
def test_encoding(self):
"""Test that we get a correctly shaped decoding."""
state = np.random.RandomState(0)
context = state.uniform(-1, 1, (10, 512))
model = eff.EfficientNetEncoder()
image = np.expand_dims(data.chelsea(), axis=0) / 255
preds = model(image, context, training=False).numpy()
self.assertEqual(preds.shape, (10, 512))
def test_imagenet_classification(self):
"""Test that we can correctly classify an image of a cat."""
state = np.random.RandomState(0)
context = state.uniform(-1, 1, (10, 512))
model = eff.EfficientNetEncoder(include_top=True)
image = np.expand_dims(data.chelsea(), axis=0) / 255
preds = model._encode(image, context, training=False).numpy()
predicted_names = [
n[1]
for n in film_efficientnet_encoder.decode_predictions(preds, top=3)[0]
]
self.assertIn('tabby', predicted_names)
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/pretrained_efficientnet_encoder_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that film_efficientnet can detect an image of a cat."""
from absl.testing import parameterized
import numpy as np
from robotics_transformer.film_efficientnet import film_efficientnet_encoder
from skimage import data
import tensorflow as tf
class FilmEfficientnetTest(tf.test.TestCase, parameterized.TestCase):
def _helper(self, include_film, model_variant):
if model_variant == 'b0':
size = 224
fe = film_efficientnet_encoder.EfficientNetB0
elif model_variant == 'b1':
size = 240
fe = film_efficientnet_encoder.EfficientNetB1
elif model_variant == 'b2':
size = 260
fe = film_efficientnet_encoder.EfficientNetB2
elif model_variant == 'b3':
size = 300
fe = film_efficientnet_encoder.EfficientNetB3
elif model_variant == 'b4':
size = 380
fe = film_efficientnet_encoder.EfficientNetB4
elif model_variant == 'b5':
size = 456
fe = film_efficientnet_encoder.EfficientNetB5
elif model_variant == 'b6':
size = 528
fe = film_efficientnet_encoder.EfficientNetB6
elif model_variant == 'b7':
size = 600
fe = film_efficientnet_encoder.EfficientNetB7
else:
raise ValueError(f'Unknown variant: {model_variant}')
fe = fe(include_top=True, weights='imagenet', include_film=include_film)
image = np.expand_dims(data.chelsea(), axis=0)
image = tf.image.resize(image, (size, size))
context = np.random.randn(1, 512)
if include_film:
eff_output = fe(
(film_efficientnet_encoder.preprocess_input(image), context),
training=False)
else:
eff_output = fe(
film_efficientnet_encoder.preprocess_input(image), training=False)
film_preds = film_efficientnet_encoder.decode_predictions(
eff_output.numpy(), top=10)
self.assertIn('tabby', [f[1] for f in film_preds[0]])
@parameterized.parameters([True, False])
def test_keras_equivalence_b3(self, include_film):
self._helper(include_film, 'b3')
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/film_efficientnet_encoder_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for film_conditioning_layer."""
from absl.testing import parameterized
import numpy as np
from robotics_transformer.film_efficientnet import film_conditioning_layer
import tensorflow as tf
class FilmConditioningLayerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([2, 4])
def test_film_conditioning_rank_two_and_four(self, conv_rank):
batch = 2
num_channels = 3
if conv_rank == 2:
conv_layer = np.random.randn(batch, num_channels)
elif conv_rank == 4:
conv_layer = np.random.randn(batch, 1, 1, num_channels)
else:
raise ValueError(f'Unexpected conv rank: {conv_rank}')
context = np.random.rand(batch, num_channels)
film_layer = film_conditioning_layer.FilmConditioning(num_channels)
out = film_layer(conv_layer, context)
tf.debugging.assert_rank(out, conv_rank)
if __name__ == '__main__':
tf.test.main()
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/film_conditioning_layer_test.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/__init__.py |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet variants model for Keras with Film-Conditioning.
Related papers/blogs:
- https://arxiv.org/abs/1512.03385
- https://arxiv.org/pdf/1603.05027v2.pdf
- http://torch.ch/blog/2016/02/04/resnets.html
- https://arxiv.org/abs/1709.07871
"""
import tensorflow.compat.v2 as tf
layers = tf.keras.layers
class FilmConditioning(tf.keras.layers.Layer):
"""Layer that adds FiLM conditioning.
This is intended to be applied after a convolutional layer. It will learn a
multiplicative and an additive factor to be applied to each channel of the
convolution's output.
Conv layer can be rank 2 or 4.
For further details, see: https://arxiv.org/abs/1709.07871
"""
def __init__(self, num_channels: int):
"""Constructs a FiLM conditioning layer.
Args:
num_channels: Number of filter channels to expect in the input.
"""
super().__init__()
# Note that we initialize with zeros because empirically we have found
# this works better than initializing with glorot.
self._projection_add = layers.Dense(
num_channels,
activation=None,
kernel_initializer='zeros',
bias_initializer='zeros')
self._projection_mult = layers.Dense(
num_channels,
activation=None,
kernel_initializer='zeros',
bias_initializer='zeros')
def call(self, conv_filters: tf.Tensor, conditioning: tf.Tensor):
tf.debugging.assert_rank(conditioning, 2)
projected_cond_add = self._projection_add(conditioning)
projected_cond_mult = self._projection_mult(conditioning)
if len(conv_filters.shape) == 4:
# [B, D] -> [B, 1, 1, D]
projected_cond_add = projected_cond_add[:, tf.newaxis, tf.newaxis]
projected_cond_mult = projected_cond_mult[:, tf.newaxis, tf.newaxis]
else:
tf.debugging.assert_rank(conv_filters, 2)
# Original FiLM paper argues that 1 + gamma centers the initialization at
# identity transform.
result = (1 + projected_cond_mult) * conv_filters + projected_cond_add
return result
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/film_conditioning_layer.py |
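In equation form, the layer above computes out = (1 + gamma(z)) * x + beta(z), where x is the convolutional feature map, z is the conditioning vector, and gamma and beta are the two zero-initialized dense projections, so at initialization the layer is an identity transform. The check below is illustrative and not part of the repository.

import numpy as np
import tensorflow as tf
from robotics_transformer.film_efficientnet import film_conditioning_layer

film = film_conditioning_layer.FilmConditioning(num_channels=8)
features = tf.random.normal((2, 5, 5, 8))   # rank-4 conv output
context = tf.random.normal((2, 16))          # conditioning vector

out = film(features, context)
# Both projections are zero-initialized, so gamma = beta = 0 at init and
# (1 + 0) * features + 0 == features.
np.testing.assert_allclose(out.numpy(), features.numpy(), atol=1e-6)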
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing functions for transforming the image for training."""
from typing import Optional
import gin
import tensorflow.compat.v2 as tf
CROP_SIZE = 472
@gin.configurable(
denylist=['images', 'crop_size', 'training', 'convert_dtype', 'seed'])
def convert_dtype_and_crop_images(images,
crop_size: int = CROP_SIZE,
training: bool = True,
pad_then_crop: bool = False,
convert_dtype: bool = True,
seed: Optional[tf.Tensor] = None):
"""Convert uint8 [512, 640, 3] images to float32 and square crop.
Args:
images: [B, H, W, 3] uint8 tensor of images.
crop_size: Width of the square crop.
training: If we are in training (random crop) or not-training (fixed crop).
    pad_then_crop: If True, pads the image and then crops it back to the
      original image size. This allows the full field of view to be retained.
    convert_dtype: Whether or not to convert the image to float32 in the range
      [0, 1].
    seed: Optional seed of shape (2,) to pass to tf.random.stateless_uniform.
Returns:
[B, crop_size, crop_size, 3] images of dtype float32.
"""
if seed is None:
seed = tf.random.uniform(shape=(2,), maxval=2**30, dtype=tf.int32)
seed2 = tf.random.experimental.stateless_split(seed, num=1)[0]
if convert_dtype:
images = tf.image.convert_image_dtype(images, tf.float32)
image_height = images.get_shape().as_list()[-3]
image_width = images.get_shape().as_list()[-2]
if pad_then_crop:
if training:
if image_height == 512:
ud_pad = 40
lr_pad = 100
elif image_height == 256:
ud_pad = 20
lr_pad = 50
else:
raise ValueError(
'convert_dtype_and_crop_images only supports image height 512 or '
'256.')
max_y = 2 * ud_pad
max_x = 2 * lr_pad
images = tf.image.pad_to_bounding_box(
images,
offset_height=ud_pad,
offset_width=lr_pad,
target_height=image_height + 2 * ud_pad,
target_width=image_width + 2 * lr_pad)
offset_y = tf.random.stateless_uniform((),
maxval=max_y + 1,
dtype=tf.int32,
seed=seed)
offset_x = tf.random.stateless_uniform((),
maxval=max_x + 1,
dtype=tf.int32,
seed=seed2)
images = tf.image.crop_to_bounding_box(images, offset_y, offset_x,
image_height, image_width)
else:
# Standard cropping.
max_y = image_height - crop_size
max_x = image_width - crop_size
if training:
offset_y = tf.random.stateless_uniform((),
maxval=max_y + 1,
dtype=tf.int32,
seed=seed)
offset_x = tf.random.stateless_uniform((),
maxval=max_x + 1,
dtype=tf.int32,
seed=seed2)
images = tf.image.crop_to_bounding_box(images, offset_y, offset_x,
crop_size, crop_size)
else:
images = tf.image.crop_to_bounding_box(images, max_y // 2, max_x // 2,
crop_size, crop_size)
return images
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/preprocessors.py |
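An illustrative call to the cropping helper above, assuming a batch of uint8 512x640 camera images as in the test file that follows. In training mode the default path takes a random 472x472 crop and converts to float32 in [0, 1]; with pad_then_crop=True the image is padded, randomly shifted, and cropped back to its original 512x640 size.

import tensorflow as tf
from robotics_transformer.film_efficientnet import preprocessors

images = tf.cast(
    tf.random.uniform((2, 512, 640, 3), maxval=256, dtype=tf.int32), tf.uint8)

cropped = preprocessors.convert_dtype_and_crop_images(images, training=True)
print(cropped.shape, cropped.dtype)   # (2, 472, 472, 3) float32

shifted = preprocessors.convert_dtype_and_crop_images(
    images, training=True, pad_then_crop=True)
print(shifted.shape)                  # (2, 512, 640, 3)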
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocessors."""
from absl.testing import parameterized
import numpy as np
from robotics_transformer.film_efficientnet import preprocessors
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v2 as tf
def _random_image(shape):
images = tf.random.uniform(
shape, minval=0, maxval=255, dtype=tf.dtypes.int32, seed=42)
return tf.cast(images, tf.uint8)
def _get_features(
image_shape=(2, 512, 640, 3), use_task_image=False, use_goal_image=False):
# Time-dimension stacking occurs during training but not eval.
state = tensorspec_utils.TensorSpecStruct(image=_random_image(image_shape))
if use_task_image:
state.task_image = _random_image(image_shape)
if use_goal_image:
state.goal_image = _random_image(image_shape)
return state
class PreprocessorsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters((True, False, False), (False, True, False),
(True, False, True), (False, True, True))
def testConvertDtypeAndCropImages(self, training, pad_then_crop,
convert_dtype):
features = _get_features()
images = preprocessors.convert_dtype_and_crop_images(
features.image,
training=training,
pad_then_crop=pad_then_crop,
convert_dtype=convert_dtype)
expected_cropped_shape = ([2, 512, 640, 3]
if pad_then_crop else [2, 472, 472, 3])
tf.ensure_shape(images, expected_cropped_shape)
if convert_dtype:
self.assertEqual(images.dtype, tf.float32)
self.assertLessEqual(images.numpy().max(), 1.)
self.assertGreaterEqual(images.numpy().min(), 0.)
else:
self.assertEqual(images.dtype, tf.uint8)
self.assertLessEqual(images.numpy().max(), 255)
self.assertGreaterEqual(images.numpy().min(), 0)
self.assertGreater(images.numpy().max(), 1)
def testConvertDtypeAndCropImagesSeeded(self):
features = _get_features()
seed = tf.constant([1, 2], tf.int32)
images1 = preprocessors.convert_dtype_and_crop_images(
features.image, training=True, pad_then_crop=True, seed=seed)
images2 = preprocessors.convert_dtype_and_crop_images(
features.image, training=True, pad_then_crop=True, seed=seed)
diff = np.sum(np.abs(images1.numpy() - images2.numpy()))
self.assertAlmostEqual(diff, 0)
def testConvertDtypeAndCropImagesUnseeded(self):
features = _get_features()
seed1 = tf.constant([1, 2], tf.int32)
images1 = preprocessors.convert_dtype_and_crop_images(
features.image, training=True, pad_then_crop=True, seed=seed1)
seed2 = tf.constant([2, 3], tf.int32)
images2 = preprocessors.convert_dtype_and_crop_images(
features.image, training=True, pad_then_crop=True, seed=seed2)
diff = np.sum(np.abs(images1.numpy() - images2.numpy()))
self.assertNotAlmostEqual(diff, 0)
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/preprocessors_test.py |