python_code | repo_name | file_path
---|---|---
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tools/scripts/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Requires vqa-maskrcnn-benchmark (https://gitlab.com/vedanuj/vqa-maskrcnn-benchmark)
# to be built and installed. The category mapping for Visual Genome can be downloaded from
# https://dl.fbaipublicfiles.com/pythia/data/visual_genome_categories.json
# When the --background flag is set, the index saved with key "objects" in
# info_list is offset by +1 relative to the Visual Genome category mapping above,
# and 0 is the background class. When the --background flag is not set, the
# index saved with key "objects" in info_list matches the Visual Genome
# category mapping.
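# Illustrative invocation (a sketch; the paths are placeholders, the flags are
# the ones defined in get_parser below):
#   python tools/scripts/features/extract_features_vmb.py \
#       --model_name X-152 --image_dir ./images --output_folder ./output \
#       --num_features 100 --background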
import argparse
import os
import cv2
import numpy as np
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import nms
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from mmf.utils.download import download
from PIL import Image
from tools.scripts.features.extraction_utils import chunks, get_image_files
class FeatureExtractor:
MODEL_URL = {
"X-101": "https://dl.fbaipublicfiles.com/pythia/"
+ "detectron_model/detectron_model.pth",
"X-152": "https://dl.fbaipublicfiles.com/pythia/"
+ "detectron_model/detectron_model_x152.pth",
}
CONFIG_URL = {
"X-101": "https://dl.fbaipublicfiles.com/pythia/"
+ "detectron_model/detectron_model.yaml",
"X-152": "https://dl.fbaipublicfiles.com/pythia/"
+ "detectron_model/detectron_model_x152.yaml",
}
MAX_SIZE = 1333
MIN_SIZE = 800
def __init__(self):
self.args = self.get_parser().parse_args()
self._try_downloading_necessities(self.args.model_name)
self.detection_model = self._build_detection_model()
os.makedirs(self.args.output_folder, exist_ok=True)
def _try_downloading_necessities(self, model_name):
if self.args.model_file is None and model_name is not None:
model_url = self.MODEL_URL[model_name]
config_url = self.CONFIG_URL[model_name]
self.args.model_file = model_url.split("/")[-1]
self.args.config_file = config_url.split("/")[-1]
if os.path.exists(self.args.model_file) and os.path.exists(
self.args.config_file
):
print(f"model and config file exists in directory: {os.getcwd()}")
return
print("Downloading model and configuration")
download(model_url, ".", self.args.model_file)
download(config_url, ".", self.args.config_file)
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name", default="X-152", type=str, help="Model to use for detection"
)
parser.add_argument(
"--model_file",
default=None,
type=str,
help="Detectron model file. This overrides the model_name param.",
)
parser.add_argument(
"--config_file", default=None, type=str, help="Detectron config file"
)
parser.add_argument(
"--start_index", default=0, type=int, help="Index to start from "
)
parser.add_argument("--end_index", default=None, type=int, help="")
parser.add_argument("--batch_size", type=int, default=2, help="Batch size")
parser.add_argument(
"--num_features",
type=int,
default=100,
help="Number of features to extract.",
)
parser.add_argument(
"--output_folder", type=str, default="./output", help="Output folder"
)
parser.add_argument("--image_dir", type=str, help="Image directory or file")
parser.add_argument(
"--feature_name",
type=str,
help="The name of the feature to extract",
default="fc6",
)
parser.add_argument(
"--exclude_list",
type=str,
help="List of images to be excluded from feature conversion. "
+ "Each image on a new line",
default="./list",
)
parser.add_argument(
"--confidence_threshold",
type=float,
default=0,
help="Threshold of detection confidence above which boxes will be selected",
)
parser.add_argument(
"--background",
action="store_true",
help="The model will output predictions for the background class when set",
)
return parser
def _build_detection_model(self):
cfg.merge_from_file(self.args.config_file)
cfg.freeze()
model = build_detection_model(cfg)
checkpoint = torch.load(self.args.model_file, map_location=torch.device("cpu"))
load_state_dict(model, checkpoint.pop("model"))
model.to("cuda")
model.eval()
return model
def _image_transform(self, path):
img = Image.open(path)
im = np.array(img).astype(np.float32)
if im.shape[-1] > 3:
im = np.array(img.convert("RGB")).astype(np.float32)
# Handle grayscale images (otherwise: IndexError: too many indices for array)
if len(im.shape) < 3:
im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
im = im[:, :, ::-1]
im -= np.array([102.9801, 115.9465, 122.7717])
im_shape = im.shape
im_height = im_shape[0]
im_width = im_shape[1]
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
# Scale based on minimum size
im_scale = self.MIN_SIZE / im_size_min
# Prevent the biggest axis from being more than max_size
# If bigger, scale it down
if np.round(im_scale * im_size_max) > self.MAX_SIZE:
im_scale = self.MAX_SIZE / im_size_max
im = cv2.resize(
im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
)
img = torch.from_numpy(im).permute(2, 0, 1)
im_info = {"width": im_width, "height": im_height}
return img, im_scale, im_info
def _process_feature_extraction(
self, output, im_scales, im_infos, feature_name="fc6", conf_thresh=0
):
batch_size = len(output[0]["proposals"])
n_boxes_per_image = [len(boxes) for boxes in output[0]["proposals"]]
score_list = output[0]["scores"].split(n_boxes_per_image)
score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]
feats = output[0][feature_name].split(n_boxes_per_image)
cur_device = score_list[0].device
feat_list = []
info_list = []
for i in range(batch_size):
dets = output[0]["proposals"][i].bbox / im_scales[i]
scores = score_list[i]
max_conf = torch.zeros(scores.shape[0]).to(cur_device)
conf_thresh_tensor = torch.full_like(max_conf, conf_thresh)
start_index = 1
# Column 0 of the scores matrix is for the background class
if self.args.background:
start_index = 0
for cls_ind in range(start_index, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.5)
max_conf[keep] = torch.where(
# Better than max one till now and minimally greater
# than conf_thresh
(cls_scores[keep] > max_conf[keep])
& (cls_scores[keep] > conf_thresh_tensor[keep]),
cls_scores[keep],
max_conf[keep],
)
sorted_scores, sorted_indices = torch.sort(max_conf, descending=True)
num_boxes = (sorted_scores[: self.args.num_features] != 0).sum()
keep_boxes = sorted_indices[: self.args.num_features]
feat_list.append(feats[i][keep_boxes])
bbox = output[0]["proposals"][i][keep_boxes].bbox / im_scales[i]
# Predict the class label using the scores
objects = torch.argmax(scores[keep_boxes][:, start_index:], dim=1)
info_list.append(
{
"bbox": bbox.cpu().numpy(),
"num_boxes": num_boxes.item(),
"objects": objects.cpu().numpy(),
"cls_prob": scores[keep_boxes][:, start_index:].cpu().numpy(),
"image_width": im_infos[i]["width"],
"image_height": im_infos[i]["height"],
}
)
return feat_list, info_list
def get_detectron_features(self, image_paths):
img_tensor, im_scales, im_infos = [], [], []
for image_path in image_paths:
im, im_scale, im_info = self._image_transform(image_path)
img_tensor.append(im)
im_scales.append(im_scale)
im_infos.append(im_info)
# Image dimensions should be divisible by 32, to allow convolutions
# in detector to work
current_img_list = to_image_list(img_tensor, size_divisible=32)
current_img_list = current_img_list.to("cuda")
with torch.no_grad():
output = self.detection_model(current_img_list)
feat_list = self._process_feature_extraction(
output,
im_scales,
im_infos,
self.args.feature_name,
self.args.confidence_threshold,
)
return feat_list
def _save_feature(self, file_name, feature, info):
file_base_name = os.path.basename(file_name)
file_base_name = file_base_name.split(".")[0]
info_file_base_name = file_base_name + "_info.npy"
file_base_name = file_base_name + ".npy"
np.save(
os.path.join(self.args.output_folder, file_base_name), feature.cpu().numpy()
)
np.save(os.path.join(self.args.output_folder, info_file_base_name), info)
def extract_features(self):
image_dir = self.args.image_dir
if os.path.isfile(image_dir):
features, infos = self.get_detectron_features([image_dir])
self._save_feature(image_dir, features[0], infos[0])
else:
files = get_image_files(
self.args.image_dir,
exclude_list=self.args.exclude_list,
start_index=self.args.start_index,
end_index=self.args.end_index,
output_folder=self.args.output_folder,
)
finished = 0
total = len(files)
for chunk, begin_idx in chunks(files, self.args.batch_size):
features, infos = self.get_detectron_features(chunk)
for idx, file_name in enumerate(chunk):
self._save_feature(file_name, features[idx], infos[idx])
finished += len(chunk)
if finished % 200 == 0:
print(f"Processed {finished}/{total}")
if __name__ == "__main__":
feature_extractor = FeatureExtractor()
feature_extractor.extract_features()
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/extract_features_vmb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from glob import glob
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from mmf.common.constants import IMAGE_COLOR_MEAN, IMAGE_COLOR_STD
from PIL import Image
from torch.autograd import Variable
TARGET_IMAGE_SIZE = [448, 448]
data_transforms = transforms.Compose(
[
transforms.Resize(TARGET_IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize(IMAGE_COLOR_MEAN, IMAGE_COLOR_STD),
]
)
use_cuda = torch.cuda.is_available()
# NOTE feat path "https://download.pytorch.org/models/resnet152-b121ed2d.pth"
RESNET152_MODEL = models.resnet152(pretrained=True)
RESNET152_MODEL.eval()
if use_cuda:
RESNET152_MODEL = RESNET152_MODEL.cuda()
class ResNet152FeatModule(nn.Module):
def __init__(self):
super().__init__()
modules = list(RESNET152_MODEL.children())[:-2]
self.feature_module = nn.Sequential(*modules)
def forward(self, x):
return self.feature_module(x)
_resnet_module = ResNet152FeatModule()
if use_cuda:
_resnet_module = _resnet_module.cuda()
def extract_image_feat(img_file):
img = Image.open(img_file).convert("RGB")
img_transform = data_transforms(img)
# make sure grey scale image is processed correctly
if img_transform.shape[0] == 1:
img_transform = img_transform.expand(3, -1, -1)
img_var = Variable(img_transform.unsqueeze(0))
if use_cuda:
img_var = img_var.cuda()
img_feat = _resnet_module(img_var)
return img_feat
def get_image_id(image_name):
image_id = int(image_name.split(".")[0].split("_")[-1])
return image_id
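# For illustration (assuming COCO-style names): get_image_id("COCO_train2014_000000000009.jpg")
# drops the extension, takes the last "_"-separated token and casts it to int,
# returning 9.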
def extract_dataset_pool5(image_dir, save_dir, total_group, group_id, ext_filter):
image_list = glob(image_dir + "/*." + ext_filter)
image_list = {f: 1 for f in image_list}
exclude = {}
with open("./list") as f:
lines = f.readlines()
for line in lines:
exclude[line.strip("\n").split(os.path.sep)[-1].split(".")[0]] = 1
output_files = glob(os.path.join(save_dir, "*.npy"))
output_dict = {}
for f in output_files:
file_name = f.split(os.path.sep)[-1].split(".")[0]
output_dict[file_name] = 1
for f in list(image_list.keys()):
file_name = f.split(os.path.sep)[-1].split(".")[0]
if file_name in output_dict or file_name in exclude:
image_list.pop(f)
image_list = list(image_list.keys())
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for n_im, impath in enumerate(image_list):
if (n_im + 1) % 100 == 0:
print("processing %d / %d" % (n_im + 1, len(image_list)))
image_name = os.path.basename(impath)
image_id = get_image_id(image_name)
if image_id % total_group != group_id:
continue
feat_name = image_name.replace(ext_filter, "npy")
save_path = os.path.join(save_dir, feat_name)
tmp_lock = save_path + ".lock"
if os.path.exists(save_path) and not os.path.exists(tmp_lock):
continue
if not os.path.exists(tmp_lock):
os.makedirs(tmp_lock)
# pool5_val = extract_image_feat(impath).permute(0, 2, 3, 1)
try:
pool5_val = extract_image_feat(impath).permute(0, 2, 3, 1)
except Exception:
print("error for" + image_name)
continue
feat = pool5_val.data.cpu().numpy()
np.save(save_path, feat)
os.rmdir(tmp_lock)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--total_group", type=int, default=1)
parser.add_argument("--group_id", type=int, default=0)
parser.add_argument("--data_dir", type=str, required=True)
parser.add_argument("--out_dir", type=str, required=True)
parser.add_argument("--image_ext", type=str, default="jpg")
args = parser.parse_args()
extract_dataset_pool5(
args.data_dir, args.out_dir, args.total_group, args.group_id, args.image_ext
)
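# Illustrative invocation (a sketch with placeholder paths; --total_group and
# --group_id shard the work across parallel jobs by image id modulo total_group):
#   python tools/scripts/features/extract_resnet152_feat.py \
#       --data_dir ./images --out_dir ./resnet152_feats \
#       --total_group 4 --group_id 0 --image_ext jpg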
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/extract_resnet152_feat.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This script extracts features for VinVL using their scene-graph-benchmark repo.
# Their AttrRCNN model does not support JIT, so we need to clone the repo for
# their PyTorch code. This script assumes you have followed the VinVL feature
# extraction tutorial; the setup is straightforward.
# This script is a modification of their tools/demo/demo_image.py, adapted for
# feature extraction over image directories as suggested by SPQRXVIII001 in the
# GitHub issues.
# If you would like VinVL features for a popular dataset, consider
# downloading the pre-extracted features from
# https://github.com/microsoft/Oscar/blob/master/VinVL_DOWNLOAD.md
# This script was written for scene_graph_benchmark commit 8e14944 and
# oscar (the VinVL repo) commit 4788a74.
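# A sketch of a typical invocation (placeholder paths, flags as defined in
# get_parser below):
#   python tools/scripts/features/extract_features_vinvl.py \
#       --model_name X-152-C4 --image_dir ./images --output_folder ./output \
#       --batch_size 2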
import argparse
import os
import cv2
import numpy as np
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data.transforms import build_transforms
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from mmf.utils.download import download
from PIL import Image
from scene_graph_benchmark.AttrRCNN import AttrRCNN
from scene_graph_benchmark.config import sg_cfg
from tools.scripts.features.extraction_utils import chunks, get_image_files
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class FeatureExtractor:
MODEL_URL = {
"X-152-C4": "https://dl.fbaipublicfiles.com/mmf/data/models/vinvl/"
+ "detection/vinvl_vg_x152c4.pth",
}
CONFIG_URL = {
"X-152-C4": "https://dl.fbaipublicfiles.com/mmf/data/models/vinvl/"
+ "detection/vinvl_x152c4.yaml",
}
def __init__(self):
self.args = self.get_parser().parse_args()
self._try_downloading_necessities(self.args.model_name)
self.detection_model = self._build_detection_model()
self.transforms = build_transforms(cfg, is_train=False)
os.makedirs(self.args.output_folder, exist_ok=True)
def _try_downloading_necessities(self, model_name):
if self.args.model_file is None and model_name is not None:
model_url = self.MODEL_URL[model_name]
config_url = self.CONFIG_URL[model_name]
self.args.model_file = model_url.split("/")[-1]
self.args.config_file = config_url.split("/")[-1]
if os.path.exists(self.args.model_file) and os.path.exists(
self.args.config_file
):
print(f"model and config file exists in directory: {os.getcwd()}")
return
print("Downloading model and configuration")
download(model_url, ".", self.args.model_file)
download(config_url, ".", self.args.config_file)
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="X-152-C4",
type=str,
help="Model to use for detection",
)
parser.add_argument(
"--model_file",
default=None,
type=str,
help="Detectron model file. This overrides the model_name param.",
)
parser.add_argument(
"--config_file", default=None, type=str, help="Detectron config file"
)
parser.add_argument(
"--start_index", default=0, type=int, help="Index to start from "
)
parser.add_argument("--end_index", default=None, type=int, help="")
parser.add_argument("--batch_size", type=int, default=2, help="Batch size")
parser.add_argument(
"--num_features",
type=int,
default=100,
help="Number of features to extract.",
)
parser.add_argument(
"--output_folder", type=str, default="./output", help="Output folder"
)
parser.add_argument("--image_dir", type=str, help="Image directory or file")
parser.add_argument(
"--exclude_list",
type=str,
help="List of images to be excluded from feature conversion. "
+ "Each image on a new line",
default="./list",
)
parser.add_argument(
"--background",
action="store_true",
help="The model will output predictions for the background class when set",
)
return parser
def _build_detection_model(self):
"""Create model cfg for feature extraction
SG expects a yacs config (CfgNode)
which is effectively a dictionary merged from
their cfg defaults, their sg_cfg defaults, and
scene_graph_benchmark/sgg_configs/vgattr/vinvl_x152c4.yaml
the last needs to be modified for vinvl feature extraction
we will modify that cfg obj here to output box_features
used by oscar (vinvl model) as image feature input
"""
cfg.set_new_allowed(True)
cfg.merge_from_other_cfg(sg_cfg)
cfg.set_new_allowed(False)
# Configuring VinVl
cfg.merge_from_file(self.args.config_file)
model_args = [
"MODEL.WEIGHT",
self.args.model_file,
"MODEL.ROI_HEADS.NMS_FILTER",
1,
"MODEL.ROI_HEADS.SCORE_THRESH",
0.2,
"TEST.IGNORE_BOX_REGRESSION",
False,
"MODEL.ATTRIBUTE_ON",
True,
"TEST.OUTPUT_FEATURE",
True,
"TEST.OUTPUT_RELATION_FEATURE",
True,
"TEST.TSV_SAVE_SUBSET",
["rect", "class", "conf", "feature", "relation_feature"],
"TEST.GATHER_ON_CPU",
True,
]
cfg.merge_from_list(model_args)
cfg.freeze()
model = AttrRCNN(cfg)
model.to(DEVICE)
model.eval()
output_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
checkpointer.load(cfg.MODEL.WEIGHT)
return model
def _image_transform(self, path):
img = cv2.imread(path)
img_height = img.shape[0]
img_width = img.shape[1]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
img, _ = self.transforms(img, target=None)
img = img.to(DEVICE)
return img, {"width": img_width, "height": img_height}
def _norm_box(self, bbox, w, h):
bbox_aug = torch.zeros(bbox.size(0), 6)
bbox_aug[:, :4] = bbox
bbox_aug[:, 0] /= w
bbox_aug[:, 1] /= h
bbox_aug[:, 2] /= w
bbox_aug[:, 3] /= h
bbox_aug[:, 4] = bbox_aug[:, 2] - bbox_aug[:, 0]
bbox_aug[:, 5] = bbox_aug[:, 3] - bbox_aug[:, 1]
return bbox_aug
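# Worked example for _norm_box above (illustrative numbers): for a 200x400 (w x h)
# image and a box (x1, y1, x2, y2) = (10, 20, 110, 220) it returns
# (0.05, 0.05, 0.55, 0.55, 0.5, 0.5): the corners normalized by width/height,
# followed by the normalized box width and height.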
def _process_feature_extraction(self, output, im_infos):
"""Convert AttrRCNN object detection output and image sizes
to make image features and image info expected by MMF.
Args:
output (List[BoxList]):
A list of OD outputs, with BoxList containing image_info and feats
BoxList fields contain labels, scores, box_features,
attr_labels, attr_scores based on config options.
bbox attribute contains bounding boxes.
im_infos (List[Dict[str,int]]):
A list of Dicts containing image width, height values.
Returns:
Tuple[List[np.Array], List[Dict[str, Union[torch.Tensor, int]]]]:
Returns a list of image features, and list of image info dicts
"""
feat_list = []
info_list = []
for i, box_list in enumerate(output):
w = im_infos[i]["width"]
h = im_infos[i]["height"]
box_list = box_list.to("cpu").resize((w, h))
det_dict = {key: box_list.get_field(key) for key in box_list.fields()}
bbox = box_list.bbox
bbox_aug = self._norm_box(bbox, w, h)
det_dict["bbox"] = bbox_aug
det_dict["image_width"] = w
det_dict["image_height"] = h
features = torch.cat([det_dict["box_features"], det_dict["bbox"]], dim=1)
det_dict = {
key: val.numpy() if isinstance(val, torch.Tensor) else val
for key, val in det_dict.items()
}
features = features.numpy()
feat_list += [features]
info_list += [det_dict]
return feat_list, info_list
def get_vinvl_features(self, image_paths):
img_tensor, im_infos = [], []
for image_path in image_paths:
im, im_info = self._image_transform(image_path)
img_tensor.append(im)
im_infos.append(im_info)
current_img_list = to_image_list(img_tensor, size_divisible=32)
current_img_list = current_img_list.to(DEVICE)
torch.manual_seed(0)
with torch.no_grad():
output = self.detection_model(current_img_list)
feat_list = self._process_feature_extraction(
output,
im_infos,
)
return feat_list
def _save_feature(self, file_name, feature, info):
file_base_name = os.path.basename(file_name)
file_base_name = file_base_name.split(".")[0]
info_file_base_name = file_base_name + "_info.npy"
file_base_name = file_base_name + ".npy"
np.save(os.path.join(self.args.output_folder, file_base_name), feature)
np.save(os.path.join(self.args.output_folder, info_file_base_name), info)
def extract_features(self):
"""Models and config files are downloaded if not
specified in args.
Then gets a list of images to extract features from.
Will exclude images already in the output dir,
and images in the exclude list.
Images are loaded and transformed based on config.
Will then do feature extraction in batches
using AttrRCNN with weights from scene graph benchmark
for VinVL (Oscar+) image features.
Output of a list of BoxList objects that contain
fields and bbox which are processes to create
image features and image info objects for MMF.
By default this will generate npy files containing
the image features expected by VinVL checkpoints,
and work with MMF out of the box.
"""
image_dir = self.args.image_dir
if os.path.isfile(image_dir):
features, infos = self.get_vinvl_features([image_dir])
self._save_feature(image_dir, features[0], infos[0])
else:
files = get_image_files(
self.args.image_dir,
exclude_list=self.args.exclude_list,
start_index=self.args.start_index,
end_index=self.args.end_index,
output_folder=self.args.output_folder,
)
finished = 0
total = len(files)
for chunk, _ in chunks(files, self.args.batch_size):
features, infos = self.get_vinvl_features(chunk)
for idx, file_name in enumerate(chunk):
self._save_feature(file_name, features[idx], infos[idx])
finished += len(chunk)
if finished % 200 == 0:
print(f"Processed {finished}/{total}")
if __name__ == "__main__":
feature_extractor = FeatureExtractor()
feature_extractor.extract_features()
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/extract_features_vinvl.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import os
import pickle
import lmdb
import numpy as np
import tqdm
from mmf.utils.file_io import PathManager
class LMDBConversion:
def __init__(self):
self.args = self.get_parser().parse_args()
def get_parser(self):
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--mode",
required=True,
type=str,
help="Mode can either be `convert` (for conversion of \n"
+ "features to an LMDB file) or `extract` (extract \n"
+ "raw features from a LMDB file)",
)
parser.add_argument(
"--lmdb_path", required=True, type=str, help="LMDB file path"
)
parser.add_argument(
"--features_folder", required=True, type=str, help="Features folder"
)
return parser
def convert(self):
env = lmdb.open(self.args.lmdb_path, map_size=1099511627776)
id_list = []
all_features = glob.glob(
os.path.join(self.args.features_folder, "**", "*.npy"), recursive=True
)
features = []
for feature in all_features:
if not feature.endswith("_info.npy"):
features.append(feature)
with env.begin(write=True) as txn:
for infile in tqdm.tqdm(features):
reader = np.load(infile, allow_pickle=True)
item = {}
split = os.path.relpath(infile, self.args.features_folder).split(
".npy"
)[0]
item["feature_path"] = split
key = split.encode()
id_list.append(key)
item["features"] = reader
info_file = infile.split(".npy")[0] + "_info.npy"
if not os.path.isfile(info_file):
txn.put(key, pickle.dumps(item))
continue
reader = np.load(info_file, allow_pickle=True)
item["image_height"] = reader.item().get("image_height")
item["image_width"] = reader.item().get("image_width")
item["num_boxes"] = reader.item().get("num_boxes")
item["objects"] = reader.item().get("objects")
item["cls_prob"] = reader.item().get("cls_prob", None)
item["bbox"] = reader.item().get("bbox")
txn.put(key, pickle.dumps(item))
txn.put(b"keys", pickle.dumps(id_list))
def extract(self):
os.makedirs(self.args.features_folder, exist_ok=True)
env = lmdb.open(
self.args.lmdb_path,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False,
)
with env.begin(write=False) as txn:
_image_ids = pickle.loads(txn.get(b"keys"))
for img_id in tqdm.tqdm(_image_ids):
item = pickle.loads(txn.get(img_id))
img_id = img_id.decode("utf-8")
tmp_dict = {}
tmp_dict["image_id"] = img_id
tmp_dict["bbox"] = item["bbox"]
tmp_dict["num_boxes"] = item["num_boxes"]
tmp_dict["image_height"] = item["image_height"]
tmp_dict["image_width"] = item["image_width"]
tmp_dict["objects"] = item["objects"]
tmp_dict["cls_prob"] = item["cls_prob"]
info_file_base_name = str(img_id) + "_info.npy"
file_base_name = str(img_id) + ".npy"
path = os.path.join(self.args.features_folder, file_base_name)
if PathManager.exists(path):
continue
info_path = os.path.join(self.args.features_folder, info_file_base_name)
base_path = "/".join(path.split("/")[:-1])
PathManager.mkdirs(base_path)
np.save(PathManager.open(path, "wb"), item["features"])
np.save(PathManager.open(info_path, "wb"), tmp_dict)
def execute(self):
if self.args.mode == "convert":
self.convert()
elif self.args.mode == "extract":
self.extract()
else:
raise ValueError("mode must be either `convert` or `extract` ")
if __name__ == "__main__":
lmdb_converter = LMDBConversion()
lmdb_converter.execute()
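# Illustrative usage (a sketch with placeholder paths): pack a folder of .npy
# features into an LMDB file, or unpack one back into .npy files:
#   python tools/scripts/features/lmdb_conversion.py --mode convert \
#       --features_folder ./output --lmdb_path ./features.lmdb
#   python tools/scripts/features/lmdb_conversion.py --mode extract \
#       --lmdb_path ./features.lmdb --features_folder ./extracted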
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/lmdb_conversion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import math
import os
def get_image_files(
image_dir,
exclude_list=None,
partition=None,
max_partition=None,
start_index=0,
end_index=None,
output_folder=None,
):
files = glob.glob(os.path.join(image_dir, "*.png"))
files.extend(glob.glob(os.path.join(image_dir, "*.jpg")))
files.extend(glob.glob(os.path.join(image_dir, "*.jpeg")))
files = set(files)
exclude = set()
if os.path.exists(exclude_list):
with open(exclude_list) as f:
lines = f.readlines()
for line in lines:
exclude.add(line.strip("\n").split(os.path.sep)[-1].split(".")[0])
output_ignore = set()
if output_folder is not None:
output_files = glob.glob(os.path.join(output_folder, "*.npy"))
for f in output_files:
file_name = f.split(os.path.sep)[-1].split(".")[0]
output_ignore.add(file_name)
for f in list(files):
file_name = f.split(os.path.sep)[-1].split(".")[0]
if file_name in exclude or file_name in output_ignore:
files.remove(f)
files = list(files)
files = sorted(files)
if partition is not None and max_partition is not None:
interval = math.floor(len(files) / max_partition)
if partition == max_partition:
files = files[partition * interval :]
else:
files = files[partition * interval : (partition + 1) * interval]
if end_index is None:
end_index = len(files)
files = files[start_index:end_index]
return files
def chunks(array, chunk_size):
for i in range(0, len(array), chunk_size):
yield array[i : i + chunk_size], i
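# Illustrative usage of the helpers above (hypothetical file names):
#   for batch, start_idx in chunks(["a.jpg", "b.jpg", "c.jpg"], 2):
#       ...  # yields (["a.jpg", "b.jpg"], 0) and then (["c.jpg"], 2)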
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/extraction_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple
import torch
import torch.nn.functional as F
from mmf.datasets.processors.frcnn_processor import img_tensorize, ResizeShortestEdge
class Preprocess:
def __init__(self, cfg):
self.aug = ResizeShortestEdge(
[cfg.input.min_size_test, cfg.input.min_size_test], cfg.input.max_size_test
)
self.input_format = cfg.input.format
self.size_divisibility = cfg.size_divisibility
self.pad_value = cfg.PAD_VALUE
self.max_image_size = cfg.input.max_size_test
self.device = cfg.model.device
self.pixel_std = (
torch.tensor(cfg.model.pixel_std)
.to(self.device)
.view(len(cfg.model.pixel_std), 1, 1)
)
self.pixel_mean = (
torch.tensor(cfg.model.pixel_mean)
.to(self.device)
.view(len(cfg.model.pixel_std), 1, 1)
)
self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
def pad(self, images):
max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
image_sizes = [im.shape[-2:] for im in images]
images = [
F.pad(
im,
[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
value=self.pad_value,
)
for size, im in zip(image_sizes, images)
]
return torch.stack(images), torch.tensor(image_sizes)
def __call__(self, images, single_image=False):
with torch.no_grad():
if not isinstance(images, list):
images = [images]
if single_image:
assert len(images) == 1
for i in range(len(images)):
if isinstance(images[i], torch.Tensor):
images.insert(i, images.pop(i).to(self.device).float())
elif not isinstance(images[i], torch.Tensor):
images.insert(
i,
torch.as_tensor(img_tensorize(images.pop(i)))
.to(self.device)
.float(),
)
# resize smallest edge
raw_sizes = torch.tensor([im.shape[:2] for im in images])
images = self.aug(images)
# flip rgb to bgr
for idx in range(len(images)):
images[idx] = torch.flip(images[idx], [0])
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32"))
# .permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
images = [self.normalizer(x) for x in images]
# now pad them to do the following operations
images, sizes = self.pad(images)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
scales_yx = torch.true_divide(raw_sizes, sizes)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
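# Illustrative usage of Preprocess (a sketch; assumes `frcnn_cfg` is a config
# object exposing the attributes read in __init__ above):
#   preprocess = Preprocess(frcnn_cfg)
#   images, sizes, scales_yx = preprocess(["img1.jpg", "img2.jpg"])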
def _scale_box(boxes, scale_yx):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
tensor[:, 0].clamp_(min=0, max=w)
tensor[:, 1].clamp_(min=0, max=h)
tensor[:, 2].clamp_(min=0, max=w)
tensor[:, 3].clamp_(min=0, max=h)
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/frcnn/processing_image.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2 && Huggingface Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools
import math
import os
from abc import ABCMeta, abstractmethod
from collections import namedtuple, OrderedDict
from typing import Dict, List, Tuple
import numpy as np
import torch
from tools.scripts.features.frcnn.frcnn_utils import (
cached_path,
Config,
is_remote_url,
load_checkpoint,
WEIGHTS_NAME,
)
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops import RoIPool
from torchvision.ops.boxes import batched_nms, nms
# other:
def norm_box(boxes, raw_sizes):
if not isinstance(boxes, torch.Tensor):
normalized_boxes = boxes.copy()
else:
normalized_boxes = boxes.clone()
normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1]
normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0]
return normalized_boxes
def pad_list_tensors(
list_tensors,
preds_per_image,
max_detections=None,
return_tensors=None,
padding=None,
pad_value=0,
location=None,
):
"""
location will always be cpu for np tensors
"""
if location is None:
location = "cpu"
assert return_tensors in {"pt", "np", None}
assert padding in {"max_detections", "max_batch", None}
new = []
if padding is None:
if return_tensors is None:
return list_tensors
elif return_tensors == "pt":
if not isinstance(list_tensors, torch.Tensor):
return torch.stack(list_tensors).to(location)
else:
return list_tensors.to(location)
else:
if not isinstance(list_tensors, list):
return np.array(list_tensors.to(location))
else:
return list_tensors.to(location)
if padding == "max_detections":
assert max_detections is not None, "specify max number of detections per batch"
elif padding == "max_batch":
max_detections = max(preds_per_image)
for i in range(len(list_tensors)):
too_small = False
tensor_i = list_tensors.pop(0)
if tensor_i.ndim < 2:
too_small = True
tensor_i = tensor_i.unsqueeze(-1)
assert isinstance(tensor_i, torch.Tensor)
tensor_i = F.pad(
input=tensor_i,
pad=(0, 0, 0, max_detections - preds_per_image[i]),
mode="constant",
value=pad_value,
)
if too_small:
tensor_i = tensor_i.squeeze(-1)
if return_tensors is None:
if location == "cpu":
tensor_i = tensor_i.cpu()
tensor_i = tensor_i.tolist()
if return_tensors == "np":
if location == "cpu":
tensor_i = tensor_i.cpu()
tensor_i = tensor_i.numpy()
else:
if location == "cpu":
tensor_i = tensor_i.cpu()
new.append(tensor_i)
if return_tensors == "np":
return np.stack(new, axis=0)
elif return_tensors == "pt" and not isinstance(new, torch.Tensor):
return torch.stack(new, dim=0)
else:
return list_tensors
def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd):
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // 4
# Convert to Boxes to use the `clip` function ...
boxes = boxes.reshape(-1, 4)
_clip_box(boxes, image_shape)
boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4
# Select max scores
max_scores, max_classes = scores.max(1) # R x C --> R
num_objs = boxes.size(0)
boxes = boxes.view(-1, 4)
idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes
max_boxes = boxes[idxs] # Select max boxes according to the max scores.
# Apply NMS
keep = nms(max_boxes, max_scores, nms_thresh)
keep = keep[:maxd]
if keep.shape[-1] >= mind and keep.shape[-1] <= maxd:
max_boxes, max_scores = max_boxes[keep], max_scores[keep]
classes = max_classes[keep]
return max_boxes, max_scores, classes, keep
else:
return None
# Helper Functions
def _clip_box(tensor, box_size: Tuple[int, int]):
assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
tensor[:, 0].clamp_(min=0, max=w)
tensor[:, 1].clamp_(min=0, max=h)
tensor[:, 2].clamp_(min=0, max=w)
tensor[:, 3].clamp_(min=0, max=h)
def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor:
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def get_norm(norm, out_channels):
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
"nnSyncBN": nn.SyncBatchNorm, # keep for debugging
"": lambda x: x,
}[norm]
return norm(out_channels)
def _create_grid_offsets(size: List[int], stride: int, offset: float, device):
grid_height, grid_width = size
shifts_x = torch.arange(
offset * stride,
grid_width * stride,
step=stride,
dtype=torch.float32,
device=device,
)
shifts_y = torch.arange(
offset * stride,
grid_height * stride,
step=stride,
dtype=torch.float32,
device=device,
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
return shift_x, shift_y
def build_backbone(cfg):
input_shape = ShapeSpec(channels=len(cfg.model.pixel_mean))
norm = cfg.resnets.norm
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.resnets.stem_out_channels,
norm=norm,
caffe_maxpool=cfg.model.max_pool,
)
freeze_at = cfg.backbone.freeze_at
if freeze_at >= 1:
for p in stem.parameters():
p.requires_grad = False
out_features = cfg.resnets.out_features
depth = cfg.resnets.depth
num_groups = cfg.resnets.num_groups
width_per_group = cfg.resnets.width_per_group
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.resnets.stem_out_channels
out_channels = cfg.resnets.res2_out_channels
stride_in_1x1 = cfg.resnets.stride_in_1x1
res5_dilation = cfg.resnets.res5_dilation
assert res5_dilation in {1, 2}, f"res5_dilation cannot be {res5_dilation}."
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[
depth
]
stages = []
out_stage_idx = [
{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features
]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"first_stride": first_stride,
"in_channels": in_channels,
"bottleneck_channels": bottleneck_channels,
"out_channels": out_channels,
"num_groups": num_groups,
"norm": norm,
"stride_in_1x1": stride_in_1x1,
"dilation": dilation,
}
stage_kargs["block_class"] = BottleneckBlock
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
if freeze_at >= stage_idx:
for block in blocks:
block.freeze()
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features)
def find_top_rpn_proposals(
proposals,
pred_objectness_logits,
images,
image_sizes,
nms_thresh,
pre_nms_topk,
post_nms_topk,
min_box_side_len,
training,
):
"""Args:
proposals (list[Tensor]): (L, N, Hi*Wi*A, 4).
pred_objectness_logits: tensors of length L.
nms_thresh (float): IoU threshold to use for NMS
pre_nms_topk (int): before nms
post_nms_topk (int): after nms
min_box_side_len (float): minimum proposal box side
training (bool): True if proposals are to be used in training,
Returns:
results (List[Dict]): stores post_nms_topk object proposals for image i.
"""
num_images = len(images)
device = proposals[0].device
# 1. Select top-k anchor for every level and every image
topk_scores = [] # #lvl Tensor, each of shape N x topk
topk_proposals = []
level_ids = [] # #lvl Tensor, each of shape (topk,)
batch_idx = torch.arange(num_images, device=device)
for level_id, proposals_i, logits_i in zip(
itertools.count(), proposals, pred_objectness_logits
):
Hi_Wi_A = logits_i.shape[1]
num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
# sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
# topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
logits_i, idx = logits_i.sort(descending=True, dim=1)
topk_scores_i = logits_i[batch_idx, :num_proposals_i]
topk_idx = idx[batch_idx, :num_proposals_i]
# each is N x topk
topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4
topk_proposals.append(topk_proposals_i)
topk_scores.append(topk_scores_i)
level_ids.append(
torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)
)
# 2. Concat all levels together
topk_scores = torch.cat(topk_scores, dim=1)
topk_proposals = torch.cat(topk_proposals, dim=1)
level_ids = torch.cat(level_ids, dim=0)
# if I change to batched_nms, I wonder if this will make a difference
# 3. For each image, run a per-level NMS, and choose topk results.
results = []
for n, image_size in enumerate(image_sizes):
boxes = topk_proposals[n]
scores_per_img = topk_scores[n]
# I will have to take a look at the boxes clip method
_clip_box(boxes, image_size)
# filter empty boxes
keep = _nonempty_boxes(boxes, threshold=min_box_side_len)
lvl = level_ids
if keep.sum().item() != len(boxes):
boxes, scores_per_img, lvl = (
boxes[keep],
scores_per_img[keep],
level_ids[keep],
)
keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh)
keep = keep[:post_nms_topk]
res = (boxes[keep], scores_per_img[keep])
results.append(res)
# I wonder if it would be possible for me to pad all these things.
return results
def subsample_labels(labels, num_samples, positive_fraction, bg_label):
"""
Returns:
pos_idx, neg_idx (Tensor):
1D vector of indices. The total length of both is `num_samples` or fewer.
"""
positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1)
negative = torch.nonzero(labels == bg_label).squeeze(1)
num_pos = int(num_samples * positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = num_samples - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx = positive[perm1]
neg_idx = negative[perm2]
return pos_idx, neg_idx
def add_ground_truth_to_proposals(gt_boxes, proposals):
raise NotImplementedError()
def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
raise NotImplementedError()
def _fmt_box_list(box_tensor, batch_index: int):
repeated_index = torch.full(
(len(box_tensor), 1),
batch_index,
dtype=box_tensor.dtype,
device=box_tensor.device,
)
return torch.cat((repeated_index, box_tensor), dim=1)
def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]):
pooler_fmt_boxes = torch.cat(
[_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], dim=0
)
return pooler_fmt_boxes
def assign_boxes_to_levels(
box_lists: List[torch.Tensor],
min_level: int,
max_level: int,
canonical_box_size: int,
canonical_level: int,
):
box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists]))
# Eqn.(1) in FPN paper
level_assignments = torch.floor(
canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)
)
# clamp level to (min, max), in case the box size is too large or too small
# for the available feature maps
level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
return level_assignments.to(torch.int64) - min_level
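# Worked example of the Eqn.(1) mapping above (illustrative values): with
# canonical_box_size=224 and canonical_level=4, a box of area 224*224 maps to
# floor(4 + log2(1)) = 4 and a box of area 112*112 maps to floor(4 + log2(0.5)) = 3,
# before clamping to [min_level, max_level] and subtracting min_level.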
# Helper Classes
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
def __new__(cls, *, channels=None, height=None, width=None, stride=None):
return super().__new__(cls, channels, height, width, stride)
class Box2BoxTransform:
"""
This R-CNN transformation scales the box's width and height
by exp(dw), exp(dh) and shifts a box's center by the offset
(dx * width, dy * height).
"""
def __init__(
self, weights: Tuple[float, float, float, float], scale_clamp: float = None
):
"""
Args:
weights (4-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
such that the deltas have unit variance; now they are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
if scale_clamp is not None:
self.scale_clamp = scale_clamp
else:
"""
Value for clamping large dw and dh predictions.
The heuristic is that we clamp such that dw and dh are no larger
than what would transform a 16px box into a 1000px box
(based on a small anchor, 16px, and a typical image size, 1000px).
"""
self.scale_clamp = math.log(1000.0 / 16)
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx, dy, dw, dh) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): source boxes, e.g., object proposals
target_boxes (Tensor): target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
target_widths = target_boxes[:, 2] - target_boxes[:, 0]
target_heights = target_boxes[:, 3] - target_boxes[:, 1]
target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
wx, wy, ww, wh = self.weights
dx = wx * (target_ctr_x - src_ctr_x) / src_widths
dy = wy * (target_ctr_y - src_ctr_y) / src_heights
dw = ww * torch.log(target_widths / src_widths)
dh = wh * torch.log(target_heights / src_heights)
deltas = torch.stack((dx, dy, dw, dh), dim=1)
assert (
(src_widths > 0).all().item()
), "Input boxes to Box2BoxTransform are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
deltas[i] represents k potentially different class-specific
box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
"""
boxes = boxes.to(deltas.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
return pred_boxes
class Matcher:
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be matched to zero or more predicted elements.
The matching is determined by the MxN match_quality_matrix, that characterizes
how well each (ground-truth, prediction)-pair match each other. For example,
if the elements are boxes, this matrix may contain box intersection-over-union
overlap values.
The matcher returns (a) a vector of length N containing the index of the
ground-truth element m in [0, M) that matches to prediction n in [0, N).
(b) a vector of length N containing the labels for each prediction.
"""
def __init__(
self,
thresholds: List[float],
labels: List[int],
allow_low_quality_matches: bool = False,
):
"""
Args:
thresholds (list): a list of thresholds used to stratify predictions
into levels.
labels (list): a list of values to label predictions belonging at
each level. A label can be one of {-1, 0, 1} signifying
{ignore, negative class, positive class}, respectively.
allow_low_quality_matches (bool): if True, produce additional matches or
predictions with maximum match quality lower than high_threshold.
For example, with thresholds = [0.3, 0.5] and labels = [0, -1, 1], all predictions
with iou < 0.3 will be marked with 0 and
thus will be considered as false positives while training. All
predictions with 0.3 <= iou < 0.5 will be marked with -1 and
thus will be ignored. All predictions with 0.5 <= iou will be marked
with 1 and thus will be considered as true positives.
"""
thresholds = thresholds[:]
assert thresholds[0] > 0
thresholds.insert(0, -float("inf"))
thresholds.append(float("inf"))
assert all(
[low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])]
)
assert all([label_i in [-1, 0, 1] for label_i in labels])
assert len(labels) == len(thresholds) - 1
self.thresholds = thresholds
self.labels = labels
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted
elements. All elements must be >= 0 (due to the use of `torch.nonzero`
for selecting indices in :meth:`set_low_quality_matches_`).
Returns:
matches (Tensor[int64]): a vector of length N, where matches[i]
is a matched ground-truth index in [0, M)
match_labels (Tensor[int8]): a vector of length N, where pred_labels[i]
indicates true or false positive or ignored
"""
assert match_quality_matrix.dim() == 2
if match_quality_matrix.numel() == 0:
default_matches = match_quality_matrix.new_full(
(match_quality_matrix.size(1),), 0, dtype=torch.int64
)
# When no gt boxes exist, we define IOU = 0 and therefore set labels
# to `self.labels[0]`, which usually defaults to background class 0
# To choose to ignore instead,
# can make labels=[-1,0,-1,1] + set appropriate thresholds
default_match_labels = match_quality_matrix.new_full(
(match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
)
return default_matches, default_match_labels
assert torch.all(match_quality_matrix >= 0)
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
low_high = (matched_vals >= low) & (matched_vals < high)
match_labels[low_high] = l
if self.allow_low_quality_matches:
self.set_low_quality_matches_(match_labels, match_quality_matrix)
return matches, match_labels
def set_low_quality_matches_(self, match_labels, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth G find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth G.
This function implements the RPN assignment case (i)
in Sec. 3.1.2 of Faster R-CNN.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find the highest quality match available, even if it is low, including ties.
# Note that the match qualities must be positive due to the use of
# `torch.nonzero`.
of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None]
if of_quality_inds.dim() == 0:
(_, pred_inds_with_highest_quality) = (
of_quality_inds.unsqueeze(0).nonzero().unbind(1)
)
else:
(_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1)
match_labels[pred_inds_with_highest_quality] = 1
class RPNOutputs:
def __init__(
self,
box2box_transform,
anchor_matcher,
batch_size_per_image,
positive_fraction,
images,
pred_objectness_logits,
pred_anchor_deltas,
anchors,
boundary_threshold=0,
gt_boxes=None,
smooth_l1_beta=0.0,
):
"""
Args:
box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform`
instance for anchor-proposal transformations.
anchor_matcher (Matcher): :class:`Matcher` instance for matching
anchors to ground-truth boxes; used to determine training labels.
batch_size_per_image (int): number of proposals to sample when training
positive_fraction (float): target fraction of sampled proposals that
should be positive
images (ImageList): :class:`ImageList` instance representing N input images
pred_objectness_logits (list[Tensor]): A list of L elements. Element i
is a tensor of shape (N, A, Hi, Wi)
pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a
tensor of shape (N, A*4, Hi, Wi)
anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l)
stores anchor array for feature map l
boundary_threshold (int): if >= 0, then anchors that extend beyond the image
boundary by more than boundary_thresh are not used in training.
gt_boxes (list[Boxes], optional): A list of N elements.
smooth_l1_beta (float): The transition point between L1 and L2 loss. When
set to 0, the loss becomes L1. When +inf, it is ignored.
"""
self.box2box_transform = box2box_transform
self.anchor_matcher = anchor_matcher
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
self.pred_objectness_logits = pred_objectness_logits
self.pred_anchor_deltas = pred_anchor_deltas
self.anchors = anchors
self.gt_boxes = gt_boxes
self.num_feature_maps = len(pred_objectness_logits)
self.num_images = len(images)
self.boundary_threshold = boundary_threshold
self.smooth_l1_beta = smooth_l1_beta
def _get_ground_truth(self):
raise NotImplementedError()
def predict_proposals(self):
# pred_anchor_deltas: (L, N, ? Hi, Wi)
# anchors:(N, L, -1, B)
# here we loop over specific feature map, NOT images
proposals = []
anchors = self.anchors.transpose(0, 1)
for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas):
B = anchors_i.size(-1)
N, _, Hi, Wi = pred_anchor_deltas_i.shape
anchors_i = anchors_i.flatten(start_dim=0, end_dim=1)
pred_anchor_deltas_i = (
pred_anchor_deltas_i.view(N, -1, B, Hi, Wi)
.permute(0, 3, 4, 1, 2)
.reshape(-1, B)
)
proposals_i = self.box2box_transform.apply_deltas(
pred_anchor_deltas_i, anchors_i
)
# Append feature map proposals with shape (N, Hi*Wi*A, B)
proposals.append(proposals_i.view(N, -1, B))
proposals = torch.stack(proposals)
return proposals
def predict_objectness_logits(self):
"""
Returns:
pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A).
"""
pred_objectness_logits = [
# Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
score.permute(0, 2, 3, 1).reshape(self.num_images, -1)
for score in self.pred_objectness_logits
]
return pred_objectness_logits
# Main Classes
class Conv2d(torch.nn.Conv2d):
def __init__(self, *args, **kwargs):
norm = kwargs.pop("norm", None)
activation = kwargs.pop("activation", None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
if x.numel() == 0 and self.training:
assert not isinstance(self.norm, torch.nn.SyncBatchNorm)
if x.numel() == 0:
assert not isinstance(self.norm, torch.nn.GroupNorm)
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride,
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
empty = _NewEmptyTensorOp.apply(x, output_shape)
if self.training:
_dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
return empty + _dummy
else:
return empty
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled P6
feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p5"
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
class LastLevelP6P7(nn.Module):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7
from C5 feature.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.num_levels = 2
self.in_feature = "res5"
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
def forward(self, c5):
p6 = self.p6(c5)
p7 = self.p7(F.relu(p6))
return [p6, p7]
class BasicStem(nn.Module):
def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False):
super().__init__()
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
self.caffe_maxpool = caffe_maxpool
# use pad 1 instead of pad zero
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
if self.caffe_maxpool:
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True)
else:
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
@property
def out_channels(self):
return self.conv1.out_channels
@property
def stride(self):
return 4 # = stride 2 conv -> stride 2 max pool
class ResNetBlockBase(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def freeze(self):
for p in self.parameters():
p.requires_grad = False
return self
class BottleneckBlock(ResNetBlockBase):
def __init__(
self,
in_channels,
out_channels,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
):
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
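# Illustrative sketch (assumed shapes, not part of the original file): a single
# bottleneck block maps (N, in_channels, H, W) to (N, out_channels, H/stride,
# W/stride), for example:
#     block = BottleneckBlock(
#         in_channels=256, out_channels=512, bottleneck_channels=128, stride=2
#     )
#     y = block(torch.randn(1, 256, 56, 56))  # y.shape == (1, 512, 28, 28)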
class Backbone(nn.Module, metaclass=ABCMeta):
def __init__(self):
super().__init__()
@abstractmethod
def forward(self):
pass
@property
def size_divisibility(self):
"""
Some backbones require the input height and width to be divisible
by a specific integer. This is
typically true for encoder / decoder type networks with lateral
connection (e.g., FPN) for which feature maps need to match
dimension in the "bottom up" and "top down" paths. Set to 0 if
no specific input size divisibility is required.
"""
return 0
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name],
stride=self._out_feature_strides[name],
)
for name in self._out_features
}
@property
def out_features(self):
"""deprecated"""
return self._out_features
@property
def out_feature_strides(self):
"""deprecated"""
return {f: self._out_feature_strides[f] for f in self._out_features}
@property
def out_feature_channels(self):
"""deprecated"""
return {f: self._out_feature_channels[f] for f in self._out_features}
class ResNet(Backbone):
def __init__(self, stem, stages, num_classes=None, out_features=None):
"""
Args:
stem (nn.Module): a stem module
stages (list[list[ResNetBlock]]): several (typically 4) stages,
each contains multiple :class:`ResNetBlockBase`.
num_classes (None or int): if None, will not perform classification.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. Can be anything in:
"stem", "linear", or "res2" ... If None, will return the output
of the last layer.
"""
super().__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stages_and_names = []
for i, blocks in enumerate(stages):
for block in blocks:
assert isinstance(block, ResNetBlockBase), block
curr_channels = block.out_channels
stage = nn.Sequential(*blocks)
name = "res" + str(i + 2)
self.add_module(name, stage)
self.stages_and_names.append((stage, name))
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = blocks[-1].out_channels
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with std of 0.01."
            nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(
", ".join(children)
)
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for stage, name in self.stages_and_names:
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name],
stride=self._out_feature_strides[name],
)
for name in self._out_features
}
@staticmethod
def make_stage(
block_class,
num_blocks,
first_stride=None,
*,
in_channels,
out_channels,
**kwargs,
):
"""
Usually, layers that produce the same feature map spatial size
are defined as one "stage".
Under such definition, stride_per_block[1:] should all be 1.
"""
if first_stride is not None:
assert "stride" not in kwargs and "stride_per_block" not in kwargs
kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1)
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the "
f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert (
newk not in kwargs
), f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(
block_class(
in_channels=in_channels, out_channels=out_channels, **curr_kwargs
)
)
in_channels = out_channels
return blocks
class ROIPooler(nn.Module):
"""
Region of interest feature map pooler that supports pooling from one or more
feature maps.
"""
def __init__(
self,
output_size,
scales,
sampling_ratio,
canonical_box_size=224,
canonical_level=4,
):
super().__init__()
# assumption that stride is a power of 2.
min_level = -math.log2(scales[0])
max_level = -math.log2(scales[-1])
        # sanity checks: scales must form a contiguous power-of-2 pyramid
assert math.isclose(min_level, int(min_level)) and math.isclose(
max_level, int(max_level)
)
assert len(scales) == max_level - min_level + 1, "not pyramid"
assert 0 < min_level and min_level <= max_level
if isinstance(output_size, int):
output_size = (output_size, output_size)
assert (
len(output_size) == 2
and isinstance(output_size[0], int)
and isinstance(output_size[1], int)
)
if len(scales) > 1:
assert min_level <= canonical_level and canonical_level <= max_level
assert canonical_box_size > 0
self.output_size = output_size
self.min_level = int(min_level)
self.max_level = int(max_level)
self.level_poolers = nn.ModuleList(
RoIPool(output_size, spatial_scale=scale) for scale in scales
)
self.canonical_level = canonical_level
self.canonical_box_size = canonical_box_size
def forward(self, feature_maps, boxes):
"""
Args:
feature_maps: List[torch.Tensor(N,C,W,H)]
box_lists: list[torch.Tensor])
Returns:
A tensor of shape(N*B, Channels, output_size, output_size)
"""
x = [v for v in feature_maps.values()]
num_level_assignments = len(self.level_poolers)
assert len(x) == num_level_assignments and len(boxes) == x[0].size(0)
pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes)
if num_level_assignments == 1:
return self.level_poolers[0](x[0], pooler_fmt_boxes)
level_assignments = assign_boxes_to_levels(
boxes,
self.min_level,
self.max_level,
self.canonical_box_size,
self.canonical_level,
)
num_boxes = len(pooler_fmt_boxes)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
output = torch.zeros(
(num_boxes, num_channels, output_size, output_size),
dtype=dtype,
device=device,
)
for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
inds = torch.nonzero(level_assignments == level).squeeze(1)
pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
output[inds] = pooler(x_level, pooler_fmt_boxes_level)
return output
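# Illustrative sketch (assumed values, not part of the original file): a
# single-level pooler, as Res5ROIHeads builds below for a stride-16 feature
# map, crops every box to a fixed 7x7 grid:
#     pooler = ROIPooler(output_size=7, scales=(1.0 / 16,), sampling_ratio=0)
#     feats = {"res4": torch.randn(1, 1024, 38, 50)}
#     boxes = [torch.tensor([[0.0, 0.0, 100.0, 100.0], [10.0, 20.0, 200.0, 300.0]])]
#     crops = pooler(feats, boxes)  # crops.shape == (2, 1024, 7, 7)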
class ROIOutputs:
def __init__(self, cfg, training=False):
self.smooth_l1_beta = cfg.roi_box_head.smooth_l1_beta
self.box2box_transform = Box2BoxTransform(
weights=cfg.roi_box_head.bbox_reg_weights
)
self.training = training
self.score_thresh = cfg.roi_heads.score_thresh_test
self.min_detections = cfg.min_detections
self.max_detections = cfg.max_detections
nms_thresh = list(cfg.roi_heads.nms_thresh_test)
if not isinstance(nms_thresh, list):
nms_thresh = [nms_thresh]
self.nms_thresh = nms_thresh
def _predict_boxes(self, proposals, box_deltas, preds_per_image):
num_pred = box_deltas.size(0)
B = proposals[0].size(-1)
K = box_deltas.size(-1) // B
box_deltas = box_deltas.view(num_pred * K, B)
proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B)
proposals = proposals.reshape(-1, B)
boxes = self.box2box_transform.apply_deltas(box_deltas, proposals)
return boxes.view(num_pred, K * B).split(preds_per_image, dim=0)
def _predict_objs(self, obj_logits, preds_per_image):
probs = F.softmax(obj_logits, dim=-1)
probs = probs.split(preds_per_image, dim=0)
return probs
def _predict_attrs(self, attr_logits, preds_per_image):
attr_logits = attr_logits[..., :-1].softmax(-1)
attr_probs, attrs = attr_logits.max(-1)
return (
attr_probs.split(preds_per_image, dim=0),
attrs.split(preds_per_image, dim=0),
)
@torch.no_grad()
def inference(
self,
obj_logits,
attr_logits,
box_deltas,
pred_boxes,
features,
sizes,
scales=None,
):
        # pred_boxes determine how many predictions belong to each image
preds_per_image = [p.size(0) for p in pred_boxes]
boxes_all = self._predict_boxes(pred_boxes, box_deltas, preds_per_image)
obj_scores_all = self._predict_objs(
obj_logits, preds_per_image
) # list of length N
attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image)
features = features.split(preds_per_image, dim=0)
        # apply thresholding and NMS independently for each image in the batch
final_results = []
zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes)
for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped):
for nms_t in self.nms_thresh:
outputs = do_nms(
boxes,
obj_scores,
size,
self.score_thresh,
nms_t,
self.min_detections,
self.max_detections,
)
if outputs is not None:
max_boxes, max_scores, classes, ids = outputs
break
if scales is not None:
scale_yx = scales[i]
max_boxes[:, 0::2] *= scale_yx[1]
max_boxes[:, 1::2] *= scale_yx[0]
final_results.append(
(
max_boxes,
classes,
max_scores,
attrs[ids],
attr_probs[ids],
features[i][ids],
)
)
boxes, classes, class_probs, attrs, attr_probs, roi_features = map(
list, zip(*final_results)
)
return boxes, classes, class_probs, attrs, attr_probs, roi_features
def training(
self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes
):
pass
def __call__(
self,
obj_logits,
attr_logits,
box_deltas,
pred_boxes,
features,
sizes,
scales=None,
):
if self.training:
raise NotImplementedError()
return self.inference(
obj_logits,
attr_logits,
box_deltas,
pred_boxes,
features,
sizes,
scales=scales,
)
class Res5ROIHeads(nn.Module):
"""
ROIHeads perform all per-region computation in an R-CNN.
It contains logic of cropping the regions, extract per-region features
(by the res-5 block in this case), and make per-region predictions.
"""
def __init__(self, cfg, input_shape):
super().__init__()
self.batch_size_per_image = cfg.rpn.batch_size_per_image
self.positive_sample_fraction = cfg.roi_heads.positive_fraction
self.in_features = cfg.roi_heads.in_features
self.num_classes = cfg.roi_heads.num_classes
self.proposal_append_gt = cfg.roi_heads.proposal_append_gt
self.feature_strides = {k: v.stride for k, v in input_shape.items()}
self.feature_channels = {k: v.channels for k, v in input_shape.items()}
self.cls_agnostic_bbox_reg = cfg.roi_box_head.cls_agnostic_bbox_reg
self.stage_channel_factor = 2**3 # res5 is 8x res2
self.out_channels = cfg.resnets.res2_out_channels * self.stage_channel_factor
# self.proposal_matcher = Matcher(
# cfg.ROI_HEADS.IOU_THRESHOLDS,
# cfg.ROI_HEADS.IOU_LABELS,
# allow_low_quality_matches=False,
# )
pooler_resolution = cfg.roi_box_head.pooler_resolution
pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],)
sampling_ratio = cfg.roi_box_head.pooler_sampling_ratio
res5_halve = cfg.roi_box_head.res5halve
use_attr = cfg.roi_box_head.attr
num_attrs = cfg.roi_box_head.num_attrs
self.pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
)
self.res5 = self._build_res5_block(cfg)
if not res5_halve:
"""
Modifications for VG in RoI heads:
1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1
2. Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2)
"""
self.res5[0].conv1.stride = (1, 1)
self.res5[0].shortcut.stride = (1, 1)
for i in range(3):
self.res5[i].conv2.padding = (2, 2)
self.res5[i].conv2.dilation = (2, 2)
self.box_predictor = FastRCNNOutputLayers(
self.out_channels,
self.num_classes,
self.cls_agnostic_bbox_reg,
use_attr=use_attr,
num_attrs=num_attrs,
)
def _build_res5_block(self, cfg):
stage_channel_factor = self.stage_channel_factor # res5 is 8x res2
num_groups = cfg.resnets.num_groups
width_per_group = cfg.resnets.width_per_group
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
out_channels = self.out_channels
stride_in_1x1 = cfg.resnets.stride_in_1x1
norm = cfg.resnets.norm
blocks = ResNet.make_stage(
BottleneckBlock,
3,
first_stride=2,
in_channels=out_channels // 2,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
norm=norm,
stride_in_1x1=stride_in_1x1,
)
return nn.Sequential(*blocks)
def _shared_roi_transform(self, features, boxes):
x = self.pooler(features, boxes)
return self.res5(x)
def forward(self, features, proposal_boxes, gt_boxes=None):
if self.training:
"""
see https://github.com/airsplay/py-bottom-up-attention/\
blob/master/detectron2/modeling/roi_heads/roi_heads.py
"""
raise NotImplementedError()
assert not proposal_boxes[0].requires_grad
box_features = self._shared_roi_transform(features, proposal_boxes)
feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(
feature_pooled
)
return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled
class AnchorGenerator(nn.Module):
"""
For a set of image sizes and feature maps, computes a set of anchors.
"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
sizes = list(cfg.anchor_generator.sizes)
aspect_ratios = list(cfg.anchor_generator.aspect_ratios)
self.strides = [x.stride for x in input_shape]
self.offset = cfg.anchor_generator.offset
assert 0.0 <= self.offset < 1.0, self.offset
"""
sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i
1. given in absolute lengths in units of the input image;
2. they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]])
strides (list[int]): stride of each input feature.
"""
self.num_features = len(self.strides)
self.cell_anchors = nn.ParameterList(
self._calculate_anchors(sizes, aspect_ratios)
)
        self._spatial_feat_dim = 4
def _calculate_anchors(self, sizes, aspect_ratios):
# If one size (or aspect ratio) is specified and there are multiple feature
# maps, then we "broadcast" anchors of that single size (or aspect ratio)
if len(sizes) == 1:
sizes *= self.num_features
if len(aspect_ratios) == 1:
aspect_ratios *= self.num_features
assert self.num_features == len(sizes)
assert self.num_features == len(aspect_ratios)
cell_anchors = [
self.generate_cell_anchors(s, a).float()
for s, a in zip(sizes, aspect_ratios)
]
return cell_anchors
@property
def box_dim(self):
        return self._spatial_feat_dim
@property
def num_cell_anchors(self):
"""
Returns:
list[int]: Each int is the number of anchors at every pixel
location, on that feature map.
"""
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def grid_anchors(self, grid_sizes):
anchors = []
for size, stride, base_anchors in zip(
grid_sizes, self.strides, self.cell_anchors
):
shift_x, shift_y = _create_grid_offsets(
size, stride, self.offset, base_anchors.device
)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append(
(shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
)
return anchors
def generate_cell_anchors(
self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""
anchors are continuous geometric rectangles
centered on one feature map point sample.
We can later build the set of anchors
for the entire feature map by tiling these tensors
"""
anchors = []
for size in sizes:
area = size**2.0
for aspect_ratio in aspect_ratios:
w = math.sqrt(area / aspect_ratio)
h = aspect_ratio * w
x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
anchors.append([x0, y0, x1, y1])
return nn.Parameter(torch.Tensor(anchors))
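    # Worked example (illustrative, not part of the original file): for
    # size=32 and aspect_ratio=0.5 the area is 32 ** 2 = 1024, so
    #     w = sqrt(1024 / 0.5) ~= 45.25,  h = 0.5 * w ~= 22.63
    # giving the centered anchor [-22.63, -11.31, 22.63, 11.31];
    # aspect_ratio=2 yields the same box with w and h swapped.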
def forward(self, features):
"""
Args:
features List[torch.Tensor]: list of feature maps on which to
generate anchors.
Returns:
torch.Tensor: a list of #image elements.
"""
num_images = features[0].size(0)
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps)
return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(
num_images, dim=0
)
class RPNHead(nn.Module):
"""
RPN classification and regression heads. Uses a 3x3 conv to produce a shared
hidden state from which one 1x1 conv predicts objectness logits for each anchor
and a second 1x1 conv predicts bounding-box deltas specifying how to deform
each anchor into an object proposal.
"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super().__init__()
# Standard RPN is shared across levels:
in_channels = [s.channels for s in input_shape]
assert len(set(in_channels)) == 1, "Each level must have the same channel!"
in_channels = in_channels[0]
anchor_generator = AnchorGenerator(cfg, input_shape)
num_cell_anchors = anchor_generator.num_cell_anchors
box_dim = anchor_generator.box_dim
assert (
len(set(num_cell_anchors)) == 1
), "Each level must have the same number of cell anchors"
num_cell_anchors = num_cell_anchors[0]
if cfg.proposal_generator.hidden_channels == -1:
hid_channels = in_channels
else:
hid_channels = cfg.proposal_generator.hidden_channels
# Modifications for VG in RPN (modeling/proposal_generator/rpn.py)
        # Use hidden dim instead of the same dim as Res4 (in_channels)
# 3x3 conv for the hidden representation
self.conv = nn.Conv2d(
in_channels, hid_channels, kernel_size=3, stride=1, padding=1
)
# 1x1 conv for predicting objectness logits
self.objectness_logits = nn.Conv2d(
hid_channels, num_cell_anchors, kernel_size=1, stride=1
)
# 1x1 conv for predicting box2box transform deltas
self.anchor_deltas = nn.Conv2d(
hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1
)
for layer in [self.conv, self.objectness_logits, self.anchor_deltas]:
nn.init.normal_(layer.weight, std=0.01)
nn.init.constant_(layer.bias, 0)
def forward(self, features):
"""
Args:
features (list[Tensor]): list of feature maps
"""
pred_objectness_logits = []
pred_anchor_deltas = []
for x in features:
t = F.relu(self.conv(x))
pred_objectness_logits.append(self.objectness_logits(t))
pred_anchor_deltas.append(self.anchor_deltas(t))
return pred_objectness_logits, pred_anchor_deltas
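# Shape walkthrough (illustrative, not part of the original file): for one
# feature map of shape (N, C, Hi, Wi) and A = num_cell_anchors, RPNHead returns
#     pred_objectness_logits[i]: (N, A, Hi, Wi)
#     pred_anchor_deltas[i]:     (N, A * 4, Hi, Wi)
# which RPNOutputs reshapes to (N, Hi*Wi*A) scores and (N*Hi*Wi*A, 4) deltas
# before decoding proposals with Box2BoxTransform.apply_deltas.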
class RPN(nn.Module):
"""
Region Proposal Network, introduced by the Faster R-CNN paper.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self.min_box_side_len = cfg.proposal_generator.min_size
self.in_features = cfg.rpn.in_features
self.nms_thresh = cfg.rpn.nms_thresh
self.batch_size_per_image = cfg.rpn.batch_size_per_image
self.positive_fraction = cfg.rpn.positive_fraction
self.smooth_l1_beta = cfg.rpn.smooth_l1_beta
self.loss_weight = cfg.rpn.loss_weight
self.pre_nms_topk = {
True: cfg.rpn.pre_nms_topk_train,
False: cfg.rpn.pre_nms_topk_test,
}
self.post_nms_topk = {
True: cfg.rpn.post_nms_topk_train,
False: cfg.rpn.post_nms_topk_test,
}
self.boundary_threshold = cfg.rpn.boundary_thresh
self.anchor_generator = AnchorGenerator(
cfg, [input_shape[f] for f in self.in_features]
)
self.box2box_transform = Box2BoxTransform(weights=cfg.rpn.bbox_reg_weights)
self.anchor_matcher = Matcher(
cfg.rpn.iou_thresholds, cfg.rpn.iou_labels, allow_low_quality_matches=True
)
self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features])
def training(self, images, image_shapes, features, gt_boxes):
pass
def inference(self, outputs, images, image_shapes, features, gt_boxes=None):
outputs = find_top_rpn_proposals(
outputs.predict_proposals(),
outputs.predict_objectness_logits(),
images,
image_shapes,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_side_len,
self.training,
)
results = []
for img in outputs:
im_boxes, img_box_logits = img
img_box_logits, inds = img_box_logits.sort(descending=True)
im_boxes = im_boxes[inds]
results.append((im_boxes, img_box_logits))
(proposal_boxes, logits) = tuple(map(list, zip(*results)))
return proposal_boxes, logits
def forward(self, images, image_shapes, features, gt_boxes=None):
"""
Args:
images (torch.Tensor): input images of length `N`
features (dict[str: Tensor])
gt_instances
"""
# features is dict, key = block level, v = feature_map
features = [features[f] for f in self.in_features]
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
anchors = self.anchor_generator(features)
outputs = RPNOutputs(
self.box2box_transform,
self.anchor_matcher,
self.batch_size_per_image,
self.positive_fraction,
images,
pred_objectness_logits,
pred_anchor_deltas,
anchors,
self.boundary_threshold,
gt_boxes,
self.smooth_l1_beta,
)
# For RPN-only models, the proposals are the final output
if self.training:
raise NotImplementedError()
return self.training(outputs, images, image_shapes, features, gt_boxes)
else:
return self.inference(outputs, images, image_shapes, features, gt_boxes)
class FastRCNNOutputLayers(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
"""
def __init__(
self,
input_size,
num_classes,
cls_agnostic_bbox_reg,
box_dim=4,
use_attr=False,
num_attrs=-1,
):
"""
Args:
input_size (int): channels, or (channels, height, width)
num_classes (int)
cls_agnostic_bbox_reg (bool)
box_dim (int)
"""
super().__init__()
if not isinstance(input_size, int):
input_size = np.prod(input_size)
        # (+ 1 for the background class)
self.cls_score = nn.Linear(input_size, num_classes + 1)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
self.use_attr = use_attr
if use_attr:
"""
Modifications for VG in RoI heads
Embedding: {num_classes + 1} --> {input_size // 8}
Linear: {input_size + input_size // 8} --> {input_size // 4}
Linear: {input_size // 4} --> {num_attrs + 1}
"""
self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8)
self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4)
self.attr_score = nn.Linear(input_size // 4, num_attrs + 1)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for item in [self.cls_score, self.bbox_pred]:
nn.init.constant_(item.bias, 0)
def forward(self, roi_features):
if roi_features.dim() > 2:
roi_features = torch.flatten(roi_features, start_dim=1)
scores = self.cls_score(roi_features)
proposal_deltas = self.bbox_pred(roi_features)
if self.use_attr:
_, max_class = scores.max(-1) # [b, c] --> [b]
cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256]
roi_features = torch.cat(
[roi_features, cls_emb], -1
) # [b, 2048] + [b, 256] --> [b, 2304]
roi_features = self.fc_attr(roi_features)
roi_features = F.relu(roi_features)
attr_scores = self.attr_score(roi_features)
return scores, attr_scores, proposal_deltas
else:
return scores, proposal_deltas
class GeneralizedRCNN(nn.Module):
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.model.device)
self.backbone = build_backbone(cfg)
self.proposal_generator = RPN(cfg, self.backbone.output_shape())
self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape())
self.roi_outputs = ROIOutputs(cfg)
self.to(self.device)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, Config):
config_path = (
config if config is not None else pretrained_model_name_or_path
)
# try:
config = Config.from_pretrained(
config_path,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
)
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if os.path.isfile(
os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
):
# Load from a PyTorch checkpoint
archive_file = os.path.join(
pretrained_model_name_or_path, WEIGHTS_NAME
)
else:
raise OSError(
"Error no file named {} found in directory {} ".format(
WEIGHTS_NAME, pretrained_model_name_or_path
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(
pretrained_model_name_or_path
):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf \
to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
if resolved_archive_file is None:
raise OSError
except OSError:
msg = f"Can't load weights for '{pretrained_model_name_or_path}'."
raise OSError(msg)
if resolved_archive_file == archive_file:
print(f"loading weights file {archive_file}")
else:
print(
f"loading weights file {archive_file} from cache at "
+ f"{resolved_archive_file}"
)
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config)
if state_dict is None:
try:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
state_dict = load_checkpoint(resolved_archive_file)
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 "
+ "checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
model_to_load = model
model_to_load.load_state_dict(state_dict)
        if model.__class__.__name__ != model_to_load.__class__.__name__:
            base_model_state_dict = set(model_to_load.state_dict().keys())
            head_model_state_dict_without_base_prefix = {
                key.split(cls.base_model_prefix + ".")[-1]
                for key in model.state_dict().keys()
            }
            missing_keys.extend(
                head_model_state_dict_without_base_prefix - base_model_state_dict
            )
if len(unexpected_keys) > 0:
print(
"Some weights of the model checkpoint at "
f"{pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing "
f"{model.__class__.__name__} from the checkpoint of a model trained "
"on another task or with another architecture (e.g. initializing a "
"BertForSequenceClassification model from a BertForPreTraining model)."
"\n- This IS NOT expected if you are initializing "
f"{model.__class__.__name__} from the checkpoint of a model "
"that you expect to be exactly identical (initializing a "
"BertForSequenceClassification model from a "
"BertForSequenceClassification model)."
)
else:
print(
f"All model checkpoint weights were used when initializing "
+ f"{model.__class__.__name__}.\n"
)
if len(missing_keys) > 0:
print(
f"Some weights of {model.__class__.__name__} were not initialized "
+ f"from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be "
+ "able to use it for predictions and inference."
)
else:
print(
f"All the weights of {model.__class__.__name__} were initialized "
+ f"from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the "
+ "checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for "
+ "predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
return model
def forward(
self,
images,
image_shapes,
gt_boxes=None,
proposals=None,
scales_yx=None,
**kwargs,
):
"""
kwargs:
max_detections (int), return_tensors {"np", "pt", None}, padding {None,
"max_detections"}, pad_value (int), location = {"cuda", "cpu"}
"""
if self.training:
raise NotImplementedError()
return self.inference(
images=images,
image_shapes=image_shapes,
gt_boxes=gt_boxes,
proposals=proposals,
scales_yx=scales_yx,
**kwargs,
)
@torch.no_grad()
def inference(
self,
images,
image_shapes,
gt_boxes=None,
proposals=None,
scales_yx=None,
**kwargs,
):
# run images through backbone
original_sizes = image_shapes * scales_yx
features = self.backbone(images)
# generate proposals if none are available
if proposals is None:
proposal_boxes, _ = self.proposal_generator(
images, image_shapes, features, gt_boxes
)
else:
assert proposals is not None
# pool object features from either gt_boxes, or from proposals
obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(
features, proposal_boxes, gt_boxes
)
# prepare FRCNN Outputs and select top proposals
boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs(
obj_logits=obj_logits,
attr_logits=attr_logits,
box_deltas=box_deltas,
pred_boxes=proposal_boxes,
features=feature_pooled,
sizes=image_shapes,
scales=scales_yx,
)
# will we pad???
subset_kwargs = {
"max_detections": kwargs.get("max_detections", None),
"return_tensors": kwargs.get("return_tensors", None),
"pad_value": kwargs.get("pad_value", 0),
"padding": kwargs.get("padding", None),
}
preds_per_image = torch.tensor([p.size(0) for p in boxes])
boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs)
classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs)
class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs)
attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs)
attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs)
roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs)
subset_kwargs["padding"] = None
preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs)
sizes = pad_list_tensors(image_shapes, None, **subset_kwargs)
normalized_boxes = norm_box(boxes, original_sizes)
return OrderedDict(
{
"obj_ids": classes,
"obj_probs": class_probs,
"attr_ids": attrs,
"attr_probs": attr_probs,
"boxes": boxes,
"sizes": sizes,
"preds_per_image": preds_per_image,
"roi_features": roi_features,
"normalized_boxes": normalized_boxes,
}
)
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/frcnn/modeling_frcnn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :)
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import json
import os
import pickle as pkl
import shutil
import tarfile
from collections import OrderedDict
from hashlib import sha256
from pathlib import Path
from urllib.parse import urlparse
from zipfile import is_zipfile, ZipFile
import numpy as np
import requests
from filelock import FileLock
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.download import download
from omegaconf import OmegaConf
mmf_cache_home = get_mmf_cache_dir()
try:
import torch
_torch_available = True
except ImportError:
_torch_available = False
default_cache_path = os.path.join(mmf_cache_home, "transformers")
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv(
"PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path
)
PYTORCH_TRANSFORMERS_CACHE = os.getenv(
"PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE
)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
vg_classes = []
with open(objs) as f:
for object in f.readlines():
vg_classes.append(object.split(",")[0].lower().strip())
vg_attrs = []
with open(attrs) as f:
for object in f.readlines():
vg_attrs.append(object.split(",")[0].lower().strip())
return vg_classes, vg_attrs
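# Illustrative note (assumed file layout, not part of the original file):
# objects.txt and attributes.txt are expected to hold one label per line with
# optional comma-separated synonyms; only the first entry is kept, e.g.
#     "tree,trees" -> "tree"
#     "Sky"        -> "sky"
# so `vg_classes, vg_attrs = load_labels()` returns two lists of lowercase names.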
def load_checkpoint(ckp):
r = OrderedDict()
with open(ckp, "rb") as f:
ckp = pkl.load(f)["model"]
for k in copy.deepcopy(list(ckp.keys())):
v = ckp.pop(k)
if isinstance(v, np.ndarray):
v = torch.tensor(v)
else:
            assert isinstance(v, torch.Tensor), type(v)
r[k] = v
return r
class Config:
_pointer = {}
def __init__(self, dictionary: dict, name: str = "root", level=0):
self._name = name
self._level = level
d = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
k = copy.deepcopy(k)
v = copy.deepcopy(v)
if isinstance(v, dict):
v = Config(v, name=k, level=level + 1)
d[k] = v
setattr(self, k, v)
self._pointer = d
def __repr__(self):
return str(list(self._pointer.keys()))
def __setattr__(self, key, val):
self.__dict__[key] = val
self.__dict__[key.upper()] = val
levels = key.split(".")
last_level = len(levels) - 1
pointer = self._pointer
if len(levels) > 1:
for i, level in enumerate(levels):
if hasattr(self, level) and isinstance(getattr(self, level), Config):
setattr(getattr(self, level), ".".join(levels[i:]), val)
if level == last_level:
pointer[level] = val
else:
pointer = pointer[level]
def to_dict(self):
return self._pointer
def dump_yaml(self, data, file_name):
with open(f"{file_name}", "w") as stream:
OmegaConf.save(data, stream)
def dump_json(self, data, file_name):
with open(f"{file_name}", "w") as stream:
json.dump(data, stream)
@staticmethod
def load_yaml(config):
return dict(OmegaConf.load(config))
def __str__(self):
t = " "
if self._name != "root":
r = f"{t * (self._level-1)}{self._name}:\n"
else:
r = ""
level = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(v, Config):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
self._level = level
return r[:-1]
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
config_dict, kwargs = cls.get_config_dict(
pretrained_model_name_or_path, **kwargs
)
return cls(config_dict)
@classmethod
def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
else:
config_file = pretrained_model_name_or_path
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
            # Check that the config file was actually resolved
            if resolved_config_file is None:
                raise OSError
        except OSError:
            msg = f"Can't load config for '{pretrained_model_name_or_path}'."
            raise OSError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file from cache")
return Config.load_yaml(resolved_config_file), kwargs
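# Illustrative usage sketch (example keys only, not part of the original file):
# Config wraps a nested dict/YAML so values can be reached as attributes, e.g.
#     cfg = Config.from_pretrained("config.yaml")
#     cfg.roi_heads.score_thresh_test  # nested sections become Config objects
#     cfg.max_detections               # leaf values keep their original types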
# quick compare tensors
def compare(in_tensor):
out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
n1 = in_tensor.numpy()
n2 = out_tensor.numpy()[0]
print(n1.shape, n1[0, 0, :5])
print(n2.shape, n2[0, 0, :5])
summ = sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if not x])
assert np.allclose(
n1, n2, rtol=0.01, atol=0.1
), f"{summ/len(n1.flatten())*100:.4f} % element-wise mismatch"
raise Exception("tensors are all good")
# Hugging face functions below
# TODO use mmf's download functionality
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent=None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
):
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
filename = url_to_filename(url_or_filename)
output_path = os.path.join(cache_dir, filename)
if not os.path.isfile(output_path):
assert download(
url_or_filename, cache_dir, filename
), f"failed to download {url_or_filename}"
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise OSError(f"file {url_or_filename} not found")
else:
# Something unknown
raise ValueError(
f"unable to parse {url_or_filename} as a URL or as a local path"
)
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted"
# at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if (
os.path.isdir(output_path_extracted)
and os.listdir(output_path_extracted)
and not force_extract
):
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise OSError(
f"Archive format of {output_path} could not be identified"
)
return output_path_extracted
return output_path
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def url_to_filename(url, etag=None):
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5"):
filename += ".h5"
elif url.endswith(".yaml"):
filename += ".yaml"
elif url.endswith(".bin"):
filename += ".bin"
return filename
def get_data(query, delim=","):
assert isinstance(query, str)
if os.path.isfile(query):
with open(query) as f:
data = eval(f.read())
else:
req = requests.get(query)
try:
            data = req.json()
except Exception:
data = req.content.decode()
assert data is not None, "could not connect"
try:
data = eval(data)
except Exception:
data = data.split("\n")
req.close()
return data
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/frcnn/frcnn_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Run with for example:
python3 mmf/tools/scripts/features/frcnn/extract_features_frcnn.py \
--model_file model.bin --config_file config.yaml --image_dir \
./example_images --output_folder ./output_features
"""
import argparse
import copy
import logging
import os
import numpy as np
import torch
from tools.scripts.features.extraction_utils import chunks, get_image_files
from tools.scripts.features.frcnn.frcnn_utils import Config
from tools.scripts.features.frcnn.modeling_frcnn import GeneralizedRCNN
from tools.scripts.features.frcnn.processing_image import Preprocess
class FeatureExtractor:
MODEL_URL = {
"FRCNN": "https://s3.amazonaws.com/models.huggingface.co/bert/unc-nlp/"
+ "frcnn-vg-finetuned/pytorch_model.bin"
}
CONFIG_URL = {
"FRCNN": "https://s3.amazonaws.com/models.huggingface.co/bert/unc-nlp/"
+ "frcnn-vg-finetuned/config.yaml"
}
def __init__(self):
self.args = self.get_parser().parse_args()
self.frcnn, self.frcnn_cfg = self._build_detection_model()
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name", default="FRCNN", type=str, help="Model to use for detection"
)
parser.add_argument(
"--model_file",
default=None,
type=str,
help="Huggingface model file. This overrides the model_name param.",
)
parser.add_argument(
"--config_name",
default="FRCNN",
type=str,
help="Config to use for detection",
)
parser.add_argument(
"--config_file", default=None, type=str, help="Huggingface config file"
)
parser.add_argument(
"--start_index", default=0, type=int, help="Index to start from "
)
parser.add_argument("--end_index", default=None, type=int, help="")
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument(
"--num_features",
type=int,
default=100,
help="Number of features to extract.",
)
parser.add_argument(
"--output_folder", type=str, default="./output", help="Output folder"
)
parser.add_argument("--image_dir", type=str, help="Image directory or file")
# TODO add functionality for this flag
parser.add_argument(
"--feature_name",
type=str,
help="The name of the feature to extract",
default="fc6",
)
parser.add_argument(
"--exclude_list",
type=str,
help="List of images to be excluded from feature conversion. "
+ "Each image on a new line",
default="./list",
)
parser.add_argument(
"--confidence_threshold",
type=float,
default=0,
help="Threshold of detection confidence above which boxes will be selected",
)
# TODO finish background flag
parser.add_argument(
"--background",
action="store_true",
help="The model will output predictions for the background class when set",
)
parser.add_argument(
"--padding",
type=str,
default=None,
help="You can set your padding, i.e. 'max_detections'",
)
        parser.add_argument(
            "--visualize",
            action="store_true",
            help="Add this flag to save the extra file used for visualization",
        )
        parser.add_argument(
            "--partition",
            type=int,
            default=None,
            help="Index of the partition of the image list to process",
        )
        parser.add_argument(
            "--max_partition",
            type=int,
            default=None,
            help="Total number of partitions the image list is split into",
        )
return parser
def _build_detection_model(self):
if self.args.config_file:
frcnn_cfg = Config.from_pretrained(self.args.config_file)
else:
frcnn_cfg = Config.from_pretrained(
self.CONFIG_URL.get(self.args.config_name, self.args.config_name)
)
if self.args.model_file:
frcnn = GeneralizedRCNN.from_pretrained(
self.args.model_file, config=frcnn_cfg
)
else:
frcnn = GeneralizedRCNN.from_pretrained(
self.MODEL_URL.get(self.args.model_name, self.args.model_name),
config=frcnn_cfg,
)
return frcnn, frcnn_cfg
def get_frcnn_features(self, image_paths):
image_preprocess = Preprocess(self.frcnn_cfg)
images, sizes, scales_yx = image_preprocess(image_paths)
output_dict = self.frcnn(
images,
sizes,
scales_yx=scales_yx,
padding=None,
max_detections=self.frcnn_cfg.max_detections,
return_tensors="pt",
)
return output_dict
def _save_feature(self, file_name, full_features, feat_list, info_list):
file_base_name = os.path.basename(file_name)
file_base_name = file_base_name.split(".")[0]
full_feature_base_name = file_base_name + "_full.npy"
feat_list_base_name = file_base_name + ".npy"
info_list_base_name = file_base_name + "_info.npy"
if self.args.visualize:
np.save(
os.path.join(self.args.output_folder, full_feature_base_name),
full_features,
)
np.save(
os.path.join(self.args.output_folder, feat_list_base_name),
feat_list.cpu().numpy(),
)
np.save(os.path.join(self.args.output_folder, info_list_base_name), info_list)
def _process_features(self, features, index):
feature_keys = [
"obj_ids",
"obj_probs",
"attr_ids",
"attr_probs",
"boxes",
"sizes",
"preds_per_image",
"roi_features",
"normalized_boxes",
]
single_features = dict()
for key in feature_keys:
single_features[key] = features[key][index]
confidence = self.args.confidence_threshold
idx = 0
while idx < single_features["obj_ids"].size()[0]:
removed = False
if (
single_features["obj_probs"][idx] < confidence
or single_features["attr_probs"][idx] < confidence
):
single_features["obj_ids"] = torch.cat(
[
single_features["obj_ids"][0:idx],
single_features["obj_ids"][idx + 1 :],
]
)
single_features["obj_probs"] = torch.cat(
[
single_features["obj_probs"][0:idx],
single_features["obj_probs"][idx + 1 :],
]
)
single_features["attr_ids"] = torch.cat(
[
single_features["attr_ids"][0:idx],
single_features["attr_ids"][idx + 1 :],
]
)
single_features["attr_probs"] = torch.cat(
[
single_features["attr_probs"][0:idx],
single_features["attr_probs"][idx + 1 :],
]
)
single_features["boxes"] = torch.cat(
[
single_features["boxes"][0:idx, :],
single_features["boxes"][idx + 1 :, :],
]
)
single_features["preds_per_image"] = (
single_features["preds_per_image"] - 1
)
single_features["roi_features"] = torch.cat(
[
single_features["roi_features"][0:idx, :],
single_features["roi_features"][idx + 1 :, :],
]
)
single_features["normalized_boxes"] = torch.cat(
[
single_features["normalized_boxes"][0:idx, :],
single_features["normalized_boxes"][idx + 1 :, :],
]
)
removed = True
if not removed:
idx += 1
feat_list = single_features["roi_features"]
boxes = single_features["boxes"][: self.args.num_features].cpu().numpy()
num_boxes = self.args.num_features
objects = single_features["obj_ids"][: self.args.num_features].cpu().numpy()
probs = single_features["obj_probs"][: self.args.num_features].cpu().numpy()
width = single_features["sizes"][1].item()
height = single_features["sizes"][0].item()
info_list = {
"bbox": boxes,
"num_boxes": num_boxes,
"objects": objects,
"cls_prob": probs,
"image_width": width,
"image_height": height,
}
return single_features, feat_list, info_list
def extract_features(self):
image_dir = self.args.image_dir
if os.path.isfile(image_dir):
features = self.get_frcnn_features([image_dir])
full_features, feat_list, info_list = self._process_features(features, 0)
self._save_feature(image_dir, full_features, feat_list, info_list)
else:
files = get_image_files(
self.args.image_dir,
exclude_list=self.args.exclude_list,
partition=self.args.partition,
max_partition=self.args.max_partition,
start_index=self.args.start_index,
end_index=self.args.end_index,
)
finished = 0
total = len(files)
failed = 0
failedNames = []
file_names = copy.deepcopy(files)
for chunk, begin_idx in chunks(files, self.args.batch_size):
try:
features = self.get_frcnn_features(chunk)
for idx, file_name in enumerate(chunk):
full_features, feat_list, info_list = self._process_features(
features, idx
)
self._save_feature(
file_names[begin_idx + idx],
full_features,
feat_list,
info_list,
)
finished += len(chunk)
if finished % 200 == 0:
print(f"Processed {finished}/{total}")
except Exception:
failed += len(chunk)
for idx, file_name in enumerate(chunk):
failedNames.append(file_names[begin_idx + idx])
                    logging.exception("Feature extraction failed for this chunk")
if self.args.partition is not None:
print("Partition " + str(self.args.partition) + " done.")
print("Failed: " + str(failed))
print("Failed Names: " + str(failedNames))
if __name__ == "__main__":
feature_extractor = FeatureExtractor()
feature_extractor.extract_features()
| EXA-1-master | exa/models/mmf-main/tools/scripts/features/frcnn/extract_features_frcnn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
File can be used for generating test data
Takes in the DB file, features folder, image folder and will generate a test data
folder for a certain amount of samples in the following folder
output_folder/
images/
a.jpg
b.jpg
...
features/
features.lmdb/
data.mdb
lock.mdb
raw/
a.npy
a_info.npy
b.npy
b_info.npy
...
db/
train.jsonl
dev.jsonl
test.jsonl
"""
# Copyright (c) 2017-present, Facebook, Inc.
import argparse
import json
import os
import shutil
import numpy as np
from tools.scripts.features.lmdb_conversion import LMDBConversion
class TestDataBuilder:
def __init__(self):
parser = self.get_parser()
self.args = parser.parse_args()
def get_parser(self):
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--train_db_file",
required=True,
type=str,
help="DB file that will be used for generating the test data for training.",
)
parser.add_argument(
"--dev_db_file",
required=True,
type=str,
help="DB file that will be used for generating the test data for "
+ "validation.",
)
parser.add_argument(
"--num_samples",
type=int,
default=100,
help="Number of samples to be extracted from the db file.",
)
parser.add_argument(
"--train_images_folder",
type=str,
default=None,
help="Images folder for training set",
)
parser.add_argument(
"--dev_images_folder",
type=str,
default=None,
help="Images folder for dev set",
)
parser.add_argument(
"--train_features_folder",
required=True,
type=str,
help="Features folder.",
default=None,
)
parser.add_argument(
"--dev_features_folder",
required=True,
type=str,
help="Features folder.",
default=None,
)
parser.add_argument(
"--output_folder", required=True, type=str, help="Output folder."
)
return parser
def build(self):
self.generate_and_save_data(
self.args.train_db_file,
self.args.train_images_folder,
self.args.train_features_folder,
"train",
self.args.num_samples,
self.args.output_folder,
)
self.generate_and_save_data(
self.args.dev_db_file,
self.args.dev_images_folder,
self.args.dev_features_folder,
"dev",
# Number of dev and test samples, we generate 1/10th of the training data
self.args.num_samples // 10,
self.args.output_folder,
)
# Test data is generated from dev data
self.generate_and_save_data(
self.args.dev_db_file,
self.args.dev_images_folder,
self.args.dev_features_folder,
"test",
self.args.num_samples // 10,
self.args.output_folder,
)
def generate_and_save_data(
self, db_file, image_folder, features_folder, name, num_samples, output_folder
):
"""This will generate features, db and images folder in proper format
and save them to output folder
Args:
db_file (str): Path to DB file from which samples will be generated
image_folder (str): Folder where images are present
features_folder (str): Folder where raw features are present
name (str): Type of the dataset set
num_samples (int): Number of objects to be sampled
output_folder (str): Path where output files will be stored
"""
data, _ = self._load_db_file(db_file, num_samples)
assert len(data) == num_samples and num_samples > 0
image_paths = self._get_image_paths(data, image_folder)
feature_paths = self._get_feature_paths(data, features_folder)
image_output_folder = os.path.join(output_folder, "images/")
os.makedirs(image_output_folder, exist_ok=True)
for path in image_paths:
shutil.copy(path, image_output_folder)
features_output_folder = os.path.join(output_folder, "features", "raw/")
os.makedirs(features_output_folder, exist_ok=True)
for path in feature_paths:
shutil.copy(path, features_output_folder)
db_output_folder = os.path.join(output_folder, "db/")
os.makedirs(db_output_folder, exist_ok=True)
output = []
for d in data:
output.append(json.dumps(d))
output = "\n".join(output)
with open(os.path.join(db_output_folder, f"{name}.jsonl"), "w") as f:
f.write(output)
lmdb_folder = os.path.join(output_folder, "features")
LMDBConversion.get_parser = mock_lmdb_parser(
features_output_folder, lmdb_folder
)
lmdb_converter = LMDBConversion()
lmdb_converter.execute()
def _load_db_file(self, db_file: str, num_samples: int):
"""Load db file based on the format and return back a randomly sampled
list of 'num_samples' objects from DB.
Args:
db_file (str): Path to DB file
num_samples (int): Number of samples that will be generated
Raises:
ValueError: Raised if DB file is not among ".json|.jsonl|.npy"
Returns:
Tupe(List[Object], str): A tuple containing both the selected data and
actual file path
"""
file_type = None
if db_file.endswith(".npy"):
file_type = "npy"
data = np.load(db_file, allow_pickle=True)
selected_data = np.random.choice(data[1:], size=num_samples, replace=False)
elif db_file.endswith(".jsonl"):
file_type = "jsonl"
with open(db_file) as f:
data = []
for item in f.readlines():
data.append(json.loads(item.strip("\n")))
selected_data = np.random.choice(data, size=num_samples, replace=False)
# Expecting JSON to be in COCOJSONFormat or contain "data" attribute
elif db_file.endswith(".json"):
file_type = "json"
with open(db_file) as f:
data = json.load(f)
selected_data = np.random.choice(data, size=num_samples, replace=False)
else:
raise ValueError("Unexpected DB file type. Valid options {json|jsonl|npy}")
return selected_data, file_type
def _get_image_paths(self, data, image_folder):
if image_folder is None:
return []
images = set()
for item in data:
possible_images = self._get_attrs(item)
for image in possible_images:
images.add(os.path.join(image_folder, image))
return images
def _get_feature_paths(self, data, feature_folder):
if feature_folder is None:
return []
features = set()
for item in data:
possible_images = self._get_attrs(item)
for image in possible_images:
image = ".".join(image.split(".")[:-1])
feature = image + ".npy"
info = image + "_info.npy"
features.add(os.path.join(feature_folder, feature))
features.add(os.path.join(feature_folder, info))
return features
def _get_attrs(self, item):
"""Returns possible attribute that can point to image id
Args:
item (Object): Object from the DB
Returns:
List[str]: List of possible images that will be copied later
"""
image = None
pick = None
attrs = self._get_possible_attrs()
for attr in attrs:
image = item.get(attr, None)
if image is not None:
pick = attr
break
if pick == "identifier":
return [image + "-img0.jpg", image + "-img1.jpg"]
elif pick == "image_name" or pick == "image_id":
return [image + ".jpg"]
else:
return [image]
def _get_possible_attrs(self):
return [
"Flickr30kID",
"Flikr30kID",
"identifier",
"image_path",
"image_name",
"img",
"image_id",
]
def mock_lmdb_parser(features_folder, output_folder):
args = argparse.Namespace()
args.mode = "convert"
args.features_folder = features_folder
args.lmdb_path = os.path.join(output_folder, "features.lmdb")
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.parse_args = lambda: args
return lambda _: parser
if __name__ == "__main__":
builder = TestDataBuilder()
builder.build()
| EXA-1-master | exa/models/mmf-main/tools/scripts/tests/generate_test_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
from tools.scripts.gqa.extract_vocabulary import ExtractVocabulary
class ExtractVisdialVocabulary(ExtractVocabulary):
def __init__(self):
super().__init__()
def get_text(self):
text = []
for input_file in self.input_files:
with open(input_file) as f:
f_json = json.load(f)
# Add 'questions' from visdial
text += f_json["data"]["questions"]
# Add 'answers' from visdial
text += f_json["data"]["answers"]
for dialog in f_json["data"]["dialogs"]:
text += [dialog["caption"]]
return text
if __name__ == "__main__":
extractor = ExtractVisdialVocabulary()
extractor.extract()
| EXA-1-master | exa/models/mmf-main/tools/scripts/visual_dialog/extract_vocabulary.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import json
import os
from mmf.utils.text import tokenize
class IMDBBuilder:
def __init__(self):
self.args = self.get_args()
def get_args(self):
parser = argparse.ArgumentParser("Build IMDB for VisDial")
parser.add_argument(
"-o",
"--out_file",
type=str,
default="./imdb.npy",
help="Output file for IMDB",
)
parser.add_argument(
"-i",
"--image_root",
type=str,
default="./COCO",
help="Image directory for COCO",
)
parser.add_argument(
"-v", "--version", type=float, default=0.9, help="Visdial version"
)
parser.add_argument(
"-d",
"--data_dir",
type=str,
default="./visdial",
help="Directory which contains visdial jsons",
)
parser.add_argument(
"-s",
"--set_type",
type=str,
default="train",
help="Dataset type train|val|test",
)
return parser.parse_args()
def get_id_to_path_dict(self):
id2path = {}
globs = glob.iglob(os.path.join(self.args.image_root, "*", "*.npy"))
# NOTE: based on assumption that image_id is unique across all splits
for image_path in globs:
path = "/".join(image_path.split("/")[-2:])
image_id = int(image_path[-16:-4])
id2path[image_id] = path
return id2path
def build(self):
visdial_json_file = os.path.join(
self.args.data_dir,
"visdial_%.1f_%s.json" % (self.args.version, self.args.set_type),
)
data = None
with open(visdial_json_file, "r") as f:
data = json.load(f)["data"]
final_questions = self.get_tokens(data["questions"])
final_answers = self.get_tokens(data["answers"])
dialogs = data["dialogs"]
dialogs_with_features = self.parse_dialogs(dialogs)
imdb = {
"questions": final_questions,
"answers": final_answers,
"dialogs": dialogs_with_features,
}
self.save_imdb(imdb)
def save_imdb(self, imdb):
with open(self.args.out_file, "w") as f:
json.dump(imdb, f)
def get_tokens(self, sentences):
if not isinstance(sentences, list):
sentences = [sentences]
final_sentences = []
for _, sentence in enumerate(sentences):
tokens = tokenize(sentence)
final_sentences.append(tokens)
return final_sentences
def parse_dialogs(self, dialogs):
id2path = self.get_id_to_path_dict()
for dialog in dialogs:
image_id = dialog["image_id"]
image_feature_path = id2path[image_id]
dialog["image_feature_path"] = image_feature_path
dialog["caption"] = self.get_tokens(dialog["caption"])
return dialogs
if __name__ == "__main__":
imdb_builder = IMDBBuilder()
imdb_builder.build()
| EXA-1-master | exa/models/mmf-main/tools/scripts/visual_dialog/build_imdb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# The following script requires Java 1.8.0 and pycocotools installed.
# The pycocoevalcap can be installed with pip as
# pip install git+https://github.com/flauted/coco-caption.git@python23
# Original pycocoevalcap code is at https://github.com/tylin/coco-caption
# but has no python3 support yet.
import argparse
import json
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.spice.spice import Spice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
class COCOEvalCap:
"""
    COCOEvalCap code is adapted from https://github.com/tylin/coco-caption
"""
def __init__(self, img_ids, coco, coco_res):
self.eval_imgs = []
self.eval = dict()
self.img_to_eval = dict()
self.coco = coco
self.coco_res = coco_res
def evaluate(self):
gts = self.coco
res = self.coco_res
# =================================================
        # Tokenization
# =================================================
print("tokenization...")
tokenizer = PTBTokenizer()
gts = tokenizer.tokenize(gts)
res = tokenizer.tokenize(res)
# =================================================
# Set up scorers
# =================================================
print("setting up scorers...")
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(), "METEOR"),
(Rouge(), "ROUGE_L"),
(Cider(), "CIDEr"),
(Spice(), "SPICE"),
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
print("computing %s score..." % (scorer.method()))
score, scores = scorer.compute_score(gts, res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
self.set_eval(sc, m)
self.set_img_to_eval_imgs(scs, gts.keys(), m)
print(f"{m}: {sc:0.3f}")
else:
self.set_eval(score, method)
self.set_img_to_eval_imgs(scores, gts.keys(), method)
print(f"{method}: {score:0.3f}")
self.set_eval_imgs()
def set_eval(self, score, method):
self.eval[method] = score
def set_img_to_eval_imgs(self, scores, img_ids, method):
for img_id, score in zip(img_ids, scores):
if img_id not in self.img_to_eval:
self.img_to_eval[img_id] = dict()
self.img_to_eval[img_id]["image_id"] = img_id
self.img_to_eval[img_id][method] = score
def set_eval_imgs(self):
self.eval_imgs = [eval for img_id, eval in self.img_to_eval.items()]
def calculate_metrics(img_ids, dataset_gts, dataset_res):
img_to_anns_gts = {id: [] for id in img_ids}
for ann in dataset_gts["annotations"]:
img_to_anns_gts[ann["image_id"]] += [ann]
img_to_anns_res = {id: [] for id in img_ids}
for ann in dataset_res["annotations"]:
img_to_anns_res[ann["image_id"]] += [ann]
eval_obj = COCOEvalCap(img_ids, img_to_anns_gts, img_to_anns_res)
eval_obj.evaluate()
return eval_obj.eval
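# A minimal usage sketch (illustrative only; the image id and captions are made
# up). Each argument mirrors the format consumed above: a dict whose
# "annotations" value is a list of {"image_id", "caption"} records.
def _example_calculate_metrics():
    gts = {"annotations": [{"image_id": 1, "caption": "a dog runs across the grass"}]}
    res = {"annotations": [{"image_id": 1, "caption": "a dog running on grass"}]}
    return calculate_metrics([1], gts, res)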
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Image captioning metrics")
parser.add_argument("--reference_json", help="Path to reference captions json")
parser.add_argument("--predicted_json", help="Path to predicted captions json")
args = parser.parse_args()
with open(args.reference_json) as f:
captions = json.load(f)
references = []
img_ids = []
for img in captions["images"]:
if img["split"] == "test":
for c in img["sentences"]:
d = {}
d["image_id"] = c["imgid"]
img_ids.append(c["imgid"])
d["caption"] = c["raw"]
references.append(d)
img_ids = list(set(img_ids))
with open(args.predicted_json) as f:
preds = json.load(f)
dataset_gts = {"annotations": references}
dataset_res = {"annotations": preds}
print(calculate_metrics(img_ids, dataset_gts, dataset_res))
| EXA-1-master | exa/models/mmf-main/tools/scripts/coco/coco_caption_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import numpy as np
import torch
from tqdm import tqdm
try:
from transformers3.modeling_bert import BertModel
from transformers3.tokenization_auto import AutoTokenizer
except ImportError:
from transformers.modeling_bert import BertModel
from transformers.tokenization_auto import AutoTokenizer
class BertFeatExtractor:
def __init__(self, model_name):
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = BertModel.from_pretrained(model_name).eval()
self.model.cuda()
def get_bert_embedding(self, text):
tokenized_text = self.tokenizer.tokenize(text)
tokenized_text = ["[CLS]"] + tokenized_text + ["[SEP]"]
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
tokens_tensor = torch.Tensor([indexed_tokens]).long()
segments_tensor = torch.Tensor([0] * len(tokenized_text)).long()
with torch.no_grad():
encoded_layers, _ = self.model(
tokens_tensor.cuda(),
segments_tensor.cuda(),
output_all_encoded_layers=False,
)
return encoded_layers.squeeze()[0]
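# Minimal usage sketch (illustrative only; requires a CUDA device and downloads
# the pretrained weights on first use):
#   extractor = BertFeatExtractor("bert-base-uncased")
#   cls_vec = extractor.get_bert_embedding("what color is the dog?")  # [CLS] hidden state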
def extract_bert(imdb_path, out_path, group_id=0, n_groups=1):
imdb = np.load(imdb_path)
feat_extractor = BertFeatExtractor("bert-base-uncased")
if group_id == 0:
iterator_obj = tqdm(imdb[1:])
else:
iterator_obj = imdb[1:]
for idx, el in enumerate(iterator_obj):
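        # Shard the work across n_groups parallel processes: this process only
        # handles entries whose index equals group_id modulo n_groups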
if idx % n_groups != group_id:
continue
emb = feat_extractor.get_bert_embedding(el["question_str"])
save_path = out_path + str(el["question_id"])
np.save(save_path, emb.cpu().numpy())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imdb_path", type=str, default=None)
parser.add_argument("--out_path", type=str, default=None)
parser.add_argument("--group_id", type=int, default=0)
parser.add_argument("--n_groups", type=int, default=1)
args = parser.parse_args()
extract_bert(args.imdb_path, args.out_path, args.group_id, args.n_groups)
| EXA-1-master | exa/models/mmf-main/tools/scripts/bert/extract_bert_embeddings.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import os
import re
from collections import Counter
import h5py
import numpy as np
import tqdm
from mmf.utils.process_answers import preprocess_answer
from mmf.utils.text_processing import text_tokenize as tokenize
def merge_train(train_q_dir):
merged_dic = {}
for file_name in tqdm.tqdm(os.listdir(train_q_dir)):
full_path = os.path.join(train_q_dir, file_name)
partial_questions = json.load(open(full_path))
merged_dic.update(partial_questions)
save_dir = os.path.abspath(os.path.join(train_q_dir, os.pardir))
with open(os.path.join(save_dir, "train_all_questions.json"), "w") as fp:
json.dump(merged_dic, fp)
def get_objects(semantic_str):
matches = re.findall(r"\(([^)]+)", semantic_str)
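    # Each "(...)" group in the GQA semantic string holds ground-truth object ids,
    # e.g. a step like "relate(1248429,3386)" (illustrative) contributes [1248429, 3386]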
result = []
for match in matches:
if "," in match:
result += list(map(int, match.split(",")))
elif match.isdigit():
result += [int(match)]
else:
pass
return result
def get_imdb(file_path):
imdb = [{"dataset_name": "gqa"}]
questions = json.load(open(file_path))
print(f"Processing file {file_path}")
for qid, item in tqdm.tqdm(questions.items()):
entry = {
"image_name": item["imageId"] + "jpg",
"image_id": item["imageId"],
"question_id": qid,
"question_str": item["question"],
"question_tokens": tokenize(item["question"]),
}
if "answer" in item:
entry["all_answers"] = [item["answer"] for _ in range(10)]
entry["valid_answers"] = [item["answer"] for _ in range(10)]
entry["semantic_string"] = (item["semanticStr"],)
entry["gt_object_ids"] = (get_objects(item["semanticStr"]),)
entry["meta_data"] = item["types"]
imdb.append(entry)
return np.array(imdb)
def extract_bbox_feats(feat_dir, out_dir):
info_json_path = os.path.join(feat_dir, "gqa_objects_info.json")
info_dict = json.load(open(info_json_path))
file_mapping = {k: [] for k in range(16)}
for k, v in info_dict.items():
file_mapping[v["file"]] += [(k, v)]
for i in range(16):
file_path = os.path.join(feat_dir, f"gqa_objects_{i}.h5")
print(f"Processing file {file_path}")
feat_db = h5py.File(file_path, "r")
for entry in tqdm.tqdm(file_mapping[i]):
image_id = entry[0]
meta = entry[1]
to_save = {
"image_id": image_id,
"boxes": feat_db["bboxes"][meta["idx"]],
"feats": feat_db["features"][meta["idx"]],
"height": meta["height"],
"width": meta["width"],
"n_objects": meta["objectsNum"],
}
save_path = os.path.join(out_dir, str(image_id) + ".npy")
np.save(save_path, to_save)
def extract_spatial_feats(feat_dir, out_dir):
info_json_path = os.path.join(feat_dir, "gqa_spatial_info.json")
info_dict = json.load(open(info_json_path))
file_mapping = {k: [] for k in range(16)}
for k, v in info_dict.items():
file_mapping[v["file"]] += [(k, v)]
for i in range(16):
file_path = os.path.join(feat_dir, f"gqa_spatial_{i}.h5")
print(f"Processing file {file_path}")
feat_db = h5py.File(file_path, "r")
for entry in tqdm.tqdm(file_mapping[i]):
image_id = entry[0]
meta = entry[1]
to_save = feat_db["features"][meta["idx"]]
to_save = to_save.reshape(1, 7, 7, 2048)
save_path = os.path.join(out_dir, str(image_id) + ".npy")
np.save(save_path, to_save)
def extract_image_features(image_dir, out_dir):
extract_bbox_feats(
os.path.join(image_dir, "objects"), os.path.join(out_dir, "objects")
)
extract_spatial_feats(
os.path.join(image_dir, "spatial"), os.path.join(out_dir, "spatial")
)
def convert_gqa_to_vqa(gqa_dir, out_dir):
"""
Takes GQA dataset and converts it into VQA format
Assumes GQA dir structure as:
-gqa_dir/
-images/
-images/
-objects/
-spatial/
-questions/
-scenegraphs/
"""
image_feat_path = os.path.join(gqa_dir, "images")
extract_image_features(image_feat_path, out_dir)
questions_dir = os.path.join(gqa_dir, "questions")
if os.path.isfile(os.path.join(questions_dir, "train_all_questions.json")):
print("Using previously generated train_all_questions.json file")
else:
merge_train(os.path.join(gqa_dir, "questions", "train_all_questions"))
split_mapping = {
"test": "test_all_questions.json",
"val": "val_all_questions.json",
"challenge": "challenge_all_questions.json",
"train": "train_all_questions.json",
}
for split in split_mapping:
for balance_type in ["balanced", "all"]:
filename = split_mapping[split]
csplit = split
if balance_type == "balanced":
filename = filename.replace("_all", "_balanced")
csplit = split + "_balanced"
file_path = os.path.join(questions_dir, filename)
imdb = get_imdb(file_path)
save_path = os.path.join(out_dir, f"imdb_{csplit}.npy")
np.save(save_path, imdb)
splits = ["val", "train"]
split_type = ["balanced", "all"]
global_answer = Counter()
global_q = Counter()
question_len = Counter()
for s in splits:
for st in split_type:
questions_json = os.path.join(questions_dir, f"{s}_{st}_questions.json")
questions = json.load(open(questions_json))
print(f"Processing split {s}_{st}")
answers = Counter()
q_tokens = Counter()
for _, q in tqdm.tqdm(questions.items()):
tokens = tokenize(q["question"])
q_tokens.update(tokens)
global_q.update(tokens)
answers.update([q["answer"].lower()])
global_answer.update([q["answer"].lower()])
question_len.update([len(tokens)])
print("N_unique answers :", len(global_answer))
print("N unique q tokens:", len(global_q))
print("Min Q length", min([x for x in question_len]))
print("Max Q length", max([x for x in question_len]))
print("Q length distribution", question_len)
# Save question vocabulary
q_vocabulary = [w[0] for w in global_q.items()]
q_vocabulary.sort()
q_vocabulary = ["<unk>"] + q_vocabulary
vocab_file = os.path.join(out_dir, "vocabulary_gqa.txt")
with open(vocab_file, "w") as f:
f.writelines([w + "\n" for w in q_vocabulary])
# Save answer vocabulary
answer_list = [preprocess_answer(ans[0]) for ans in global_answer.items()]
answer_list = [t.strip() for t in answer_list if len(t.strip()) > 0]
answer_list.sort()
if "<unk>" not in answer_list:
answer_list = ["<unk>"] + answer_list
answer_file = os.path.join(out_dir, "answers_gqa.txt")
with open(answer_file, "w") as fp:
fp.writelines([w + "\n" for w in answer_list])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gqa_dir", default=None)
parser.add_argument("--out_dir", default=None)
args = parser.parse_args()
convert_gqa_to_vqa(args.gqa_dir, args.out_dir)
| EXA-1-master | exa/models/mmf-main/tools/scripts/gqa/convert_gqa_to_vqa.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import os
from collections import Counter
from mmf.utils.text import tokenize
class ExtractVocabulary:
def __init__(self):
self.args = self.get_args()
self.input_files = self.args.input_files
self.out_dir = self.args.out_dir
self.min_freq = self.args.min_freq
self.vocab_file_name = self.args.vocab_file_name
def extract(self):
os.makedirs(self.out_dir, exist_ok=True)
word_count = Counter()
texts = self.get_text()
text_lengths = [None] * len(texts)
for inx, text in enumerate(texts):
words = tokenize(text)
text_lengths[inx] = len(words)
word_count.update(words)
        # UNK token will be added on the fly if you use the Vocab class in core/text
vocabulary = [w[0] for w in word_count.items() if w[1] >= self.min_freq]
vocabulary.sort()
self.save_vocabulary(vocabulary)
print("min text len=", min(text_lengths))
print("max text len=", max(text_lengths))
def save_vocabulary(self, vocabulary):
vocab_file = os.path.join(self.out_dir, self.vocab_file_name)
with open(vocab_file, "w") as f:
f.writelines([w + "\n" for w in vocabulary])
def get_text(self):
"""
Override this in your child class to extract custom text
Default for VQA. Make sure to return a list of all possible text
"""
text = []
for input_file in self.input_files:
with open(input_file) as f:
text += json.load(f)["questions"]
return text
def get_args(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_files",
nargs="+",
required=True,
help="input question json files, \
if more than 1, split by space",
)
parser.add_argument(
"--out_dir",
type=str,
default="./",
help="output directory, default is current directory",
)
parser.add_argument(
"--min_freq",
type=int,
default=0,
help="the minimum times of word occurrence \
to be included in vocabulary, default 0",
)
parser.add_argument(
"--vocab_file_name",
type=str,
default="vocabulary.txt",
help="Name of the file in vocabulary will be stored",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
extractor = ExtractVocabulary()
extractor.extract()
| EXA-1-master | exa/models/mmf-main/tools/scripts/gqa/extract_vocabulary.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/projects/m4c_captioner/scripts/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import tools.scripts.coco.coco_caption_eval as coco_caption_eval
def print_metrics(res_metrics):
print(res_metrics)
keys = [
"Bleu_1",
"Bleu_2",
"Bleu_3",
"Bleu_4",
"METEOR",
"ROUGE_L",
"SPICE",
"CIDEr",
]
print("\n\n**********\nFinal model performance:\n**********")
for k in keys:
print(k, ": %.1f" % (res_metrics[k] * 100))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--pred_file", type=str, required=True)
parser.add_argument("--annotation_file", type=str, required=True)
parser.add_argument("--set", type=str, default="karpathy_val")
args = parser.parse_args()
with open(args.pred_file) as f:
preds = json.load(f)
annotation_file = args.annotation_file
imdb = np.load(annotation_file, allow_pickle=True)
imdb = imdb[1:]
gts = [
{"image_id": info["image_id"], "caption": info["caption_str"]} for info in imdb
]
preds = [{"image_id": int(p["image_id"]), "caption": p["caption"]} for p in preds]
imgids = list({g["image_id"] for g in gts})
metrics = coco_caption_eval.calculate_metrics(
imgids, {"annotations": gts}, {"annotations": preds}
)
print_metrics(metrics)
| EXA-1-master | exa/models/mmf-main/projects/m4c_captioner/scripts/coco_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import tools.scripts.coco.coco_caption_eval as coco_caption_eval
def print_metrics(res_metrics):
print(res_metrics)
keys = [
"Bleu_1",
"Bleu_2",
"Bleu_3",
"Bleu_4",
"METEOR",
"ROUGE_L",
"SPICE",
"CIDEr",
]
print("\n\n**********\nFinal model performance:\n**********")
for k in keys:
print(k, ": %.1f" % (res_metrics[k] * 100))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--pred_file", type=str, required=True)
parser.add_argument("--annotation_file", type=str, required=True)
parser.add_argument("--set", type=str, default="val")
args = parser.parse_args()
if args.set not in ["train", "val"]:
raise Exception(
"this script only supports TextCaps train and val set. "
"Please use the EvalAI server for test set evaluation"
)
with open(args.pred_file) as f:
preds = json.load(f)
annotation_file = args.annotation_file
imdb = np.load(annotation_file, allow_pickle=True)
imdb = imdb[1:]
gts = [
{"image_id": info["image_id"], "caption": info["caption_str"]} for info in imdb
]
preds = [{"image_id": p["image_id"], "caption": p["caption"]} for p in preds]
imgids = list({g["image_id"] for g in gts})
metrics = coco_caption_eval.calculate_metrics(
imgids, {"annotations": gts}, {"annotations": preds}
)
print_metrics(metrics)
| EXA-1-master | exa/models/mmf-main/projects/m4c_captioner/scripts/textcaps_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This is the generic module for Graph Network computations,
# which can be added as a component to any base network
# Used some word2vec code from https://github.com/adithyamurali/TaskGrasp
# Also used example code from https://github.com/rusty1s/pytorch_geometric
import os
import pickle
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.utils.text import VocabDict
from networkx import convert_node_labels_to_integers
from torch_geometric.nn import BatchNorm, GCNConv, RGCNConv, SAGEConv
from tqdm import tqdm
def k_hop_subgraph(
node_idx,
num_hops,
edge_index,
relabel_nodes=False,
num_nodes=None,
flow="source_to_target",
):
r"""Computes the :math:`k`-hop subgraph of :obj:`edge_index` around node
:attr:`node_idx`.
It returns (1) the nodes involved in the subgraph, (2) the filtered
:obj:`edge_index` connectivity, (3) the mapping from node indices in
:obj:`node_idx` to their new location, and (4) the edge mask indicating
which edges were preserved.
Args:
node_idx (int, list, tuple or :obj:`torch.Tensor`): The central
node(s).
num_hops: (int): The number of hops :math:`k`.
edge_index (LongTensor): The edge indices.
relabel_nodes (bool, optional): If set to :obj:`True`, the resulting
:obj:`edge_index` will be relabeled to hold consecutive indices
starting from zero. (default: :obj:`False`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
flow (string, optional): The flow direction of :math:`k`-hop
aggregation (:obj:`"source_to_target"` or
:obj:`"target_to_source"`). (default: :obj:`"source_to_target"`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`, :class:`LongTensor`,
:class:`BoolTensor`)
"""
assert flow in ["source_to_target", "target_to_source"]
if flow == "target_to_source":
row, col = edge_index
else:
col, row = edge_index
node_mask = row.new_empty(num_nodes, dtype=torch.bool)
edge_mask = row.new_empty(row.size(0), dtype=torch.bool)
if isinstance(node_idx, (int, list, tuple)):
node_idx = torch.tensor([node_idx], device=row.device).flatten()
else:
node_idx = node_idx.to(row.device)
subsets = [node_idx]
for _ in range(num_hops):
node_mask.fill_(False)
node_mask[subsets[-1]] = True
torch.index_select(node_mask, 0, row, out=edge_mask)
subsets.append(col[edge_mask])
subset, inv = torch.cat(subsets).unique(return_inverse=True)
inv = inv[: node_idx.numel()]
node_mask.fill_(False)
node_mask[subset] = True
edge_mask = node_mask[row] & node_mask[col]
edge_index = edge_index[:, edge_mask]
if relabel_nodes:
node_idx = row.new_full((num_nodes,), -1)
node_idx[subset] = torch.arange(subset.size(0), device=row.device)
edge_index = node_idx[edge_index]
return subset, edge_index, inv, edge_mask
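# A minimal sketch of the helper above (the toy graph is made up): on a 3-node
# chain 0 -> 1 -> 2, the 2-hop subgraph around node 2 with the default
# "source_to_target" flow recovers all three nodes and both edges.
def _k_hop_subgraph_example():
    edge_index = torch.tensor([[0, 1], [1, 2]])  # edges 0 -> 1 and 1 -> 2
    subset, sub_edge_index, inv, edge_mask = k_hop_subgraph(
        2, 2, edge_index, relabel_nodes=True, num_nodes=3
    )
    return subset, sub_edge_index, inv, edge_mask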
def make_graph(
raw_graph,
prune_culdesacs=False,
prune_unconnected=True,
q_vocab=None,
i_vocab=None,
ans_vocab=None,
include_reverse_relations=False,
):
# None edge cases
if q_vocab is None:
q_vocab = []
q_vocab = set(q_vocab)
if i_vocab is None:
i_vocab = []
i_vocab = set(i_vocab)
if ans_vocab is None:
ans_vocab = []
ans_vocab = set(ans_vocab)
# Init nx graph
graph = nx.DiGraph()
# Add the nodes
for concept in raw_graph["concepts2idx"]:
# Add node type info
q_node = concept in q_vocab
i_node = concept in i_vocab
ans_node = concept in ans_vocab
# Add node
graph.add_node(concept, q_node=q_node, i_node=i_node, ans_node=ans_node)
# Go through edges in raw_graph
for triplet in raw_graph["triplets"]:
# Get triplet
head_idx = triplet[0]
rel_idx = triplet[1]
tail_idx = triplet[2]
# Get the names
head = raw_graph["concepts"][head_idx]
tail = raw_graph["concepts"][tail_idx]
rel = raw_graph["relations"][rel_idx]
# Add to the graph
assert head in graph.nodes and tail in graph.nodes
graph.add_edge(head, tail, relation=rel)
# Prune totally unconnected nodes
if prune_unconnected:
for concept in raw_graph["concepts2idx"]:
assert concept in graph.nodes
# Get edges to/from that node
connecting_edges = list(graph.in_edges(concept)) + list(
graph.out_edges(concept)
)
# Remove if there are no edges
if len(connecting_edges) == 0:
graph.remove_node(concept)
# Prune graph of nodes
# Custom concepts to remove
to_remove = [""]
for concept in to_remove:
if concept in graph.nodes:
graph.remove_node(concept)
# Get the idx graph for easy conversions
graph_idx = convert_node_labels_to_integers(graph)
# Also go ahead and return dicts and edge_type and edge_index
edge_index, edge_type = get_edge_idx_type(
graph, graph_idx, raw_graph["relations2idx"], include_reverse_relations
)
return graph, graph_idx, edge_index, edge_type
def get_edge_idx_type(graph, graph_idx, rel2idx, include_reverse_relations=False):
# Return from a graph, the required edge_index and edge_type info
# Pretty simple since from graph_idx
edge_index = np.array(list(graph_idx.edges)).T
# For type, need to do a conversion
edge_type = [graph.edges[e]["relation"] for e in graph.edges]
edge_type = np.array([rel2idx[rel] for rel in edge_type])
# Add reverse relations
if include_reverse_relations:
edge_src = np.expand_dims(edge_index[0, :], 0)
edge_dest = np.expand_dims(edge_index[1, :], 0)
edge_reverse = np.concatenate([edge_dest, edge_src], axis=0)
edge_index = np.concatenate([edge_index, edge_reverse], axis=1)
return edge_index, edge_type
def prepare_embeddings(node_names, embedding_file, add_split):
"""
This function is used to prepare embeddings for the graph
:param embedding_file: location of the raw embedding file
:return:
"""
print("\n\nCreating node embeddings...")
embedding_model = ""
if "glove" in embedding_file:
embedding_model = "glove"
elif "GoogleNews" in embedding_file:
embedding_model = "word2vec"
elif "subword" in embedding_file:
embedding_model = "fasttext"
elif "numberbatch" in embedding_file:
embedding_model = "numberbatch"
def transform(compound_word):
return [
compound_word,
"_".join([w.lower() for w in compound_word.split(" ")]),
"_".join([w.capitalize() for w in compound_word.split(" ")]),
"-".join([w for w in compound_word.split(" ")]),
"-".join([w for w in compound_word.split(" ")]),
]
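    # e.g. transform("hot dog") -> ["hot dog", "hot_dog", "Hot_Dog", "hot-dog",
    # "hot-dog"]; the variants are tried in order against the embedding vocabulary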
node2vec = {}
model = None
# glove has a slightly different format
if embedding_model == "glove":
tmp_file = ".".join(embedding_file.split(".")[:-1]) + "_tmp.txt"
glove2word2vec(embedding_file, tmp_file)
embedding_file = tmp_file
# Important: only native word2vec file needs binary flag to be true
print(f"Loading pretrained embeddings from {embedding_file} ...")
model = KeyedVectors.load_word2vec_format(
embedding_file, binary=(embedding_model == "word2vec")
)
# retrieve embeddings for graph nodes
no_match_nodes = []
match_positions = []
for node_name in tqdm(node_names, desc="Prepare node embeddings"):
try_words = []
try_words.extend(transform(node_name))
# Try to find w2v
found_mapping = False
for i, try_word in enumerate(try_words):
try:
node2vec[node_name] = model.get_vector(try_word)
match_positions.append(i + 1)
found_mapping = True
except KeyError:
pass
if found_mapping:
break
# Try multi-words (average w2v)
if add_split:
if not found_mapping and len(node_name.split(" ")) > 1:
sub_word_vecs = []
for subword in node_name.split(" "):
# Get w2v for the individual words
try_words = []
try_words.extend(transform(subword))
mp = []
found_submap = False
for i, try_word in enumerate(try_words):
try:
sub_word_vecs.append(model.get_vector(try_word))
mp.append(i + 1)
found_submap = True
except KeyError:
pass
if found_submap:
break
                # If all subwords successful, add it to node2vec and match_positions
if len(sub_word_vecs) == len(node_name.split(" ")):
node2vec[node_name] = np.mean(sub_word_vecs, 0)
match_positions.append(
np.mean(mp)
) # I'm sort of ignoring match_positions except for counts
found_mapping = True
else:
if not found_mapping and len(node_name.split("_")) > 1:
sub_word_vecs = []
for subword in node_name.split("_"):
# Get w2v for the individual words
try_words = []
try_words.extend(transform(subword))
mp = []
found_submap = False
for i, try_word in enumerate(try_words):
try:
sub_word_vecs.append(model.get_vector(try_word))
mp.append(i + 1)
found_submap = True
except KeyError:
pass
if found_submap:
break
                # If all subwords successful, add it to node2vec and match_positions
if len(sub_word_vecs) == len(node_name.split("_")):
node2vec[node_name] = np.mean(sub_word_vecs, 0)
match_positions.append(
np.mean(mp)
) # I'm sort of ignoring match_positions except for counts
found_mapping = True
        # All else fails, it's a no match
        if not found_mapping:
            no_match_nodes.append([node_name, try_words])
    # Hand back the embeddings plus the bookkeeping that GraphNetworkModule expects
    return node2vec, node_names, no_match_nodes
# This just wraps GraphNetworkModule for mmf so GNM can be a submodule of
# other networks too
@registry.register_model("graph_network_bare")
class GraphNetworkBare(BaseModel):
def __init__(self, config):
super().__init__(config)
self.build()
@classmethod
def config_path(cls):
return "configs/models/graph_network_bare/defaults.yaml"
    # Each model needs to define a build method where the model's modules
    # are actually built and assigned to the model
def build(self):
extra_config = {}
extra_config["feed_vb_to_graph"] = False
extra_config["feed_q_to_graph"] = False
extra_config["feed_mode"] = None
extra_config["feed_graph_to_vb"] = False
extra_config["feed_special_node"] = False
extra_config["topk_ans_feed"] = None
extra_config["compress_crossmodel"] = False
extra_config["crossmodel_compress_dim"] = None
extra_config["analysis_mode"] = False
extra_config["noback_vb"] = False
self.graph_module = GraphNetworkModule(self.config, extra_config)
if self.config.output_type in [
"graph_level",
"graph_level_ansonly",
"graph_level_inputonly",
]:
# Make a linear last layer
self.fc = nn.Linear(self.graph_module.gn.output_dim, self.config.num_labels)
else:
assert self.config.output_type in ["graph_prediction"]
# Output is size of graph
    # Each model in MMF gets a dict called sample_list which contains
    # all of the necessary information from the dataset
def forward(self, sample_list):
# Forward with sample list
output = self.graph_module(sample_list)
# If graph_level, we need to now predict logits
if self.config.output_type in [
"graph_level",
"graph_level_ansonly",
"graph_level_inputonly",
]:
# Do last layer
logits = self.fc(output)
else:
assert self.config.output_type in ["graph_prediction"]
logits = output
# Do zerobias
logits -= 6.58
# For loss calculations (automatically done by MMF
# as per the loss defined in the config),
# we need to return a dict with "scores" key as logits
output = {"scores": logits}
# If we're in eval / analysis mode, add more to output
if self.config.analysis_mode:
output = self.graph_module.add_analysis_to_output(output)
# MMF will automatically calculate loss
return output
# Resolve dataset paths either directly or relative to the MMF data directory
def mmf_indirect(path):
if os.path.exists(path):
return path
else:
path = os.path.join(os.getenv("MMF_DATA_DIR"), "datasets", path)
return path
# Graph network module
# Can be added as part of a larger network, or used alone using GraphNetworkBare
class GraphNetworkModule(nn.Module):
"""The generic class for graph networks
Can be generically added to any other kind of network
"""
def __init__(self, config, config_extra=None):
super().__init__()
self.config = config
if config_extra is None:
self.config_extra = {}
else:
self.config_extra = config_extra
# Load the input graph
raw_graph = torch.load(mmf_indirect(config.kg_path))
self.graph, self.graph_idx, self.edge_index, self.edge_type = make_graph(
raw_graph, config.prune_culdesacs
)
# Get all the useful graph attributes
self.num_nodes = len(self.graph.nodes)
assert len(self.graph_idx.nodes) == self.num_nodes
self.num_edges = len(self.graph.edges)
assert len(self.graph_idx.edges) == self.num_edges
assert self.edge_index.shape[1] == self.num_edges
assert self.edge_type.shape[0] == self.num_edges
self.num_relations = len(raw_graph["relations2idx"])
# Get the dataset specific info and relate it to the constructed graph
(
self.name2node_idx,
self.qid2nodeact,
self.img_class_sz,
) = self.get_dataset_info(config)
# And get the answer related info
(
self.index_in_ans,
self.index_in_node,
self.graph_answers,
self.graph_ans_node_idx,
) = self.get_answer_info(config)
# Save graph answers (to be used by data loader)
torch.save(self.graph_answers, mmf_indirect(config.graph_vocab_file))
# If features have w2v, initialize it here
node2vec_filename = mmf_indirect(config.node2vec_filename)
node_names = list(self.name2node_idx.keys())
valid_node2vec = False
if os.path.exists(node2vec_filename):
with open(node2vec_filename, "rb") as f:
node2vec, node_names_saved, no_match_nodes = pickle.load(f)
# Make sure the nodes here are identical (otherwise,
# when we update graph code, we might have the wrong graph)
if set(node_names) == set(node_names_saved):
valid_node2vec = True
# Generate node2vec if not done already
if not valid_node2vec:
node2vec, node_names_dbg, no_match_nodes = prepare_embeddings(
node_names,
mmf_indirect(config.embedding_file),
config.add_w2v_multiword,
)
print("Saving synonym2vec to pickle file:", node2vec_filename)
pickle.dump(
(node2vec, node_names_dbg, no_match_nodes),
open(node2vec_filename, "wb"),
)
# Get size
self.w2v_sz = node2vec[list(node2vec.keys())[0]].shape[0]
# Get node input dim
self.in_node_dim = 0
        self.q_offset = 0
self.img_offset = 0
self.vb_offset = 0
self.q_enc_offset = 0
self.w2v_offset = 0
# Add question (size 1)
if "question" in config.node_inputs:
self.q_offset = self.in_node_dim
self.in_node_dim += 1
# Add classifiers
if "classifiers" in config.node_inputs:
self.img_offset = self.in_node_dim
self.in_node_dim += self.img_class_sz
# Add w2v
if "w2v" in config.node_inputs:
self.w2v_offset = self.in_node_dim
self.in_node_dim += self.w2v_sz
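        # With the default node_inputs, each node's input vector is laid out as
        # [question-word flag (1) | classifier confidences (img_class_sz) | w2v (w2v_sz)]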
        # Doing no w2v as a separate option to make this code a LOT simpler
self.use_w2v = config.use_w2v
if self.use_w2v:
# Create the base node feature matrix
# torch.Tensor of size num_nodes x in_node_dim
# In forward pass, will need to copy this batch_size times and
# convert to cuda
self.base_node_features = torch.zeros(self.num_nodes, self.in_node_dim)
# Copy over w2v
for node_name in node2vec:
# Get w2v, convert to torch, then copy over
w2v = torch.from_numpy(node2vec[node_name])
node_idx = self.name2node_idx[node_name]
self.base_node_features[
node_idx, self.w2v_offset : self.w2v_offset + self.w2v_sz
].copy_(w2v)
else:
self.in_node_dim -= self.w2v_sz
self.base_node_features = torch.zeros(self.num_nodes, self.in_node_dim)
# Init
full_node_dim = self.in_node_dim
special_input_node = False
special_input_sz = None
# If feed_special_node, set inputs to graph network
if (
"feed_special_node" in self.config_extra
and self.config_extra["feed_special_node"]
):
assert not self.config_extra["compress_crossmodel"]
special_input_node = True
special_input_sz = 0
# Get input size
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_logit_to_graph"
):
special_input_sz += self.config.num_labels
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_hid_to_graph"
):
special_input_sz += self.config_extra["vb_hid_sz"]
if (
"feed_q_to_graph" in self.config_extra
and self.config_extra["feed_q_to_graph"]
):
special_input_sz += self.config_extra["q_hid_sz"]
# Otherwise, we feed into every graph node at start
else:
# Add vb conf (just the conf)
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_logit_to_graph"
):
assert not self.config_extra["compress_crossmodel"]
self.vb_offset = self.in_node_dim
full_node_dim += 1
# Add vb vector
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_hid_to_graph"
):
self.vb_offset = self.in_node_dim
if self.config_extra["compress_crossmodel"]:
full_node_dim += self.config_extra["crossmodel_compress_dim"]
                # Make a compress layer (just a linear transform)
self.compress_linear = nn.Linear(
self.config_extra["vb_hid_sz"],
self.config_extra["crossmodel_compress_dim"],
)
else:
full_node_dim += self.config_extra["vb_hid_sz"]
# Add q vector
if (
"feed_q_to_graph" in self.config_extra
and self.config_extra["feed_q_to_graph"]
):
assert not self.config_extra["compress_crossmodel"]
self.q_enc_offset = self.in_node_dim
full_node_dim += self.config_extra["q_hid_sz"]
# Set noback_vb
self.noback_vb = self.config_extra["noback_vb"]
# Convert edge_index and edge_type matrices to torch
# In forward pass, we repeat this by bs and convert to cuda
self.edge_index = torch.from_numpy(self.edge_index)
self.edge_type = torch.from_numpy(self.edge_type)
# These are the forward pass data inputs to graph network
# They are None to start until we know the batch size
self.node_features_forward = None
self.edge_index_forward = None
self.edge_type_forward = None
# Make graph network itself
self.gn = GraphNetwork(
config,
full_node_dim,
self.num_relations,
self.num_nodes,
special_input_node=special_input_node,
special_input_sz=special_input_sz,
)
# Init hidden debug (used for analysis)
self.graph_hidden_debug = None
def get_dataset_info(self, config):
# Load dataset info
dataset_data = torch.load(mmf_indirect(config.dataset_info_path))
# Go through and collect symbol names and confs from our pretrained classifiers
# Hardcoded to the classifiers
qid2qnode = {}
qid2imginfo = {}
for dat in dataset_data:
# Get qid
qid = dat["id"]
# Get q symbols
q_words = list(dat["symbols_q"])
qid2qnode[qid] = q_words
# Get confidences
in_data = dat["in_names_confs"]
in_data = [(name, conf, 0) for name, conf in in_data]
places_data = dat["places_names_confs"]
places_data = [(name, conf, 1) for name, conf in places_data]
lvis_data = dat["lvis_names_confs"]
lvis_data = [(name, conf, 2) for name, conf in lvis_data]
vg_data = dat["vg_names_confs"]
vg_data = [(name, conf, 3) for name, conf in vg_data]
all_image_tuples = in_data + places_data + lvis_data + vg_data
# Make into dict to start (name -> conf Tensor)
img_data = {}
for name, conf, datasetind in all_image_tuples:
# Check if name has been put in yet
if name in img_data:
# If yes, insert new confidence in the right place
# Don't overwrite in same ind unless conf is higher
if conf > img_data[name][datasetind].item():
img_data[name][datasetind] = conf
else:
# Otherwise, all zeros and add conf to the right index
conf_data = torch.zeros(4)
conf_data[datasetind] = conf
img_data[name] = conf_data
# Convert dict to tuples list and add to qid dict
img_data = [(name, img_data[name]) for name in img_data]
qid2imginfo[qid] = img_data
# Convert qid2qnode and qid2imginfo to go from qid -> (name, conf)
# to qid -> (node_idx, conf) and merge q and img info (concat)
name2node_idx = {}
idx = 0
for nodename in self.graph.nodes:
name2node_idx[nodename] = idx
idx += 1
qid2nodeact = {}
img_class_sz = None
for qid in qid2qnode:
# Get words / confs
q_words = qid2qnode[qid] # qid -> [qw_1, qw_2, ...]
# qid -> [(iw_1, conf_c1, conf_c2, ...), ...]
img_info = qid2imginfo[qid]
img_words = [x[0] for x in img_info]
img_confs = [x[1] for x in img_info]
# Get the node feature size
if img_class_sz is None:
# img_class_confs = img_confs[0]
assert type(img_confs[0]) is torch.Tensor
img_class_sz = img_confs[0].size(0)
# We will arrange the node info
# [q, img_class_1_conf, img_class_2_conf ... w2v]
# Add to list
node_info = {} # node_idx -> torch.Tensor(q, ic1, ic2, ...)
for word in q_words:
# Continue if q word is not in the graph
if word not in name2node_idx:
continue
# Add node info
node_idx = name2node_idx[word]
val = torch.zeros(img_class_sz + 1)
val[0] = 1
node_info[node_idx] = val
# Add img info to node info
for word, img_confs_w in zip(img_words, img_confs):
# Continue if img word not in graph
if word not in name2node_idx:
continue
node_idx = name2node_idx[word]
if node_idx in node_info:
# Append class info to existing node info
node_info[node_idx][1:].copy_(img_confs_w)
else:
# Just prepend a zero to the img info (not a question word)
val = torch.zeros(img_class_sz + 1)
val[1:].copy_(img_confs_w)
node_info[node_idx] = val
# Add node info to dict
# This structure will be used to dynamically create node info
# during forward pass
qid2nodeact[qid] = node_info
# Check the average # of node activations is reasonable
num_acts_per_qid = np.mean(
[len(qid2nodeact[qid].keys()) for qid in qid2nodeact]
)
print("Average of %f nodes activated per question" % num_acts_per_qid)
# Return
return name2node_idx, qid2nodeact, img_class_sz
# Get answer info
def get_answer_info(self, config):
# Get answer info
# Recreates mmf answer_vocab here essentially
answer_vocab = VocabDict(mmf_indirect(config.vocab_file))
assert len(answer_vocab) == config.num_labels
# If we're in okvqa v1.0, need to do this a bit differently
if config.okvqa_v_mode in ["v1.0", "v1.0-121", "v1.0-121-mc"]:
# Load the answer translation file (to go from raw strings to
# stemmed in v1.0 vocab)
tx_data = torch.load(mmf_indirect(config.ans_translation_file))
if config.okvqa_v_mode in ["v1.0-121", "v1.0-121-mc"]:
old_graph_vocab = torch.load(mmf_indirect(config.old_graph_vocab_file))
# Get a list of answer node indices
# Important if we want to index those out to (for instance)
# do node classification on them
index_in_ans = []
index_in_node = []
graph_answers = []
nomatch = []
for ans_str in answer_vocab.word2idx_dict:
# Regular, don't worry about 1-1
if config.okvqa_v_mode == "v1.0":
# Convert it to the most common raw answer and
# see if it's in the graph
if ans_str not in tx_data["v10_2_v11_mc"]:
nomatch.append(ans_str)
continue
# Try most common
if tx_data["v10_2_v11_mc"][ans_str] in self.name2node_idx:
# Get raw answer string
raw_ans = tx_data["v10_2_v11_mc"][ans_str]
else:
# Otherwise try all other options
v11_counts = tx_data["v10_2_v11_count"][ans_str]
sorted_counts = sorted(
v11_counts.items(), key=lambda x: x[1], reverse=True
)
raw_ans = None
for k, _ in sorted_counts:
if k in self.name2node_idx:
raw_ans = k
break
# If still no match, continue
if raw_ans is None:
nomatch.append(ans_str)
continue
# Add ans_str to graph answers
graph_answers.append(ans_str)
# Get the node index
# Use the raw name since that's what matches to nodes
node_idx = self.name2node_idx[raw_ans]
index_in_node.append(node_idx)
# Get the vocab index
ans_idx = answer_vocab.word2idx(ans_str)
index_in_ans.append(ans_idx)
else:
# Convert it to the most common raw answer and see if
# it's in the graph
if ans_str not in tx_data["v10_2_v11_mc"]:
nomatch.append(ans_str)
continue
# Try raw too
if config.okvqa_v_mode == "v1.0-121-mc":
# Try most common
if tx_data["v10_2_raw_mc"][ans_str] in self.name2node_idx:
# Get raw answer string
raw_ans = tx_data["v10_2_raw_mc"][ans_str]
else:
# Otherwise try all other options
v11_counts = tx_data["v10_2_raw_count"][ans_str]
sorted_counts = sorted(
v11_counts.items(), key=lambda x: x[1], reverse=True
)
raw_ans = None
for k, _ in sorted_counts:
if k in self.name2node_idx:
raw_ans = k
break
# If still no match, continue
if raw_ans is None:
nomatch.append(ans_str)
continue
else:
# Try most common
if (
tx_data["v10_2_v11_mc"][ans_str] in self.name2node_idx
and tx_data["v10_2_v11_mc"][ans_str] in old_graph_vocab
):
# Get raw answer string
raw_ans = tx_data["v10_2_v11_mc"][ans_str]
else:
# Otherwise try all other options
v11_counts = tx_data["v10_2_v11_count"][ans_str]
sorted_counts = sorted(
v11_counts.items(), key=lambda x: x[1], reverse=True
)
raw_ans = None
for k, _ in sorted_counts:
if k in self.name2node_idx and k in old_graph_vocab:
raw_ans = k
break
# If still no match, continue
if raw_ans is None:
nomatch.append(ans_str)
continue
# Check 1 to 1
if self.name2node_idx[raw_ans] in index_in_node:
if config.okvqa_v_mode == "v1.0-121-mc":
# Check which is more common
assert len(index_in_node) == len(graph_answers)
assert len(index_in_ans) == len(graph_answers)
idx = index_in_node.index(self.name2node_idx[raw_ans])
node_idx = index_in_node[idx]
old_ans_str = graph_answers[idx]
raw_counts = tx_data["v11_2_raw_count"][raw_ans]
assert ans_str in raw_counts and old_ans_str in raw_counts
assert ans_str != old_ans_str
# If new answer more common, go back and replace everything
if raw_counts[ans_str] > raw_counts[old_ans_str]:
assert node_idx == self.name2node_idx[raw_ans]
graph_answers[idx] = ans_str
ans_idx = answer_vocab.word2idx(ans_str)
index_in_ans[idx] = ans_idx
else:
continue
else:
nomatch.append(ans_str)
continue
else:
# Add ans_str to graph answers
graph_answers.append(ans_str)
# Get the node index
# Use the raw name since that's what matches to nodes
node_idx = self.name2node_idx[raw_ans]
index_in_node.append(node_idx)
# Get the vocab index
ans_idx = answer_vocab.word2idx(ans_str)
index_in_ans.append(ans_idx)
print("%d answers not matches" % len(nomatch))
# Get node indices for alphabetized graph answer too
graph_answers = sorted(graph_answers)
graph_ans_node_idx = []
for ans_str in graph_answers:
# Get node index
node_idx = self.name2node_idx[raw_ans]
graph_ans_node_idx.append(node_idx)
else:
assert config.okvqa_v_mode == "v1.1"
# Get a list of answer node indices
# Important if we want to index those out to (for instance)
# do node classification on them
index_in_ans = []
index_in_node = []
graph_answers = []
for ans_str in answer_vocab.word2idx_dict:
# Check if it's in the graph
if ans_str not in self.name2node_idx:
continue
# Add ans_str to graph answers
graph_answers.append(ans_str)
# Get the node index
node_idx = self.name2node_idx[ans_str]
index_in_node.append(node_idx)
# Get the vocab index
ans_idx = answer_vocab.word2idx(ans_str)
index_in_ans.append(ans_idx)
# Get node indices for alphabetized graph answer too
graph_answers = sorted(graph_answers)
graph_ans_node_idx = []
for ans_str in graph_answers:
# Get node index
node_idx = self.name2node_idx[ans_str]
graph_ans_node_idx.append(node_idx)
# Sanity checks
# Should be same length
assert len(index_in_ans) == len(index_in_node)
# And no repeats
assert len(index_in_ans) == len(set(index_in_ans))
if config.okvqa_v_mode != "v1.0":
assert len(index_in_node) == len(set(index_in_node))
assert len(graph_answers) == len(graph_ans_node_idx)
# Check that the overlap is reasonable
num_ans_in_graph = len(index_in_ans)
print("%d answers in graph" % num_ans_in_graph)
# Convert to tensors now
index_in_ans = torch.LongTensor(index_in_ans)
index_in_node = torch.LongTensor(index_in_node)
graph_ans_node_idx = torch.LongTensor(graph_ans_node_idx)
return index_in_ans, index_in_node, graph_answers, graph_ans_node_idx
# Forward function
# Converts from sample_list to the exact structure needed by the graph network
# Assume right now that it's just passed in exactly how
    # I need it and I'll figure it out later
def forward(self, sample_list):
# Get the batch size, qids, and device
qids = sample_list["id"]
batch_size = qids.size(0)
device = qids.device
# First, if this is first forward pass or batch size changed,
# we need to allocate everything
if (
self.node_features_forward is None
or batch_size * self.num_nodes != self.node_features_forward.size(0)
):
# Allocate the data
self.node_features_forward = torch.zeros(
self.num_nodes * batch_size, self.in_node_dim
).to(device)
_, num_edges = self.edge_index.size()
self.edge_index_forward = (
torch.LongTensor(2, num_edges * batch_size).fill_(0).to(device)
)
if self.gn.gcn_type == "RGCN":
self.edge_type_forward = (
torch.LongTensor(num_edges * batch_size).fill_(0).to(device)
)
# Get initial values for data
for batch_ind in range(batch_size):
# Copy base_node_features without modification
self.node_features_forward[
self.num_nodes * batch_ind : self.num_nodes * (batch_ind + 1), :
].copy_(self.base_node_features)
# Copy edge_index, but we add self.num_nodes*batch_ind to every value
# This is equivalent to batch_size independent subgraphs
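            # e.g. with num_nodes = 3, an edge [0, 2] of the shared graph becomes
            # [3, 5] for batch element 1, [6, 8] for batch element 2, and so on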
self.edge_index_forward[
:, batch_ind * num_edges : (batch_ind + 1) * num_edges
].copy_(self.edge_index)
self.edge_index_forward[
:, batch_ind * num_edges : (batch_ind + 1) * num_edges
].add_(batch_ind * self.num_nodes)
# And copy edge_types without modification
if self.gn.gcn_type == "RGCN":
self.edge_type_forward[
batch_ind * num_edges : (batch_ind + 1) * num_edges
].copy_(self.edge_type)
# Zero fill the confidences for node features
assert (
self.w2v_offset is not None
and self.q_offset is not None
and self.img_offset is not None
)
assert self.w2v_offset > 0
self.node_features_forward[:, : self.w2v_offset].zero_()
        # If not using confidences, just leave these values at zero
if not self.config.use_conf:
pass
elif not self.config.use_q:
assert self.config.use_img
# Fill in the new confidences for this batch based on qid
all_node_idx = []
for batch_ind, qid in enumerate(qids):
# Fill in the activated nodes into node_features
# These always start at zero
node_info = self.qid2nodeact[qid.item()]
for node_idx in node_info:
node_val = node_info[node_idx]
# Zero-out q
node_val[0] = 0
self.node_features_forward[
self.num_nodes * batch_ind + node_idx,
: self.img_offset + self.img_class_sz,
].copy_(node_val)
all_node_idx.append(node_idx)
elif not self.config.use_img:
# Fill in the new confidences for this batch based on qid
all_node_idx = []
for batch_ind, qid in enumerate(qids):
# Fill in the activated nodes into node_features
# These always start at zero
node_info = self.qid2nodeact[qid.item()]
for node_idx in node_info:
node_val = node_info[node_idx]
# Zero-out img
node_val[1] = 0
node_val[2] = 0
node_val[3] = 0
node_val[4] = 0
self.node_features_forward[
self.num_nodes * batch_ind + node_idx,
: self.img_offset + self.img_class_sz,
].copy_(node_val)
all_node_idx.append(node_idx)
elif self.config.use_partial_img:
# Get the index of image we're keeping
# For all confs except partial_img_idx, fill in 0's
assert self.config.partial_img_idx in [0, 1, 2, 3]
# Fill in the new confidences for this batch based on qid
all_node_idx = []
for batch_ind, qid in enumerate(qids):
# Fill in the activated nodes into node_features
# These always start at zero
node_info = self.qid2nodeact[qid.item()]
for node_idx in node_info:
node_val = node_info[node_idx]
# Zero-out img (except for one)
db_count = 0
if self.config.partial_img_idx != 0:
node_val[1] = 0
db_count += 1
if self.config.partial_img_idx != 1:
node_val[2] = 0
db_count += 1
if self.config.partial_img_idx != 2:
node_val[3] = 0
db_count += 1
if self.config.partial_img_idx != 3:
node_val[4] = 0
db_count += 1
assert db_count == 3
self.node_features_forward[
self.num_nodes * batch_ind + node_idx,
: self.img_offset + self.img_class_sz,
].copy_(node_val)
all_node_idx.append(node_idx)
else:
# Fill in the new confidences for this batch based on qid
all_node_idx = []
for batch_ind, qid in enumerate(qids):
# Fill in the activated nodes into node_features
# These always start at zero
node_info = self.qid2nodeact[qid.item()]
for node_idx in node_info:
node_val = node_info[node_idx]
self.node_features_forward[
self.num_nodes * batch_ind + node_idx,
: self.img_offset + self.img_class_sz,
].copy_(node_val)
all_node_idx.append(node_idx)
# If necessary, pass in "output nodes" depending on output calculation
# This for instance tells the gn which nodes to subsample
if self.gn.output_type == "graph_level_ansonly":
output_nodes = self.index_in_node # These are node indices that are answers
elif self.gn.output_type == "graph_level_inputonly":
output_nodes = torch.LongTensor(
all_node_idx
) # These are all non-zero nodes for the question
else:
output_nodes = None
# If we're feeding in special node, need a different forward pass into self.gn
if (
"feed_special_node" in self.config_extra
and self.config_extra["feed_special_node"]
):
# Get special_node_input
# Add vb conf (just the conf)
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_logit_to_graph"
):
# Go through answer vocab and copy conf into it
if self.noback_vb:
vb_logits = sample_list["vb_logits"].detach()
else:
vb_logits = sample_list["vb_logits"]
special_node_input = torch.sigmoid(vb_logits)
# Add vb feats
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_hid_to_graph"
):
if self.noback_vb:
special_node_input = sample_list["vb_hidden"].detach()
else:
special_node_input = sample_list["vb_hidden"]
# Add q enc feats
if (
"feed_q_to_graph" in self.config_extra
and self.config_extra["feed_q_to_graph"]
):
special_node_input = sample_list["q_encoded"]
# Do actual graph forward pass
if self.gn.gcn_type == "RGCN":
output, spec_out = self.gn(
self.node_features_forward,
self.edge_index_forward,
self.edge_type_forward,
batch_size=batch_size,
output_nodes=output_nodes,
special_node_input=special_node_input,
)
elif self.gn.gcn_type in ["GCN", "SAGE"]:
output, spec_out = self.gn(
self.node_features_forward,
self.edge_index_forward,
batch_size=batch_size,
output_nodes=output_nodes,
special_node_input=special_node_input,
)
# Otherwise, proceed normally
else:
# Build node_forward
# Concat other stuff onto it
node_feats_tmp = self.node_features_forward
# Add other input types
# Add vb conf (just the conf)
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_logit_to_graph"
):
assert not self.config_extra["compress_crossmodel"]
# Go through answer vocab and copy conf into it
node_feats_tmp = node_feats_tmp.reshape(
(batch_size, self.num_nodes, -1)
)
if self.noback_vb:
vb_logits = sample_list["vb_logits"].detach()
else:
vb_logits = sample_list["vb_logits"]
vb_confs = torch.sigmoid(vb_logits)
vb_confs_graphindexed = torch.zeros(batch_size, self.num_nodes).to(
device
)
vb_confs_graphindexed[:, self.index_in_node] = vb_confs[
:, self.index_in_ans
]
node_feats_tmp = torch.cat(
[node_feats_tmp, vb_confs_graphindexed.unsqueeze(2)], dim=2
)
node_feats_tmp = node_feats_tmp.reshape(
(batch_size * self.num_nodes, -1)
)
# Add vb feats
if (
"feed_vb_to_graph" in self.config_extra
and self.config_extra["feed_vb_to_graph"]
and self.config_extra["feed_mode"] == "feed_vb_hid_to_graph"
):
node_feats_tmp = node_feats_tmp.reshape(
(batch_size, self.num_nodes, -1)
)
# Optionally compress vb_hidden
if self.noback_vb:
vb_hid = sample_list["vb_hidden"].detach()
else:
vb_hid = sample_list["vb_hidden"]
if self.config_extra["compress_crossmodel"]:
vb_hid = F.relu(self.compress_linear(vb_hid))
node_feats_tmp = torch.cat(
[
node_feats_tmp,
vb_hid.unsqueeze(1).repeat((1, self.num_nodes, 1)),
],
dim=2,
)
node_feats_tmp = node_feats_tmp.reshape(
(batch_size * self.num_nodes, -1)
)
# Add q enc feats
if (
"feed_q_to_graph" in self.config_extra
and self.config_extra["feed_q_to_graph"]
):
assert not self.config_extra["compress_crossmodel"]
node_feats_tmp = node_feats_tmp.reshape(
(batch_size, self.num_nodes, -1)
)
node_feats_tmp = torch.cat(
[
node_feats_tmp,
sample_list["q_encoded"]
.unsqueeze(1)
.repeat((1, self.num_nodes, 1)),
],
dim=2,
)
node_feats_tmp = node_feats_tmp.reshape(
(batch_size * self.num_nodes, -1)
)
# Do actual graph forward pass
if self.gn.gcn_type == "RGCN":
output, spec_out = self.gn(
node_feats_tmp,
self.edge_index_forward,
self.edge_type_forward,
batch_size=batch_size,
output_nodes=output_nodes,
)
elif self.gn.gcn_type in ["GCN", "SAGE"]:
output, spec_out = self.gn(
node_feats_tmp,
self.edge_index_forward,
batch_size=batch_size,
output_nodes=output_nodes,
)
# Do any reindexing we need
if self.config.output_type == "hidden_ans":
            # Outputs graph hidden features, but re-indexes them to answer vocab
# Same as graph_prediction, but before final prediction
assert output.size(1) == self.num_nodes
assert output.size(2) == self.config.node_hid_dim
assert output.dim() == 3
# If in graph_analysis mode, save the hidden states here
if self.config_extra["analysis_mode"]:
self.graph_hidden_debug = output
# Reindex to match with self.graph_vocab
if self.config.output_order == "alpha":
output = output[:, self.graph_ans_node_idx, :]
assert output.size(1) == len(self.graph_answers)
else:
assert self.config.output_order == "ans"
# Re-index into answer_vocab
outputs_tmp = torch.zeros(
batch_size, self.config.num_labels, self.config.node_hid_dim
).to(device)
outputs_tmp[:, self.index_in_ans, :] = output[:, self.index_in_node, :]
output = outputs_tmp
elif self.config.output_type in [
"graph_level",
"graph_level_ansonly",
"graph_level_inputonly",
]:
pass
            # Do nothing here, fc will happen later
else:
assert self.config.output_type == "graph_prediction"
# Output is size of graph
assert output.size(1) == self.num_nodes
assert output.dim() == 2
# Re-index
if self.config.output_order == "alpha":
output = output[:, self.graph_ans_node_idx]
assert output.size(1) == len(self.graph_answers)
else:
assert self.config.output_order == "ans"
# Re-index into answer_vocab
logits = (
torch.zeros(batch_size, self.config.num_labels)
.fill_(-1e3)
.to(device)
)
logits[:, self.index_in_ans] = output[:, self.index_in_node]
output = logits
# If we generated a spec_out in graph network, put in sample
# list for other modules to use
if spec_out is not None:
sample_list["graph_special_node_out"] = spec_out
return output
# Add stuff to output for various analysis
def add_analysis_to_output(self, output):
        # Add networkx graph so we can see which nodes were activated / see subgraphs
output["graph"] = self.graph
output["graph_idx"] = self.graph_idx
# Add structs so we can easily convert between vocabs
output["name2node_idx"] = self.name2node_idx
output["node_acts"] = self.qid2nodeact
output["index_in_ans"] = self.index_in_ans
output["index_in_node"] = self.index_in_node
output["graph_answers"] = self.graph_answers
output["graph_ans_node_idx"] = self.graph_ans_node_idx
output["graph_hidden_act"] = self.graph_hidden_debug.cpu()
# Return output with new keys
return output
# Graph network
class GraphNetwork(nn.Module):
def __init__(
self,
config,
in_node_dim,
num_relations,
num_nodes,
special_input_node=False,
special_input_sz=None,
):
super().__init__()
# Get/set parameters
self.num_relations = num_relations
self.num_nodes = num_nodes
# Passed in from GraphNetworkModule which constructs the input features
self.in_node_dim = in_node_dim
self.node_hid_dim = config.node_hid_dim
self.num_gcn_conv = config.num_gcn_conv
self.use_bn = config.use_batch_norm
self.use_drop = config.use_dropout
self.output_type = config.output_type
self.gcn_type = config.gcn_type
if self.use_drop:
self.drop_p = config.dropout_p
if "output_dim" in config:
self.output_dim = config.output_dim
else:
self.output_dim = self.node_hid_dim
self.special_input_node = special_input_node
self.special_input_sz = special_input_sz
self.output_special_node = config.output_special_node
# Make GCN and batchnorm layers
if self.num_gcn_conv >= 1:
# Try to add CompGCN at some point
if self.gcn_type == "RGCN":
self.conv1 = RGCNConv(
self.in_node_dim,
self.node_hid_dim,
self.num_relations,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv1 = GCNConv(self.in_node_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv1 = SAGEConv(self.in_node_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
if self.num_gcn_conv >= 2:
if self.use_bn:
self.bn1 = BatchNorm(self.node_hid_dim)
if self.gcn_type == "RGCN":
self.conv2 = RGCNConv(
self.node_hid_dim,
self.node_hid_dim,
self.num_relations,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv2 = GCNConv(self.node_hid_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv2 = SAGEConv(self.node_hid_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
if self.num_gcn_conv >= 3:
if self.use_bn:
self.bn2 = BatchNorm(self.node_hid_dim)
if self.gcn_type == "RGCN":
self.conv3 = RGCNConv(
self.node_hid_dim,
self.node_hid_dim,
self.num_relations,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv3 = GCNConv(self.node_hid_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv3 = SAGEConv(self.node_hid_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
if self.num_gcn_conv >= 4:
if self.use_bn:
self.bn3 = BatchNorm(self.node_hid_dim)
if self.gcn_type == "RGCN":
self.conv4 = RGCNConv(
self.node_hid_dim,
self.node_hid_dim,
self.num_relations,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv4 = GCNConv(self.node_hid_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv4 = SAGEConv(self.node_hid_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
if self.num_gcn_conv >= 5:
if self.use_bn:
self.bn4 = BatchNorm(self.node_hid_dim)
if self.gcn_type == "RGCN":
self.conv5 = RGCNConv(
self.node_hid_dim,
self.node_hid_dim,
self.num_relations,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv5 = GCNConv(self.node_hid_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv5 = SAGEConv(self.node_hid_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
if self.num_gcn_conv >= 6:
if self.use_bn:
self.bn5 = BatchNorm(self.node_hid_dim)
if self.gcn_type == "RGCN":
self.conv6 = RGCNConv(
self.node_hid_dim,
self.node_hid_dim,
self.num_relations,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv6 = GCNConv(self.node_hid_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv6 = SAGEConv(self.node_hid_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
if self.num_gcn_conv >= 7:
raise Exception("Did not implement %d gcn layers yet" % self.num_gcn_conv)
# Add special node for input/output collection
if self.output_special_node or self.special_input_node:
            # For special in (not mutually exclusive to special out)
            # Make a linear encoder to fit into node hid size
if self.special_input_node:
self.spec_input_fc = nn.Linear(self.special_input_sz, self.node_hid_dim)
# Make graph conv (and transfer layers)
# Add one to num_rels since we have a special relation for this
if self.use_bn:
self.bn_spec = BatchNorm(self.node_hid_dim)
if self.gcn_type == "RGCN":
self.conv_spec = RGCNConv(
self.node_hid_dim,
self.node_hid_dim,
self.num_relations + 1,
num_bases=None,
)
elif self.gcn_type == "GCN":
self.conv_spec = GCNConv(self.node_hid_dim, self.node_hid_dim)
elif self.gcn_type == "SAGE":
self.conv_spec = SAGEConv(self.node_hid_dim, self.node_hid_dim)
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
# On first pass, populate this and convert to cuda
# Connects all node indices to the special node via a "special" edge type
self.edge_index_special = None
self.edge_type_special = None
self.special_bs = None
# Set output network
if self.output_type in ["hidden", "hidden_subindex", "hidden_ans"]:
# Don't really need anything here, either passing all of G,
# or G for particular indices
pass
elif self.output_type in [
"graph_level",
"graph_level_ansonly",
"graph_level_inputonly",
]:
            # Will first predict a logit for each node, then do a
            # softmax-weighted sum of all node features
self.logit_pred = nn.Linear(
self.node_hid_dim, 1
) # Predicts hid_dim -> 1, which then gets passed into softmax
self.feat_layer = nn.Linear(self.node_hid_dim, self.output_dim)
elif self.output_type in ["graph_prediction"]:
# Need just a final logits prediction
self.logit_pred = nn.Linear(self.node_hid_dim, 1)
else:
raise Exception(
"Output type %s is not implemented right now" % self.output_type
)
def forward(
self,
x,
edge_index,
edge_type=None,
batch_size=1,
output_nodes=None,
special_node_input=None,
):
        # x is the input node features, (num_nodes * batch_size) x in_feat
        # edge_index is a 2 x num_edges matrix of which nodes each edge connects
        # edge_type is a num_edges vector giving the relation type of each edge
if self.num_nodes is not None:
assert x.size(0) == self.num_nodes * batch_size
# Set optional spec_out to None
spec_out = None
# Check type and inputs match
if self.gcn_type == "RGCN":
assert edge_type is not None
elif self.gcn_type in ["GCN", "SAGE"]:
assert edge_type is None
else:
raise Exception("GCN type %s not implemented" % self.gcn_type)
# First GCN conv
if edge_type is not None:
x = self.conv1(x, edge_index, edge_type)
else:
x = self.conv1(x, edge_index)
if self.num_gcn_conv > 1:
# Transfer layers + bn/drop
if self.use_bn:
x = self.bn1(x)
x = F.relu(x)
if self.use_drop:
x = F.dropout(x, p=self.drop_p, training=self.training)
# Second layer
if edge_type is not None:
x = self.conv2(x, edge_index, edge_type)
else:
x = self.conv2(x, edge_index)
if self.num_gcn_conv > 2:
# Transfer layers + bn/drop
if self.use_bn:
x = self.bn2(x)
x = F.relu(x)
if self.use_drop:
x = F.dropout(x, p=self.drop_p, training=self.training)
# Third layer
if edge_type is not None:
x = self.conv3(x, edge_index, edge_type)
else:
x = self.conv3(x, edge_index)
if self.num_gcn_conv > 3:
# Transfer layers + bn/drop
if self.use_bn:
x = self.bn3(x)
x = F.relu(x)
if self.use_drop:
x = F.dropout(x, p=self.drop_p, training=self.training)
                    # Fourth layer
if edge_type is not None:
x = self.conv4(x, edge_index, edge_type)
else:
x = self.conv4(x, edge_index)
if self.num_gcn_conv > 4:
# Transfer layers + bn/drop
if self.use_bn:
x = self.bn4(x)
x = F.relu(x)
if self.use_drop:
x = F.dropout(x, p=self.drop_p, training=self.training)
                        # Fifth layer
if edge_type is not None:
x = self.conv5(x, edge_index, edge_type)
else:
x = self.conv5(x, edge_index)
if self.num_gcn_conv > 5:
# Transfer layers + bn/drop
if self.use_bn:
x = self.bn5(x)
x = F.relu(x)
if self.use_drop:
x = F.dropout(x, p=self.drop_p, training=self.training)
                            # Sixth layer
if edge_type is not None:
x = self.conv6(x, edge_index, edge_type)
else:
x = self.conv6(x, edge_index)
assert self.num_gcn_conv <= 6
# Add special conv layer for special node input/output
if self.output_special_node or self.special_input_node:
# Encode special input
if self.special_input_node:
assert special_node_input is not None
special_node_input = self.spec_input_fc(special_node_input)
# Or zero-pad it
else:
special_node_input = torch.zeros(batch_size, self.node_hid_dim).to(
x.device
)
# Create special edge_index, edge_type matrices
if self.edge_index_special is None or self.special_bs != batch_size:
# Set special_bs
                # This makes sure the prebuilt edge_index/type has the right batch size
self.special_bs = batch_size
# Figure out the special node edges
# Do bidirectional just to be safe
spec_edges = []
for batch_ind in range(batch_size):
spec_node_idx = self.num_nodes * batch_size + batch_ind
spec_edges += [
[node_idx, spec_node_idx]
for node_idx in range(
self.num_nodes * batch_ind, self.num_nodes * (batch_ind + 1)
)
]
spec_edges += [
[spec_node_idx, node_idx]
for node_idx in range(
self.num_nodes * batch_ind, self.num_nodes * (batch_ind + 1)
)
]
assert len(spec_edges) == self.num_nodes * batch_size * 2
self.edge_index_special = (
torch.LongTensor(spec_edges).transpose(0, 1).to(x.device)
)
# Make edge type (if necessary)
if self.gcn_type == "RGCN":
self.edge_type_special = (
torch.LongTensor(len(spec_edges))
.fill_(self.num_relations)
.to(x.device)
) # edge type is special n+1 edge type
# Forward through final special conv
# Transfer layers + bn/drop
if self.use_bn:
x = self.bn_spec(x)
x = F.relu(x)
if self.use_drop:
x = F.dropout(x, p=self.drop_p, training=self.training)
# Special conv layer
edge_index_tmp = torch.cat([edge_index, self.edge_index_special], dim=1)
x = torch.cat([x, special_node_input], dim=0)
if edge_type is not None:
edge_type_tmp = torch.cat([edge_type, self.edge_type_special], dim=0)
x = self.conv_spec(x, edge_index_tmp, edge_type_tmp)
else:
x = self.conv_spec(x, edge_index_tmp)
# Output
if self.num_nodes is not None:
assert x.size(0) == self.num_nodes * batch_size + batch_size
# If it's output special, get the output as those special
# node hidden states
if self.output_special_node:
# Should be just the last (batch_size) nodes
spec_out = x[self.num_nodes * batch_size :]
assert spec_out.size(0) == batch_size
# Otherwise, we want to remove the last batch_size nodes
# (since we don't use them)
x = x[: self.num_nodes * batch_size]
assert x.size(0) == self.num_nodes * batch_size
# Reshape output to batch size now
# For dynamic graph, we don't do the reshape. It's the class
# above's job to reshape this properly
if self.num_nodes is not None:
x = x.reshape(batch_size, self.num_nodes, self.node_hid_dim)
# Prepare final output
if self.output_type in ["hidden", "hidden_ans", "hidden_subindex"]:
# Don't really need anything here, either passing all of G
# Subindexing happens a level up
pass
elif self.output_type in [
"graph_level",
"graph_level_ansonly",
"graph_level_inputonly",
]:
# Relu
x = F.relu(x)
# Check shape of x is num_nodes x hid_size
assert x.shape[2] == self.node_hid_dim
# For ansonly or inputonly, use input output_nodes (LongTensor) to reindex x
if self.output_type in ["graph_level_ansonly", "graph_level_inputonly"]:
assert output_nodes is not None
x = x[:, output_nodes, :]
bs, num_node, _ = x.shape
x = x.reshape(bs * num_node, self.node_hid_dim)
# Get feat
feat = self.feat_layer(x)
feat = feat.reshape(bs, num_node, self.output_dim)
# Forward through linear to 1
logit = self.logit_pred(x)
logit = logit.reshape(bs, num_node)
            logit = F.softmax(logit, dim=1)  # softmax over the node dimension
# Get weighted sum of x!
x = torch.bmm(logit.unsqueeze(1), feat).squeeze()
elif self.output_type in ["graph_prediction"]:
# Need just a final logits prediction
x = F.relu(x)
x = self.logit_pred(x)
x = x.squeeze() # Remove final dim
else:
raise Exception("output type not known %s" % self.output_type)
# Return output
return x, spec_out
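# Minimal usage sketch for GraphNetwork, assuming torch_geometric is installed.
# The config fields mirror the ones read in __init__; the dimensions and the
# chain-shaped toy graph below are illustrative assumptions only; in KRISP the
# real inputs are constructed by GraphNetworkModule from its knowledge graph.
if __name__ == "__main__":
    from omegaconf import OmegaConf
    toy_cfg = OmegaConf.create(
        {
            "node_hid_dim": 64,
            "num_gcn_conv": 2,
            "use_batch_norm": False,
            "use_dropout": False,
            "output_type": "hidden",
            "gcn_type": "GCN",
            "output_special_node": False,
        }
    )
    batch_size, num_nodes, in_dim = 2, 10, 300
    gn = GraphNetwork(toy_cfg, in_dim, num_relations=1, num_nodes=num_nodes)
    # Node features are stacked per sample: (batch_size * num_nodes, in_dim)
    x = torch.rand(batch_size * num_nodes, in_dim)
    # Simple chain edges inside each sample's copy of the graph
    edges = []
    for b in range(batch_size):
        offset = b * num_nodes
        edges += [[offset + i, offset + i + 1] for i in range(num_nodes - 1)]
    edge_index = torch.tensor(edges, dtype=torch.long).t()
    out, spec = gn(x, edge_index, batch_size=batch_size)
    print(out.shape)  # torch.Size([2, 10, 64]) for output_type == "hidden"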
| EXA-1-master | exa/models/mmf-main/projects/krisp/graphnetwork_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# install `vqa-maskrcnn-benchmark` from
# https://github.com/ronghanghu/vqa-maskrcnn-benchmark-m4c
import argparse
import os
import sys
import cv2
import numpy as np
import torch
import tqdm
from PIL import Image
# Make the local checkout importable before importing maskrcnn_benchmark
sys.path.append("/private/home/ronghanghu/workspace/vqa-maskrcnn-benchmark")  # NoQA
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import nms
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
def load_detection_model(yaml_file, yaml_ckpt):
cfg.merge_from_file(yaml_file)
cfg.freeze()
model = build_detection_model(cfg)
checkpoint = torch.load(yaml_ckpt, map_location=torch.device("cpu"))
load_state_dict(model, checkpoint.pop("model"))
model.to("cuda")
model.eval()
return model
def _image_transform(image_path):
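    # Detectron-style preprocessing: convert to BGR, subtract the pixel means,
    # and rescale so the short side becomes 800 px while capping the long side
    # at 1333 px; returns the CHW tensor and the scale factor used.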
img = Image.open(image_path)
im = np.array(img).astype(np.float32)
# handle a few corner cases
if im.ndim == 2: # gray => RGB
im = np.tile(im[:, :, None], (1, 1, 3))
if im.shape[2] > 3: # RGBA => RGB
im = im[:, :, :3]
im = im[:, :, ::-1] # RGB => BGR
im -= np.array([102.9801, 115.9465, 122.7717])
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(800) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > 1333:
im_scale = float(1333) / float(im_size_max)
im = cv2.resize(
im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
)
img = torch.from_numpy(im).permute(2, 0, 1)
return img, im_scale
def _process_feature_extraction(output, im_scales, feat_name="fc6"):
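    # Per image: softmax the class scores, run class-wise NMS (IoU 0.5) over
    # the proposals, keep each box's highest surviving class confidence, and
    # return features/bboxes for the top-100 boxes ranked by that confidence.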
batch_size = len(output[0]["proposals"])
n_boxes_per_image = [len(_) for _ in output[0]["proposals"]]
score_list = output[0]["scores"].split(n_boxes_per_image)
score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]
feats = output[0][feat_name].split(n_boxes_per_image)
cur_device = score_list[0].device
feat_list = []
bbox_list = []
for i in range(batch_size):
dets = output[0]["proposals"][i].bbox / im_scales[i]
scores = score_list[i]
max_conf = torch.zeros(scores.shape[0]).to(cur_device)
for cls_ind in range(1, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.5)
max_conf[keep] = torch.where(
cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep]
)
keep_boxes = torch.argsort(max_conf, descending=True)[:100]
feat_list.append(feats[i][keep_boxes])
bbox_list.append(output[0]["proposals"][i].bbox[keep_boxes])
return feat_list, bbox_list
def extract_features(detection_model, image_path, input_boxes=None, feat_name="fc6"):
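    # Two modes: with input_boxes=None the detector proposes its own regions
    # and the top-100 survivors of class-wise NMS are kept; with explicit
    # input_boxes (e.g. OCR boxes) features are pooled for exactly those boxes.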
im, im_scale = _image_transform(image_path)
if input_boxes is not None:
if isinstance(input_boxes, np.ndarray):
input_boxes = torch.from_numpy(input_boxes.copy())
input_boxes *= im_scale
img_tensor, im_scales = [im], [im_scale]
current_img_list = to_image_list(img_tensor, size_divisible=32)
current_img_list = current_img_list.to("cuda")
with torch.no_grad():
output = detection_model(current_img_list, input_boxes=input_boxes)
if input_boxes is None:
feat_list, bbox_list = _process_feature_extraction(output, im_scales, feat_name)
feat = feat_list[0].cpu().numpy()
bbox = bbox_list[0].cpu().numpy() / im_scale
else:
feat = output[0][feat_name].cpu().numpy()
bbox = output[0]["proposals"][0].bbox.cpu().numpy() / im_scale
return feat, bbox
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--detection_cfg",
type=str,
default="/private/home/ronghanghu/workspace/pythia/data/"
+ "frcn_feature_extraction/detectron_model.yaml",
help="Detectron config file; download it from "
+ "https://dl.fbaipublicfiles.com/pythia/detectron_model/"
+ "detectron_model.yaml",
)
parser.add_argument(
"--detection_model",
type=str,
default="/private/home/ronghanghu/workspace/pythia/data/"
+ "frcn_feature_extraction/detectron_model.pth",
help="Detectron model file; download it"
+ " from https://dl.fbaipublicfiles.com/pythia/detectron_model/"
+ "detectron_model.pth",
)
parser.add_argument(
"--imdb_file",
type=str,
default="/private/home/ronghanghu/workspace/pythia/data/"
+ "imdb/m4c_textvqa/imdb_train_ocr_en.npy",
help="The imdb to extract features",
)
parser.add_argument(
"--image_dir",
type=str,
default="/private/home/ronghanghu/workspace/DATASETS/TextVQA",
help="The directory containing images",
)
parser.add_argument(
"--save_dir",
type=str,
default="/private/home/ronghanghu/workspace/pythia/data/"
+ "m4c_textvqa_ocr_en_frcn_features_2/train_images",
help="The directory to save extracted features",
)
args = parser.parse_args()
DETECTION_YAML = args.detection_cfg
DETECTION_CKPT = args.detection_model
IMDB_FILE = args.imdb_file
IMAGE_DIR = args.image_dir
SAVE_DIR = args.save_dir
imdb = np.load(IMDB_FILE, allow_pickle=True)[1:]
# keep only one entry per image_id
image_id2info = {info["image_id"]: info for info in imdb}
imdb = list(image_id2info[k] for k in sorted(image_id2info))
detection_model = load_detection_model(DETECTION_YAML, DETECTION_CKPT)
print("Faster R-CNN OCR features")
print("\textracting from", IMDB_FILE)
print("\tsaving to", SAVE_DIR)
for _, info in enumerate(tqdm.tqdm(imdb)):
image_path = os.path.join(IMAGE_DIR, info["image_path"])
save_feat_path = os.path.join(SAVE_DIR, info["feature_path"])
save_info_path = save_feat_path.replace(".npy", "_info.npy")
os.makedirs(os.path.dirname(save_feat_path), exist_ok=True)
w = info["image_width"]
h = info["image_height"]
ocr_normalized_boxes = np.array(info["ocr_normalized_boxes"])
ocr_boxes = ocr_normalized_boxes.reshape(-1, 4) * [w, h, w, h]
ocr_tokens = info["ocr_tokens"]
if len(ocr_boxes) > 0:
extracted_feat, _ = extract_features(
detection_model, image_path, input_boxes=ocr_boxes
)
else:
extracted_feat = np.zeros((0, 2048), np.float32)
np.save(save_info_path, {"ocr_boxes": ocr_boxes, "ocr_tokens": ocr_tokens})
np.save(save_feat_path, extracted_feat)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/mmf-main/projects/m4c/scripts/extract_ocr_frcn_feature.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/projects/m4c/scripts/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import contextlib
import itertools
import json
import os
import platform
import random
import socket
import tempfile
import unittest
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional
import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample, SampleList
from mmf.models.base_model import BaseModel
from mmf.utils.general import get_current_device
from omegaconf import OmegaConf
from torch import Tensor
def compare_tensors(a, b):
return torch.equal(a, b)
def dummy_args(model="cnn_lstm", dataset="clevr", config=None):
if config is None:
config = os.path.join("configs", "defaults.yaml")
args = argparse.Namespace()
args.opts = [
f"model={model}",
f"dataset={dataset}",
f"datasets={dataset}",
f"config={config}",
]
args.config_override = None
return args
def is_network_reachable():
try:
# check if host name can be resolved
host = socket.gethostbyname("one.one.one.one")
# check if host is actually reachable
s = socket.create_connection((host, 80), 2)
s.close()
return True
except OSError as e:
if e.errno == 101:
pass
return False
NETWORK_AVAILABLE = is_network_reachable()
CUDA_AVAILBLE = torch.cuda.is_available()
def is_fb():
return (
os.getenv("SANDCASTLE") == "1"
or os.getenv("TW_JOB_USER") == "sandcastle"
or (
socket.gethostname().startswith("dev")
and not socket.gethostname().startswith("devfair")
)
or "fbinfra" in socket.gethostname()
)
def skip_if_no_network(testfn, reason="Network is not available"):
return unittest.skipUnless(NETWORK_AVAILABLE, reason)(testfn)
def skip_if_no_cuda(testfn, reason="Cuda is not available"):
return unittest.skipUnless(CUDA_AVAILBLE, reason)(testfn)
def skip_if_windows(testfn, reason="Doesn't run on Windows"):
return unittest.skipIf("Windows" in platform.system(), reason)(testfn)
def skip_if_macos(testfn, reason="Doesn't run on MacOS"):
return unittest.skipIf("Darwin" in platform.system(), reason)(testfn)
def skip_if_non_fb(testfn, reason="Doesn't run on non FB infra"):
return unittest.skipUnless(is_fb(), reason)(testfn)
def skip_if_old_transformers(min_version="4.5.0"):
def wrap(testfn, reason="Requires newer version of transformers"):
from packaging import version
try:
from transformers3 import __version__ as transformers_version
except ImportError:
from transformers import __version__ as transformers_version
return unittest.skipUnless(
version.parse(transformers_version) >= version.parse(min_version), reason
)(testfn)
return wrap
def skip_if_no_pytorchvideo(testfn, reason="Requires pytorchvideo"):
import importlib
pytorchvideo_spec = importlib.util.find_spec("pytorchvideo")
return unittest.skipIf(pytorchvideo_spec is None, reason)(testfn)
def compare_state_dicts(a, b):
same = True
same = same and (list(a.keys()) == list(b.keys()))
if not same:
return same
for val1, val2 in zip(a.values(), b.values()):
if isinstance(val1, torch.Tensor):
same = same and compare_tensors(val1, val2)
elif not isinstance(val2, torch.Tensor):
same = same and val1 == val2
else:
same = False
if not same:
return same
return same
@contextlib.contextmanager
def make_temp_dir():
temp_dir = tempfile.TemporaryDirectory()
try:
yield temp_dir.name
finally:
# Don't clean up on Windows, as it always results in an error
if "Windows" not in platform.system():
temp_dir.cleanup()
def build_random_sample_list():
first = Sample()
first.x = random.randint(0, 100)
first.y = torch.rand((5, 4))
first.z = Sample()
first.z.x = random.randint(0, 100)
first.z.y = torch.rand((6, 4))
second = Sample()
second.x = random.randint(0, 100)
second.y = torch.rand((5, 4))
second.z = Sample()
second.z.x = random.randint(0, 100)
second.z.y = torch.rand((6, 4))
return SampleList([first, second])
DATA_ITEM_KEY = "test"
class NumbersDataset(torch.utils.data.Dataset):
def __init__(
self,
num_examples: int,
data_item_key: str = DATA_ITEM_KEY,
always_one: bool = False,
dataset_type: str = "train",
):
self.num_examples = num_examples
self.data_item_key = data_item_key
self.always_one = always_one
self.dataset_name = "numbers"
self.dataset_type = dataset_type
def __getitem__(self, idx: int) -> Sample:
sample = Sample()
sample[self.data_item_key] = torch.tensor(idx, dtype=torch.float32).unsqueeze(
-1
)
if self.always_one:
sample.targets = torch.tensor(0, dtype=torch.long)
return sample
def __len__(self) -> int:
return self.num_examples
@registry.register_model("simple_model")
class SimpleModel(BaseModel):
@dataclass
class Config(BaseModel.Config):
in_dim: int = 1
out_dim: int = 1
data_item_key: str = DATA_ITEM_KEY
def __init__(self, config: Config, *args, **kwargs):
config = OmegaConf.merge(OmegaConf.structured(self.Config), config)
super().__init__(config)
self.data_item_key = config.data_item_key
def build(self):
self.classifier = torch.nn.Linear(self.config.in_dim, self.config.out_dim)
def forward(self, prepared_batch: Dict[str, Tensor]):
input_sample = SampleList(prepared_batch)
batch = prepared_batch[self.data_item_key]
output = self.classifier(batch)
loss = torch.nn.MSELoss()(-1 * output, batch)
return {
"losses": {"loss": loss},
"logits": output,
"scores": output,
"input_batch": input_sample,
"dataset_type": input_sample["dataset_type"],
"dataset_name": input_sample["dataset_name"],
}
class SimpleNaNLossModel(SimpleModel):
def forward(self, prepared_batch: Dict[str, Tensor]):
report = super().forward(prepared_batch)
report["losses"]["loss"] /= 0.0 # create an NaN loss
return report
@registry.register_model("simple_lightning_model")
class SimpleLightningModel(SimpleModel):
def __init__(self, config: SimpleModel.Config):
super().__init__(config)
def build_meters(self, run_type):
from mmf.utils.build import build_meters
self.train_meter, self.val_meter, self.test_meter = build_meters(run_type)
def configure_optimizers(self):
config = registry.get("config")
if config is None:
return torch.optim.Adam(self.parameters(), lr=0.01)
else:
from mmf.utils.build import build_lightning_optimizers
return build_lightning_optimizers(self, config)
def assertModulesEqual(mod1, mod2):
for p1, p2 in itertools.zip_longest(mod1.parameters(), mod2.parameters()):
return p1.equal(p2)
def setup_proxy():
# Enable proxy in FB dev env
if not is_network_reachable() and (
os.getenv("SANDCASTLE") == "1"
or os.getenv("TW_JOB_USER") == "sandcastle"
or socket.gethostname().startswith("dev")
or "fbinfra" in socket.gethostname()
):
os.environ["HTTPS_PROXY"] = "http://fwdproxy:8080"
os.environ["HTTP_PROXY"] = "http://fwdproxy:8080"
def compare_torchscript_transformer_models(model, vocab_size):
test_sample = Sample()
test_sample.input_ids = torch.randint(low=0, high=vocab_size, size=(128,)).long()
test_sample.input_mask = torch.ones(128).long()
test_sample.segment_ids = torch.zeros(128).long()
test_sample.image_feature_0 = torch.rand((1, 100, 2048)).float()
test_sample.image = torch.rand((3, 300, 300)).float()
test_sample_list = SampleList([test_sample])
model = model.to(get_current_device())
test_sample_list = test_sample_list.to(get_current_device())
with torch.no_grad():
model_output = model(test_sample_list)
script_model = torch.jit.script(model)
with torch.no_grad():
script_output = script_model(test_sample_list)
return torch.allclose(model_output["scores"], script_output["scores"])
def verify_torchscript_models(model):
model.eval()
script_model = torch.jit.script(model)
with tempfile.NamedTemporaryFile(delete=False) as tmp:
torch.jit.save(script_model, tmp)
loaded_model = torch.jit.load(tmp.name)
return assertModulesEqual(script_model, loaded_model)
def search_log(log_file: str, search_condition: Optional[List[Callable]] = None):
"""Searches a log file for a particular search conditions which can be list
of functions and returns it back
Args:
log_file (str): Log file in which search needs to be performed
search_condition (List[Callable], optional): Search conditions in form of list.
Each corresponding to a function to test a condition. Defaults to None.
Returns:
JSONObject: Json representation of the search line
Throws:
AssertionError: If no log line is found meeting the conditions
"""
if search_condition is None:
search_condition = {}
lines = []
with open(log_file) as f:
lines = f.readlines()
filtered_line = None
for line in lines:
line = line.strip()
if "progress" not in line:
continue
info_index = line.find(" : ")
line = line[info_index + 3 :]
res = json.loads(line)
meets_condition = True
for condition_fn in search_condition:
meets_condition = meets_condition and condition_fn(res)
if meets_condition:
filtered_line = res
break
assert filtered_line is not None, "No match for search condition in log file"
return filtered_line
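# Minimal usage sketch for search_log, assuming an MMF-style progress line of
# the form "<prefix> : {json}"; the fake log written below is an illustrative
# assumption that only exists to keep the example self-contained and runnable.
if __name__ == "__main__":
    with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as f:
        f.write('2023-01-01 INFO : {"progress": "10/100", "val/total_loss": 0.5}\n')
    entry = search_log(f.name, search_condition=[lambda e: "val/total_loss" in e])
    print(entry["progress"])  # 10/100
    os.unlink(f.name)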
| EXA-1-master | exa/models/mmf-main/tests/test_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
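# Local pytest plugin: pytest_runtest_setup/teardown record the process RSS
# (via psutil) around every test, and pytest_terminal_summary reports any test
# that appears to retain more than LEAK_LIMIT (10 MB) of memory.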
import os
from collections import namedtuple
from itertools import groupby
from psutil import Process
LEAK_LIMIT = 10 * 1024 * 1024
_proc = Process(os.getpid())
START = "START"
END = "END"
ConsumedRamLogEntry = namedtuple(
"ConsumedRamLogEntry", ("nodeid", "on", "consumed_ram")
)
consumed_ram_log = []
def get_consumed_ram():
return _proc.memory_info().rss
def pytest_runtest_setup(item):
log_entry = ConsumedRamLogEntry(item.nodeid, START, get_consumed_ram())
consumed_ram_log.append(log_entry)
def pytest_runtest_teardown(item):
log_entry = ConsumedRamLogEntry(item.nodeid, END, get_consumed_ram())
consumed_ram_log.append(log_entry)
def pytest_terminal_summary(terminalreporter):
grouped = groupby(consumed_ram_log, lambda entry: entry.nodeid)
for nodeid, (start_entry, end_entry) in grouped:
leaked = end_entry.consumed_ram - start_entry.consumed_ram
if leaked > LEAK_LIMIT:
terminalreporter.write(
"LEAKED {}MB in {}\n".format(leaked / 1024 / 1024, nodeid)
)
| EXA-1-master | exa/models/mmf-main/tests/conftest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.utils.patch import patch_transformers
patch_transformers()
| EXA-1-master | exa/models/mmf-main/tests/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
import unittest
import torch
from mmf.datasets.processors.image_processors import VILTImageProcessor
from mmf.datasets.processors.processors import (
CaptionProcessor,
EvalAIAnswerProcessor,
MultiClassFromFile,
MultiHotAnswerFromVocabProcessor,
Processor,
TransformerBboxProcessor,
)
from mmf.datasets.processors.video_processors import VideoTransforms
from mmf.utils.configuration import load_yaml
from omegaconf import OmegaConf
from tests.test_utils import compare_tensors, skip_if_no_pytorchvideo
class TestDatasetProcessors(unittest.TestCase):
def _get_config(self, path):
path = os.path.join(os.path.abspath(__file__), path)
config = load_yaml(os.path.abspath(path))
return config
def test_caption_processor(self):
config = self._get_config("../../../mmf/configs/datasets/coco/defaults.yaml")
captioning_config = config.dataset_config.coco
caption_processor_config = captioning_config.processors.caption_processor
vocab_path = os.path.join(
os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
)
caption_processor_config.params.vocab.type = "random"
caption_processor_config.params.vocab.vocab_file = os.path.abspath(vocab_path)
caption_processor = CaptionProcessor(caption_processor_config.params)
tokens = [1, 4, 5, 6, 4, 7, 8, 2, 0, 0, 0]
caption = caption_processor(tokens)
# Test start, stop, pad are removed
self.assertNotIn("<s>", caption["tokens"])
self.assertNotIn("</s>", caption["tokens"])
self.assertNotIn("<pad>", caption["tokens"])
# Test caption is correct
self.assertEqual(caption["caption"], "a man with a red helmet")
def test_multi_hot_answer_from_vocab_processor(self):
config = self._get_config("../../../mmf/configs/datasets/clevr/defaults.yaml")
clevr_config = config.dataset_config.clevr
answer_processor_config = clevr_config.processors.answer_processor
# Test num_answers==1 case
vocab_path = os.path.join(
os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
)
answer_processor_config.params.vocab_file = os.path.abspath(vocab_path)
answer_processor = MultiHotAnswerFromVocabProcessor(
answer_processor_config.params
)
processed = answer_processor({"answers": ["helmet"]})
answers_indices = processed["answers_indices"]
answers_scores = processed["answers_scores"]
self.assertTrue(
compare_tensors(answers_indices, torch.tensor([5] * 10, dtype=torch.long))
)
expected_answers_scores = torch.zeros(19, dtype=torch.float)
expected_answers_scores[5] = 1.0
self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))
# Test multihot when num answers greater than 1
answer_processor_config.params.vocab_file = os.path.abspath(vocab_path)
answer_processor_config.params.num_answers = 3
answer_processor = MultiHotAnswerFromVocabProcessor(
answer_processor_config.params
)
processed = answer_processor({"answers": ["man", "with", "countryside"]})
answers_indices = processed["answers_indices"]
answers_scores = processed["answers_scores"]
self.assertTrue(
compare_tensors(
answers_indices,
torch.tensor([2, 3, 15, 2, 3, 15, 2, 3, 15, 2], dtype=torch.long),
)
)
expected_answers_scores = torch.zeros(19, dtype=torch.float)
expected_answers_scores[2] = 1.0
expected_answers_scores[3] = 1.0
expected_answers_scores[15] = 1.0
self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))
# Test unk
processed = answer_processor({"answers": ["test", "answer", "man"]})
answers_indices = processed["answers_indices"]
answers_scores = processed["answers_scores"]
self.assertTrue(
compare_tensors(
answers_indices,
torch.tensor([0, 0, 2, 0, 0, 2, 0, 0, 2, 0], dtype=torch.long),
)
)
expected_answers_scores = torch.zeros(19, dtype=torch.float)
expected_answers_scores[2] = 1.0
self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))
def test_evalai_answer_processor(self):
evalai_answer_processor = EvalAIAnswerProcessor()
# Test number
processed = evalai_answer_processor("two")
expected = "2"
self.assertEqual(processed, expected)
# Test article
processed = evalai_answer_processor("a building")
expected = "building"
self.assertEqual(processed, expected)
# Test tokenize
processed = evalai_answer_processor("snow, mountain")
expected = "snow mountain"
self.assertEqual(processed, expected)
# Test contractions
processed = evalai_answer_processor("isnt")
expected = "isn't"
self.assertEqual(processed, expected)
# Test processor
processed = evalai_answer_processor("the two mountain's \t \n ")
expected = "2 mountain 's"
self.assertEqual(processed, expected)
def test_transformer_bbox_processor(self):
import numpy as np
config = {
"params": {
"bbox_key": "bbox",
"image_width_key": "image_width",
"image_height_key": "image_height",
}
}
bbox_processor = TransformerBboxProcessor(config)
item = {
"bbox": np.array([[100, 100, 100, 100]]),
"image_width": 100,
"image_height": 100,
}
processed_box = bbox_processor(item)["bbox"]
self.assertTrue(
torch.equal(
processed_box, torch.tensor([[1, 1, 1, 1, 0]], dtype=torch.float)
)
)
def test_multi_class_from_file(self):
f = tempfile.NamedTemporaryFile(mode="w", delete=False)
f.writelines("\n".join(["abc", "bcd", "def", "efg"]))
f.close()
config = OmegaConf.create({"vocab_file": f.name})
processor = MultiClassFromFile(config)
output = processor({"label": "abc"})
self.assertEqual(output["class_index"], 0)
output = processor({"label": "efg"})
self.assertEqual(output["class_index"], 3)
output = processor("def")
self.assertEqual(output["class_index"], 2)
self.assertRaises(AssertionError, processor, {"label": "UNK"})
os.unlink(f.name)
def test_vilt_image_processor(self):
from torchvision.transforms import ToPILImage
size = 384
config = OmegaConf.create({"size": [size, size]})
image_processor = VILTImageProcessor(config)
expected_size = torch.Size([3, size, size])
image = ToPILImage()(torch.ones(3, 300, 500))
processed_image = image_processor(image)
self.assertEqual(processed_image.size(), expected_size)
image = ToPILImage()(torch.ones(1, 224, 224))
processed_image = image_processor(image)
self.assertEqual(processed_image.size(), expected_size)
@skip_if_no_pytorchvideo
def test_video_transforms(self):
config = OmegaConf.create(
{
"transforms": [
"permute_and_rescale",
{"type": "Resize", "params": {"size": [140, 100]}},
{"type": "UniformTemporalSubsample", "params": {"num_samples": 7}},
]
}
)
video_transforms = VideoTransforms(config)
video = torch.ones(10, 200, 200, 3)
processed_video = video_transforms(video)
torch.testing.assert_close(processed_video, torch.ones(3, 7, 140, 100) / 255)
def test_processor_class_None(self):
config = OmegaConf.create({"type": "UndefinedType"})
with self.assertRaises(ValueError):
Processor(config)
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import tests.test_utils as test_utils
import torch
from omegaconf import OmegaConf
class TestBERTProcessors(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{
"tokenizer_config": {
"type": "bert-base-uncased",
"params": {"do_lower_case": True},
},
"mask_probability": 0,
"max_seq_length": 128,
}
)
def test_bert_tokenizer(self):
from mmf.datasets.processors.bert_processors import BertTokenizer
test_utils.setup_proxy()
processor = BertTokenizer(self.config)
# Test normal caption
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:11] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test empty caption
arg = {"text": ""}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:2] = torch.tensor([101, 102], dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:2] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test long caption
arg = {"text": "I am working for facebook " * 100} # make a long sentence
results = processor(arg)
expected_input_ids = [1045, 2572, 2551, 2005, 9130] * 100
expected_input_ids.insert(0, 101) # [CLS]
expected_input_ids = expected_input_ids[:128]
expected_input_ids[-1] = 102 # [SEP]
expected_input_ids = torch.tensor(expected_input_ids, dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.ones(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test two captions
arg = {
"text_a": "This will be a test of tokens?",
"text_b": "I am working for facebook",
}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:17] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102]
+ [1045, 2572, 2551, 2005, 9130, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_segment_ids[11:17] = 1
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:17] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test masked caption
processor._probability = 1.0
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
self.assertFalse(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
# Test [MASK] token is present
self.assertTrue(103 in results["input_ids"])
def test_vilt_tokenizer(self):
from mmf.datasets.processors.bert_processors import VILTTextTokenizer
test_utils.setup_proxy()
processor = VILTTextTokenizer(self.config)
# Test normal caption
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:11] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test empty caption
arg = {"text": ""}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:2] = torch.tensor([101, 102], dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:2] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test long caption
arg = {"text": "I am working for facebook " * 100} # make a long sentence
results = processor(arg)
expected_input_ids = [1045, 2572, 2551, 2005, 9130] * 100
expected_input_ids.insert(0, 101) # [CLS]
expected_input_ids = expected_input_ids[:128]
expected_input_ids[-1] = 102 # [SEP]
expected_input_ids = torch.tensor(expected_input_ids, dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.ones(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test two captions
arg = {
"text_a": "This will be a test of tokens?",
"text_b": "I am working for facebook",
}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:17] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102]
+ [1045, 2572, 2551, 2005, 9130, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_segment_ids[11:17] = 1
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:17] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
# Test masked caption
processor._probability = 1.0
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
self.assertFalse(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
# Test [MASK] token is present
self.assertTrue(103 in results["input_ids"])
def test_uniter_tokenizer(self):
from mmf.datasets.processors.bert_processors import UNITERTextTokenizer
test_utils.setup_proxy()
config = OmegaConf.create(
{
"tokenizer_config": {
"type": "bert-base-uncased",
"params": {"do_lower_case": True},
},
"mask_probability": 0.5,
"max_seq_length": 128,
}
)
processor = UNITERTextTokenizer(config)
# Test normal caption
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:11] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
# Test empty caption
arg = {"text": ""}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:2] = torch.tensor([101, 102], dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:2] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
# Test long caption
arg = {"text": "I am working for facebook " * 100} # make a long sentence
results = processor(arg)
expected_input_ids = [1045, 2572, 2551, 2005, 9130] * 100
expected_input_ids.insert(0, 101) # [CLS]
expected_input_ids = expected_input_ids[:128]
expected_input_ids[-1] = 102 # [SEP]
expected_input_ids = torch.tensor(expected_input_ids, dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.ones(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
# Test two captions
arg = {
"text_a": "This will be a test of tokens?",
"text_b": "I am working for facebook",
}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:17] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102]
+ [1045, 2572, 2551, 2005, 9130, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_segment_ids[11:17] = 1
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:17] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
# Test masked caption
processor._probability = 1.0
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
# Test [MASK] token is present
self.assertTrue(103 in results["input_ids_masked"])
def test_vinvl_tokenizer(self):
from mmf.datasets.processors.bert_processors import VinVLTextTokenizer
test_utils.setup_proxy()
config = OmegaConf.create(
{
"tokenizer_config": {
"type": "bert-base-uncased",
"params": {"do_lower_case": True},
},
"mask_probability": 0.5,
"max_seq_length": 128,
"corrupt_probability": 0,
}
)
processor = VinVLTextTokenizer(config)
# Test normal caption
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:11] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
self.assertTrue("input_ids_corrupt" not in results)
# Test empty caption
arg = {"text": ""}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:2] = torch.tensor([101, 102], dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:2] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
self.assertTrue("input_ids_corrupt" not in results)
# Test long caption
arg = {"text": "I am working for facebook " * 100} # make a long sentence
results = processor(arg)
expected_input_ids = [1045, 2572, 2551, 2005, 9130] * 100
expected_input_ids.insert(0, 101) # [CLS]
expected_input_ids = expected_input_ids[:128]
expected_input_ids[-1] = 102 # [SEP]
expected_input_ids = torch.tensor(expected_input_ids, dtype=torch.long)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_masks = torch.ones(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
self.assertTrue("input_ids_corrupt" not in results)
# Test two captions
arg = {
"text_a": "This will be a test of tokens?",
"text_b": "I am working for facebook",
}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:17] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102]
+ [1045, 2572, 2551, 2005, 9130, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
expected_segment_ids[11:17] = 1
expected_masks = torch.zeros(128, dtype=torch.long)
expected_masks[:17] = 1
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue(torch.equal(results["input_mask"], expected_masks))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
self.assertTrue("input_ids_corrupt" not in results)
# Test masked caption
processor._probability = 1.0
arg = {"text": "This will be a test of tokens?"}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:11] = torch.tensor(
[101, 2023, 2097, 2022, 1037, 3231, 1997, 19204, 2015, 1029, 102],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue(torch.equal(results["segment_ids"], expected_segment_ids))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
# Test [MASK] token is present
self.assertTrue(103 in results["input_ids_masked"])
self.assertTrue("input_ids_corrupt" not in results)
# Test corrupt tokens
processor._probability = 0.5
processor._corrupt_prob = 1.0
arg = {
"text": "This will be a test of tokens?",
"text_b": "test tokens",
"random_captions": ["Something unexpected"],
"random_labels": ["cat dog icecream"],
}
results = processor(arg)
expected_input_ids = torch.zeros(128, dtype=torch.long)
expected_input_ids[:15] = torch.tensor(
[
101,
2023,
2097,
2022,
1037,
3231,
1997,
19204,
2015,
1029,
102,
3231,
19204,
2015,
102,
],
dtype=torch.long,
)
expected_segment_ids = torch.zeros(128, dtype=torch.long)
self.assertTrue(torch.equal(results["input_ids"], expected_input_ids))
self.assertTrue("input_ids_masked" in results)
self.assertEqual(results["input_ids"].shape, results["input_ids_masked"].shape)
self.assertTrue("input_ids_corrupt" in results)
expected_swapped_caption = torch.zeros(128, dtype=torch.long)
expected_swapped_caption[:8] = torch.tensor(
[101, 2242, 9223, 102, 3231, 19204, 2015, 102],
dtype=torch.long,
)
expected_swapped_labels = torch.zeros(128, dtype=torch.long)
expected_swapped_labels[:17] = torch.tensor(
[
101,
2023,
2097,
2022,
1037,
3231,
1997,
19204,
2015,
1029,
102,
4937,
3899,
3256,
16748,
3286,
102,
],
dtype=torch.long,
)
self.assertTrue(
torch.equal(results["input_ids_corrupt"], expected_swapped_caption)
or torch.equal(results["input_ids_corrupt"], expected_swapped_labels)
)
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_bert_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import torch
from mmf.datasets.lightning_multi_datamodule import LightningMultiDataModule
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from mmf.datasets.multi_datamodule import MultiDataModule
from omegaconf import OmegaConf
from tests.datasets.test_mmf_dataset_builder import SimpleMMFDataset
class MultiDataModuleTestObject(MultiDataModule):
def __init__(self, batch_size):
self.batch_size = batch_size
config = OmegaConf.create(
{
"use_features": True,
"annotations": {
"train": "not_a_real_annotations_dataset",
"val": "not_a_real_annotations_dataset",
},
"features": {
"train": "not_a_real_features_dataset",
"val": "not_a_real_features_dataset",
},
"dataset_config": {"simple": 0},
}
)
self.config = config
self.dataset_list = []
dataset_builder = MMFDatasetBuilder(
"simple", functools.partial(SimpleMMFDataset, num_examples=100)
)
dataset_builder.train_dataloader = self._get_dataloader
dataset_builder.val_dataloader = self._get_dataloader
dataset_builder.test_dataloader = self._get_dataloader
self.datamodules = {"simple": dataset_builder}
def _get_dataloader(self):
dataset = SimpleMMFDataset(
num_examples=100,
dataset_name="simple",
dataset_type="val",
config=self.config,
)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1,
drop_last=False,
)
return dataloader
class LightningDataModuleTestObject(LightningMultiDataModule):
def __init__(self, batch_size):
self.batch_size = batch_size
config = OmegaConf.create(
{
"use_features": True,
"annotations": {
"train": "not_a_real_annotations_dataset",
"val": "not_a_real_annotations_dataset",
},
"features": {
"train": "not_a_real_features_dataset",
"val": "not_a_real_features_dataset",
},
"dataset_config": {"simple": 0},
}
)
self.config = config
self.dataset_list = []
dataset_builder = MMFDatasetBuilder(
"simple", functools.partial(SimpleMMFDataset, num_examples=100)
)
dataset_builder.train_dataloader = self._get_dataloader
dataset_builder.val_dataloader = self._get_dataloader
dataset_builder.test_dataloader = self._get_dataloader
self.datamodules = {"simple": dataset_builder}
def _get_dataloader(self):
dataset = SimpleMMFDataset(
num_examples=100,
dataset_name="simple",
dataset_type="val",
config=self.config,
)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1,
drop_last=False,
)
return dataloader
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_multi_datamodule.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import os
import unittest
from unittest.mock import MagicMock
import torch
from mmf.datasets.base_dataset import BaseDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from mmf.utils.general import get_current_device
from omegaconf import OmegaConf
class SimpleMMFDataset(BaseDataset):
def __init__(
self, dataset_name, config, dataset_type, num_examples, *args, **kwargs
):
self.num_examples = num_examples
self.features = [float(x) for x in range(self.num_examples)]
self.annotations = [float(x) for x in range(self.num_examples)]
self._device = get_current_device()
self._dataset_name = dataset_name
def __getitem__(self, idx):
return {
"feature": torch.tensor(self.features[idx]),
"annotation": torch.tensor(self.annotations[idx]),
"test": torch.tensor(self.features[idx]),
}
def __len__(self):
return self.num_examples
class TestMMFDatasetBuilder(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{
"use_features": True,
"use_images": False,
"split_train": {"val": 0.2, "test": 0.1, "seed": 42},
"annotations": {"train": "not_a_real_annotations_dataset"},
"features": {"train": "not_a_real_features_dataset"},
}
)
self.train = self._create_dataset("train")
self.val = self._create_dataset("val")
self.test = self._create_dataset("test")
def test_train_split_len(self):
self.assertEqual(len(self.train), 70)
self.assertEqual(len(self.val), 20)
self.assertEqual(len(self.test), 10)
def test_train_split_non_overlap(self):
total = (
self._samples_set(self.train)
| self._samples_set(self.val)
| self._samples_set(self.test)
)
self.assertSetEqual(total, {x for x in range(100)})
def test_train_split_alignment(self):
self._test_alignment_in_dataset(self.train)
self._test_alignment_in_dataset(self.val)
self._test_alignment_in_dataset(self.test)
def _create_dataset(self, dataset_type):
dataset_builder = MMFDatasetBuilder(
"vqa", functools.partial(SimpleMMFDataset, num_examples=100)
)
return dataset_builder.load(self.config, dataset_type)
def _samples_set(self, dataset):
return set(dataset.features)
def _test_alignment_in_dataset(self, dataset):
for feature, annotation in zip(dataset.features, dataset.annotations):
self.assertEqual(feature, annotation)
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_mmf_dataset_builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from collections import Counter
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.datasets.multi_dataset_loader import MultiDatasetLoader
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from ..test_utils import NumbersDataset
class TestMultiDatasetLoader(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.multi_dataset = MultiDatasetLoader()
self.multi_dataset._num_datasets = 3
self.multi_dataset.current_index = 0
numbers_dataset_a = NumbersDataset(4, "a")
numbers_dataset_b = NumbersDataset(40, "b")
numbers_dataset_c = NumbersDataset(4000, "c")
self.multi_dataset.dataset_list = ["a", "b", "c"]
self.multi_dataset._loaders = {
"a": self._get_dataloader(numbers_dataset_a),
"b": self._get_dataloader(numbers_dataset_b),
"c": self._get_dataloader(numbers_dataset_c),
}
self.original_config = registry.get("config")
registry.register(
"config",
OmegaConf.create(
{
"training": {
"dataset_size_proportional_sampling": True,
"max_epochs": None,
},
"multitasking": {"enabled": False},
}
),
)
self.multi_dataset._per_dataset_lengths = [4, 40, 4000]
        # 4 + 40 + 4000 = 4044
        self.multi_dataset._total_length = sum(self.multi_dataset._per_dataset_lengths)
def tearDown(self):
registry.register("config", self.original_config)
def _get_dataloader(self, dataset):
return DataLoader(dataset=dataset, batch_size=4, num_workers=0)
def test_proportional_sampling(self):
self.multi_dataset._infer_dataset_probabilities()
count = 0
count_c = 0
for batch in self.multi_dataset:
batch = self.multi_dataset.prepare_batch(batch)
if "c" in batch:
count_c += 1
count += 1
if count == 100:
break
        # Expect at least 98 'c' batches, since dataset c is by far the largest
        self.assertTrue(count_c >= 98)
count = 0
count_epoch = 0
counter = Counter()
for _ in range(1):
for batch in self.multi_dataset:
batch = self.multi_dataset.prepare_batch(batch)
counter[list(batch.keys())[0]] += 1
count += 1
count_epoch += 1
# Expect epoch to be completed
self.assertEqual(count_epoch, 1)
        # Expect each dataset to be fully iterated
self.assertEqual(count, self.multi_dataset._total_length // 4)
self.assertEqual(counter, Counter({"a": 1, "b": 10, "c": 1000}))
def test_equal_sampling(self):
registry.get("config").training.dataset_size_proportional_sampling = False
self.multi_dataset._infer_dataset_probabilities()
count = 0
count_c = 0
for batch in self.multi_dataset:
batch = self.multi_dataset.prepare_batch(batch)
if "c" in batch:
count_c += 1
count += 1
if count == 100:
break
self.assertTrue(count_c <= 34)
        # The epoch never finishes in this case, so iterate up to proportional
        # sampling's epoch length plus some extra
for batch in self.multi_dataset:
batch = self.multi_dataset.prepare_batch(batch)
count += 1
if count > self.multi_dataset._total_length // 4 + 100:
break
        # The loop should reach this point; iteration must not stop at the
        # proportional-sampling epoch length
self.assertTrue(count > self.multi_dataset._total_length // 4 + 100)
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_multi_dataset_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
from mmf.common.registry import registry
from mmf.datasets.base_dataset import BaseDataset
from mmf.utils.configuration import Configuration
from ..test_utils import dummy_args
class TestBaseDataset(unittest.TestCase):
def test_init_processors(self):
path = os.path.join(
os.path.abspath(__file__),
"../../../mmf/configs/datasets/vqa2/defaults.yaml",
)
args = dummy_args()
args.opts.append(f"config={path}")
configuration = Configuration(args)
answer_processor = (
configuration.get_config().dataset_config.vqa2.processors.answer_processor
)
vocab_path = os.path.join(
os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
)
answer_processor.params.vocab_file = os.path.abspath(vocab_path)
self._fix_configuration(configuration)
configuration.freeze()
base_dataset = BaseDataset(
"vqa2", configuration.get_config().dataset_config.vqa2, "train"
)
expected_processors = [
"answer_processor",
"ocr_token_processor",
"bbox_processor",
]
# Check no processors are initialized before init_processors call
self.assertFalse(any(hasattr(base_dataset, key) for key in expected_processors))
for processor in expected_processors:
self.assertIsNone(registry.get("{}_{}".format("vqa2", processor)))
# Check processors are initialized after init_processors
base_dataset.init_processors()
self.assertTrue(all(hasattr(base_dataset, key) for key in expected_processors))
for processor in expected_processors:
self.assertIsNotNone(registry.get("{}_{}".format("vqa2", processor)))
def _fix_configuration(self, configuration):
vqa2_config = configuration.config.dataset_config.vqa2
processors = vqa2_config.processors
processors.pop("text_processor")
processors.pop("context_processor")
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_base_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.common.report import Report
from mmf.common.sample import SampleList
from mmf.datasets.processors.prediction_processors import ArgMaxPredictionProcessor
class TestDatasetProcessors(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
def test_argmax_prediction_processor(self):
processor = ArgMaxPredictionProcessor(config={})
batch = SampleList({"id": torch.tensor([1, 2, 3, 4, 5], dtype=torch.long)})
model_output = {"scores": torch.rand(5, 4)}
report = Report(batch, model_output)
predictions = processor(report)
expected_answers = [1, 1, 2, 1, 3]
expected = []
for idx, answer in enumerate(expected_answers):
expected.append({"id": idx + 1, "answer": answer})
self.assertEqual(predictions, expected)
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_prediction_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from collections import Counter
import numpy as np
import torch
from mmf.datasets import iteration_strategies
from tests.test_utils import NumbersDataset
class TestIterationStrategies(unittest.TestCase):
NUM_DATALOADERS = 5
def setUp(self):
np.random.seed(1234)
def _build_dataloaders(self):
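        # Dataset sizes grow as (idx + 1) * 10**idx: 1, 20, 300, 4000, 50000.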
dataloaders = {}
for idx in range(self.NUM_DATALOADERS):
dataloaders[f"numbers_{idx}"] = torch.utils.data.DataLoader(
dataset=NumbersDataset((idx + 1) * (10**idx)), num_workers=0
)
return dataloaders
def test_constant_iteration_strategy(self):
dataloaders = self._build_dataloaders()
strategy = iteration_strategies.ConstantIterationStrategy.from_params(
dataloaders=dataloaders
)
counter = Counter()
count = 100
for _ in range(count):
counter[strategy()] += 1
self.assertEqual(counter[0], count)
for idx in range(1, self.NUM_DATALOADERS):
self.assertEqual(counter[idx], 0)
strategy = iteration_strategies.ConstantIterationStrategy.from_params(
dataloaders=dataloaders, idx=1
)
counter = Counter()
count = 100
for _ in range(count):
counter[strategy()] += 1
self.assertEqual(counter[1], count)
for idx in range(0, self.NUM_DATALOADERS):
if idx != 1:
self.assertEqual(counter[idx], 0)
def test_round_robin_strategy(self):
dataloaders = self._build_dataloaders()
strategy = iteration_strategies.RoundRobinIterationStrategy.from_params(
dataloaders=dataloaders
)
counter = Counter()
count = 100
for _ in range(count):
counter[strategy()] += 1
for idx in range(0, self.NUM_DATALOADERS):
self.assertEqual(counter[idx], count // self.NUM_DATALOADERS)
strategy = iteration_strategies.RoundRobinIterationStrategy.from_params(
dataloaders=dataloaders, start_idx=2
)
counter = Counter()
count = 100
for _ in range(count):
counter[strategy()] += 1
for idx in range(0, self.NUM_DATALOADERS):
self.assertEqual(counter[idx], count // self.NUM_DATALOADERS)
def test_random_strategy(self):
dataloaders = self._build_dataloaders()
strategy = iteration_strategies.RandomIterationStrategy.from_params(
dataloaders=dataloaders
)
counter = Counter()
count = 10000
for _ in range(count):
counter[strategy()] += 1
for idx in range(0, self.NUM_DATALOADERS):
self.assertTrue(counter[idx] <= 2100)
self.assertTrue(counter[idx] >= 1900)
def test_size_proportional_strategy(self):
dataloaders = self._build_dataloaders()
strategy = iteration_strategies.SizeProportionalIterationStrategy.from_params(
dataloaders=dataloaders
)
counter = Counter()
count = 10000
for _ in range(count):
counter[strategy()] += 1
for idx in range(0, self.NUM_DATALOADERS):
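            # Counts should roughly track dataset size: at most 10**idx and at
            # least 10**(idx - 1) (0 for the smallest dataset).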
self.assertTrue(counter[idx] <= 10**idx)
lower_limit = 10 ** (idx - 1)
if idx == 0:
lower_limit = 0
self.assertTrue(counter[idx] >= lower_limit)
def test_ratios_strategy(self):
dataloaders = self._build_dataloaders()
sampling_ratios = {}
        # All probability mass on the first dataloader, mimicking the constant strategy
for idx in range(self.NUM_DATALOADERS):
sampling_ratios[f"numbers_{idx}"] = 0
sampling_ratios["numbers_0"] = 1
strategy = iteration_strategies.RatiosIterationStrategy.from_params(
dataloaders, sampling_ratios=sampling_ratios
)
counter = Counter()
count = 10000
for _ in range(count):
counter[strategy()] += 1
self.assertEqual(counter[0], count)
for idx in range(1, self.NUM_DATALOADERS):
self.assertEqual(counter[idx], 0)
for idx in range(self.NUM_DATALOADERS):
sampling_ratios[f"numbers_{idx}"] = 1.0 / self.NUM_DATALOADERS
strategy = iteration_strategies.RatiosIterationStrategy.from_params(
dataloaders, sampling_ratios=sampling_ratios
)
count = 10000
counter = Counter()
for _ in range(count):
counter[strategy()] += 1
for idx in range(0, self.NUM_DATALOADERS):
self.assertTrue(counter[idx] <= 2100)
self.assertTrue(counter[idx] >= 1900)
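        # Size-proportional ratios: sampling frequency should follow dataset size.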
lens = sum(len(loader.dataset) for loader in dataloaders.values())
for idx in range(self.NUM_DATALOADERS):
sampling_ratios[f"numbers_{idx}"] = (
len(dataloaders[f"numbers_{idx}"].dataset) / lens
)
strategy = iteration_strategies.RatiosIterationStrategy.from_params(
dataloaders, sampling_ratios=sampling_ratios
)
count = 10000
counter = Counter()
for _ in range(count):
counter[strategy()] += 1
for idx in range(0, self.NUM_DATALOADERS):
self.assertTrue(counter[idx] <= 10**idx)
lower_limit = 10 ** (idx - 1)
if idx == 0:
lower_limit = 0
self.assertTrue(counter[idx] >= lower_limit)
| EXA-1-master | exa/models/mmf-main/tests/datasets/test_iteration_strategies.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from torch import nn
class TestDecoderModel(nn.Module):
def __init__(self, config, vocab):
super().__init__()
self.config = config
self.vocab = vocab
def build(self):
return
def init_hidden_state(self, features):
h = features.new_zeros(
(features.size(0), self.config.classifier.params.hidden_dim),
dtype=torch.float,
)
c = features.new_zeros(
(features.size(0), self.config.classifier.params.hidden_dim),
dtype=torch.float,
)
return h, c
def get_data_t(self, data, batch_size_t):
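        # Trim the text tensor and any hidden states to the current effective
        # batch size (batch_size_t) before this decoding step.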
data["texts"] = data["texts"][:batch_size_t]
if "state" in data:
h1 = data["state"]["td_hidden"][0][:batch_size_t]
c1 = data["state"]["td_hidden"][1][:batch_size_t]
h2 = data["state"]["lm_hidden"][0][:batch_size_t]
c2 = data["state"]["lm_hidden"][1][:batch_size_t]
else:
h1, c1 = self.init_hidden_state(data["texts"])
h2, c2 = self.init_hidden_state(data["texts"])
data["state"] = {"td_hidden": (h1, c1), "lm_hidden": (h2, c2)}
registry.register(f"{h1.device}_lstm_state", data["state"])
return data, batch_size_t
def forward(self, sample_list):
scores = torch.rand(sample_list.get_batch_size(), 3127)
decoder = registry.get_decoder_class(self.config.inference.type)(
self.vocab, self.config
)
sample_list.add_field("targets", sample_list.answers[:, 0, 1:])
sample_list = decoder.init_batch(sample_list)
batch_size = sample_list.image_feature_0.size(0)
data = {}
data["texts"] = sample_list.answers.new_full(
(batch_size, 1), self.vocab.SOS_INDEX, dtype=torch.long
)
timesteps = 10
output = None
batch_size_t = batch_size
for t in range(timesteps):
data, batch_size_t = self.get_data_t(data, batch_size_t)
output = torch.randn(batch_size_t, self.vocab.get_size())
if t == timesteps - 1:
# manually add EOS to the first example.
output = torch.ones(batch_size_t, self.vocab.get_size()) * -30.0
output[0, self.vocab.EOS_INDEX] = 10
finish, data, batch_size_t = decoder.decode(t, data, output)
if finish:
break
model_output = {"scores": scores}
model_output["captions"] = decoder.get_result()
return model_output
| EXA-1-master | exa/models/mmf-main/tests/utils/test_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import tempfile
import unittest
from io import StringIO
from unittest import mock
import mmf.utils.download as download
import tests.test_utils as test_utils
TEST_DOWNLOAD_URL = (
"https://dl.fbaipublicfiles.com/mmf/data/tests/visual_entailment_small.zip"
)
TEST_DOWNLOAD_SHASUM = (
"e5831397710b71f58a02c243bb6e731989c8f37ef603aaf3ce18957ecd075bf5"
)
class TestUtilsDownload(unittest.TestCase):
@test_utils.skip_if_no_network
@test_utils.skip_if_macos
def test_download_file_class(self):
# Test normal scenario
resource = download.DownloadableFile(
TEST_DOWNLOAD_URL,
"visual_entailment_small.zip",
hashcode=TEST_DOWNLOAD_SHASUM,
compressed=True,
)
with tempfile.TemporaryDirectory() as d:
with contextlib.redirect_stdout(StringIO()):
resource.download_file(d)
self.assertTrue(os.path.exists(os.path.join(d, "visual_entailment_small")))
self.assertTrue(
os.path.exists(
os.path.join(d, "visual_entailment_small", "db", "train.jsonl")
)
)
self.assertTrue(
os.path.exists(
os.path.join(
d,
"visual_entailment_small",
"features",
"features.lmdb",
"data.mdb",
)
)
)
self.assertTrue(
os.path.exists(os.path.join(d, "visual_entailment_small.zip"))
)
# Test when checksum fails
resource = download.DownloadableFile(
TEST_DOWNLOAD_URL,
"visual_entailment_small.zip",
hashcode="some_random_string",
compressed=True,
)
with tempfile.TemporaryDirectory() as d:
with contextlib.redirect_stdout(StringIO()):
self.assertRaises(AssertionError, resource.download_file, d)
# Test when not compressed
resource = download.DownloadableFile(
TEST_DOWNLOAD_URL,
"visual_entailment_small.zip",
hashcode=TEST_DOWNLOAD_SHASUM,
compressed=False,
)
with tempfile.TemporaryDirectory() as d:
with contextlib.redirect_stdout(StringIO()):
resource.download_file(d)
self.assertTrue(
os.path.exists(os.path.join(d, "visual_entailment_small.zip"))
)
# Check already downloaded scenarios
with mock.patch.object(resource, "checksum") as mocked:
with contextlib.redirect_stdout(StringIO()):
resource.download_file(d)
mocked.assert_called_once_with(d)
with mock.patch("mmf.utils.download.download") as mocked:
with contextlib.redirect_stdout(StringIO()):
resource.download_file(d)
mocked.assert_called_once_with(
resource._url, d, resource._file_name, redownload=False
)
with mock.patch.object(resource, "checksum") as mocked:
resource._hashcode = "some_random_string"
with contextlib.redirect_stdout(StringIO()):
resource.download_file(d)
self.assertTrue(mocked.call_count, 2)
with mock.patch("mmf.utils.download.download") as mocked:
resource._hashcode = "some_random_string"
with contextlib.redirect_stdout(StringIO()):
self.assertRaises(AssertionError, resource.download_file, d)
mocked.assert_called_once_with(
resource._url, d, resource._file_name, redownload=True
)
# Test delete original
resource = download.DownloadableFile(
TEST_DOWNLOAD_URL,
"visual_entailment_small.zip",
hashcode=TEST_DOWNLOAD_SHASUM,
compressed=True,
delete_original=True,
)
with tempfile.TemporaryDirectory() as d:
with contextlib.redirect_stdout(StringIO()):
resource.download_file(d)
self.assertFalse(
os.path.exists(os.path.join(d, "visual_entailment_small.zip"))
)
def test_mark_done(self):
with tempfile.TemporaryDirectory() as d:
path = os.path.join(d, ".built.json")
self.assertFalse(os.path.exists(path))
download.mark_done(d, "0.1")
self.assertTrue(os.path.exists(path))
with open(path) as f:
import json
data = json.load(f)
self.assertEqual(list(data.keys()), ["created_at", "version"])
def test_built(self):
with tempfile.TemporaryDirectory() as d:
# First, test without built file
self.assertFalse(download.built(d, "0.2"))
download.mark_done(d, "0.1")
# Test correct version
self.assertTrue(download.built(d, "0.1"))
# Test wrong version
self.assertFalse(download.built(d, "0.2"))
| EXA-1-master | exa/models/mmf-main/tests/utils/test_download.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import os
import unittest
from mmf.utils.configuration import Configuration, get_zoo_config
from mmf.utils.env import setup_imports, teardown_imports
from mmf.utils.general import get_mmf_root
from tests.test_utils import dummy_args
class TestUtilsConfiguration(unittest.TestCase):
def setUp(self):
setup_imports()
def tearDown(self):
teardown_imports()
gc.collect()
def test_get_zoo_config(self):
# Test direct key
version, resources = get_zoo_config("textvqa.ocr_en")
self.assertIsNotNone(version)
self.assertIsNotNone(resources)
# Test default variation
version, resources = get_zoo_config("textvqa")
self.assertIsNotNone(version)
self.assertIsNotNone(resources)
# Test non-default variation
version, resources = get_zoo_config("textvqa", variation="ocr_en")
self.assertIsNotNone(version)
self.assertIsNotNone(resources)
# Test random key
version, resources = get_zoo_config("some_random")
self.assertIsNone(version)
self.assertIsNone(resources)
# Test non-existent variation
self.assertRaises(
AssertionError, get_zoo_config, "textvqa", variation="some_random"
)
# Test different zoo_type
version, resources = get_zoo_config("visual_bert.pretrained", zoo_type="models")
self.assertIsNotNone(version)
self.assertIsNotNone(resources)
# Test direct config
version, resources = get_zoo_config(
"visual_bert.pretrained",
zoo_config_path=os.path.join("configs", "zoo", "models.yaml"),
)
self.assertIsNotNone(version)
self.assertIsNotNone(resources)
def test_config_overrides(self):
config_path = os.path.join(
get_mmf_root(),
"..",
"projects",
"m4c",
"configs",
"textvqa",
"defaults.yaml",
)
config_path = os.path.abspath(config_path)
args = dummy_args(model="m4c", dataset="textvqa")
args.opts += [
f"config={config_path}",
"training.lr_steps[1]=10000",
'dataset_config.textvqa.zoo_requirements[0]="test"',
]
configuration = Configuration(args)
configuration.freeze()
config = configuration.get_config()
self.assertEqual(config.training.lr_steps[1], 10000)
self.assertEqual(config.dataset_config.textvqa.zoo_requirements[0], "test")
| EXA-1-master | exa/models/mmf-main/tests/utils/test_configuration.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
from mmf.utils.general import get_mmf_root
def has_python_file(files):
for f in files:
if f.endswith(".py"):
return True
return False
def walk_and_assert_init(folder):
for root, subfolders, files in os.walk(folder):
if has_python_file(files):
assert "__init__.py" in files, f"Folder {root} is missing __init__.py file"
def walk_and_assert_not_empty(folder):
for root, subfolders, files in os.walk(folder):
assert len(files) > 0 or len(subfolders) > 0, f"Folder {root} is empty"
class TestQualityChecks(unittest.TestCase):
def _test_quality_check(self, fn):
fn(get_mmf_root())
fn(os.path.join(get_mmf_root(), "..", "mmf_cli"))
fn(os.path.join(get_mmf_root(), "..", "tests"))
def test_init_files_present(self):
self._test_quality_check(walk_and_assert_init)
def test_no_empty_folders(self):
self._test_quality_check(walk_and_assert_not_empty)
| EXA-1-master | exa/models/mmf-main/tests/utils/test_quality_checks.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import os
import sys
import unittest
from mmf.common.registry import registry
from mmf.utils.configuration import get_mmf_env
from mmf.utils.env import import_user_module, setup_imports
from mmf.utils.general import get_mmf_root
from mmf_cli.run import run
from tests.test_utils import make_temp_dir, search_log
class TestUtilsEnvE2E(unittest.TestCase):
def _delete_dirty_modules(self):
for key in list(sys.modules.keys()):
if key not in self._initial_modules:
del sys.modules[key]
def _sanitize_registry(self):
registry.mapping["builder_name_mapping"].pop("always_one", None)
registry.mapping["model_name_mapping"].pop("simple", None)
registry.mapping["state"] = {}
def _get_user_dir(self, abs_path=True):
if abs_path:
return os.path.join(get_mmf_root(), "..", "tests", "data", "user_dir")
else:
return os.path.join("tests", "data", "user_dir")
def setUp(self):
setup_imports()
self._initial_modules = set(sys.modules)
self._sanitize_registry()
def tearDown(self):
self._delete_dirty_modules()
self._sanitize_registry()
def _test_user_import_e2e(self, extra_opts=None):
if extra_opts is None:
extra_opts = []
MAX_UPDATES = 50
user_dir = self._get_user_dir()
with make_temp_dir() as temp_dir:
opts = [
"model=simple",
"run_type=train_val_test",
"dataset=always_one",
"config=configs/experiment.yaml",
f"env.user_dir={user_dir}",
"training.seed=1",
"training.num_workers=3",
f"training.max_updates={MAX_UPDATES}",
f"env.save_dir={temp_dir}",
]
opts = opts + extra_opts
out = io.StringIO()
with contextlib.redirect_stdout(out):
run(opts)
train_log = os.path.join(temp_dir, "train.log")
log_line = search_log(
train_log,
search_condition=[
lambda x: x["progress"] == f"{MAX_UPDATES}/{MAX_UPDATES}",
lambda x: "best_val/always_one/accuracy" in x,
],
)
self.assertEqual(float(log_line["val/always_one/accuracy"]), 1)
log_line = search_log(
train_log,
search_condition=[
lambda x: x["progress"] == f"{MAX_UPDATES}/{MAX_UPDATES}",
lambda x: "test/always_one/accuracy" in x,
],
)
self.assertEqual(float(log_line["test/always_one/accuracy"]), 1)
def test_user_import_e2e(self):
self._test_user_import_e2e()
def test_cpu_evaluation_e2e(self):
self._test_user_import_e2e(extra_opts=["evaluation.use_cpu=True"])
def test_import_user_module_from_directory_absolute(self, abs_path=True):
# Make sure the modules are not available first
self.assertIsNone(registry.get_builder_class("always_one"))
self.assertIsNone(registry.get_model_class("simple"))
self.assertFalse("mmf_user_dir" in sys.modules)
# Now, import and test
user_dir = self._get_user_dir(abs_path)
import_user_module(user_dir)
self.assertIsNotNone(registry.get_builder_class("always_one"))
self.assertIsNotNone(registry.get_model_class("simple"))
self.assertTrue("mmf_user_dir" in sys.modules)
self.assertTrue(user_dir in get_mmf_env("user_dir"))
def test_import_user_module_from_directory_relative(self):
self.test_import_user_module_from_directory_absolute(abs_path=False)
user_dir = self._get_user_dir(abs_path=False)
self.assertEqual(user_dir, get_mmf_env("user_dir"))
def test_import_user_module_from_file(self):
self.assertIsNone(registry.get_builder_class("always_one"))
self.assertIsNone(registry.get_model_class("simple"))
user_dir = self._get_user_dir()
user_file = os.path.join(user_dir, "models", "simple.py")
import_user_module(user_file)
        # Only the model should be found; the builder should be None
self.assertIsNone(registry.get_builder_class("always_one"))
self.assertIsNotNone(registry.get_model_class("simple"))
self.assertTrue("mmf_user_dir" in sys.modules)
self.assertTrue(user_dir in get_mmf_env("user_dir"))
| EXA-1-master | exa/models/mmf-main/tests/utils/test_env.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from mmf.common.registry import registry
from mmf.utils.patch import (
ORIGINAL_PATCH_FUNCTIONS_KEY,
restore_saved_modules,
safecopy_modules,
)
class TestClass:
@staticmethod
def test_function():
return True
class TestUtilsPatch(unittest.TestCase):
def setUp(self):
registry.register(ORIGINAL_PATCH_FUNCTIONS_KEY, {})
def test_safecopy_modules(self):
safecopy_modules(["TestClass.test_function"], {"TestClass": TestClass})
original_functions = registry.get(ORIGINAL_PATCH_FUNCTIONS_KEY)
self.assertTrue("TestClass.test_function" in original_functions)
TestClass.test_function = lambda: False
restore_saved_modules({"TestClass": TestClass})
self.assertTrue(TestClass.test_function())
| EXA-1-master | exa/models/mmf-main/tests/utils/test_patch.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
import mmf.utils.text as text_utils
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample, SampleList
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports
from mmf.utils.general import get_mmf_root
from tests.test_utils import dummy_args
from tests.utils.test_model import TestDecoderModel
class TestUtilsText(unittest.TestCase):
TOKENS = ["this", "will", "be", "a", "test", "of", "tokens"]
TOKENIZE_EXAMPLE = "This will be a test of tokens?"
VOCAB_EXAMPLE_SENTENCES = [
"Are there more big green things than large purple shiny cubes?"
"How many other things are there of the same shape as the tiny "
+ "cyan matte object?",
"Is the color of the large sphere the same as the large matte cube?"
"What material is the big object that is right of the brown cylinder and "
"left of the large brown sphere?",
"How big is the brown shiny sphere? ;",
]
def setUp(self):
setup_imports()
torch.manual_seed(1234)
config_path = os.path.join(
get_mmf_root(),
"..",
"projects",
"butd",
"configs",
"coco",
"nucleus_sampling.yaml",
)
config_path = os.path.abspath(config_path)
args = dummy_args(model="butd", dataset="coco")
args.opts.append(f"config={config_path}")
configuration = Configuration(args)
configuration.config.datasets = "coco"
configuration.config.model_config.butd.inference.params.sum_threshold = 0.5
configuration.freeze()
self.config = configuration.config
registry.register("config", self.config)
def test_tokenize(self):
tokens = text_utils.tokenize(self.TOKENIZE_EXAMPLE)
self.assertEqual(list(tokens), self.TOKENS)
def test_generate_ngrams(self):
ngrams = text_utils.generate_ngrams(self.TOKENS, 2)
self.assertEqual(
list(ngrams),
["this will", "will be", "be a", "a test", "test of", "of tokens"],
)
ngrams = text_utils.generate_ngrams(self.TOKENS, 3)
self.assertEqual(
list(ngrams),
["this will be", "will be a", "be a test", "a test of", "test of tokens"],
)
def test_generate_ngrams_range(self):
        # Test generation of 1-grams through 3-grams
ngrams = text_utils.generate_ngrams_range(self.TOKENS, (1, 4))
expected_ngrams = self.TOKENS + [
"this will",
"will be",
"be a",
"a test",
"test of",
"of tokens",
"this will be",
"will be a",
"be a test",
"a test of",
"test of tokens",
]
self.assertEqual(list(ngrams), expected_ngrams)
def test_vocab_from_text(self):
vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES)
self.assertEqual(vocab.get_size(), 41)
self.assertEqual(len(vocab), 41)
self.assertEqual(vocab.get_unk_index(), 1)
self.assertEqual(vocab.itos[0], vocab.DEFAULT_TOKENS[0])
self.assertEqual(vocab.itos[34], "that")
self.assertEqual(vocab.itos[31], "cube")
self.assertEqual(vocab.itos[25], "cyan")
self.assertEqual(vocab.itos[20], "the")
self.assertEqual(vocab.itos[10], "than")
self.assertEqual(vocab.stoi["sphere"], 30)
self.assertEqual(vocab.stoi["shape"], 22)
vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES, min_count=10)
self.assertEqual(vocab.get_size(), 5)
self.assertEqual(vocab.itos[vocab.get_size() - 1], "the")
vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES, min_count=11)
self.assertEqual(vocab.get_size(), 4)
vocab = text_utils.VocabFromText(
self.VOCAB_EXAMPLE_SENTENCES, min_count=11, only_unk_extra=True
)
self.assertEqual(vocab.get_size(), 1)
self.assertEqual(vocab.itos[vocab.get_size() - 1], "<unk>")
vocab = text_utils.VocabFromText(
self.VOCAB_EXAMPLE_SENTENCES, min_count=1, remove=[";"]
)
self.assertEqual(vocab.get_size(), 40)
vocab = text_utils.VocabFromText(
self.VOCAB_EXAMPLE_SENTENCES, min_count=1, remove=[";", ",", "?"]
)
self.assertEqual(vocab.get_size(), 38)
vocab = text_utils.VocabFromText(
self.VOCAB_EXAMPLE_SENTENCES, min_count=1, keep=["?"], remove=";"
)
self.assertEqual(vocab.get_size(), 40)
def test_nucleus_sampling(self):
vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES)
model_config = self.config.model_config.butd
model = TestDecoderModel(model_config, vocab)
model.build()
model.eval()
sample = Sample()
sample.dataset_name = "coco"
sample.dataset_type = "test"
sample.image_feature_0 = torch.randn(100, 2048)
sample.answers = torch.zeros((5, 10), dtype=torch.long)
sample_list = SampleList([sample])
tokens = model(sample_list)["captions"]
# these are expected tokens for sum_threshold = 0.5
expected_tokens = [1.0, 23.0, 38.0, 30.0, 5.0, 11.0, 2.0]
self.assertEqual(tokens[0].tolist(), expected_tokens)
class TestUtilsTextBeamSearch(unittest.TestCase):
TOKENS = ["this", "will", "be", "a", "test", "of", "tokens"]
TOKENIZE_EXAMPLE = "This will be a test of tokens?"
VOCAB_EXAMPLE_SENTENCES = [
"Are there more big green things than large purple shiny cubes?"
"How many other things are there of the same shape as the tiny "
+ "cyan matte object?",
"Is the color of the large sphere the same as the large matte cube?"
"What material is the big object that is right of the brown cylinder and "
"left of the large brown sphere?",
"How big is the brown shiny sphere? ;",
]
def setUp(self):
setup_imports()
torch.manual_seed(1234)
config_path = os.path.join(
get_mmf_root(),
"..",
"projects",
"butd",
"configs",
"coco",
"beam_search.yaml",
)
config_path = os.path.abspath(config_path)
args = dummy_args(model="butd", dataset="coco")
args.opts.append(f"config={config_path}")
configuration = Configuration(args)
configuration.config.datasets = "coco"
configuration.freeze()
self.config = configuration.config
registry.register("config", self.config)
def test_beam_search(self):
vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES)
model_config = self.config.model_config.butd
model = TestDecoderModel(model_config, vocab)
model.build()
model.eval()
expected_tokens = {
1: [1.0, 23.0, 1.0, 24.0, 29.0, 37.0, 40.0, 17.0, 29.0, 2.0],
2: [1.0, 0.0, 8.0, 1.0, 28.0, 25.0, 2.0],
8: [1.0, 34.0, 1.0, 13.0, 1.0, 2.0],
16: [1.0, 25.0, 18.0, 2.0],
}
for batch_size in [1, 2, 8, 16]:
samples = []
for _ in range(batch_size):
sample = Sample()
sample.dataset_name = "coco"
sample.dataset_type = "test"
sample.image_feature_0 = torch.randn(100, 2048)
sample.answers = torch.zeros((5, 10), dtype=torch.long)
samples.append(sample)
sample_list = SampleList(samples)
tokens = model(sample_list)["captions"]
self.assertEqual(
np.trim_zeros(tokens[0].tolist()), expected_tokens[batch_size]
)
| EXA-1-master | exa/models/mmf-main/tests/utils/test_text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import numpy as np
import torch
from mmf.utils.features.visualizing_image import SingleImageViz
class TestVisualize(unittest.TestCase):
objids = np.array(["obj0", "obj1", "obj2", "obj3"])
attrids = np.array(["attr0", "attr1", "attr2", "attr3"])
img = np.array(
[
[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]],
[[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9], [10, 10, 10], [11, 11, 11]],
[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]],
[[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9], [10, 10, 10], [11, 11, 11]],
[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]],
[[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9], [10, 10, 10], [11, 11, 11]],
],
dtype=np.uint8,
)
output_dict = {
"obj_ids": torch.tensor([0, 1]),
"obj_probs": torch.tensor([0.5, 0.25]),
"attr_ids": torch.tensor([2, 3]),
"attr_probs": torch.tensor([0.3, 0.6]),
"boxes": torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]]),
}
buffer = np.array(
[
[
[0, 0, 0],
[0, 0, 0],
[62, 48, 70],
[112, 87, 124],
[53, 41, 58],
[38, 30, 42],
[28, 22, 31],
],
[
[6, 6, 6],
[3, 3, 3],
[3, 3, 3],
[4, 4, 4],
[4, 4, 4],
[4, 4, 4],
[5, 5, 5],
],
[
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3],
[3, 3, 3],
[4, 4, 4],
[5, 5, 5],
],
[
[6, 6, 6],
[7, 7, 7],
[8, 8, 8],
[9, 9, 9],
[9, 9, 9],
[10, 10, 10],
[11, 11, 11],
],
[
[6, 6, 6],
[7, 7, 7],
[8, 8, 8],
[9, 9, 9],
[9, 9, 9],
[10, 10, 10],
[11, 11, 11],
],
[
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3],
[3, 3, 3],
[4, 4, 4],
[5, 5, 5],
],
[
[6, 6, 6],
[7, 7, 7],
[8, 8, 8],
[9, 9, 9],
[9, 9, 9],
[10, 10, 10],
[11, 11, 11],
],
]
)
def test_single_image_viz(self) -> None:
frcnn_visualizer = SingleImageViz(
self.img, id2obj=self.objids, id2attr=self.attrids
)
frcnn_visualizer.draw_boxes(
self.output_dict.get("boxes"),
self.output_dict.pop("obj_ids"),
self.output_dict.pop("obj_probs"),
self.output_dict.pop("attr_ids"),
self.output_dict.pop("attr_probs"),
)
buffer = frcnn_visualizer._get_buffer()
self.assertTrue((buffer == self.buffer).all())
| EXA-1-master | exa/models/mmf-main/tests/utils/test_visualize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import time
import unittest
from mmf.utils.timer import Timer
class TestUtilsTimer(unittest.TestCase):
def test_get_current(self):
timer = Timer()
expected = 0
self.assertEqual(int(timer.get_current().split("ms")[0]), expected)
def test_reset(self):
timer = Timer()
time.sleep(2)
timer.reset()
expected = 0
self.assertEqual(int(timer.get_current().split("ms")[0]), expected)
def test_get_time_since_start(self):
timer = Timer()
time.sleep(2)
expected = 2
self.assertEqual(expected, int(timer.get_time_since_start().split("s")[0]))
| EXA-1-master | exa/models/mmf-main/tests/utils/test_timer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import mmf.utils.distributed as distributed
class TestUtilsDistributed(unittest.TestCase):
def test_object_byte_tensor_conversion(self):
test_obj = [1, "2", {3: 4}, [5]]
test_obj_bytes = distributed.object_to_byte_tensor(test_obj)
test_obj_dec = distributed.byte_tensor_to_object(test_obj_bytes)
self.assertEqual(test_obj_dec, test_obj)
| EXA-1-master | exa/models/mmf-main/tests/utils/test_distributed.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from mmf.utils.general import dict_to_string, get_overlap_score
class TestUtilsGeneral(unittest.TestCase):
def test_dict_to_string(self):
dictionary = {"one": 1, "two": 2, "three": 3}
expected = "one: 1.0000, two: 2.0000, three: 3.0000"
self.assertEqual(dict_to_string(dictionary), expected)
# TODO: Move later to configuration tests
# def test_nested_dict_update(self):
# # Updates value
# dictionary = {"level1": {"level2": {"levelA": 0, "levelB": 1}}}
# update = {"level1": {"level2": {"levelB": 10}}}
# expected = {"level1": {"level2": {"levelA": 0, "levelB": 10}}}
#
# self.assertEqual(nested_dict_update(dictionary, update), expected)
#
# # Adds new value
# dictionary = {"level1": {"level2": {"levelA": 0}}}
# update = {"level1": {"level2": {"levelB": 10}}}
# expected = {"level1": {"level2": {"levelA": 0, "levelB": 10}}}
#
# self.assertEqual(nested_dict_update(dictionary, update), expected)
def test_get_overlap_score(self):
# Full overlap
candidate = "pythia"
target = "pythia"
self.assertEqual(get_overlap_score(candidate, target), 1.0)
# Partial overlap
candidate = "pythia"
target = "python"
self.assertEqual(get_overlap_score(candidate, target), 2 / 3)
# No overlap
candidate = "pythia"
target = "vqa"
self.assertEqual(get_overlap_score(candidate, target), 0.0)
| EXA-1-master | exa/models/mmf-main/tests/utils/test_general.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import shutil
import tempfile
import unittest
import uuid
from typing import Optional
from mmf.utils.file_io import PathManager
class TestFileIO(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_tmpfile_contents = "Hello, World"
@classmethod
def setUpClass(cls) -> None:
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
cls._tmpfile = f.name
f.write(cls._tmpfile_contents)
f.flush()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
if cls._tmpdir is not None:
shutil.rmtree(cls._tmpdir)
def test_file_io_open(self):
with PathManager.open(self._tmpfile, mode="r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_copy(self):
PathManager.copy(self._tmpfile, os.path.join(self._tmpdir, "test_copy.txt"))
with open(os.path.join(self._tmpdir, "test_copy.txt")) as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_exists(self):
self.assertEqual(
PathManager.exists(self._tmpfile), os.path.exists(self._tmpfile)
)
fake_path = os.path.join(self._tmpdir, uuid.uuid4().hex)
self.assertEqual(PathManager.exists(fake_path), os.path.exists(fake_path))
def test_file_io_mkdirs(self):
dir_path = os.path.join(self._tmpdir, "test_dir")
PathManager.mkdirs(dir_path)
self.assertTrue(os.path.isdir(dir_path))
| EXA-1-master | exa/models/mmf-main/tests/utils/test_file_io.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import os
import shutil
import tempfile
import unittest
from typing import Optional
from mmf.common.registry import registry
from mmf.utils.configuration import Configuration
from mmf.utils.file_io import PathManager
from mmf.utils.logger import setup_logger
class TestLogger(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile_write_contents: str = "print writer contents"
@classmethod
def setUpClass(cls) -> None:
cls._tmpdir = tempfile.mkdtemp()
args = argparse.Namespace()
        args.opts = [f"env.save_dir={cls._tmpdir}", "model=cnn_lstm", "dataset=clevr"]
args.opts.append(f"config={os.path.join('configs', 'defaults.yaml')}")
args.config_override = None
configuration = Configuration(args)
configuration.freeze()
cls.config = configuration.get_config()
registry.register("config", cls.config)
cls.writer = setup_logger()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
for handler in cls.writer.handlers:
handler.close()
if cls._tmpdir is not None:
            # Set ignore_errors since Windows can raise a PermissionError here
shutil.rmtree(cls._tmpdir, ignore_errors=True)
def test_logger_files(self) -> None:
self.assertTrue(
PathManager.exists(
glob.glob(os.path.join(self._tmpdir, "logs", "train*"))[0]
)
)
self.assertTrue(PathManager.exists(os.path.join(self._tmpdir, "train.log")))
self.assertTrue(PathManager.exists(os.path.join(self._tmpdir, "logs")))
def test_log_writer(self) -> None:
self.writer.info(self._tmpfile_write_contents)
f = PathManager.open(glob.glob(os.path.join(self._tmpdir, "logs", "train*"))[0])
self.assertTrue(
any(self._tmpfile_write_contents in line for line in f.readlines())
)
f = PathManager.open(os.path.join(self._tmpdir, "train.log"))
self.assertTrue(
any(self._tmpfile_write_contents in line for line in f.readlines())
)
| EXA-1-master | exa/models/mmf-main/tests/utils/test_logger.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import tempfile
import unittest
from copy import deepcopy
from io import StringIO
from unittest.mock import Mock, patch
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.trainers.callbacks.checkpoint import CheckpointCallback
from mmf.trainers.callbacks.early_stopping import EarlyStoppingCallback
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from mmf.utils.checkpoint import Checkpoint
from mmf.utils.configuration import load_yaml
from mmf.utils.file_io import PathManager
from omegaconf import OmegaConf
from tests.test_utils import compare_state_dicts, skip_if_no_cuda
@contextlib.contextmanager
def mock_env_with_temp():
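    # Patch get_mmf_env to point at a temporary directory so that checkpoints
    # and configs are written there and cleaned up on exit.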
d = tempfile.TemporaryDirectory()
patched = patch("mmf.utils.checkpoint.get_mmf_env", return_value=d.name)
patched.start()
yield d.name
d.cleanup()
patched.stop()
class SimpleModule(BaseModel):
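    # Tiny base + classifier model used to exercise checkpoint save/load below.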
def __init__(self, config={}):
super().__init__(config)
self.base = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, x, target):
x = self.classifier(self.base(x))
return {"losses": {"total_loss": self.loss(x, target)}}
class OnlyBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.base_test = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
def format_state_key(self, key):
return key
class TestUtilsCheckpoint(unittest.TestCase):
def setUp(self):
import argparse
torch.manual_seed(1234)
        # An easy way to get an AttributeDict object
self.trainer = argparse.Namespace()
self.config = load_yaml(os.path.join("configs", "defaults.yaml"))
self.config = OmegaConf.merge(
self.config,
{
"model": "simple",
"model_config": {},
"checkpoint": {
"save_git_details": False,
"reset": {
"optimizer": False,
"counts": False,
"all": False,
"fp16_scaler": False,
},
"pretrained_state_mapping": {"base_test": "base"},
"max_to_keep": 5,
},
"config_override": None,
"training": {
"checkpoint_interval": 1,
"early_stop": {"criteria": "val/total_loss", "minimize": True},
"lr_scheduler": True,
},
"scheduler": {
"type": "multi_step",
"params": {
"use_warmup": False,
"lr_steps": [10, 20],
"lr_ratio": 0.1,
"warmup_factor": 1.0,
},
},
},
)
# Keep original copy for testing purposes
self.trainer.config = deepcopy(self.config)
self.trainer.model = SimpleModule()
self.trainer.scaler = torch.cuda.amp.GradScaler()
self.trainer.optimizer = torch.optim.Adam(
self.trainer.model.parameters(), lr=1e-01
)
self.trainer.lr_scheduler_callback = LRSchedulerCallback(
self.config, self.trainer
)
def test_save_config(self):
with mock_env_with_temp() as d:
Checkpoint(self.trainer)
config = load_yaml(os.path.join(d, "config.yaml"))
self.assertTrue(config == self.config)
self.assertTrue(config == self.trainer.config)
def test_save_and_load_state_dict(self):
with mock_env_with_temp() as d:
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
self._do_a_pass()
# Test normal case
checkpoint.save(1500)
self.assertTrue(
PathManager.exists(os.path.join(d, "models", "model_1500.ckpt"))
)
self.assertTrue(PathManager.exists(os.path.join(d, "current.ckpt")))
self.assertFalse(PathManager.exists(os.path.join(d, "best.ckpt")))
os.remove(os.path.join(d, "models", "model_1500.ckpt"))
os.remove(os.path.join(d, "current.ckpt"))
best_model = deepcopy(self.trainer.model)
best_optimizer = deepcopy(self.trainer.optimizer)
# Test with update_best
checkpoint.save(2000, update_best=True)
self.assertTrue(
PathManager.exists(os.path.join(d, "models", "model_2000.ckpt"))
)
self.assertTrue(PathManager.exists(os.path.join(d, "best.ckpt")))
self.assertTrue(PathManager.exists(os.path.join(d, "current.ckpt")))
self._do_a_pass()
checkpoint.save(2500)
# Test resume
self.trainer.config.checkpoint.resume = True
current_model = deepcopy(self.trainer.model)
current_optimizer = deepcopy(self.trainer.optimizer)
checkpoint.load_state_dict()
self.assertFalse(
compare_state_dicts(
self.trainer.model.state_dict(), best_model.state_dict()
)
)
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), current_model.state_dict()
)
)
self.assertFalse(
self._compare_optimizers(self.trainer.optimizer, best_optimizer)
)
self.assertTrue(
self._compare_optimizers(self.trainer.optimizer, current_optimizer)
)
base_0_weight_current = self.trainer.model.base[0].weight.data.clone()
# Test resume_best
self.trainer.config.checkpoint.resume = True
self.trainer.config.checkpoint.resume_best = True
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), best_model.state_dict()
)
)
self.assertTrue(
self._compare_optimizers(self.trainer.optimizer, best_optimizer)
)
self.assertFalse(
self._compare_optimizers(self.trainer.optimizer, current_optimizer)
)
base_0_weight_best = self.trainer.model.base[0].weight.data.clone()
self.trainer.config.checkpoint.resume_best = False
# Test distributed settings
self.trainer.model = torch.nn.DataParallel(self.trainer.model)
checkpoint.load_state_dict()
weight_to_be_tested = self.trainer.model.module.base[0].weight
weight_device = weight_to_be_tested.device
self.assertTrue(
torch.equal(
weight_to_be_tested, base_0_weight_current.to(weight_device)
)
)
self.assertFalse(
torch.equal(weight_to_be_tested, base_0_weight_best.to(weight_device))
)
def test_finalize_and_restore_from_it(self):
with mock_env_with_temp():
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
original_model = deepcopy(self.trainer.model)
self._do_a_pass()
model_1500 = deepcopy(self.trainer.model)
checkpoint.save(1500)
swap = self.trainer.model
self.trainer.model = original_model
checkpoint.restore()
# First test without best.ckpt
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
self.assertFalse(
compare_state_dicts(
self.trainer.model.state_dict(), model_1500.state_dict()
)
)
self.trainer.model = swap
self._do_a_pass()
model_2000 = deepcopy(self.trainer.model)
checkpoint.save(2000, update_best=True)
self._do_a_pass()
model_2500 = deepcopy(self.trainer.model)
checkpoint.save(2500)
checkpoint.restore()
self.assertFalse(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
self.assertFalse(
compare_state_dicts(
self.trainer.model.state_dict(), model_1500.state_dict()
)
)
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), model_2000.state_dict()
)
)
self.assertFalse(
compare_state_dicts(
self.trainer.model.state_dict(), model_2500.state_dict()
)
)
def test_finalize_and_resume_file(self):
with mock_env_with_temp() as d:
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
self._do_a_pass()
checkpoint.finalize()
original = deepcopy(self.trainer.model)
pth_path = os.path.join(d, "simple_final.pth")
self.assertTrue(PathManager.exists(pth_path))
self._do_a_pass()
after_a_pass = deepcopy(self.trainer.model)
original_optimizer = deepcopy(self.trainer.optimizer)
self.trainer.config.checkpoint.resume_file = pth_path
with contextlib.redirect_stdout(StringIO()):
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original.state_dict()
)
)
self.assertFalse(
compare_state_dicts(
self.trainer.model.state_dict(), after_a_pass.state_dict()
)
)
self.assertTrue(
self._compare_optimizers(self.trainer.optimizer, original_optimizer)
)
def test_resets(self):
with mock_env_with_temp():
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
self._do_a_pass()
original_optimizer = deepcopy(self.trainer.optimizer)
original_model = deepcopy(self.trainer.model)
original_scaler = deepcopy(self.trainer.scaler)
self.trainer.current_epoch = 3
checkpoint.save(2000, update_best=True)
self.trainer.current_epoch = 4
# Test reset all
self.trainer.config.checkpoint.resume = True
self.trainer.config.checkpoint.reset.all = True
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
self.assertTrue(
self._compare_optimizers(self.trainer.optimizer, original_optimizer)
)
self.assertTrue(
compare_state_dicts(
self.trainer.scaler.state_dict(), original_scaler.state_dict()
)
)
self.assertEqual(self.trainer.num_updates, 0)
self.assertEqual(self.trainer.current_iteration, 0)
self.assertEqual(self.trainer.current_epoch, 4)
# Test reset_optimizer
self._init_early_stopping(checkpoint)
self.trainer.config.checkpoint.reset.all = False
self.trainer.config.checkpoint.reset.optimizer = True
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
self.assertTrue(
self._compare_optimizers(self.trainer.optimizer, original_optimizer)
)
self.assertEqual(self.trainer.num_updates, 2000)
self.assertEqual(self.trainer.current_iteration, 2000)
self.assertEqual(self.trainer.current_epoch, 3)
self._init_early_stopping(checkpoint)
# Test reset_counts
self.trainer.config.checkpoint.reset.all = False
self.trainer.config.checkpoint.reset.optimizer = False
self.trainer.config.checkpoint.reset.counts = True
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
self.assertTrue(
self._compare_optimizers(self.trainer.optimizer, original_optimizer)
)
self.assertEqual(self.trainer.num_updates, 0)
self.assertEqual(self.trainer.current_iteration, 0)
self.assertEqual(self.trainer.current_epoch, 2)
# Test with resume_best
self._do_a_pass()
checkpoint.save(3000)
self._init_early_stopping(checkpoint)
self.trainer.config.checkpoint.reset.all = False
self.trainer.config.checkpoint.resume_best = True
self.trainer.config.checkpoint.reset.optimizer = True
self.trainer.config.checkpoint.reset.counts = False
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
self.assertFalse(
self._compare_optimizers(self.trainer.optimizer, original_optimizer)
)
self.assertEqual(self.trainer.num_updates, 1000)
self.assertEqual(self.trainer.current_iteration, 1000)
self.assertEqual(self.trainer.current_epoch, 3)
@skip_if_no_cuda
def test_checkpoint_scaler_loading(self):
with mock_env_with_temp():
original_scaler = deepcopy(self.trainer.scaler)
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
self._do_a_fp16_pass()
checkpoint.save(1000)
self.trainer.config.checkpoint.resume = True
self.trainer.config.checkpoint.reset.all = False
self.trainer.config.checkpoint.reset.optimizer = True
self.trainer.config.checkpoint.reset.counts = True
self.trainer.config.checkpoint.reset.fp16_scaler = True
            # Reset to make it the same as the default grad scaler
self.trainer.scaler = torch.cuda.amp.GradScaler()
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.scaler.state_dict(), original_scaler.state_dict()
)
)
self._do_a_fp16_pass()
checkpoint.save(2000)
self.trainer.config.checkpoint.reset.all = False
self.trainer.config.checkpoint.reset.optimizer = True
self.trainer.config.checkpoint.reset.counts = True
self.trainer.config.checkpoint.reset.fp16_scaler = False
            # Reset again to make it the same as the default grad scaler
self.trainer.scaler = torch.cuda.amp.GradScaler()
checkpoint.load_state_dict()
self.assertFalse(
compare_state_dicts(
self.trainer.scaler.state_dict(), original_scaler.state_dict()
)
)
def test_max_to_keep(self):
with mock_env_with_temp():
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
ckpt_paths = []
for indx in [2000, 3000, 4000, 5000, 6000]:
self._do_a_pass()
checkpoint.save(indx, update_best=False)
ckpt_paths.append(
os.path.join(checkpoint.models_foldername, "model_%d.ckpt" % indx)
)
self.assertTrue(os.path.exists(ckpt_paths[-1]))
for indx, u in enumerate([7000, 8000, 9000, 10000, 11000]):
self._do_a_pass()
checkpoint.save(u, update_best=False)
ckpt_paths.append(
os.path.join(checkpoint.models_foldername, "model_%d.ckpt" % u)
)
self.assertTrue(os.path.exists(ckpt_paths[-1]))
self.assertFalse(os.path.exists(ckpt_paths[indx]))
def test_zoo_load(self):
with mock_env_with_temp():
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
self._do_a_pass()
original_model = deepcopy(self.trainer.model)
ret_load_pretrained_zoo = {
"config": self.config.model_config,
"checkpoint": deepcopy(self.trainer.model.state_dict()),
"full_config": self.config,
}
self._do_a_pass()
with patch(
"mmf.utils.checkpoint.load_pretrained_model",
return_value=ret_load_pretrained_zoo,
):
self.trainer.config.checkpoint.resume_zoo = "random"
with contextlib.redirect_stdout(StringIO()):
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
# Now, test zoo override
self.trainer.config.checkpoint.zoo_config_override = True
SimpleModule.from_pretrained = Mock(
return_value=deepcopy(original_model)
)
registry.register_model("simple")(SimpleModule)
with contextlib.redirect_stdout(StringIO()):
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.state_dict(), original_model.state_dict()
)
)
def test_pretrained_load(self):
with mock_env_with_temp() as d:
checkpoint = Checkpoint(self.trainer)
self._init_early_stopping(checkpoint)
self._do_a_pass()
original_model = deepcopy(self.trainer.model)
# Test with zoo now
ret_load_pretrained_zoo = {
"config": self.config.model_config,
"checkpoint": deepcopy(self.trainer.model.state_dict()),
"full_config": self.config,
}
checkpoint.save(2000)
self.trainer.config.checkpoint.resume_file = os.path.join(d, "current.ckpt")
self.trainer.config.checkpoint.resume_pretrained = True
self.trainer.model = OnlyBase()
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.base_test.state_dict(),
original_model.base.state_dict(),
)
)
with patch(
"mmf.utils.checkpoint.load_pretrained_model",
return_value=ret_load_pretrained_zoo,
):
self.trainer.config.checkpoint.resume_zoo = "random"
self.trainer.config.checkpoint.resume_file = None
self.trainer.model = OnlyBase()
checkpoint.load_state_dict()
self.assertTrue(
compare_state_dicts(
self.trainer.model.base_test.state_dict(),
original_model.base.state_dict(),
)
)
def _init_early_stopping(self, checkpoint):
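        # Reset counters and pretend the best validation result was seen at update 1000.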
self.trainer.num_updates = 0
self.trainer.current_iteration = 0
self.trainer.current_epoch = 0
self.trainer.checkpoint_callback = CheckpointCallback(self.config, self.trainer)
self.trainer.early_stop_callback = EarlyStoppingCallback(
self.config, self.trainer
)
self.trainer.early_stop_callback.early_stopping.best_monitored_iteration = 1000
self.trainer.early_stop_callback.early_stopping.best_monitored_update = 1000
self.trainer.early_stop_callback.early_stopping.best_monitored_value = 0.1
self.trainer.current_epoch = 2
def _do_a_pass(self):
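        # One forward/backward/optimizer step on random data so that model and
        # optimizer state change between checkpoints.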
self.trainer.optimizer.zero_grad()
self.trainer.model.train()
with contextlib.redirect_stdout(StringIO()):
loss = self.trainer.model(
torch.rand(5, 5, requires_grad=True),
torch.empty(5, dtype=torch.long).random_(5),
)
loss["losses"]["total_loss"].sum().backward()
self.trainer.optimizer.step()
self.trainer.lr_scheduler_callback._scheduler.step()
def _do_a_fp16_pass(self):
self.trainer.optimizer.zero_grad()
self.trainer.model.train()
self.trainer.model.cuda()
with contextlib.redirect_stdout(StringIO()):
with torch.cuda.amp.autocast():
loss = self.trainer.model(
torch.rand(5, 5, requires_grad=True).cuda(),
torch.empty(5, dtype=torch.long).random_(5).cuda(),
)
self.trainer.scaler.scale(loss["losses"]["total_loss"].sum()).backward()
self.trainer.scaler.step(self.trainer.optimizer)
self.trainer.scaler.update()
self.trainer.lr_scheduler_callback._scheduler.step()
def _compare_optimizers(self, a, b):
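        # Two optimizers match when their state keys, param groups, and every
        # per-parameter state tensor are equal.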
state_dict_a = a.state_dict()
state_dict_b = b.state_dict()
state_a = state_dict_a["state"]
state_b = state_dict_b["state"]
same = True
same = same and list(state_a.keys()) == list(state_b.keys())
same = same and state_dict_a["param_groups"] == state_dict_b["param_groups"]
for item1, item2 in zip(state_a.values(), state_b.values()):
same = same and compare_state_dicts(item1, item2)
return same
| EXA-1-master | exa/models/mmf-main/tests/utils/test_checkpoint.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import os
import unittest
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample, SampleList
from mmf.models.cnn_lstm import CNNLSTM
from mmf.utils.configuration import Configuration
from mmf.utils.general import get_current_device, get_mmf_root
from tests.test_utils import dummy_args
class TestModelCNNLSTM(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
registry.register("clevr_text_vocab_size", 80)
registry.register("clevr_num_final_outputs", 32)
config_path = os.path.join(
get_mmf_root(),
"..",
"projects",
"others",
"cnn_lstm",
"clevr",
"defaults.yaml",
)
config_path = os.path.abspath(config_path)
args = dummy_args(model="cnn_lstm", dataset="clevr")
args.opts.append(f"config={config_path}")
configuration = Configuration(args)
configuration.config.datasets = "clevr"
configuration.freeze()
self.config = configuration.config
registry.register("config", self.config)
def tearDown(self):
registry.unregister("clevr_text_vocab_size")
registry.unregister("clevr_num_final_outputs")
registry.unregister("config")
del self.config
gc.collect()
def test_forward(self):
model_config = self.config.model_config.cnn_lstm
cnn_lstm = CNNLSTM(model_config)
cnn_lstm.build()
cnn_lstm.init_losses()
self.assertTrue(isinstance(cnn_lstm, torch.nn.Module))
test_sample = Sample()
test_sample.text = torch.randint(1, 79, (10,), dtype=torch.long)
test_sample.image = torch.randn(3, 320, 480)
test_sample.targets = torch.randn(32)
test_sample_list = SampleList([test_sample])
test_sample_list.dataset_type = "train"
test_sample_list.dataset_name = "clevr"
test_sample_list = test_sample_list.to(get_current_device())
cnn_lstm = cnn_lstm.to(get_current_device())
output = cnn_lstm(test_sample_list)
scores = output["scores"]
loss = output["losses"]["train/clevr/logit_bce"]
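        # The exact loss value asserted below relies on the seed fixed in
        # setUp() (torch.manual_seed(1234)).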
np.testing.assert_almost_equal(loss.item(), 19.2635, decimal=4)
self.assertEqual(scores.size(), torch.Size((1, 32)))
| EXA-1-master | exa/models/mmf-main/tests/models/test_cnn_lstm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import SampleList
from mmf.modules.hf_layers import replace_with_jit, undo_replace_with_jit
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from mmf.utils.general import get_current_device
BERT_VOCAB_SIZE = 30255
class TestVisualBertTorchscript(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
replace_with_jit()
model_name = "visual_bert"
args = test_utils.dummy_args(model=model_name)
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config["training_head_type"] = "classification"
model_config["num_labels"] = 2
model_config.model = model_name
self.finetune_model = build_model(model_config)
def tearDown(self):
teardown_imports()
undo_replace_with_jit()
del self.finetune_model
gc.collect()
def test_load_save_finetune_model(self):
self.assertTrue(test_utils.verify_torchscript_models(self.finetune_model))
def test_finetune_model(self):
model = self.finetune_model.eval()
self.assertTrue(
test_utils.compare_torchscript_transformer_models(
model, vocab_size=BERT_VOCAB_SIZE
)
)
class TestVisualBertPretraining(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
replace_with_jit()
model_name = "visual_bert"
args = test_utils.dummy_args(model=model_name)
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config.model = model_name
self.pretrain_model = build_model(model_config)
def tearDown(self):
teardown_imports()
undo_replace_with_jit()
del self.pretrain_model
gc.collect()
def test_pretrained_model(self):
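        # Every lm_label_id is set to -1 (the ignore index), so no position
        # contributes to the masked LM loss; the test therefore expects the
        # reported loss to be NaN.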
sample_list = SampleList()
sample_list.add_field(
"input_ids",
torch.randint(low=0, high=BERT_VOCAB_SIZE, size=(1, 128)).long(),
)
sample_list.add_field("input_mask", torch.ones((1, 128)).long())
sample_list.add_field("segment_ids", torch.zeros(1, 128).long())
sample_list.add_field("image_feature_0", torch.rand((1, 100, 2048)).float())
sample_list.add_field(
"lm_label_ids", torch.zeros((1, 128), dtype=torch.long).fill_(-1)
)
self.pretrain_model.eval()
self.pretrain_model = self.pretrain_model.to(get_current_device())
sample_list = sample_list.to(get_current_device())
sample_list.dataset_name = "random"
sample_list.dataset_type = "test"
with torch.no_grad():
model_output = self.pretrain_model(sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("random/test/masked_lm_loss" in model_output["losses"])
self.assertTrue(
torch.isnan(model_output["losses"]["random/test/masked_lm_loss"])
)
| EXA-1-master | exa/models/mmf-main/tests/models/test_visual_bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import SampleList
from mmf.models.uniter import (
UNITERForClassification,
UNITERForPretraining,
UNITERImageEmbeddings,
UNITERModelBase,
)
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from mmf.utils.general import get_current_device
from omegaconf import OmegaConf
class TestUNITERImageEmbeddings(unittest.TestCase):
def setUp(self):
bs = 32
num_feat = 100
self.config = OmegaConf.create(
{"img_dim": 1024, "hidden_size": 256, "pos_dim": 7}
)
self.img_feat = torch.rand((bs, num_feat, self.config["img_dim"]))
self.img_pos_feat = torch.rand((bs, num_feat, self.config["pos_dim"]))
self.type_embeddings = torch.ones((bs, num_feat, 1), dtype=torch.long)
def test_forward(self):
embedding = UNITERImageEmbeddings(**self.config)
output = embedding(
self.img_feat, self.img_pos_feat, self.type_embeddings, img_masks=None
)
        self.assertEqual(list(output.shape), [32, 100, 256])
class TestUNITERModelBase(unittest.TestCase):
def test_pretrained_model(self):
img_dim = 1024
model = UNITERModelBase(img_dim=img_dim)
device = get_current_device()
model.eval()
model = model.to(device)
bs = 8
num_feats = 100
max_sentence_len = 25
pos_dim = 7
input_ids = torch.ones((bs, max_sentence_len), dtype=torch.long).to(device)
img_feat = torch.rand((bs, num_feats, img_dim)).to(device)
img_pos_feat = torch.rand((bs, num_feats, pos_dim)).to(device)
position_ids = torch.arange(
0, input_ids.size(1), dtype=torch.long, device=device
).unsqueeze(0)
attention_mask = torch.ones((bs, max_sentence_len + num_feats)).to(device)
with torch.no_grad():
model_output = model(
input_ids, position_ids, img_feat, img_pos_feat, attention_mask
).final_layer
self.assertEqual(model_output.shape, torch.Size([8, 125, 768]))
class TestUniterWithHeads(unittest.TestCase):
def _get_sample_list(self):
bs = 8
num_feats = 100
max_sentence_len = 25
img_dim = 2048
cls_dim = 3129
input_ids = torch.ones((bs, max_sentence_len), dtype=torch.long)
input_mask = torch.ones((bs, max_sentence_len), dtype=torch.long)
image_feat = torch.rand((bs, num_feats, img_dim))
position_ids = (
torch.arange(
0, max_sentence_len, dtype=torch.long, device=image_feat.device
)
.unsqueeze(0)
.expand(bs, -1)
)
img_pos_feat = torch.rand((bs, num_feats, 7))
attention_mask = torch.zeros(
(bs, max_sentence_len + num_feats), dtype=torch.long
)
image_mask = torch.zeros((bs, num_feats), dtype=torch.long)
targets = torch.rand((bs, cls_dim))
sample_list = SampleList()
sample_list.add_field("input_ids", input_ids)
sample_list.add_field("input_mask", input_mask)
sample_list.add_field("image_feat", image_feat)
sample_list.add_field("img_pos_feat", img_pos_feat)
sample_list.add_field("attention_mask", attention_mask)
sample_list.add_field("image_mask", image_mask)
sample_list.add_field("targets", targets)
sample_list.add_field("dataset_name", "test")
sample_list.add_field("dataset_type", "test")
sample_list.add_field("position_ids", position_ids)
sample_list.to(get_current_device())
return sample_list
def test_uniter_for_classification(self):
heads = {"test": {"type": "mlp", "num_labels": 3129}}
tasks = "test"
losses = {"test": "logit_bce"}
model = UNITERForClassification(
head_configs=heads, loss_configs=losses, tasks=tasks
)
model.eval()
model = model.to(get_current_device())
sample_list = self._get_sample_list()
with torch.no_grad():
model_output = model(sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("test/test/logit_bce" in model_output["losses"])
def _enhance_sample_list_for_pretraining(self, sample_list):
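        # Add the extra fields the pretraining heads expect: ITM correctness,
        # MLM labels and masked input ids, and per-region class probabilities
        # (cls_prob) used by the MRC head.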
bs = sample_list["input_ids"].size(0)
sentence_len = sample_list["input_ids"].size(1)
is_correct = torch.ones((bs,), dtype=torch.long)
lm_label_ids = torch.zeros((bs, sentence_len), dtype=torch.long)
input_ids_masked = sample_list["input_ids"]
num_feat = sample_list["image_feat"].size(1)
cls_dim = 1601
image_info = {"cls_prob": torch.rand((bs, num_feat, cls_dim))}
sample_list.add_field("is_correct", is_correct)
sample_list.add_field("task", "mlm")
sample_list.add_field("lm_label_ids", lm_label_ids)
sample_list.add_field("input_ids_masked", input_ids_masked)
sample_list.add_field("image_info_0", image_info)
sample_list.to(get_current_device())
def test_uniter_for_pretraining(self):
# UNITER pretraining has 5 pretraining tasks,
# we have one unique head for each, and in each
# forward pass we train on a different task.
# In this test we try running a forward pass
# through each head.
heads = {
"mlm": {"type": "mlm"},
"itm": {"type": "itm"},
"mrc": {"type": "mrc"},
"mrfr": {"type": "mrfr"},
"wra": {"type": "wra"},
}
tasks = "mlm,itm,mrc,mrfr,wra"
mask_probability = 0.15
model = UNITERForPretraining(
head_configs=heads, tasks=tasks, mask_probability=mask_probability
)
model.eval()
model = model.to(get_current_device())
sample_list = self._get_sample_list()
self._enhance_sample_list_for_pretraining(sample_list)
expected_loss_names = {
"mlm": "masked_lm_loss",
"itm": "itm_loss",
"mrc": "mrc_loss",
"mrfr": "mrfr_loss",
"wra": "wra_loss",
}
for task_name, loss_name in expected_loss_names.items():
sample_list["task"] = task_name
with torch.no_grad():
model_output = model(sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue(loss_name in model_output["losses"])
class TestUniterModel(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
model_name = "uniter"
args = test_utils.dummy_args(model=model_name, dataset="vqa2")
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config.model = model_name
model_config.losses = {"vqa2": "logit_bce"}
model_config.do_pretraining = False
model_config.tasks = "vqa2"
classification_config_dict = {
"do_pretraining": False,
"tasks": "vqa2",
"heads": {"vqa2": {"type": "mlp", "num_labels": 3129}},
"losses": {"vqa2": "logit_bce"},
}
classification_config = OmegaConf.create(
{**model_config, **classification_config_dict}
)
pretraining_config_dict = {
"do_pretraining": True,
"tasks": "wra",
"heads": {"wra": {"type": "wra"}},
}
pretraining_config = OmegaConf.create(
{**model_config, **pretraining_config_dict}
)
self.model_for_classification = build_model(classification_config)
self.model_for_pretraining = build_model(pretraining_config)
def tearDown(self):
teardown_imports()
del self.model_for_classification
del self.model_for_pretraining
gc.collect()
def _get_sample_list(self):
bs = 8
num_feats = 100
max_sentence_len = 25
img_dim = 2048
vqa_cls_dim = 3129
input_ids = torch.ones((bs, max_sentence_len), dtype=torch.long)
input_mask = torch.ones((bs, max_sentence_len), dtype=torch.long)
img_feat = torch.rand((bs, num_feats, img_dim))
max_features = torch.ones((bs, num_feats)) * num_feats
bbox = torch.randint(50, 200, (bs, num_feats, 4)).float()
image_height = torch.randint(100, 300, (bs,))
image_width = torch.randint(100, 300, (bs,))
image_info = {
"max_features": max_features,
"bbox": bbox,
"image_height": image_height,
"image_width": image_width,
}
targets = torch.rand((bs, vqa_cls_dim))
is_correct = torch.ones((bs,), dtype=torch.long)
sample_list = SampleList()
sample_list.add_field("input_ids", input_ids)
sample_list.add_field("image_feature_0", img_feat)
sample_list.add_field("input_mask", input_mask)
sample_list.add_field("image_info_0", image_info)
sample_list.add_field("targets", targets)
sample_list.add_field("is_correct", is_correct)
sample_list = sample_list.to(get_current_device())
return sample_list
def test_uniter_for_classification(self):
self.model_for_classification.eval()
self.model_for_classification = self.model_for_classification.to(
get_current_device()
)
sample_list = self._get_sample_list()
sample_list.dataset_name = "vqa2"
sample_list.dataset_type = "test"
with torch.no_grad():
model_output = self.model_for_classification(sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("test/vqa2/logit_bce" in model_output["losses"])
def test_uniter_for_pretraining(self):
self.model_for_pretraining.eval()
self.model_for_pretraining = self.model_for_pretraining.to(get_current_device())
sample_list = self._get_sample_list()
sample_list["tasks"] = "wra"
sample_list.dataset_name = "vqa2"
sample_list.dataset_type = "test"
with torch.no_grad():
model_output = self.model_for_pretraining(sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("wra_loss" in model_output["losses"])
| EXA-1-master | exa/models/mmf-main/tests/models/test_uniter.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.models.albef.vit import AlbefVitEncoder
from omegaconf import OmegaConf
from tests.test_utils import setup_proxy
from torch import nn
class TestAlbefEncoders(unittest.TestCase):
def setUp(self):
setup_proxy()
def _test_init(self, cls, **params):
encoder = cls.from_params(**params)
self.assertTrue(isinstance(encoder, nn.Module))
def test_vision_transformer(self):
config = OmegaConf.structured(AlbefVitEncoder.Config())
encoder = AlbefVitEncoder(config)
x = torch.rand((1, 3, 224, 224))
output = encoder(x)
self.assertEqual(output.size(-1), config.out_dim)
| EXA-1-master | exa/models/mmf-main/tests/models/test_albef.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
import tests.test_utils as test_utils
import torch
from mmf.modules.hf_layers import undo_replace_with_jit
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
BERT_VOCAB_SIZE = 30255
class TestViLBertTorchscript(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
model_name = "vilbert"
args = test_utils.dummy_args(model=model_name)
configuration = Configuration(args)
config = configuration.get_config()
self.vision_feature_size = 1024
self.vision_target_size = 1279
model_config = config.model_config[model_name]
model_config["training_head_type"] = "pretraining"
model_config["visual_embedding_dim"] = self.vision_feature_size
model_config["v_feature_size"] = self.vision_feature_size
model_config["v_target_size"] = self.vision_target_size
model_config["dynamic_attention"] = False
model_config.model = model_name
model_config["training_head_type"] = "classification"
model_config["num_labels"] = 2
self.model_config = model_config
def tearDown(self):
teardown_imports()
undo_replace_with_jit()
del self.model_config
gc.collect()
def test_load_save_pretrain_model(self):
self.model_config["training_head_type"] = "pretraining"
pretrain_model = build_model(self.model_config)
self.assertTrue(test_utils.verify_torchscript_models(pretrain_model.model))
def test_load_save_finetune_model(self):
self.model_config["training_head_type"] = "classification"
finetune_model = build_model(self.model_config)
self.assertTrue(test_utils.verify_torchscript_models(finetune_model.model))
def test_pretrained_model(self):
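        # Run the same random inputs through the eager pretraining model and
        # its torch.jit.script version, then check that both report the same
        # NaN-status for the masked LM loss.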
self.model_config["training_head_type"] = "pretraining"
pretrain_model = build_model(self.model_config)
pretrain_model.model.eval()
num_bbox_per_image = 10
input_ids = torch.randint(low=0, high=BERT_VOCAB_SIZE, size=(1, 128)).long()
attention_mask = torch.ones((1, 128)).long()
token_type_ids = torch.zeros(1, 128).long()
visual_embeddings = torch.rand(
(1, num_bbox_per_image, self.vision_feature_size)
).float()
image_attention_mask = torch.zeros((1, num_bbox_per_image)).long()
visual_locations = torch.rand((1, num_bbox_per_image, 5)).float()
masked_lm_labels = torch.zeros((1, 128), dtype=torch.long).fill_(-1)
image_target = torch.zeros(1, num_bbox_per_image, self.vision_target_size)
image_label = torch.ones(1, num_bbox_per_image).fill_(-1)
pretrain_model.eval()
with torch.no_grad():
model_output = pretrain_model.model(
input_ids=input_ids,
image_feature=visual_embeddings,
image_location=visual_locations,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
image_attention_mask=image_attention_mask,
masked_lm_labels=masked_lm_labels,
image_label=image_label,
image_target=image_target,
)
script_model = torch.jit.script(pretrain_model.model)
with torch.no_grad():
script_output = script_model(
input_ids=input_ids,
image_feature=visual_embeddings,
image_location=visual_locations,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
image_attention_mask=image_attention_mask,
masked_lm_labels=masked_lm_labels,
image_label=image_label,
image_target=image_target,
)
self.assertEqual(
torch.isnan(model_output["masked_lm_loss"]),
torch.isnan(script_output["masked_lm_loss"]),
)
def test_finetune_model(self):
self.model_config["training_head_type"] = "classification"
finetune_model = build_model(self.model_config)
finetune_model.model.eval()
num_bbox_per_image = 10
input_ids = torch.randint(low=0, high=BERT_VOCAB_SIZE, size=(1, 128)).long()
attention_mask = torch.ones((1, 128)).long()
token_type_ids = torch.zeros(1, 128).long()
visual_embeddings = torch.rand(
(1, num_bbox_per_image, self.vision_feature_size)
).float()
image_attention_mask = torch.zeros((1, num_bbox_per_image)).long()
visual_locations = torch.rand((1, num_bbox_per_image, 5)).float()
finetune_model.eval()
with torch.no_grad():
model_output = finetune_model.model(
input_ids=input_ids,
image_feature=visual_embeddings,
image_location=visual_locations,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
image_attention_mask=image_attention_mask,
)
script_model = torch.jit.script(finetune_model.model)
with torch.no_grad():
script_output = script_model(
input_ids=input_ids,
image_feature=visual_embeddings,
image_location=visual_locations,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
image_attention_mask=image_attention_mask,
)
self.assertTrue(torch.equal(model_output["scores"], script_output["scores"]))
| EXA-1-master | exa/models/mmf-main/tests/models/test_vilbert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import SampleList
from mmf.models.vilt import ViLTImageEmbedding, ViLTTextEmbedding
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from mmf.utils.general import get_current_device
from tests.test_utils import skip_if_old_transformers
from torch import nn
BERT_VOCAB_SIZE = 30255
@skip_if_old_transformers(min_version="4.5.0")
class TestViltEmbeddings(unittest.TestCase):
def test_vilt_image_embedding(self):
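        # 224x224 input with 16-pixel patches (the default here) gives
        # 14 * 14 = 196 patch tokens plus one CLS token, hence length 197.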
embedding = ViLTImageEmbedding()
self.assertTrue(isinstance(embedding, nn.Module))
image = torch.rand(32, 3, 224, 224)
output = embedding(image)
self.assertEqual(output.shape, torch.Size([32, 197, 768]))
def test_vilt_image_embedding_pretrained(self):
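        # 384x384 input with 32-pixel patches gives 12 * 12 = 144 patch
        # tokens plus one CLS token, hence length 145.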
config = {
"random_init": False,
"patch_size": 32,
"pretrained_model_name": "google/vit-base-patch32-384",
"image_size": [384, 384],
}
embedding = ViLTImageEmbedding(**config)
self.assertTrue(isinstance(embedding, nn.Module))
image = torch.rand(32, 3, 384, 384)
output = embedding(image)
self.assertEqual(output.shape, torch.Size([32, 145, 768]))
def test_vilt_text_embedding(self):
embedding = ViLTTextEmbedding()
self.assertTrue(isinstance(embedding, nn.Module))
input_ids = torch.ones(32, 25).long()
segment_ids = torch.ones(32, 25).long()
output = embedding(input_ids, segment_ids)
self.assertEqual(output.shape, torch.Size([32, 25, 768]))
@skip_if_old_transformers(min_version="4.5.0")
class TestViltPretrained(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
model_name = "vilt"
args = test_utils.dummy_args(model=model_name, dataset="test")
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config.model = model_name
self.pretrain_model = build_model(model_config)
def tearDown(self):
teardown_imports()
del self.pretrain_model
gc.collect()
def test_pretrained_model(self):
sample_list = SampleList()
sample_list.add_field(
"input_ids",
torch.randint(low=0, high=BERT_VOCAB_SIZE, size=(1, 128)).long(),
)
sample_list.add_field("input_mask", torch.ones((1, 128)).long())
sample_list.add_field("segment_ids", torch.zeros(1, 128).long())
sample_list.add_field("image", torch.rand((1, 3, 224, 224)).float())
sample_list.add_field("targets", torch.rand((1, 3129)).float())
self.pretrain_model.eval()
self.pretrain_model = self.pretrain_model.to(get_current_device())
sample_list = sample_list.to(get_current_device())
sample_list.dataset_name = "test"
sample_list.dataset_type = "test"
with torch.no_grad():
model_output = self.pretrain_model(sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("test/test/logit_bce" in model_output["losses"])
| EXA-1-master | exa/models/mmf-main/tests/models/test_vilt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import SampleList
from mmf.models.mmf_transformer import MMFTransformer, MMFTransformerModalityConfig
from mmf.models.transformers.heads.mlm import MLM
from mmf.modules.encoders import (
EncoderFactory,
IdentityEncoder,
ImageEncoderFactory,
ImageEncoderTypes,
ResNet152ImageEncoder,
TextEncoderFactory,
TextEncoderTypes,
)
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from omegaconf import OmegaConf
from tests.test_utils import skip_if_no_pytorchvideo
BERT_VOCAB_SIZE = 30255
ROBERTA_VOCAB_SIZE = 50265
XLM_ROBERTA_VOCAB_SIZE = 250002
class TestMMFTransformerTorchscript(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
self.model_name = "mmf_transformer"
args = test_utils.dummy_args(model=self.model_name)
configuration = Configuration(args)
self.config = configuration.get_config()
self.config.model_config[self.model_name].model = self.model_name
def tearDown(self):
teardown_imports()
del self.config
del self.model_name
gc.collect()
def test_load_save_finetune_model(self):
model = build_model(self.config.model_config[self.model_name])
self.assertTrue(test_utils.verify_torchscript_models(model))
def test_finetune_bert_base(self):
model = build_model(self.config.model_config[self.model_name])
model.eval()
self.assertTrue(
test_utils.compare_torchscript_transformer_models(
model, vocab_size=BERT_VOCAB_SIZE
)
)
def test_finetune_roberta_base(self):
self.config.model_config[self.model_name]["transformer_base"] = "roberta-base"
model = build_model(self.config.model_config[self.model_name])
model.eval()
self.assertTrue(
test_utils.compare_torchscript_transformer_models(
model, vocab_size=ROBERTA_VOCAB_SIZE
)
)
@test_utils.skip_if_no_network
def test_finetune_xlmr_base(self):
self.config.model_config[self.model_name][
"transformer_base"
] = "xlm-roberta-base"
model = build_model(self.config.model_config[self.model_name])
model.eval()
self.assertTrue(
test_utils.compare_torchscript_transformer_models(
model, vocab_size=XLM_ROBERTA_VOCAB_SIZE
)
)
class TestMMFTransformerConfig(unittest.TestCase):
def setUp(self):
setup_imports()
def tearDown(self):
teardown_imports()
def test_mmft_from_params(self):
modalities_config = [
MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=256,
position_dim=1,
segment_id=0,
encoder=IdentityEncoder.Config(),
),
MMFTransformerModalityConfig(
type="text",
key="text",
embedding_dim=768,
position_dim=512,
segment_id=1,
encoder=IdentityEncoder.Config(),
),
]
mmft = MMFTransformer.from_params(modalities=modalities_config, num_labels=2)
mmft.build()
config = OmegaConf.structured(
MMFTransformer.Config(modalities=modalities_config, num_labels=2)
)
self.assertIsNotNone(mmft)
self.assertEqual(mmft.config, config)
def test_mmf_from_params_encoder_factory(self):
modalities_config = [
MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=256,
position_dim=1,
segment_id=0,
encoder=ImageEncoderFactory.Config(type=ImageEncoderTypes.identity),
),
MMFTransformerModalityConfig(
type="text",
key="text",
embedding_dim=756,
position_dim=512,
segment_id=0,
encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
),
]
mmft = MMFTransformer.from_params(modalities=modalities_config, num_labels=2)
mmft.build()
config = OmegaConf.structured(
MMFTransformer.Config(modalities=modalities_config, num_labels=2)
)
self.assertIsNotNone(mmft)
self.assertEqual(mmft.config, config)
def test_mmft_pretrained(self):
mmft = MMFTransformer.from_params(num_labels=2)
self.assertIsNotNone(mmft)
def test_mmft_from_build_model(self):
modalities_config = [
MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=256,
position_dim=1,
segment_id=0,
encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
),
MMFTransformerModalityConfig(
type="text",
key="text",
embedding_dim=756,
position_dim=512,
segment_id=1,
encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
),
]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
self.assertIsNotNone(mmft)
class TestMMFTransformer(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
self._image_modality_config = MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=256,
position_dim=1,
segment_id=0,
encoder=ImageEncoderFactory.Config(type=ImageEncoderTypes.identity),
)
self._text_modality_config = MMFTransformerModalityConfig(
type="text",
key="text",
embedding_dim=756,
position_dim=128,
segment_id=1,
encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
def tearDown(self):
teardown_imports()
del self._image_modality_config
del self._text_modality_config
gc.collect()
def test_one_dim_feature_preprocessing(self):
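        # A flat 256-dim image feature should be treated as a single token
        # (unsqueezed to [batch, 1, 256]) while the 128 text positions stay
        # as-is; ids, positions, masks, segments and MLM labels are checked
        # for both modalities below.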
modalities_config = [self._image_modality_config, self._text_modality_config]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
sample_list = SampleList()
sample_list.image = torch.rand(2, 256)
sample_list.text = torch.randint(0, 512, (2, 128))
transformer_input = mmft.preprocess_sample(sample_list)
input_ids = transformer_input["input_ids"]
self.assertEqual(input_ids["image"].dim(), 3)
self.assertEqual(list(input_ids["image"].size()), [2, 1, 256])
self.assertEqual(input_ids["text"].dim(), 2)
self.assertEqual(list(input_ids["text"].size()), [2, 128])
position_ids = transformer_input["position_ids"]
test_utils.compare_tensors(position_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(
position_ids["text"], torch.arange(0, 128).unsqueeze(0).expand((2, 128))
)
        # Exercise mask inference directly rather than the masks returned by
        # preprocess_sample (which were otherwise unused here).
        masks = mmft._infer_masks(sample_list, input_ids)
test_utils.compare_tensors(masks["image"], torch.tensor([[1], [1]]))
test_utils.compare_tensors(masks["text"], torch.ones((2, 128)).long())
segment_ids = transformer_input["segment_ids"]
test_utils.compare_tensors(segment_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(segment_ids["text"], torch.ones((2, 128)).long())
mlm_labels = transformer_input["mlm_labels"]
test_utils.compare_tensors(
mlm_labels["combined_labels"],
torch.full((2, 129), dtype=torch.long, fill_value=-1),
)
def test_stacked_feature_preprocessing(self):
self._text_modality_config.key = "body"
second_text_modality_config = MMFTransformerModalityConfig(
type="text",
key="ocr",
embedding_dim=756,
position_dim=128,
segment_id=2,
encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
modalities_config = [
self._image_modality_config,
self._text_modality_config,
second_text_modality_config,
]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
sample_list = SampleList()
sample_list.image = torch.rand(2, 256)
# In stacked case, input_ids should represent all texts
sample_list.input_ids = torch.randint(0, 512, (2, 2, 128))
sample_list.lm_label_ids = torch.randint(-1, 30522, (2, 2, 128))
lm_labels_sum = sample_list.lm_label_ids.sum().item()
transformer_input = mmft.preprocess_sample(sample_list)
self._compare_processed_for_multimodality(transformer_input, lm_labels_sum)
def test_modality_key_preprocessing(self):
self._text_modality_config.key = "body"
second_text_modality_config = MMFTransformerModalityConfig(
type="text",
key="ocr",
embedding_dim=756,
position_dim=128,
segment_id=2,
encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
modalities_config = [
self._image_modality_config,
self._text_modality_config,
second_text_modality_config,
]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
sample_list = SampleList()
sample_list.image = torch.rand(2, 256)
sample_list.body = torch.randint(0, 512, (2, 128))
sample_list.ocr = torch.randint(0, 512, (2, 128))
sample_list.lm_label_ids = torch.randint(-1, 30522, (2, 128))
lm_labels_sum = sample_list.lm_label_ids.sum().item() * 2
transformer_input = mmft.preprocess_sample(sample_list)
self._compare_processed_for_multimodality(transformer_input, lm_labels_sum)
def _compare_processed_for_multimodality(self, transformer_input, lm_labels_sum=0):
input_ids = transformer_input["input_ids"]
self.assertEqual(input_ids["image"].dim(), 3)
self.assertEqual(list(input_ids["image"].size()), [2, 1, 256])
self.assertEqual(input_ids["body"].dim(), 2)
self.assertEqual(list(input_ids["body"].size()), [2, 128])
self.assertEqual(input_ids["ocr"].dim(), 2)
self.assertEqual(list(input_ids["ocr"].size()), [2, 128])
        # Variants of this flow (explicit modality keys, a resnet image
        # encoder, custom input/modality masks) are exercised by the other
        # tests in this class.
position_ids = transformer_input["position_ids"]
test_utils.compare_tensors(position_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(
position_ids["body"], torch.arange(0, 128).unsqueeze(0).expand((2, 128))
)
test_utils.compare_tensors(
position_ids["ocr"], torch.arange(0, 128).unsqueeze(0).expand((2, 128))
)
masks = transformer_input["masks"]
test_utils.compare_tensors(masks["image"], torch.tensor([[1], [1]]))
test_utils.compare_tensors(masks["body"], torch.ones((2, 128)).long())
test_utils.compare_tensors(masks["ocr"], torch.ones((2, 128)).long())
segment_ids = transformer_input["segment_ids"]
test_utils.compare_tensors(segment_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(segment_ids["body"], torch.ones((2, 128)).long())
test_utils.compare_tensors(
segment_ids["ocr"],
torch.full((2, 128), dtype=torch.long, fill_value=2).long(),
)
mlm_labels = transformer_input["mlm_labels"]
self.assertEqual(list(mlm_labels["combined_labels"].size()), [2, 257])
# -2 is for image negative labels
self.assertEqual(mlm_labels["combined_labels"].sum().item(), lm_labels_sum - 2)
def test_custom_feature_and_mask_preprocessing(self):
extra_modality = MMFTransformerModalityConfig(
type="my_random_feature",
key="my_random_feature",
embedding_dim=128,
position_dim=4,
segment_id=3,
encoder=EncoderFactory.Config(type="identity"),
)
modalities_config = [
self._image_modality_config,
self._text_modality_config,
extra_modality,
]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
sample_list = SampleList()
sample_list.image = torch.rand(2, 256)
sample_list.text = torch.randint(0, 512, (2, 128))
sample_list.text_mask = torch.ones(2, 128)
sample_list.text_mask[:, 70:] = 0
sample_list.my_random_feature = torch.rand(2, 4, 128)
sample_list.my_random_feature_mask = torch.ones(2, 4)
sample_list.my_random_feature_mask[:, 3:] = 0
transformer_input = mmft.preprocess_sample(sample_list)
input_ids = transformer_input["input_ids"]
self.assertEqual(input_ids["image"].dim(), 3)
self.assertEqual(list(input_ids["image"].size()), [2, 1, 256])
self.assertEqual(input_ids["text"].dim(), 2)
self.assertEqual(list(input_ids["text"].size()), [2, 128])
self.assertEqual(input_ids["my_random_feature"].dim(), 3)
self.assertEqual(list(input_ids["my_random_feature"].size()), [2, 4, 128])
position_ids = transformer_input["position_ids"]
test_utils.compare_tensors(position_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(
position_ids["text"], torch.arange(0, 128).unsqueeze(0).expand((2, 128))
)
test_utils.compare_tensors(
position_ids["my_random_feature"],
torch.arange(0, 4).unsqueeze(0).expand((2, 4)),
)
masks = transformer_input["masks"]
test_utils.compare_tensors(masks["image"], torch.tensor([[1], [1]]))
self.assertEqual(masks["text"].sum().item(), 140)
self.assertEqual(masks["my_random_feature"].sum().item(), 6)
segment_ids = transformer_input["segment_ids"]
test_utils.compare_tensors(segment_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(segment_ids["text"], torch.ones((2, 128)).long())
test_utils.compare_tensors(
segment_ids["my_random_feature"],
torch.full((2, 4), dtype=torch.long, fill_value=3).long(),
)
def test_preprocessing_with_resnet_encoder(self):
self._image_modality_config = MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=2048,
position_dim=1,
segment_id=0,
encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
)
modalities_config = [self._image_modality_config, self._text_modality_config]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
sample_list = SampleList()
sample_list.image = torch.rand(2, 3, 224, 224)
sample_list.text = torch.randint(0, 512, (2, 128))
transformer_input = mmft.preprocess_sample(sample_list)
input_ids = transformer_input["input_ids"]
self.assertEqual(input_ids["image"].dim(), 3)
self.assertEqual(list(input_ids["image"].size()), [2, 1, 2048])
self.assertEqual(input_ids["text"].dim(), 2)
self.assertEqual(list(input_ids["text"].size()), [2, 128])
position_ids = transformer_input["position_ids"]
test_utils.compare_tensors(position_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(
position_ids["text"], torch.arange(0, 128).unsqueeze(0).expand((2, 128))
)
masks = transformer_input["masks"]
test_utils.compare_tensors(masks["image"], torch.tensor([[1], [1]]))
test_utils.compare_tensors(masks["text"], torch.ones((2, 128)).long())
segment_ids = transformer_input["segment_ids"]
test_utils.compare_tensors(segment_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(segment_ids["text"], torch.ones((2, 128)).long())
@skip_if_no_pytorchvideo
def test_preprocessing_with_mvit_encoder(self):
encoder_config = OmegaConf.create(
{
"name": "pytorchvideo",
"model_name": "mvit_base_32x3",
"random_init": True,
"drop_last_n_layers": 0,
"pooler_name": "cls",
"spatial_size": 224,
"temporal_size": 8,
"head": None,
"embed_dim_mul": [[1, 2.0], [3, 2.0], [14, 2.0]],
"atten_head_mul": [[1, 2.0], [3, 2.0], [14, 2.0]],
"pool_q_stride_size": [[1, 1, 2, 2], [3, 1, 2, 2], [14, 1, 2, 2]],
"pool_kv_stride_adaptive": [1, 8, 8],
"pool_kvq_kernel": [3, 3, 3],
}
)
self._image_modality_config = MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=768,
position_dim=1,
segment_id=0,
encoder=encoder_config,
)
modalities_config = [self._image_modality_config, self._text_modality_config]
config = MMFTransformer.Config(modalities=modalities_config, num_labels=2)
mmft = build_model(config)
sample_list = SampleList()
sample_list.image = torch.rand((2, 3, 8, 224, 224))
sample_list.text = torch.randint(0, 512, (2, 128))
transformer_input = mmft.preprocess_sample(sample_list)
input_ids = transformer_input["input_ids"]
self.assertEqual(input_ids["image"].dim(), 3)
self.assertEqual(list(input_ids["image"].size()), [2, 1, 768])
self.assertEqual(input_ids["text"].dim(), 2)
self.assertEqual(list(input_ids["text"].size()), [2, 128])
position_ids = transformer_input["position_ids"]
test_utils.compare_tensors(position_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(
position_ids["text"], torch.arange(0, 128).unsqueeze(0).expand((2, 128))
)
masks = transformer_input["masks"]
test_utils.compare_tensors(masks["image"], torch.tensor([[1], [1]]))
test_utils.compare_tensors(masks["text"], torch.ones((2, 128)).long())
segment_ids = transformer_input["segment_ids"]
test_utils.compare_tensors(segment_ids["image"], torch.tensor([[0], [0]]))
test_utils.compare_tensors(segment_ids["text"], torch.ones((2, 128)).long())
def test_tie_mlm_head_weight_to_encoder(self):
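        # With tie_weight_to_encoder="text", the MLM head's decoder weight
        # should match the text encoder's word-embedding weight.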
self._text_modality_config = MMFTransformerModalityConfig(
type="text",
key="text",
embedding_dim=768,
position_dim=128,
segment_id=0,
encoder=TextEncoderFactory.Config(type=TextEncoderTypes.transformer),
)
heads = [MLM.Config()]
modalities_config = [self._image_modality_config, self._text_modality_config]
config = MMFTransformer.Config(
heads=heads,
modalities=modalities_config,
num_labels=2,
tie_weight_to_encoder="text",
)
mmft = build_model(config)
test_utils.compare_tensors(
mmft.heads[0].cls.predictions.decoder.weight,
mmft.encoders["text"].embeddings.word_embeddings.weight,
)
| EXA-1-master | exa/models/mmf-main/tests/models/test_mmf_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import Sample, SampleList
from mmf.models.mmbt import MMBT
from mmf.modules.encoders import (
ImageEncoderFactory,
ImageEncoderTypes,
ResNet152ImageEncoder,
TextEncoderFactory,
TextEncoderTypes,
)
from mmf.modules.hf_layers import undo_replace_with_jit
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from omegaconf import OmegaConf
class TestMMBTTorchscript(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
model_name = "mmbt"
args = test_utils.dummy_args(model=model_name)
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config["training_head_type"] = "classification"
model_config["num_labels"] = 2
model_config.model = model_name
self.model_config = model_config
def tearDown(self):
teardown_imports()
undo_replace_with_jit()
del self.model_config
gc.collect()
def test_load_save_finetune_model(self):
finetune_model = build_model(self.model_config)
self.assertTrue(test_utils.verify_torchscript_models(finetune_model))
def test_finetune_model(self):
finetune_model = build_model(self.model_config)
finetune_model.eval()
test_sample = Sample()
test_sample.input_ids = torch.randint(low=0, high=30255, size=(128,)).long()
test_sample.input_mask = torch.ones(128).long()
test_sample.segment_ids = torch.zeros(128).long()
test_sample.image = torch.rand((3, 300, 300)).float()
test_sample_list = SampleList([test_sample.copy()])
with torch.no_grad():
model_output = finetune_model.model(test_sample_list)
test_sample_list = SampleList([test_sample])
script_model = torch.jit.script(finetune_model.model)
with torch.no_grad():
script_output = script_model(test_sample_list)
self.assertTrue(torch.equal(model_output["scores"], script_output["scores"]))
def test_modal_end_token(self):
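        # Build an input with an explicit <cls> ... <sep> <pad>... layout and
        # check that extract_modal_end_token pops the separator token and
        # shifts input_ids / input_mask left by one position.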
finetune_model = build_model(self.model_config)
finetune_model.eval()
        # Suppose 0 for <cls>, 1 for <pad>, 2 for <sep>
CLS = 0
PAD = 1
SEP = 2
size = 128
input_ids = torch.randint(low=0, high=30255, size=(size,)).long()
input_mask = torch.ones(size).long()
input_ids[0] = CLS
length = torch.randint(low=2, high=size - 1, size=(1,))
input_ids[length] = SEP
input_ids[length + 1 :] = PAD
input_mask[length + 1 :] = 0
test_sample = Sample()
test_sample.input_ids = input_ids.clone()
test_sample.input_mask = input_mask.clone()
test_sample.segment_ids = torch.zeros(size).long()
test_sample.image = torch.rand((3, 300, 300)).float()
test_sample_list = SampleList([test_sample])
mmbt_base = finetune_model.model.bert
with torch.no_grad():
actual_modal_end_token = mmbt_base.extract_modal_end_token(test_sample_list)
expected_modal_end_token = torch.zeros([1]).fill_(SEP).long()
self.assertTrue(torch.equal(actual_modal_end_token, expected_modal_end_token))
self.assertTrue(torch.equal(test_sample_list.input_ids[0, :-1], input_ids[1:]))
self.assertTrue(
torch.equal(test_sample_list.input_mask[0, :-1], input_mask[1:])
)
class TestMMBTConfig(unittest.TestCase):
def tearDown(self):
undo_replace_with_jit()
gc.collect()
def test_mmbt_from_params(self):
# default init
mmbt = MMBT.from_params(
modal_encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
config = OmegaConf.structured(
MMBT.Config(
modal_encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
)
self.assertIsNotNone(mmbt)
# Make sure that the config is created from MMBT.Config
self.assertEqual(mmbt.config, config)
def test_mmbt_pretrained(self):
test_utils.setup_proxy()
mmbt = MMBT.from_params()
self.assertIsNotNone(mmbt)
def test_mmbt_directly_from_config(self):
config = OmegaConf.structured(
MMBT.Config(
modal_encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
)
mmbt = MMBT(config)
self.assertIsNotNone(mmbt)
# Make sure that the config is created from MMBT.Config
self.assertEqual(mmbt.config, config)
| EXA-1-master | exa/models/mmf-main/tests/models/test_mmbt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import SampleList
from mmf.models.vinvl import VinVLBase, VinVLForClassification, VinVLForPretraining
from mmf.utils.build import build_model
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from mmf.utils.general import get_current_device
from omegaconf import OmegaConf
try:
from transformers3.modeling_bert import BertConfig
except ImportError:
from transformers.modeling_bert import BertConfig
class TestVinVLBase(unittest.TestCase):
def test_forward(self):
img_feature_dim = 2054
bert_model_name = "bert-base-uncased"
use_img_layernorm = True
img_layer_norm_eps = 1e-12
bert_config = BertConfig.from_pretrained(bert_model_name)
# augment hf BertConfig for vinvl BertImgModel config
bert_config.img_feature_dim = img_feature_dim
bert_config.use_img_layernorm = use_img_layernorm
bert_config.img_layer_norm_eps = img_layer_norm_eps
model = VinVLBase(bert_config)
model.eval()
model = model.to(get_current_device())
bs = 8
num_feats = 70
max_sentence_len = 25
input_ids = torch.ones((bs, max_sentence_len), dtype=torch.long)
img_feat = torch.rand((bs, num_feats, img_feature_dim))
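        # Text (25) and image (70) tokens are concatenated inside VinVLBase,
        # so the hidden states below have sequence length 95.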
with torch.no_grad():
model_output = model(input_ids, img_feat).last_hidden_state
self.assertEqual(model_output.shape, torch.Size([8, 95, 768]))
def mock_vinvl_input_tensors(
cls, bs=8, num_feats=70, max_sentence_len=25, img_feature_dim=2054
):
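    # Module-level helper that attaches synthetic VinVL inputs as attributes
    # on the given object (a TestCase instance or a plain namespace), so
    # several test classes can share the same mock batch.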
cls.input_ids = torch.ones((bs, max_sentence_len), dtype=torch.long)
cls.img_feats = torch.rand((bs, num_feats, img_feature_dim))
cls.attention_mask = torch.ones(
(bs, max_sentence_len + num_feats), dtype=torch.long
)
cls.token_type_ids = torch.zeros_like(cls.input_ids)
cls.labels = torch.ones((bs, 1)).long()
cls.lm_label_ids = -torch.ones_like(cls.input_ids).long()
cls.contrastive_labels = torch.zeros((bs, 1)).long()
class TestVinVLForClassificationAndPretraining(unittest.TestCase):
def setUp(self):
mock_vinvl_input_tensors(self)
def test_classification_forward(self):
model = VinVLForClassification().to(get_current_device())
model.eval()
with torch.no_grad():
model_output = model(
input_ids=self.input_ids,
img_feats=self.img_feats,
attention_mask=self.attention_mask,
token_type_ids=self.token_type_ids,
labels=self.labels,
)
self.assertTrue("losses" in model_output)
self.assertTrue("scores" in model_output)
self.assertTrue("ce" in model_output["losses"])
def test_pretraining_forward(self):
model = VinVLForPretraining().to(get_current_device())
model.eval()
with torch.no_grad():
model_output = model(
img_feats=self.img_feats,
attention_mask=self.attention_mask,
token_type_ids=self.token_type_ids,
input_ids_masked=self.input_ids,
lm_label_ids=self.lm_label_ids,
contrastive_labels=self.contrastive_labels,
input_ids_corrupt=self.input_ids,
token_type_ids_corrupt=self.token_type_ids,
attention_mask_corrupt=self.attention_mask,
)
self.assertTrue("losses" in model_output)
self.assertTrue("masked_lm_loss" in model_output["losses"])
self.assertTrue("three_way_contrastive_loss" in model_output["losses"])
class TestVinVLModel(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
model_name = "vinvl"
args = test_utils.dummy_args(model=model_name, dataset="test")
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config.model = model_name
model_config.do_pretraining = False
classification_config_dict = {
"do_pretraining": False,
"heads": {"mlp": {"num_labels": 3129}},
"ce_loss": {"ignore_index": -1},
}
self.classification_config = OmegaConf.create(
{**model_config, **classification_config_dict}
)
pretraining_config_dict = {
"do_pretraining": True,
"heads": {"mlm": {"hidden_size": 768}},
}
self.pretraining_config = OmegaConf.create(
{**model_config, **pretraining_config_dict}
)
self.sample_list = self._get_sample_list()
def tearDown(self):
teardown_imports()
def _get_sample_list(self):
bs = 8
num_feats = 70
class MockObj:
pass
mock_input = MockObj()
mock_vinvl_input_tensors(mock_input, bs=bs, num_feats=num_feats)
input_mask = torch.ones_like(mock_input.input_ids)
max_features = torch.ones((bs, num_feats)) * num_feats
bbox = torch.randint(50, 200, (bs, num_feats, 4)).float()
image_height = torch.randint(100, 300, (bs,))
image_width = torch.randint(100, 300, (bs,))
image_info = {
"max_features": max_features,
"bbox": bbox,
"image_height": image_height,
"image_width": image_width,
}
sample_list = SampleList()
sample_list.add_field("input_ids", mock_input.input_ids)
sample_list.add_field("input_ids_corrupt", mock_input.input_ids)
sample_list.add_field("input_ids_masked", mock_input.input_ids)
sample_list.add_field("image_feature_0", mock_input.img_feats)
sample_list.add_field("image_info_0", image_info)
sample_list.add_field("input_mask", input_mask)
sample_list.add_field("input_mask_corrupt", input_mask)
sample_list.add_field("segment_ids", mock_input.token_type_ids)
sample_list.add_field("segment_ids_corrupt", mock_input.token_type_ids)
sample_list.add_field("labels", mock_input.labels)
sample_list.add_field("contrastive_labels", mock_input.contrastive_labels)
sample_list.add_field("lm_label_ids", mock_input.lm_label_ids)
sample_list = sample_list.to(get_current_device())
sample_list.dataset_name = "test"
sample_list.dataset_type = "test"
return sample_list
def test_vinvl_for_classification(self):
model_for_classification = build_model(self.classification_config)
model_for_classification.eval()
model_for_classification = model_for_classification.to(get_current_device())
with torch.no_grad():
model_output = model_for_classification(self.sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("ce" in model_output["losses"])
def test_vinvl_for_pretraining(self):
model_for_pretraining = build_model(self.pretraining_config)
model_for_pretraining.eval()
model_for_pretraining = model_for_pretraining.to(get_current_device())
with torch.no_grad():
model_output = model_for_pretraining(self.sample_list)
self.assertTrue("losses" in model_output)
self.assertTrue("masked_lm_loss" in model_output["losses"])
self.assertTrue("three_way_contrastive_loss" in model_output["losses"])
| EXA-1-master | exa/models/mmf-main/tests/models/test_vinvl.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/models/transformers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.common.sample import Sample
from mmf.models.transformers.heads.contrastive import ThreeWayContrastive
from mmf.models.transformers.heads.itm import ITM
from mmf.models.transformers.heads.mlm import MLM
from mmf.models.transformers.heads.mlp import MLP
from mmf.models.transformers.heads.mrc import MRC
from mmf.models.transformers.heads.mrfr import MRFR
from mmf.models.transformers.heads.refiner import Refiner
from mmf.models.transformers.heads.refnet_classifier import RefinerClassifier
from mmf.models.transformers.heads.wra import WRA
from omegaconf import OmegaConf
from tests.test_utils import skip_if_no_cuda
from torch import nn
class TestMLMHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{"type": "mlm", "freeze": False, "vocab_size": 1000, "hidden_size": 768}
)
@skip_if_no_cuda
def test_forward(self):
module = MLM(self.config).to("cuda")
sequence_input = torch.rand(size=(1, 64, 768), dtype=torch.float, device="cuda")
encoder_output = [sequence_input, sequence_input]
processed_sample_list = Sample()
processed_sample_list["mlm_labels"] = {}
processed_sample_list["mlm_labels"]["combined_labels"] = torch.ones(
size=(1, 64), dtype=torch.long, device="cuda"
)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("logits" in output)
self.assertTrue("losses" in output and "masked_lm_loss" in output["losses"])
self.assertEqual(output["logits"].shape, torch.Size([64, 1000]))
def test_head_missing_masked_labels(self):
module = MLM(self.config)
sequence_input = torch.rand(size=(1, 64, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = Sample()
processed_sample_list["mlm_labels"] = {}
# masked_labels will be a tensor of all `ignore_index`
processed_sample_list["mlm_labels"]["combined_labels"] = torch.full(
size=(1, 64),
fill_value=module.config.ignore_index,
dtype=torch.long,
)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue(not torch.isnan(output["losses"]["masked_lm_loss"]))
self.assertTrue(output["losses"]["masked_lm_loss"] == 0.0)
class TestMLPHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{"type": "mlp", "num_labels": 2, "hidden_size": 768}
)
def test_forward(self):
module = MLP(self.config)
sequence_input = torch.ones(size=(1, 64, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = {}
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("scores" in output)
self.assertEqual(output["scores"].shape, torch.Size([1, 2]))
class TestITMHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create({"type": "itm", "hidden_size": 768})
def test_forward(self):
module = ITM(self.config)
sequence_input = torch.ones(size=(1, 64, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = Sample()
processed_sample_list["itm_labels"] = {}
processed_sample_list["itm_labels"]["is_correct"] = torch.tensor(
False, dtype=torch.long
)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("itm_loss" in output["losses"])
self.assertEqual(output["losses"]["itm_loss"].shape, torch.Size([]))
class TestMutilayerMLPHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{
"type": "mlp",
"num_labels": 2,
"hidden_size": 768,
"num_layers": 2,
"in_dim": 768,
"pooler_name": "bert_pooler",
}
)
def test_forward(self):
module = MLP(self.config)
sequence_input = torch.ones(size=(1, 64, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = {}
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("scores" in output)
self.assertEqual(output["scores"].shape, torch.Size([1, 2]))
class TestRefinerHead(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{
"type": "refiner",
"refiner_target_pooler": "average_k_from_last",
"refiner_target_layer_depth": 1,
}
)
def test_forward(self):
module = Refiner(self.config)
sequence_input = torch.ones(size=(1, 128, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = {}
processed_sample_list["masks"] = {}
processed_sample_list["masks"]["text"] = torch.ones(
size=(1, 64), dtype=torch.long
)
processed_sample_list["masks"]["image"] = torch.ones(
size=(1, 64), dtype=torch.long
)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("losses" in output)
self.assertTrue("fused_embedding" in output)
self.assertTrue("refiner_ss_loss" in output["losses"].keys())
self.assertEqual(output["fused_embedding"].shape, torch.Size([1, 768]))
class TestRefNetClassifierHead(unittest.TestCase):
def setUp(self):
self.refiner_config = {
"type": "refiner",
"refiner_target_pooler": "average_k_from_last",
"refiner_target_layer_depth": 1,
}
self.mlp_loss_config = OmegaConf.create(
{
"config": {"type": "mlp"},
"loss_name": "classification_loss",
"loss": "cross_entropy",
"max_sample_size": 10000,
}
)
self.config = OmegaConf.create(
{
"type": "refiner_classifier",
"use_msloss": True,
"refiner_config": self.refiner_config,
"mlp_loss_config": self.mlp_loss_config,
}
)
def test_forward(self):
module = RefinerClassifier(self.config)
sequence_input = torch.ones(size=(5, 128, 768), dtype=torch.float)
encoder_output = [sequence_input, sequence_input]
processed_sample_list = {}
processed_sample_list["masks"] = {}
processed_sample_list["masks"]["text"] = torch.ones(
size=(5, 64), dtype=torch.long
)
processed_sample_list["masks"]["image"] = torch.ones(
size=(5, 64), dtype=torch.long
)
processed_sample_list["target_key"] = {}
processed_sample_list["target_key"]["targets"] = torch.empty(
5, dtype=torch.long
).random_(2)
output = module(sequence_input, encoder_output, processed_sample_list)
self.assertTrue("losses" in output)
self.assertTrue("fused_embedding" in output)
self.assertTrue("ms_loss" in output["losses"].keys())
class TestMRCHead(unittest.TestCase):
def setUp(self):
bs = 8
num_feat = 64
feat_dim = 768
label_dim = 100
self.sequence_input = torch.ones(
size=(bs, num_feat, feat_dim), dtype=torch.float
)
self.processed_sample_list = Sample()
label_targets = torch.rand((bs, num_feat, label_dim))
self.processed_sample_list["region_class"] = label_targets.contiguous().view(
-1, label_dim
)
self.processed_sample_list["image_region_mask"] = torch.ones(
(bs, num_feat)
).bool()
def test_forward_kldiv(self):
config = OmegaConf.create({"hidden_size": 768, "label_dim": 100})
module = MRC(**config)
output = module(self.sequence_input, self.processed_sample_list)
self.assertTrue("mrc_loss" in output["losses"])
self.assertEqual(output["losses"]["mrc_loss"].shape, torch.Size([]))
def test_forward_ce(self):
config = OmegaConf.create(
{"use_kl": False, "hidden_size": 768, "label_dim": 100}
)
module = MRC(**config)
output = module(self.sequence_input, self.processed_sample_list)
self.assertTrue("mrc_loss" in output["losses"])
self.assertEqual(output["losses"]["mrc_loss"].shape, torch.Size([]))
class TestMRFRHead(unittest.TestCase):
def setUp(self):
bs = 8
num_feat = 64
feat_dim = 768
img_dim = 1024 # feature proj output dim
self.sequence_input = torch.ones(
size=(bs, num_feat, feat_dim), dtype=torch.float
)
self.processed_sample_list = Sample()
feat_targets = torch.zeros((bs, num_feat, img_dim))
self.processed_sample_list[
"mrfr_region_target"
] = feat_targets.contiguous().view(-1, img_dim)
self.processed_sample_list["mrfr_region_mask"] = torch.ones(
(bs, num_feat)
).bool()
self.img_embedding_weight = nn.Parameter(torch.rand((feat_dim, img_dim)))
def test_forward(self):
config = OmegaConf.create({"hidden_size": 768, "img_dim": 1024})
module = MRFR(self.img_embedding_weight, **config)
output = module(self.sequence_input, self.processed_sample_list)
self.assertTrue("mrfr_loss" in output["losses"])
self.assertEqual(output["losses"]["mrfr_loss"].shape, torch.Size([]))
def test_linear_proj_param_is_shared(self):
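        # The MRFR head shares its projection weight with the image embedding.
        # Zeroing the shared parameter in-place should propagate to the head
        # (checked via linear_proj_weight) and, with all-zero feature targets,
        # drive the reconstruction loss to exactly 0.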
config = OmegaConf.create({"hidden_size": 768, "img_dim": 1024})
module = MRFR(self.img_embedding_weight, **config)
with torch.no_grad():
self.img_embedding_weight *= 0
output = module(self.sequence_input, self.processed_sample_list)
self.assertTrue(
torch.equal(module.linear_proj_weight, self.img_embedding_weight)
)
self.assertEqual(output["losses"]["mrfr_loss"], 0)
class TestWRAHead(unittest.TestCase):
def setUp(self):
bs = 8
num_feat = 64
feat_dim = 768
img_dim = 1024 # feature proj output dim
sentence_len = 25
num_img_feat = num_feat - sentence_len
self.sequence_input = torch.ones(
size=(bs, num_feat, feat_dim), dtype=torch.float
)
input_ids = torch.ones((bs, sentence_len))
img_feat = torch.rand((bs, num_img_feat, img_dim))
txt_pad = torch.zeros((bs, sentence_len), dtype=torch.long)
img_pad = torch.zeros((bs, num_img_feat), dtype=torch.long)
ot_inputs = {"txt_pad": txt_pad, "img_pad": img_pad}
is_correct = torch.randint(2, (bs,))
self.processed_sample_list = Sample()
self.processed_sample_list["input_ids"] = input_ids
self.processed_sample_list["image_feat"] = img_feat
self.processed_sample_list["wra_info"] = ot_inputs
self.processed_sample_list["is_correct"] = is_correct
def test_forward(self):
module = WRA()
output = module(self.sequence_input, self.processed_sample_list)
self.assertTrue("wra_loss" in output["losses"])
self.assertEqual(output["losses"]["wra_loss"].shape, torch.Size([]))
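# TestThreeWayContrastiveHead: contrastive labels take values in {0, 1, 2}
# and the head should emit a scalar "three_way_contrastive_loss".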
class TestThreeWayContrastiveHead(unittest.TestCase):
def setUp(self):
bs = 8
num_feat = 64
feat_dim = 768
self.sequence_input = torch.ones(
size=(bs, num_feat, feat_dim), dtype=torch.float
)
contrastive_labels = torch.randint(3, (bs,))
self.processed_sample_list = Sample()
self.processed_sample_list["contrastive_labels"] = contrastive_labels
def test_forward(self):
module = ThreeWayContrastive(
OmegaConf.create({"type": "three_way_contrastive"})
)
output = module(self.sequence_input, self.processed_sample_list)
self.assertTrue("three_way_contrastive_loss" in output["losses"])
self.assertEqual(
output["losses"]["three_way_contrastive_loss"].shape, torch.Size([])
)
| EXA-1-master | exa/models/mmf-main/tests/models/transformers/test_heads.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.common.sample import Sample
from mmf.models.transformers.heads.utils import build_heads_dict, HeadsDict
from mmf.modules.losses import MMFLoss
from omegaconf import OmegaConf
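# build_heads_dict accepts either a dict mapping task name -> head config(s)
# or a flat list of head configs; the tests below exercise both forms and the
# per-task loss routing performed by HeadsDict.forward.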
class TestHeadsDict(unittest.TestCase):
def setUp(self):
self.config = OmegaConf.create(
{"type": "mlm", "freeze": False, "vocab_size": 1000, "hidden_size": 768}
)
hidden_size = 768
sample_list = Sample()
sample_list["targets"] = torch.rand((1, 2))
sample_list["dataset_type"] = "test"
sample_list["dataset_name"] = "test_dataset"
sample_list["is_correct"] = torch.ones((1,), dtype=torch.long)
self.sample_list = sample_list
self.model_output = torch.rand(size=(1, 1, hidden_size))
self.losses = {"test_cls": MMFLoss("logit_bce")}
def test_constructor_on_dict_confs(self):
heads_conf = {"test": {"type": "mlp", "loss": "test_cls"}}
tasks = ["test"]
heads_dict = build_heads_dict(heads_conf, tasks, self.losses)
self.assertTrue(isinstance(heads_dict, HeadsDict))
# test forward
task = "test"
head_output = heads_dict.forward(task, self.model_output, self.sample_list)
self.assertTrue(isinstance(head_output, dict))
self.assertTrue("losses" in head_output)
self.assertTrue("test/test_dataset/logit_bce" in head_output["losses"])
def test_constructor_on_list_confs(self):
heads_conf = [{"type": "mlp", "loss": "test_cls"}]
tasks = []
heads_dict = build_heads_dict(heads_conf, tasks, self.losses)
self.assertTrue(isinstance(heads_dict, HeadsDict))
# test forward
task = None
head_output = heads_dict.forward(task, self.model_output, self.sample_list)
self.assertTrue(isinstance(head_output, dict))
self.assertTrue("losses" in head_output)
self.assertTrue("test/test_dataset/logit_bce" in head_output["losses"])
def test_constructor_on_multiple_losses_per_task(self):
heads_conf = {"test": [{"type": "mlp", "loss": "test_cls"}, {"type": "itm"}]}
tasks = ["test"]
heads_dict = build_heads_dict(heads_conf, tasks, self.losses)
self.assertTrue(isinstance(heads_dict, HeadsDict))
# test forward
task = "test"
head_output = heads_dict.forward(task, self.model_output, self.sample_list)
self.assertTrue(isinstance(head_output, dict))
self.assertTrue("losses" in head_output)
self.assertTrue("test/test_dataset/logit_bce" in head_output["losses"])
self.assertTrue("itm_loss" in head_output["losses"])
def test_constructor_on_multiple_tasks(self):
heads_conf = {
"test": {"type": "mlp", "loss": "test_cls"},
"other_task": {"type": "itm"},
"third_task": {"type": "mlm"},
}
tasks = ["test", "other_task"]
heads_dict = build_heads_dict(heads_conf, tasks, self.losses)
self.assertTrue(isinstance(heads_dict, HeadsDict))
# test forward
task = "other_task"
head_output = heads_dict.forward(task, self.model_output, self.sample_list)
self.assertTrue(isinstance(head_output, dict))
self.assertTrue("losses" in head_output)
self.assertTrue("test/test_dataset/logit_bce" not in head_output["losses"])
self.assertTrue("itm_loss" in head_output["losses"])
def test_constructor_on_multiple_loss_list(self):
heads_conf = [{"type": "mlp", "loss": "test_cls"}, {"type": "itm"}]
tasks = []
heads_dict = build_heads_dict(heads_conf, tasks, self.losses)
self.assertTrue(isinstance(heads_dict, HeadsDict))
# test forward
task = None
head_output = heads_dict.forward(task, self.model_output, self.sample_list)
self.assertTrue(isinstance(head_output, dict))
self.assertTrue("losses" in head_output)
self.assertTrue("test/test_dataset/logit_bce" in head_output["losses"])
self.assertTrue("itm_loss" in head_output["losses"])
| EXA-1-master | exa/models/mmf-main/tests/models/transformers/test_heads_dict.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/models/interfaces/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import shutil
import tempfile
import unittest
from pathlib import Path
import numpy as np
import tests.test_utils as test_utils
import torch
from mmf.models.mmbt import MMBT
from mmf.modules.hf_layers import undo_replace_with_jit
from mmf.utils.configuration import get_mmf_env, load_yaml
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_current_device
class TestModelInterfaces(unittest.TestCase):
@test_utils.skip_if_no_network
@test_utils.skip_if_windows
@test_utils.skip_if_macos
def test_mmbt_hm_interface(self):
model = MMBT.from_pretrained("mmbt.hateful_memes.images")
self._test_model_performance(model)
self._test_mmbt_hm_interface_from_file()
self._test_mmbt_hm_interface_from_folder()
def _test_mmbt_hm_interface_from_file(self):
with tempfile.NamedTemporaryFile(suffix=".pth") as tmp:
self._create_checkpoint_file(tmp.name)
model = MMBT.from_pretrained(tmp.name, interface=True)
self._test_model_performance(model)
def _test_mmbt_hm_interface_from_folder(self):
with tempfile.TemporaryDirectory() as tmpdir:
self._create_checkpoint_folder(tmpdir)
model = MMBT.from_pretrained(tmpdir, interface=True)
self._test_model_performance(model)
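    # Runs three classifications against a fixed image URL and checks the
    # predicted labels plus (approximately) the confidences of the pretrained
    # hateful-memes model.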
def _test_model_performance(self, model):
model = model.to(get_current_device())
result = model.classify(
"https://i.imgur.com/tEcsk5q.jpg", "look how many people love you"
)
self.assertEqual(result["label"], 0)
np.testing.assert_almost_equal(result["confidence"], 0.9993, decimal=3)
result = model.classify(
"https://i.imgur.com/tEcsk5q.jpg", "they have the privilege"
)
self.assertEqual(result["label"], 0)
np.testing.assert_almost_equal(result["confidence"], 0.9777, decimal=1)
result = model.classify("https://i.imgur.com/tEcsk5q.jpg", "hitler and jews")
self.assertEqual(result["label"], 1)
np.testing.assert_almost_equal(result["confidence"], 0.8342, decimal=3)
def _create_checkpoint_file(self, path):
model_folder = self._get_model_folder()
model_file = os.path.join(model_folder, "model.pth")
config_file = os.path.join(model_folder, "config.yaml")
config = load_yaml(config_file)
with PathManager.open(model_file, "rb") as f:
ckpt = torch.load(f)
ckpt["config"] = config
torch.save(ckpt, path)
def _create_checkpoint_folder(self, path):
model_folder = self._get_model_folder()
model_file = os.path.join(model_folder, "model.pth")
config_file = os.path.join(model_folder, "config.yaml")
shutil.copy(model_file, path)
shutil.copy(config_file, path)
def _get_model_folder(self):
home = str(Path.home())
data_dir = get_mmf_env(key="data_dir")
model_folder = os.path.join(
home, data_dir, "models", "mmbt.hateful_memes.images"
)
return model_folder
def tearDown(self):
undo_replace_with_jit()
| EXA-1-master | exa/models/mmf-main/tests/models/interfaces/test_interfaces.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import torch
from mmf.utils.build import build_optimizer
from mmf.utils.configuration import load_yaml
from omegaconf import OmegaConf
from tests.test_utils import SimpleLightningModel, SimpleModel
from tests.trainers.lightning.lightning_trainer_mock import LightningTrainerMock
from tests.trainers.test_trainer_mocks import TrainerTrainingLoopMock
def get_trainer_config():
config = load_yaml(os.path.join("configs", "defaults.yaml"))
return OmegaConf.merge(
config,
{
"distributed": {},
"run_type": "train_val",
"training": {
"trainer": "lightning",
"detect_anomaly": False,
"evaluation_interval": 4,
"log_interval": 2,
"update_frequency": 1,
"fp16": False,
"batch_size": 1,
"batch_size_per_device": None,
"lr_scheduler": False,
"tensorboard": False,
"num_workers": 0,
"max_grad_l2_norm": 1,
"exit_on_nan_losses": True,
},
"optimizer": {"type": "adam_w", "params": {"lr": 5e-5, "eps": 1e-8}},
"scheduler": {
"type": "warmup_linear",
"params": {"num_warmup_steps": 8, "num_training_steps": 8},
},
"trainer": {
"type": "lightning",
"params": {
"gpus": 1 if torch.cuda.is_available() else 0,
"num_nodes": 1,
"enable_checkpointing": False,
"deterministic": True,
"benchmark": False,
"gradient_clip_val": 0.0,
"val_check_interval": 4,
"log_every_n_steps": 2,
"progress_bar_refresh_rate": 0,
"accumulate_grad_batches": 1,
"precision": 32,
"num_sanity_val_steps": 0,
"limit_val_batches": 1.0,
"logger": False,
},
},
},
)
def get_config_with_defaults(new_config):
config = get_trainer_config()
config = OmegaConf.merge(config, OmegaConf.create(new_config))
return config
def add_model(trainer, model):
model.build()
model.train()
model.to(trainer.device)
trainer.model = model
def add_optimizer(trainer, config):
optimizer = build_optimizer(trainer.model, config)
trainer.optimizer = optimizer
def get_mmf_trainer(
config=None, model_size=1, num_data_size=100, load_model_from_config=False, seed=2
):
torch.random.manual_seed(seed)
trainer = TrainerTrainingLoopMock(num_data_size, config=config)
if not load_model_from_config:
add_model(trainer, SimpleModel({"in_dim": model_size}))
else:
trainer.load_model()
add_optimizer(trainer, config)
trainer.load_datasets()
return trainer
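# Builds a LightningTrainerMock around a SimpleLightningModel (or around the
# model named in the config when load_model_from_config=True); prepare_trainer
# controls whether device, metrics, loggers and the PL trainer are set up here.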
def get_lightning_trainer(
config=None,
model_size=1,
prepare_trainer=True,
load_model_from_config=False,
seed=2,
**kwargs,
):
torch.random.manual_seed(seed)
trainer = LightningTrainerMock(config=config, **kwargs)
if not load_model_from_config:
trainer.model = SimpleLightningModel({"in_dim": model_size})
trainer.model.build()
trainer.model.train()
trainer.model.build_meters(trainer.run_type)
trainer.model.is_pl_enabled = True
else:
trainer.load_model()
if prepare_trainer:
prepare_lightning_trainer(trainer)
return trainer
def prepare_lightning_trainer(trainer):
trainer.configure_device()
trainer._calculate_max_updates()
trainer.load_metrics()
trainer._load_loggers()
trainer._load_trainer()
def run_lightning_trainer(trainer, on_fit_start_callback=None):
prepare_lightning_trainer(trainer)
if on_fit_start_callback:
on_fit_start_callback()
trainer.trainer.fit(
trainer.model,
train_dataloaders=trainer.train_loader,
val_dataloaders=trainer.val_loader,
)
trainer.run_last_validation_after_train()
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
from unittest.mock import MagicMock
import torch
from mmf.common.registry import registry
from mmf.common.sample import SampleList
from mmf.datasets.lightning_multi_datamodule import LightningMultiDataModule
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from mmf.datasets.multi_datamodule import MultiDataModule
from mmf.trainers.mmf_trainer import MMFTrainer
from omegaconf import OmegaConf
from tests.test_utils import NumbersDataset
class MultiDataModuleNumbersTestObject(MultiDataModule):
def __init__(self, config, num_data):
super().__init__(config)
batch_size = config.training.batch_size
config = OmegaConf.create(
{
"use_features": True,
"annotations": {
"train": "not_a_real_annotations_dataset",
"val": "not_a_real_annotations_dataset",
},
"features": {
"train": "not_a_real_features_dataset",
"val": "not_a_real_features_dataset",
},
"dataset_config": {"numbers": 0},
}
)
self._num_data = num_data
self.batch_size = batch_size
self.config = config
self.dataset_list = []
dataset_builder = MMFDatasetBuilder(
"numbers",
functools.partial(NumbersDataset, num_examples=num_data, always_one=True),
)
dataset_builder.train_dataloader = self._get_dataloader_train
dataset_builder.val_dataloader = self._get_dataloader_val
dataset_builder.test_dataloader = self._get_dataloader_test
self.datamodules = {"numbers": dataset_builder}
def _get_dataloader_train(self):
return self._get_dataloader()
def _get_dataloader_val(self):
return self._get_dataloader("val")
def _get_dataloader_test(self):
return self._get_dataloader("test")
def _get_dataloader(self, dataset_type="train"):
dataset = NumbersDataset(
self._num_data, always_one=True, dataset_type=dataset_type
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1,
drop_last=False,
)
class LightningMultiDataModuleNumbersTestObject(LightningMultiDataModule):
def __init__(self, config, num_data):
super().__init__(config)
batch_size = config.training.batch_size
config = OmegaConf.create(
{
"use_features": True,
"annotations": {
"train": "not_a_real_annotations_dataset",
"val": "not_a_real_annotations_dataset",
},
"features": {
"train": "not_a_real_features_dataset",
"val": "not_a_real_features_dataset",
},
"dataset_config": {"numbers": 0},
}
)
self._num_data = num_data
self.batch_size = batch_size
self.config = config
self.dataset_list = []
dataset_builder = MMFDatasetBuilder(
"numbers",
functools.partial(NumbersDataset, num_examples=num_data, always_one=True),
)
dataset_builder.train_dataloader = self._get_dataloader_train
dataset_builder.val_dataloader = self._get_dataloader_val
dataset_builder.test_dataloader = self._get_dataloader_test
self.datamodules = {"numbers": dataset_builder}
def _get_dataloader_train(self):
return self._get_dataloader()
def _get_dataloader_val(self):
return self._get_dataloader("val")
def _get_dataloader_test(self):
return self._get_dataloader("test")
def _get_dataloader(self, dataset_type="train"):
dataset = NumbersDataset(
self._num_data, always_one=True, dataset_type=dataset_type
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1,
drop_last=False,
)
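# TrainerTrainingLoopMock runs a real MMFTrainer training loop on top of the
# NumbersDataset datamodule above, with every callback hook replaced by a
# MagicMock so the loop can run without logging, checkpointing or metrics.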
class TrainerTrainingLoopMock(MMFTrainer):
def __init__(self, num_train_data=100, config=None):
self.training_config = config.training
self.config = config
registry.register("config", self.config)
if torch.cuda.is_available():
self.device = "cuda"
else:
self.device = "cpu"
self.distributed = False
self.on_batch_start = MagicMock(return_value=None)
self.on_update_start = MagicMock(return_value=None)
self.logistics_callback = MagicMock(return_value=None)
self.logistics_callback.log_interval = MagicMock(return_value=None)
self.on_batch_end = MagicMock(return_value=None)
self.on_update_end = MagicMock(return_value=None)
self.on_validation_start = MagicMock(return_value=None)
self.scaler = torch.cuda.amp.GradScaler(enabled=False)
self.early_stop_callback = MagicMock(return_value=None)
self.on_validation_end = MagicMock(return_value=None)
self.metrics = MagicMock(return_value={})
self.num_data = num_train_data
self.run_type = self.config.get("run_type", "train")
def load_datasets(self):
self.dataset_loader = MultiDataModuleNumbersTestObject(
config=self.config, num_data=self.num_data
)
self.dataset_loader.seed_sampler = MagicMock(return_value=None)
self.dataset_loader.prepare_batch = lambda x: SampleList(x)
self.train_loader = self.dataset_loader.train_dataloader()
self.val_loader = self.dataset_loader.val_dataloader()
self.test_loader = self.dataset_loader.test_dataloader()
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_trainer_mocks.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
import torch
from mmf.common.registry import registry
from mmf.utils.general import get_batch_size
from tests.test_utils import SimpleModel, SimpleNaNLossModel
from tests.trainers.test_trainer_mocks import TrainerTrainingLoopMock
from tests.trainers.test_utils import (
add_model,
add_optimizer,
get_config_with_defaults,
get_mmf_trainer,
)
class TestTrainingLoop(unittest.TestCase):
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_update_frequency_num_remaining_updates_greater_than_update_frequency(
self, a
):
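        # Run 1 accumulates gradients over update_frequency=3 batches of size 6
        # (18 samples per update); run 2 uses a single batch of size 18 per
        # update, so both should reach 4 updates with identical parameters.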
trainer1 = self._train_with_condition(
num_train_data=20,
max_updates=None,
max_epochs=2,
update_frequency=3,
batch_size=6,
)
self.assertEqual(trainer1.num_updates, 4)
trainer2 = self._train_with_condition(
num_train_data=20,
max_updates=4,
max_epochs=None,
update_frequency=1,
batch_size=18,
)
self.assertEqual(trainer2.num_updates, 4)
self._compare_model_params(trainer1, trainer2)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_update_frequency_reporting(self, a):
def _on_update_end(report, meter, should_log):
            # the loss reported here should be the sum of the two iteration
            # losses that make up update 0 (0.2599 and 4.2090)
loss = report.losses["loss"].detach().cpu().item()
self.assertAlmostEqual(loss, 4.4688, 4)
self._train_with_condition(
num_train_data=100,
max_updates=1,
max_epochs=None,
update_frequency=2,
batch_size=2,
on_update_end_fn=_on_update_end,
)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_update_frequency_correct_final_iteration(self, a):
config = self._get_config(max_updates=2, max_epochs=None, update_frequency=2)
trainer = get_mmf_trainer(config=config)
trainer.load_datasets()
trainer.training_loop()
self.assertEqual(trainer.max_updates, 2)
self.assertEqual(trainer.current_iteration, 4)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_update_frequency_same_model_params(self, a):
trainer1 = self._train_with_condition(
num_train_data=100,
max_updates=2,
max_epochs=None,
update_frequency=2,
batch_size=2,
)
trainer1.load_datasets()
trainer2 = self._train_with_condition(
num_train_data=100,
max_updates=2,
max_epochs=None,
update_frequency=1,
batch_size=4,
)
trainer2.load_datasets()
self._compare_model_params(trainer1, trainer2)
def _compare_model_params(self, trainer1, trainer2):
for param1, param2 in zip(
trainer1.model.parameters(), trainer2.model.parameters()
):
self.assertTrue(torch.allclose(param1, param2))
def _train_with_condition(
self,
num_train_data,
max_updates,
max_epochs,
update_frequency,
batch_size,
on_update_end_fn=None,
):
torch.random.manual_seed(2)
config = self._get_config(
max_updates=max_updates,
max_epochs=max_epochs,
update_frequency=update_frequency,
batch_size=batch_size,
)
trainer = get_mmf_trainer(num_data_size=num_train_data, config=config)
if on_update_end_fn:
trainer.on_update_end = on_update_end_fn
trainer.training_loop()
return trainer
def _get_config(
self,
max_updates,
max_epochs,
batch_size=1,
update_frequency=1,
batch_size_per_device=None,
):
config = {
"training": {
"max_updates": max_updates,
"max_epochs": max_epochs,
"update_frequency": update_frequency,
"batch_size": batch_size,
"batch_size_per_device": batch_size_per_device,
}
}
return get_config_with_defaults(config)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_epoch_over_updates(self, a):
config = self._get_config(max_updates=2, max_epochs=0.04)
trainer = get_mmf_trainer(config=config)
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 4)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 4, 1, 4)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_fractional_epoch(self, a):
config = self._get_config(max_updates=None, max_epochs=0.04)
trainer = get_mmf_trainer(config=config)
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 4)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 4, 1, 4)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_updates(self, a):
config = self._get_config(max_updates=2, max_epochs=None)
trainer = get_mmf_trainer(config=config)
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 2)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 2, 1, 2)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_batch_size_per_device(self, a):
        # Patch the world size in mmf.utils.general, not mmf.utils.distributed,
        # because mmf.utils.general is the reference actually used here
with patch("mmf.utils.general.get_world_size", return_value=2):
config = self._get_config(max_updates=2, max_epochs=None, batch_size=4)
trainer = TrainerTrainingLoopMock(config=config)
add_model(trainer, SimpleModel({"in_dim": 1}))
add_optimizer(trainer, config)
registry.register("config", trainer.config)
batch_size = get_batch_size()
trainer.config.training.batch_size = batch_size
trainer.load_datasets()
            # Train loader uses the per-device batch size: for a global batch
            # size of 4 with world size 2, it should be 4 // 2 = 2
self.assertEqual(trainer.train_loader.current_loader.batch_size, 2)
# This is per device, so should stay same
config = self._get_config(
max_updates=2, max_epochs=None, batch_size_per_device=4
)
trainer = TrainerTrainingLoopMock(config=config)
add_model(trainer, SimpleModel({"in_dim": 1}))
add_optimizer(trainer, config)
registry.register("config", trainer.config)
batch_size = get_batch_size()
trainer.config.training.batch_size = batch_size
trainer.load_datasets()
self.assertEqual(trainer.train_loader.current_loader.batch_size, 4)
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 2)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 2, 1, 2)
def check_values(self, trainer, current_iteration, current_epoch, num_updates):
self.assertEqual(trainer.current_iteration, current_iteration)
self.assertEqual(trainer.current_epoch, current_epoch)
self.assertEqual(trainer.num_updates, num_updates)
@patch("mmf.common.test_reporter.PathManager", return_value=MagicMock())
def test_exit_on_nan_losses(self, a):
config = self._get_config(max_updates=2, max_epochs=None, batch_size=4)
trainer = TrainerTrainingLoopMock(config=config)
add_model(trainer, SimpleNaNLossModel({"in_dim": 1}))
add_optimizer(trainer, config)
registry.register("config", trainer.config)
batch_size = get_batch_size()
trainer.config.training.batch_size = batch_size
trainer.load_datasets()
exception_raised = False
try:
trainer.training_loop()
except RuntimeError:
exception_raised = True
self.assertTrue(exception_raised)
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_training_loop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/trainers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from tests.test_utils import SimpleModel, skip_if_no_cuda
from tests.trainers.test_training_loop import TrainerTrainingLoopMock
from tests.trainers.test_utils import get_config_with_defaults
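# SimpleModelWithFp16Assert checks the fp16 (autocast) path: inputs arrive as
# fp32, the classifier output is expected to be fp16, and the sum reduction is
# expected to come back as fp32.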
class SimpleModelWithFp16Assert(SimpleModel):
def forward(self, sample_list):
batch_tensor = sample_list[list(sample_list.keys())[0]]
# Start should be fp32
assert batch_tensor.dtype == torch.float32
batch_tensor = self.classifier(batch_tensor)
# In between operation should be fp16
assert batch_tensor.dtype == torch.float16
loss = torch.sum(batch_tensor)
# Sum should convert it back to fp32
assert loss.dtype == torch.float32
model_output = {"losses": {"loss": loss}}
return model_output
class MMFTrainerMock(TrainerTrainingLoopMock):
def __init__(
self, num_train_data, max_updates, max_epochs, device="cuda", fp16_model=False
):
config = get_config_with_defaults(
{
"training": {
"max_updates": max_updates,
"max_epochs": max_epochs,
"evaluation_interval": 10000,
"fp16": True,
},
"run_type": "train",
}
)
super().__init__(num_train_data, config=config)
if fp16_model:
assert (
torch.cuda.is_available()
), "MMFTrainerMock fp16 requires cuda enabled"
model = SimpleModelWithFp16Assert({"in_dim": 1})
model.build()
model = model.cuda()
else:
model = SimpleModel({"in_dim": 1})
model.build()
model.train()
model.to(self.device)
self.model = model
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-3)
class TestFp16(unittest.TestCase):
@skip_if_no_cuda
def test_fp16_works(self):
trainer = MMFTrainerMock(100, 2, 0.04)
trainer.load_datasets()
trainer.load_fp16_scaler()
self.assertTrue(isinstance(trainer.scaler, torch.cuda.amp.GradScaler))
self.assertEqual(trainer.current_iteration, 0)
trainer.training_loop()
self.assertEqual(trainer.current_iteration, 4)
@skip_if_no_cuda
def test_fp16_values(self):
trainer = MMFTrainerMock(100, 2, 0.04, fp16_model=True)
trainer.load_datasets()
trainer.load_fp16_scaler()
trainer.training_loop()
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_fp16.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
import torch
from mmf.common.registry import registry
from mmf.trainers.mmf_trainer import MMFTrainer
from mmf.utils.build import build_optimizer
from omegaconf import OmegaConf
from tests.test_utils import SimpleModel, skip_if_no_cuda
from tests.trainers.test_training_loop import TrainerTrainingLoopMock
from tests.trainers.test_utils import get_config_with_defaults
try:
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim.grad_scaler import ShardedGradScaler
from fairscale.optim.oss import OSS
FAIRSCALE_AVAILABLE = True
except ImportError:
FAIRSCALE_AVAILABLE = False
class MMFTrainerMock(TrainerTrainingLoopMock, MMFTrainer):
def __init__(self, config, num_train_data, max_updates, max_epochs, device="cuda"):
config.training.max_updates = max_updates
config.training.max_epochs = max_epochs
config.training.fp16 = True
config = get_config_with_defaults(config)
super().__init__(num_train_data, config=config)
self.device = torch.device(device)
self.config = config
self.model = SimpleModel({"in_dim": 1})
self.model.build()
self.model = self.model.cuda()
self.optimizer = build_optimizer(self.model, self.config)
self.distributed = True
self.local_rank = 0
self.parallelize_model()
self.load_fp16_scaler()
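# These tests initialize a single-process NCCL group on localhost:29501 and
# verify that optimizer.enable_state_sharding switches between fairscale's
# OSS / ShardedDataParallel / ShardedGradScaler and the stock
# DistributedDataParallel + GradScaler path.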
class TestShardedDDP(unittest.TestCase):
def setUp(self):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29501"
torch.distributed.init_process_group(
backend=torch.distributed.Backend.NCCL, rank=0, world_size=1
)
self.config_oss = OmegaConf.create(
{
"optimizer": {
"type": "adam_w",
"enable_state_sharding": True,
"params": {"lr": 5e-5},
},
"training": {"batch_size": 1, "find_unused_parameters": False},
}
)
self.config_no_oss = OmegaConf.create(
{
"optimizer": {
"type": "adam_w",
"enable_state_sharding": False,
"params": {"lr": 5e-5},
},
"training": {"batch_size": 1, "find_unused_parameters": False},
}
)
self.trainer = None
def tearDown(self):
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
del self.trainer
registry.unregister("distributed")
@skip_if_no_cuda
@unittest.skipUnless(FAIRSCALE_AVAILABLE, "Tests for fairscale")
def test_no_sharding(self):
self.trainer = MMFTrainerMock(self.config_no_oss, 100, 2, 0.04)
self.trainer.load_datasets()
self.assertFalse(isinstance(self.trainer.optimizer, OSS))
self.assertFalse(isinstance(self.trainer.model, ShardedDataParallel))
self.assertTrue(
isinstance(self.trainer.model, torch.nn.parallel.DistributedDataParallel)
)
self.assertFalse(isinstance(self.trainer.scaler, ShardedGradScaler))
self.assertTrue(isinstance(self.trainer.scaler, torch.cuda.amp.GradScaler))
self.assertEqual(self.trainer.current_iteration, 0)
self.trainer.training_loop()
self.assertEqual(self.trainer.current_iteration, 4)
@skip_if_no_cuda
@unittest.skipUnless(FAIRSCALE_AVAILABLE, "Tests for fairscale")
def test_sharding(self):
self.trainer = MMFTrainerMock(self.config_oss, 100, 2, 0.04)
self.assertTrue(isinstance(self.trainer.optimizer, OSS))
self.assertTrue(isinstance(self.trainer.model, ShardedDataParallel))
self.assertTrue(isinstance(self.trainer.scaler, ShardedGradScaler))
self.assertEqual(self.trainer.current_iteration, 0)
self.trainer.training_loop()
self.assertEqual(self.trainer.current_iteration, 4)
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_sharded_ddp.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.trainers.core.device import TrainerDeviceMixin
from mmf.utils.general import get_current_device
from omegaconf import OmegaConf
class DeviceMock(TrainerDeviceMixin):
def __init__(self, config):
self.config = config
class TestDevice(unittest.TestCase):
def test_current_device(self):
config = {
"training": {"seed": 1, "cudnn_benchmark": False},
"distributed": {"init_method": None},
}
deviceMock = DeviceMock(OmegaConf.create(config))
deviceMock.configure_seed()
deviceMock.configure_device()
device = get_current_device()
if torch.cuda.is_available():
self.assertEqual(device, "cuda:0")
else:
self.assertEqual(device, torch.device(type="cpu"))
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_device.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
import torch
from tests.trainers.test_utils import get_config_with_defaults, get_mmf_trainer
class TestEvalLoop(unittest.TestCase):
def setUp(self):
torch.manual_seed(2)
@patch(
"mmf.common.test_reporter.PathManager",
return_value=MagicMock(return_value=None),
)
@patch("mmf.common.test_reporter.get_mmf_env", return_value="")
def test_eval_loop(self, a, b):
config = get_config_with_defaults(
{"training": {"max_updates": 2, "max_epochs": 2}}
)
trainer = get_mmf_trainer(config=config)
combined_report, meter = trainer.evaluation_loop("val")
self.assertAlmostEqual(combined_report["losses"]["loss"], 493377.5312)
self.assertAlmostEqual(combined_report["logits"].item(), -0.2379742, 6)
| EXA-1-master | exa/models/mmf-main/tests/trainers/test_eval_loop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import patch
from tests.trainers.test_utils import get_config_with_defaults, get_lightning_trainer
class TestLightningTrainer(unittest.TestCase):
def test_epoch_over_updates(self):
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = self._get_config(max_steps=2, max_epochs=0.04)
trainer = get_lightning_trainer(config=config)
self.assertEqual(trainer._max_updates, 4)
self._check_values(trainer, 0, 0)
trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
self._check_values(trainer, 4, 1)
def test_fractional_epoch(self):
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = self._get_config(max_steps=-1, max_epochs=0.04)
trainer = get_lightning_trainer(config=config)
self.assertEqual(trainer._max_updates, 4)
self._check_values(trainer, 0, 0)
trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
self._check_values(trainer, 4, 1)
def test_updates(self):
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = self._get_config(max_steps=2, max_epochs=None)
trainer = get_lightning_trainer(config=config)
self.assertEqual(trainer._max_updates, 2)
self._check_values(trainer, 0, 0)
trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
self._check_values(trainer, 2, 1)
def _check_values(self, trainer, current_iteration, current_epoch):
self.assertEqual(trainer.trainer.global_step, current_iteration)
self.assertEqual(trainer.trainer.current_epoch, current_epoch)
def _get_config(self, max_steps, max_epochs):
config = {
"trainer": {"params": {"max_steps": max_steps, "max_epochs": max_epochs}}
}
return get_config_with_defaults(config)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_loop_conditions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import unittest
from typing import Any, Dict, Optional
from unittest.mock import MagicMock, patch
from mmf.common.meter import Meter
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.lightning_core.loop_callback import LightningLoopCallback
from mmf.trainers.lightning_core.loop_callback_with_torchmetrics import (
LightningTorchMetricsCallback,
)
from mmf.trainers.lightning_core.torchmetric import LightningTorchMetrics
from mmf.utils.logger import TensorboardLogger
from mmf.utils.timer import Timer
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
run_lightning_trainer,
)
class TestLightningTrainerValidation(unittest.TestCase):
def setUp(self):
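        # Reference values for the three validation runs (after updates 3, 6
        # and 8); both the Lightning callbacks and the MMF trainer are expected
        # to reproduce them.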
self.ground_truths = [
{
"current_iteration": 3,
"num_updates": 3,
"max_updates": 8,
"avg_loss": 9705.2953125,
},
{
"current_iteration": 6,
"num_updates": 6,
"max_updates": 8,
"avg_loss": 9703.29765625,
},
{
"current_iteration": 8,
"num_updates": 8,
"max_updates": 8,
"avg_loss": 9701.88046875,
},
]
    def tearDown(self):
del self.ground_truths
gc.collect()
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value="")
def test_validation(self, log_dir, mkdirs):
config = self._get_config(
max_steps=8,
batch_size=2,
val_check_interval=3,
log_every_n_steps=9, # turn it off
limit_val_batches=1.0,
)
trainer = get_lightning_trainer(config=config, prepare_trainer=False)
callback = LightningLoopCallback(trainer)
trainer.callbacks.append(callback)
lightning_values = []
def log_values(
current_iteration: int,
num_updates: int,
max_updates: int,
meter: Meter,
extra: Dict[str, Any],
tb_writer: TensorboardLogger,
):
lightning_values.append(
{
"current_iteration": current_iteration,
"num_updates": num_updates,
"max_updates": max_updates,
"avg_loss": meter.loss.avg,
}
)
with patch(
"mmf.trainers.lightning_core.loop_callback.summarize_report",
side_effect=log_values,
):
run_lightning_trainer(trainer)
self.assertEqual(len(self.ground_truths), len(lightning_values))
for gt, lv in zip(self.ground_truths, lightning_values):
keys = list(gt.keys())
self.assertListEqual(keys, list(lv.keys()))
for key in keys:
self.assertAlmostEqual(gt[key], lv[key], 1)
# TODO: update test function with avg_loss
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value="")
def test_validation_torchmetrics(self, log_dir, mkdirs):
config = self._get_config(
max_steps=8,
batch_size=2,
val_check_interval=3,
log_every_n_steps=9, # turn it off
limit_val_batches=1.0,
)
trainer = get_lightning_trainer(config=config, prepare_trainer=False)
trainer.torchmetrics = LightningTorchMetrics([])
callback = LightningTorchMetricsCallback(trainer)
trainer.callbacks.append(callback)
lightning_values = []
def log_values(
extra: Optional[Dict],
num_updates: int,
max_updates: int,
log_type: str = "train",
):
lightning_values.append(
{"num_updates": num_updates, "max_updates": max_updates}
)
with patch(
"mmf.trainers.lightning_core.loop_callback_with_torchmetrics"
+ ".LightningTorchMetricsCallback._log_metrics_and_extra",
side_effect=log_values,
):
run_lightning_trainer(trainer)
self.assertEqual(len(self.ground_truths), len(lightning_values))
for gt, lv in zip(self.ground_truths, lightning_values):
for key in ["num_updates", "max_updates"]:
self.assertAlmostEqual(gt[key], lv[key], 1)
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("torch.utils.tensorboard.SummaryWriter")
@patch("mmf.common.test_reporter.get_mmf_env", return_value="")
@patch("mmf.trainers.callbacks.logistics.summarize_report")
def test_validation_parity(self, summarize_report_fn, test_reporter, sw, mkdirs):
config = self._get_mmf_config(
max_updates=8, max_epochs=None, batch_size=2, evaluation_interval=3
)
mmf_trainer = get_mmf_trainer(config=config)
mmf_trainer.load_metrics()
logistics_callback = LogisticsCallback(mmf_trainer.config, mmf_trainer)
logistics_callback.snapshot_timer = Timer()
logistics_callback.train_timer = Timer()
mmf_trainer.logistics_callback = logistics_callback
mmf_trainer.callbacks.append(logistics_callback)
mmf_trainer.early_stop_callback = MagicMock(return_value=None)
mmf_trainer.on_validation_end = logistics_callback.on_validation_end
mmf_trainer.training_loop()
calls = summarize_report_fn.call_args_list
self.assertEqual(3, len(calls))
self.assertEqual(len(self.ground_truths), len(calls))
self._check_values(calls)
def _check_values(self, calls):
for (_, kwargs), gt in zip(calls, self.ground_truths):
for key, value in gt.items():
if key == "avg_loss":
self.assertAlmostEqual(kwargs["meter"].loss.avg, value, 1)
else:
self.assertAlmostEqual(kwargs[key], value, 1)
def _get_config(
self,
max_steps,
batch_size,
val_check_interval,
log_every_n_steps,
limit_val_batches,
):
config = {
"trainer": {
"params": {
"max_steps": max_steps,
"log_every_n_steps": log_every_n_steps,
"val_check_interval": val_check_interval,
"limit_val_batches": limit_val_batches,
}
},
"training": {"batch_size": batch_size},
}
return get_config_with_defaults(config)
def _get_mmf_config(self, max_updates, max_epochs, batch_size, evaluation_interval):
config = {
"training": {
"max_updates": max_updates,
"max_epochs": max_epochs,
"batch_size": batch_size,
"evaluation_interval": evaluation_interval,
}
}
return get_config_with_defaults(config)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_validation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.lightning_core.loop_callback import LightningLoopCallback
from mmf.utils.timer import Timer
from tests.test_utils import skip_if_no_network
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
run_lightning_trainer,
)
class TestLightningTrainerLogging(unittest.TestCase):
def setUp(self):
self.mmf_tensorboard_logs = []
self.lightning_tensorboard_logs = []
@skip_if_no_network
@patch("mmf.common.test_reporter.PathManager.mkdirs")
@patch("mmf.trainers.callbacks.logistics.setup_output_folder", return_value="logs")
@patch("mmf.trainers.lightning_trainer.setup_output_folder", return_value="logs")
@patch("mmf.utils.logger.setup_output_folder", return_value="logs")
@patch("torch.utils.tensorboard.SummaryWriter")
@patch("mmf.trainers.callbacks.logistics.get_mmf_env", return_value="logs")
@patch("mmf.common.test_reporter.get_mmf_env", return_value="logs")
@patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value="logs")
def test_tensorboard_logging_parity(
self,
summary_writer,
mmf,
lightning,
logistics,
logistics_logs,
report_logs,
trainer_logs,
mkdirs,
):
# mmf trainer
config = self._get_mmf_config(
max_updates=8,
batch_size=2,
max_epochs=None,
log_interval=3,
evaluation_interval=9,
tensorboard=True,
)
mmf_trainer = get_mmf_trainer(config=config)
def _add_scalars_mmf(log_dict, iteration):
self.mmf_tensorboard_logs.append({iteration: log_dict})
mmf_trainer.load_metrics()
logistics_callback = LogisticsCallback(mmf_trainer.config, mmf_trainer)
logistics_callback.snapshot_timer = MagicMock(return_value=None)
logistics_callback.train_timer = Timer()
logistics_callback.tb_writer.add_scalars = _add_scalars_mmf
mmf_trainer.logistics_callback = logistics_callback
mmf_trainer.on_validation_end = logistics_callback.on_validation_end
mmf_trainer.callbacks = [logistics_callback]
mmf_trainer.early_stop_callback = MagicMock(return_value=None)
mmf_trainer.on_update_end = logistics_callback.on_update_end
mmf_trainer.training_loop()
# lightning_trainer
config = self._get_config(
max_steps=8,
batch_size=2,
log_every_n_steps=3,
val_check_interval=9,
tensorboard=True,
)
trainer = get_lightning_trainer(config=config, prepare_trainer=False)
def _add_scalars_lightning(log_dict, iteration):
self.lightning_tensorboard_logs.append({iteration: log_dict})
def _on_fit_start_callback():
trainer.tb_writer.add_scalars = _add_scalars_lightning
callback = LightningLoopCallback(trainer)
trainer.callbacks.append(callback)
run_lightning_trainer(trainer, on_fit_start_callback=_on_fit_start_callback)
self.assertEqual(
len(self.mmf_tensorboard_logs), len(self.lightning_tensorboard_logs)
)
for mmf, lightning in zip(
self.mmf_tensorboard_logs, self.lightning_tensorboard_logs
):
self.assertDictEqual(mmf, lightning)
def _get_config(
self, max_steps, batch_size, log_every_n_steps, val_check_interval, tensorboard
):
config = {
"trainer": {
"params": {
"max_steps": max_steps,
"log_every_n_steps": log_every_n_steps,
"val_check_interval": val_check_interval,
}
},
"training": {"batch_size": batch_size, "tensorboard": tensorboard},
}
return get_config_with_defaults(config)
def _get_mmf_config(
self,
max_updates,
max_epochs,
batch_size,
log_interval,
evaluation_interval,
tensorboard,
):
config = {
"training": {
"batch_size": batch_size,
"tensorboard": tensorboard,
"max_updates": max_updates,
"max_epochs": max_epochs,
"log_interval": log_interval,
"evaluation_interval": evaluation_interval,
}
}
return get_config_with_defaults(config)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_logging.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import patch
import torch
from tests.trainers.test_utils import get_config_with_defaults, get_lightning_trainer
class TestLightningTrainerGradAccumulate(unittest.TestCase):
def test_grad_accumulate(self):
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = self._get_config(
accumulate_grad_batches=2, max_steps=2, batch_size=3
)
trainer1 = get_lightning_trainer(config=config)
trainer1.trainer.fit(trainer1.model, trainer1.data_module.train_loader)
config = self._get_config(
accumulate_grad_batches=1, max_steps=2, batch_size=6
)
trainer2 = get_lightning_trainer(config=config)
trainer2.trainer.fit(trainer2.model, trainer2.data_module.train_loader)
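            # accumulate_grad_batches=2 with batch_size=3 sees the same 6
            # samples per optimizer step as accumulate_grad_batches=1 with
            # batch_size=6, so the two models should end up with equal weights.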
for param1, param2 in zip(
trainer1.model.parameters(), trainer2.model.parameters()
):
self.assertTrue(torch.allclose(param1, param2))
def _get_config(self, accumulate_grad_batches, max_steps, batch_size):
config = {
"trainer": {
"params": {
"accumulate_grad_batches": accumulate_grad_batches,
"max_steps": max_steps,
}
},
"training": {"batch_size": batch_size},
}
return get_config_with_defaults(config)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_grad_accumulate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
import torch
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
)
class TestLightningTrainerLRSchedule(unittest.TestCase):
def test_lr_schedule(self):
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
            # note: part of the LR-scheduling logic also lives in SimpleLightningModel
config = self._get_config(max_steps=8, lr_scheduler=True)
trainer1 = get_lightning_trainer(config=config)
trainer1.trainer.fit(trainer1.model, trainer1.data_module.train_loader)
config = self._get_config(max_steps=8)
trainer2 = get_lightning_trainer(config=config)
trainer2.trainer.fit(trainer2.model, trainer2.data_module.train_loader)
last_model_param1 = list(trainer1.model.parameters())[-1]
last_model_param2 = list(trainer2.model.parameters())[-1]
self.assertFalse(torch.allclose(last_model_param1, last_model_param2))
def test_lr_schedule_compared_to_mmf_is_same(self):
config = get_config_with_defaults(
{"training": {"max_updates": 8, "max_epochs": None, "lr_scheduler": True}}
)
mmf_trainer = get_mmf_trainer(config=config)
mmf_trainer.lr_scheduler_callback = LRSchedulerCallback(config, mmf_trainer)
mmf_trainer.callbacks.append(mmf_trainer.lr_scheduler_callback)
mmf_trainer.on_update_end = mmf_trainer.lr_scheduler_callback.on_update_end
mmf_trainer.evaluation_loop = MagicMock(return_value=(None, None))
mmf_trainer.training_loop()
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = self._get_config(max_steps=8, lr_scheduler=True)
trainer = get_lightning_trainer(config=config)
trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
mmf_trainer.model.to(trainer.model.device)
last_model_param1 = list(mmf_trainer.model.parameters())[-1]
last_model_param2 = list(trainer.model.parameters())[-1]
self.assertTrue(torch.allclose(last_model_param1, last_model_param2))
def _get_config(self, max_steps, lr_scheduler=False):
config = {
"trainer": {"params": {"max_steps": max_steps}},
"training": {"lr_scheduler": lr_scheduler},
}
return get_config_with_defaults(config)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_lr_schedule.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
from mmf.common.report import Report
from pytorch_lightning.callbacks.base import Callback
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
)
class TestLightningTrainerLoss(unittest.TestCase, Callback):
def setUp(self):
self.lightning_losses = []
self.mmf_losses = []
def test_loss_computation_parity_with_mmf_trainer(self):
# compute mmf_trainer training losses
def _on_update_end(report, meter, should_log):
self.mmf_losses.append(report["losses"]["loss"].item())
config = get_config_with_defaults(
{"training": {"max_updates": 5, "max_epochs": None}}
)
mmf_trainer = get_mmf_trainer(config=config)
mmf_trainer.on_update_end = _on_update_end
mmf_trainer.evaluation_loop = MagicMock(return_value=(None, None))
mmf_trainer.training_loop()
# compute lightning_trainer training losses
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = get_config_with_defaults({"trainer": {"params": {"max_steps": 5}}})
trainer = get_lightning_trainer(config=config)
trainer.callbacks.append(self)
trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
def on_train_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
report = Report(outputs["input_batch"], outputs)
self.lightning_losses.append(report["losses"]["loss"].item())
def on_train_end(self, trainer, pl_module):
for lightning_loss, mmf_loss in zip(self.lightning_losses, self.mmf_losses):
self.assertEqual(lightning_loss, mmf_loss)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_loss.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
import torch
from mmf.utils.general import clip_gradients
from pytorch_lightning.callbacks.base import Callback
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
)
class TestLightningTrainerGradClipping(unittest.TestCase, Callback):
def setUp(self):
self.mmf_grads = []
self.lightning_grads = []
self.grad_clip_magnitude = 0.15
def test_grad_clipping_and_parity_to_mmf(self):
config = self._get_mmf_config(
max_updates=5,
max_epochs=None,
max_grad_l2_norm=self.grad_clip_magnitude,
clip_norm_mode="all",
)
mmf_trainer = get_mmf_trainer(config=config)
mmf_trainer.evaluation_loop = MagicMock(return_value=(None, None))
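        # _finish_update mirrors MMF's optimizer step: clip gradients to the
        # configured L2 norm, record them for comparison against Lightning's
        # gradient_clip_val run, then step and update the GradScaler.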
def _finish_update():
clip_gradients(
mmf_trainer.model,
mmf_trainer.optimizer,
mmf_trainer.num_updates,
None,
mmf_trainer.config,
)
for param in mmf_trainer.model.parameters():
mmf_grad = torch.clone(param.grad).detach().item()
self.mmf_grads.append(mmf_grad)
mmf_trainer.scaler.step(mmf_trainer.optimizer)
mmf_trainer.scaler.update()
mmf_trainer.num_updates += 1
mmf_trainer._finish_update = _finish_update
mmf_trainer.training_loop()
with patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value=""):
config = self._get_config(
max_steps=5, max_epochs=None, gradient_clip_val=self.grad_clip_magnitude
)
trainer = get_lightning_trainer(config=config)
trainer.callbacks.append(self)
trainer.trainer.fit(trainer.model, trainer.data_module.train_loader)
def on_before_optimizer_step(self, trainer, pl_module, optimizer, optimizer_idx):
for param in pl_module.parameters():
self.assertLessEqual(param.grad, self.grad_clip_magnitude)
for lightning_param in pl_module.parameters():
lightning_grad = torch.clone(lightning_param.grad).detach().item()
self.lightning_grads.append(lightning_grad)
def on_train_end(self, trainer, pl_module):
for lightning_grad, mmf_grad in zip(self.lightning_grads, self.mmf_grads):
self.assertAlmostEqual(lightning_grad, mmf_grad, places=6)
def _get_config(self, max_steps, max_epochs, gradient_clip_val):
config = {
"trainer": {
"params": {
"max_steps": max_steps,
"max_epochs": max_epochs,
"gradient_clip_val": gradient_clip_val,
}
}
}
return get_config_with_defaults(config)
def _get_mmf_config(
self, max_updates, max_epochs, max_grad_l2_norm, clip_norm_mode
):
config = {
"training": {
"max_updates": max_updates,
"max_epochs": max_epochs,
"clip_gradients": True,
"max_grad_l2_norm": max_grad_l2_norm,
"clip_norm_mode": clip_norm_mode,
}
}
return get_config_with_defaults(config)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_grad_clipping.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import tempfile
import unittest
from unittest.mock import patch
import torch
from mmf.trainers.callbacks.checkpoint import CheckpointCallback
from mmf.trainers.callbacks.early_stopping import EarlyStoppingCallback
from mmf.trainers.lightning_core.loop_callback import LightningLoopCallback
from mmf.utils.checkpoint import get_ckpt_path_from_folder
from mmf.utils.download import download_pretrained_model
from tests.test_utils import skip_if_no_network
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
prepare_lightning_trainer,
)
@contextlib.contextmanager
def mock_env_with_temp(path):
d = tempfile.TemporaryDirectory()
patched = patch(path, return_value=d.name)
patched.start()
yield d.name
d.cleanup()
patched.stop()
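# Patches the given get_mmf_env target to point at a temporary directory so
# checkpoints written during a test stay isolated and are cleaned up afterwards.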
unimodal_text_model_config = {
"unimodal_text": {
"text_hidden_size": 1,
"classifier": {
"type": "mlp",
"params": {"num_layers": 2, "hidden_dim": 5, "out_dim": 2},
"losses": [{"type": "cross_entropy"}],
},
}
}
class TestLightningCheckpoint(unittest.TestCase):
def _assert_same_dict(self, mmf, lightning, same=True):
if same:
self.assertSetEqual(set(mmf.keys()), set(lightning.keys()))
for key in mmf.keys():
self._assert_same(mmf[key], lightning[key], same=same)
def _assert_same(self, obj1, obj2, same=True):
if same:
if hasattr(obj1, "mean") and obj1.dtype == torch.float:
                self.assertAlmostEqual(obj1.mean().item(), obj2.mean().item(), 2)
elif hasattr(obj1, "item"):
self.assertEqual(obj1.item(), obj2.item())
elif type(obj1) is dict and type(obj2) is dict:
self._assert_same_dict(obj1, obj2)
else:
self.assertEqual(obj1, obj2)
else:
if hasattr(obj1, "mean") and obj1.dtype == torch.float:
self.assertNotEqual(obj1.mean().item(), obj2.mean().item())
elif hasattr(obj1, "item"):
self.assertNotEqual(obj1.item(), obj2.item())
elif type(obj1) is dict and type(obj2) is dict:
self._assert_same_dict(obj1, obj2, same=False)
else:
self.assertNotEqual(obj1, obj2)
def _get_ckpt_config(
self, is_pl=False, ckpt_config=None, max_steps=6, resume_from_checkpoint=None
):
if ckpt_config is None:
ckpt_config = {}
if not is_pl:
return get_config_with_defaults(
{
"training": {
"max_updates": max_steps,
"max_epochs": None,
"early_stop": {
"enabled": True,
"criteria": "numbers/accuracy",
"minimize": False,
},
"checkpoint_interval": 2,
"evaluation_interval": 2,
},
"model": "simple_model",
"evaluation": {"metrics": ["accuracy"]},
"checkpoint": {
"max_to_keep": 1,
"save_git_details": False,
**ckpt_config,
},
"run_type": "train_val",
}
)
else:
return get_config_with_defaults(
{
"training": {
"checkpoint_interval": 2,
"early_stop": {
"enabled": True,
"criteria": "numbers/accuracy",
"minimize": False,
},
},
"trainer": {
"params": {
"max_steps": max_steps,
"max_epochs": None,
"enable_checkpointing": True,
"resume_from_checkpoint": resume_from_checkpoint,
"val_check_interval": 2,
}
},
"model": "simple_lightning_model",
"evaluation": {"metrics": ["accuracy"]},
"checkpoint": {
"max_to_keep": 1,
"save_git_details": False,
**ckpt_config,
},
"run_type": "train_val",
}
)
def _get_mmf_trainer(
self, ckpt_config=None, model_config=None, seed=2, max_updates=6
):
config = self._get_ckpt_config(ckpt_config=ckpt_config, max_steps=max_updates)
load_model_from_config = False
if model_config:
config.model_config = model_config
config.model = list(model_config.keys())[0]
load_model_from_config = True
mmf_trainer = get_mmf_trainer(
config=config, load_model_from_config=load_model_from_config, seed=seed
)
mmf_trainer.load_metrics()
checkpoint_callback = CheckpointCallback(config, mmf_trainer)
mmf_trainer.on_init_start = checkpoint_callback.on_init_start
mmf_trainer.on_train_end = checkpoint_callback.on_train_end
mmf_trainer.callbacks.append(checkpoint_callback)
mmf_trainer.checkpoint_callback = checkpoint_callback
mmf_trainer.lr_scheduler_callback = None
early_stop_callback = EarlyStoppingCallback(config, mmf_trainer)
mmf_trainer.early_stop_callback = early_stop_callback
mmf_trainer.callbacks.append(early_stop_callback)
return mmf_trainer
def _get_lightning_trainer(
self,
ckpt_config=None,
model_config=None,
seed=2,
max_steps=6,
resume_from_checkpoint=None,
):
config = self._get_ckpt_config(
ckpt_config=ckpt_config,
max_steps=max_steps,
is_pl=True,
resume_from_checkpoint=resume_from_checkpoint,
)
load_model_from_config = False
if model_config:
config.model_config = model_config
config.model = list(model_config.keys())[0]
load_model_from_config = True
lightning = get_lightning_trainer(
config=config,
prepare_trainer=False,
load_model_from_config=load_model_from_config,
seed=seed,
)
callback = LightningLoopCallback(lightning)
lightning.callbacks.append(callback)
lightning.callbacks += lightning.configure_checkpoint_callbacks()
lightning.callbacks += lightning.configure_monitor_callbacks()
prepare_lightning_trainer(lightning)
return lightning
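# Note: this class shadows the helper class of the same name defined above
# and inherits its assertion and config helpers.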
class TestLightningCheckpoint(TestLightningCheckpoint):
@skip_if_no_network
def test_load_resume_parity_with_mmf(self):
# with checkpoint.resume = True, by default it loads "current.ckpt"
self._load_checkpoint_and_test("current.ckpt", ckpt_config={"resume": True})
def test_load_resume_best_parity_with_mmf(self):
# with checkpoint.resume = True and checkpoint.resume_best = True
# by default it loads best.ckpt. It should load the "best.ckpt"
self._load_checkpoint_and_test(
"best.ckpt", ckpt_config={"resume": True, "resume_best": True}
)
@skip_if_no_network
def test_load_resume_ignore_resume_zoo(self):
        # when both checkpoint.resume = True and resume_zoo are specified,
        # resume_zoo should be ignored and "current.ckpt" loaded
self._load_checkpoint_and_test(
"current.ckpt",
ckpt_config={"resume": True, "resume_zoo": "visual_bert.pretrained.coco"},
)
@unittest.skip("causing crash on gha")
def test_load_resume_zoo_parity_with_mmf(self):
        # checkpoint.resume is not specified but checkpoint.resume_zoo is;
        # the trainer should load the model file that backs the zoo entry
resume_zoo = "unimodal_text.hateful_memes.bert"
ckpt_filepath = download_pretrained_model(resume_zoo)
ckpt_filepath = get_ckpt_path_from_folder(ckpt_filepath)
ckpt = torch.load(ckpt_filepath, map_location="cpu")
ckpt_config = {"resume_zoo": resume_zoo, "zoo_config_override": True}
with mock_env_with_temp("mmf.utils.checkpoint.get_mmf_env") as _:
mmf_trainer = self._get_mmf_trainer(
ckpt_config=ckpt_config,
model_config=unimodal_text_model_config,
max_updates=0,
)
mmf_trainer.on_init_start()
mmf_ckpt = mmf_trainer.model.state_dict()
mmf_ckpt.pop("base.encoder.embeddings.position_ids")
self._assert_same_dict(ckpt, mmf_ckpt)
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as _:
            # lightning loads from the zoo; in this case the zoo ckpt is in MMF format
lightning = self._get_lightning_trainer(
ckpt_config=ckpt_config,
model_config=unimodal_text_model_config,
max_steps=0,
seed=4,
)
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
lightning_ckpt = lightning.model.state_dict()
lightning_ckpt.pop("base.encoder.embeddings.position_ids")
self._assert_same_dict(ckpt, lightning_ckpt)
@unittest.skip("causing crash on gha")
def test_load_zoo_with_pretrained_state_mapping_parity_with_mmf(self):
# mmf with pretrained state mapping model state dict
resume_zoo = "unimodal_text.hateful_memes.bert"
pretrained_key = "base.encoder.embeddings"
ckpt_config = {
"resume_zoo": resume_zoo,
"zoo_config_override": True,
"resume_pretrained": True,
"pretrained_state_mapping": {pretrained_key: pretrained_key},
}
with mock_env_with_temp("mmf.utils.checkpoint.get_mmf_env") as _:
mmf_trainer = self._get_mmf_trainer(
ckpt_config=ckpt_config,
model_config=unimodal_text_model_config,
max_updates=0,
)
mmf_trainer.on_init_start()
mmf_ckpt = mmf_trainer.model.state_dict()
mmf_ckpt.pop("base.encoder.embeddings.position_ids")
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as _:
lightning = self._get_lightning_trainer(
ckpt_config=ckpt_config,
model_config=unimodal_text_model_config,
max_steps=0,
seed=4,
)
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
lightning_ckpt = lightning.model.state_dict()
lightning_ckpt.pop("base.encoder.embeddings.position_ids")
# with pretrained state mapping, the lightning and mmf model state dicts
# should share the same keys but match values only on the mapped keys
self.assertSetEqual(set(mmf_ckpt.keys()), set(lightning_ckpt.keys()))
# only the checkpoints with `pretrained_key` prefix will have the same value
for mmf_key in mmf_ckpt:
if pretrained_key in mmf_key:
self._assert_same(mmf_ckpt[mmf_key], lightning_ckpt[mmf_key])
for mmf_key in mmf_ckpt:
if "classifier.layers" in mmf_key:
self._assert_same(
mmf_ckpt[mmf_key], lightning_ckpt[mmf_key], same=False
)
def test_load_mmf_trainer_checkpoint_in_lightning(self):
# specifying an mmf .ckpt as the trainer resume_from_checkpoint
# for lightning trainer
with mock_env_with_temp(
"mmf.utils.checkpoint.get_mmf_env"
) as tmp_d, mock_env_with_temp("mmf.common.test_reporter.get_mmf_env") as _:
# generate checkpoint
self._get_mmf_trainer(max_updates=6).training_loop()
# load the trainer checkpoint that is in mmf format
ckpt_file = os.path.join(tmp_d, "current.ckpt")
ckpt = torch.load(ckpt_file, map_location="cpu")
with patch.object(
LightningLoopCallback, "on_train_batch_end", return_value=None
) as mock_method:
lightning = self._get_lightning_trainer(
max_steps=6,
resume_from_checkpoint=ckpt_file,
model_config={"simple_lightning_model": {"in_dim": 1}},
)
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
self.assertEquals(lightning.trainer.global_step, 6)
call_args_list = mock_method.call_args_list
# training will not run any steps since max_steps equals
# the checkpoint's global_step
self.assertEquals(len(call_args_list), 0)
# check to make sure that the lightning trainer's model and
# mmf's are the same
lightning_ckpt = lightning.trainer.model.state_dict()
self.assertDictEqual(lightning_ckpt, ckpt["model"])
def test_load_trainer_resume_parity_with_mmf(self):
# directly setting lightning's trainer param: resume_from_checkpoint
filename = "current.ckpt"
mmf_ckpt_current = self._get_mmf_ckpt(filename, ckpt_config={"resume": True})
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as tmp_d:
lightning = self._get_lightning_trainer(max_steps=6)
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
lightning = self._get_lightning_trainer(
max_steps=6, resume_from_checkpoint=os.path.join(tmp_d, filename)
)
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
lightning_ckpt_current = torch.load(os.path.join(tmp_d, filename))
self._assert_same_dict(
lightning_ckpt_current["state_dict"], lightning.model.state_dict()
)
# Make sure lightning and mmf parity
self._assert_same_dict(
mmf_ckpt_current["model"], lightning_ckpt_current["state_dict"]
)
@skip_if_no_network
def test_load_trainer_ckpt_number_of_steps(self):
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as tmp_d:
# to generate ckpt file, max_steps is saved as 6
lightning_gen = self._get_lightning_trainer(max_steps=6)
lightning_gen.trainer.fit(
lightning_gen.model,
train_dataloaders=lightning_gen.train_loader,
val_dataloaders=lightning_gen.val_loader,
)
# load ckpt file using resume_file, and train with max_steps 12
resume_file = os.path.join(tmp_d, "current.ckpt")
lightning = self._get_lightning_trainer(
model_config={"simple_lightning_model": {"in_dim": 1}},
resume_from_checkpoint=resume_file,
seed=4,
max_steps=12,
)
# training will take place 6 times.
with patch.object(
LightningLoopCallback, "on_train_batch_end", return_value=None
) as mock_method:
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
self.assertEquals(lightning.trainer.global_step, 12)
call_args_list = [l[0][4] for l in mock_method.call_args_list]
# in lightning 1.6.0 last batch idx from ckpt is repeated
self.assertListEqual(list(range(5, 11)), call_args_list)
def test_trainer_save_current_parity_with_mmf(self):
with mock_env_with_temp(
"mmf.utils.checkpoint.get_mmf_env"
) as tmp_d, mock_env_with_temp("mmf.common.test_reporter.get_mmf_env") as _:
mmf_trainer = self._get_mmf_trainer()
mmf_trainer.training_loop()
mmf_ckpt_current = torch.load(os.path.join(tmp_d, "current.ckpt"))
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as tmp_d:
lightning = self._get_lightning_trainer()
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
lightning_ckpt_current = torch.load(os.path.join(tmp_d, "current.ckpt"))
self._assert_same_dict(
mmf_ckpt_current["model"], lightning_ckpt_current["state_dict"]
)
def test_lightning_checkpoint_structure(self):
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as tmp_d:
lightning = self._get_lightning_trainer()
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
lightning_ckpt_current = torch.load(os.path.join(tmp_d, "current.ckpt"))
self.assertSetEqual(
set(lightning_ckpt_current.keys()),
{
"epoch",
"global_step",
"pytorch-lightning_version",
"state_dict",
"callbacks",
"optimizer_states",
"lr_schedulers",
"config",
"loops",
},
)
def test_lightning_checkpoint_interval(self):
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as tmp_d:
# generate checkpoint, val_check_interval=2, checkpoint_interval=2
lightning_gen = self._get_lightning_trainer(max_steps=6)
lightning_gen.trainer.fit(
lightning_gen.model,
train_dataloaders=lightning_gen.train_loader,
val_dataloaders=lightning_gen.val_loader,
)
# this test should generate 3 model files under the models directory.
# mmf's directory has model_{2|4|6}.ckpt
# lightning's directory has model_step={1|3|5}.ckpt
# this is due to
# https://github.com/PyTorchLightning/pytorch-lightning/pull/6997
# the same behavior is also noted in test_validation.py
files = os.listdir(os.path.join(tmp_d, "models"))
self.assertEquals(3, len(files))
indexes = {int(x[:-5].split("=")[1]) for x in files}
self.assertSetEqual({2, 4, 6}, indexes)
def _get_mmf_ckpt(self, filename, ckpt_config=None):
with mock_env_with_temp(
"mmf.utils.checkpoint.get_mmf_env"
) as tmp_d, mock_env_with_temp("mmf.common.test_reporter.get_mmf_env") as _:
# generate checkpoint
self._get_mmf_trainer(max_updates=6).training_loop()
# load the generated checkpoint, calling on_init_start is
# necessary to load the checkpoint
mmf_trainer = self._get_mmf_trainer(
ckpt_config=ckpt_config, max_updates=0, seed=1
)
mmf_trainer.on_init_start()
mmf_ckpt_current = torch.load(os.path.join(tmp_d, filename))
self._assert_same_dict(
mmf_ckpt_current["model"], mmf_trainer.model.state_dict()
)
return mmf_ckpt_current
def _load_checkpoint_and_test(self, filename, ckpt_config=None):
# Make sure mmf loads x.ckpt
mmf_ckpt = self._get_mmf_ckpt(filename, ckpt_config=ckpt_config)
# Make sure lightning loads x.ckpt
with mock_env_with_temp("mmf.trainers.lightning_trainer.get_mmf_env") as tmp_d:
# generate checkpoint
lightning_gen = self._get_lightning_trainer(max_steps=6)
lightning_gen.trainer.fit(
lightning_gen.model,
train_dataloaders=lightning_gen.train_loader,
val_dataloaders=lightning_gen.val_loader,
)
# load the generated checkpoint, calling fit is necessary to load the
# checkpoint
lightning_ckpt = torch.load(os.path.join(tmp_d, filename))
lightning = self._get_lightning_trainer(
ckpt_config=ckpt_config,
model_config={"simple_lightning_model": {"in_dim": 1}},
max_steps=6,
seed=4,
)
lightning.trainer.fit(
lightning.model, train_dataloaders=lightning.train_loader
)
self._assert_same_dict(
lightning_ckpt["state_dict"], lightning.model.state_dict()
)
# Make sure lightning and mmf parity
self._assert_same_dict(mmf_ckpt["model"], lightning_ckpt["state_dict"])
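# mmf tracks current_epoch starting at 1 while lightning reports a 0-indexed
# epoch, and mmf's num_updates should equal lightning's global_step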
self.assertEquals(mmf_ckpt["current_epoch"], lightning_ckpt["epoch"] + 1)
self.assertEquals(mmf_ckpt["num_updates"], lightning_ckpt["global_step"])
self._assert_same_dict(
mmf_ckpt["optimizer"], lightning_ckpt["optimizer_states"][0]
)
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/test_checkpoint.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.trainers.lightning_trainer import LightningTrainer
from tests.trainers.test_trainer_mocks import LightningMultiDataModuleNumbersTestObject
class LightningTrainerMock(LightningTrainer):
def __init__(self, config, num_data_size=100, **kwargs):
super().__init__(config)
self.config = config
self.callbacks = []
# settings
trainer_config = self.config.trainer.params
self.trainer_config = trainer_config
self.training_config = self.config.training
for key, value in kwargs.items():
trainer_config[key] = value
# data
self.data_module = LightningMultiDataModuleNumbersTestObject(
config=config, num_data=num_data_size
)
self.run_type = self.config.get("run_type", "train")
registry.register("config", self.config)
self.train_loader = self.data_module.train_dataloader()
self.val_loader = self.data_module.val_dataloader()
self.test_loader = self.data_module.test_dataloader()
| EXA-1-master | exa/models/mmf-main/tests/trainers/lightning/lightning_trainer_mock.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/trainers/callbacks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import unittest
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from mmf.utils.configuration import load_yaml
from omegaconf import OmegaConf
from tests.test_utils import NumbersDataset, SimpleModel
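# Register LRSchedulerCallback under a custom key so it can be instantiated
# from the config-driven training.callbacks list below.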
registry.register_callback("test_callback")(LRSchedulerCallback)
class TestUserCallback(unittest.TestCase):
def setUp(self):
self.trainer = argparse.Namespace()
self.config = load_yaml(os.path.join("configs", "defaults.yaml"))
self.config = OmegaConf.merge(
self.config,
{
"model": "simple",
"model_config": {},
"training": {
"lr_scheduler": True,
"lr_ratio": 0.1,
"lr_steps": [1, 2],
"use_warmup": False,
"callbacks": [{"type": "test_callback", "params": {}}],
},
},
)
# Keep original copy for testing purposes
self.trainer.config = deepcopy(self.config)
registry.register("config", self.trainer.config)
model = SimpleModel(SimpleModel.Config())
model.build()
self.trainer.model = model
self.trainer.val_loader = torch.utils.data.DataLoader(
NumbersDataset(2), batch_size=self.config.training.batch_size
)
self.trainer.optimizer = torch.optim.Adam(
self.trainer.model.parameters(), lr=1e-01
)
self.trainer.lr_scheduler_callback = LRSchedulerCallback(
self.config, self.trainer
)
self.trainer.callbacks = []
for callback in self.config.training.get("callbacks", []):
callback_type = callback.type
callback_param = callback.params
callback_cls = registry.get_callback_class(callback_type)
self.trainer.callbacks.append(
callback_cls(self.trainer.config, self.trainer, **callback_param)
)
def tearDown(self):
registry.unregister("config")
def test_on_update_end(self):
self.assertEqual(len(self.trainer.callbacks), 1)
user_callback = self.trainer.callbacks[0]
self.assertTrue(isinstance(user_callback, LRSchedulerCallback))
| EXA-1-master | exa/models/mmf-main/tests/trainers/callbacks/test_user_callback.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import unittest
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from mmf.utils.configuration import load_yaml
from omegaconf import OmegaConf
class SimpleModule(BaseModel):
def __init__(self, config={}):
super().__init__(config)
self.base = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, x, target):
x = self.classifier(self.base(x))
return {"losses": {"total_loss": self.loss(x, target)}}
class NumbersDataset(torch.utils.data.Dataset):
def __init__(self):
self.samples = list(range(1, 1001))
def __getitem__(self, idx):
return self.samples[idx]
def __len__(self):
return len(self.samples)
class TestLogisticsCallback(unittest.TestCase):
def setUp(self):
self.trainer = argparse.Namespace()
self.config = load_yaml(os.path.join("configs", "defaults.yaml"))
self.config = OmegaConf.merge(
self.config,
{
"model": "simple",
"model_config": {},
"training": {
"lr_scheduler": True,
"lr_ratio": 0.1,
"lr_steps": [1, 2],
"use_warmup": False,
},
},
)
# Keep original copy for testing purposes
self.trainer.config = deepcopy(self.config)
registry.register("config", self.trainer.config)
self.trainer.model = SimpleModule()
self.trainer.val_loader = torch.utils.data.DataLoader(
NumbersDataset(), batch_size=self.config.training.batch_size
)
self.trainer.optimizer = torch.optim.Adam(
self.trainer.model.parameters(), lr=1e-01
)
self.trainer.lr_scheduler_callback = LRSchedulerCallback(
self.config, self.trainer
)
def tearDown(self):
registry.unregister("config")
def test_on_update_end(self):
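# with lr_ratio=0.1 and lr_steps=[1, 2], each scheduler step below decays
# the learning rate by 10x: 1e-01 -> 1e-02 -> 1e-03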
self.trainer.lr_scheduler_callback.on_update_end()
self.assertAlmostEqual(self.trainer.optimizer.param_groups[0]["lr"], 1e-02)
self.trainer.lr_scheduler_callback.on_update_end()
self.assertAlmostEqual(self.trainer.optimizer.param_groups[0]["lr"], 1e-03)
| EXA-1-master | exa/models/mmf-main/tests/trainers/callbacks/test_lr_scheduler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import tempfile
import unittest
from copy import deepcopy
from unittest.mock import Mock
import torch
from mmf.common.meter import Meter
from mmf.common.registry import registry
from mmf.common.report import Report
from mmf.models.base_model import BaseModel
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.utils.configuration import load_yaml
from mmf.utils.file_io import PathManager
from mmf.utils.logger import setup_logger
from omegaconf import OmegaConf
class SimpleModule(BaseModel):
def __init__(self, config={}):
super().__init__(config)
self.base = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(5, 4), torch.nn.Tanh(), torch.nn.Linear(4, 5)
)
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, x, target):
x = self.classifier(self.base(x))
return {"losses": {"total_loss": self.loss(x, target)}}
class NumbersDataset(torch.utils.data.Dataset):
def __init__(self):
self.samples = list(range(1, 1001))
def __getitem__(self, idx):
return self.samples[idx]
def __len__(self):
return len(self.samples)
class TestLogisticsCallback(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.trainer = argparse.Namespace()
self.config = load_yaml(os.path.join("configs", "defaults.yaml"))
self.config = OmegaConf.merge(
self.config,
{
"model": "simple",
"model_config": {},
"training": {
"checkpoint_interval": 1,
"evaluation_interval": 10,
"early_stop": {"criteria": "val/total_loss"},
"batch_size": 16,
"log_interval": 10,
"logger_level": "info",
},
"env": {"save_dir": self.tmpdir},
},
)
# Keep original copy for testing purposes
self.trainer.config = deepcopy(self.config)
registry.register("config", self.trainer.config)
setup_logger()
self.report = Mock(spec=Report)
self.report.dataset_name = "abcd"
self.report.dataset_type = "test"
self.trainer.model = SimpleModule()
self.trainer.val_loader = torch.utils.data.DataLoader(
NumbersDataset(), batch_size=self.config.training.batch_size
)
self.trainer.optimizer = torch.optim.Adam(
self.trainer.model.parameters(), lr=1e-01
)
self.trainer.device = "cpu"
self.trainer.num_updates = 0
self.trainer.current_iteration = 0
self.trainer.current_epoch = 0
self.trainer.max_updates = 0
self.trainer.meter = Meter()
self.cb = LogisticsCallback(self.config, self.trainer)
def tearDown(self):
registry.unregister("config")
def test_on_train_start(self):
self.cb.on_train_start()
expected = 0
self.assertEqual(
int(self.cb.train_timer.get_time_since_start().split("ms")[0]), expected
)
def test_on_update_end(self):
self.cb.on_train_start()
self.cb.on_update_end(meter=self.trainer.meter, should_log=False)
f = PathManager.open(os.path.join(self.tmpdir, "train.log"))
self.assertFalse(any("time_since_start" in line for line in f.readlines()))
self.cb.on_update_end(meter=self.trainer.meter, should_log=True)
f = PathManager.open(os.path.join(self.tmpdir, "train.log"))
self.assertTrue(any("time_since_start" in line for line in f.readlines()))
def test_on_validation_start(self):
self.cb.on_train_start()
self.cb.on_validation_start()
expected = 0
self.assertEqual(
int(self.cb.snapshot_timer.get_time_since_start().split("ms")[0]), expected
)
def test_on_test_end(self):
self.cb.on_test_end(report=self.report, meter=self.trainer.meter)
f = PathManager.open(os.path.join(self.tmpdir, "train.log"))
self.assertTrue(any("Finished run in" in line for line in f.readlines()))
| EXA-1-master | exa/models/mmf-main/tests/trainers/callbacks/test_logistics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.sample import (
convert_batch_to_sample_list,
Sample,
SampleList,
to_device,
)
class TestSample(unittest.TestCase):
def test_sample_working(self):
initial = Sample()
initial.x = 1
initial["y"] = 2
# Assert setter and getter
self.assertEqual(initial.x, 1)
self.assertEqual(initial["x"], 1)
self.assertEqual(initial.y, 2)
self.assertEqual(initial["y"], 2)
update_dict = {"a": 3, "b": {"c": 4}}
initial.update(update_dict)
self.assertEqual(initial.a, 3)
self.assertEqual(initial["a"], 3)
self.assertEqual(initial.b.c, 4)
self.assertEqual(initial["b"].c, 4)
class TestSampleList(unittest.TestCase):
@test_utils.skip_if_no_cuda
def test_pin_memory(self):
sample_list = test_utils.build_random_sample_list()
sample_list.pin_memory()
pin_list = [sample_list.y, sample_list.z.y]
non_pin_list = [sample_list.x, sample_list.z.x]
all_pinned = True
for pin in pin_list:
all_pinned = all_pinned and pin.is_pinned()
self.assertTrue(all_pinned)
any_pinned = False
for pin in non_pin_list:
any_pinned = any_pinned or (hasattr(pin, "is_pinned") and pin.is_pinned())
self.assertFalse(any_pinned)
def test_to_dict(self):
sample_list = test_utils.build_random_sample_list()
sample_dict = sample_list.to_dict()
self.assertTrue(isinstance(sample_dict, dict))
# hasattr won't work anymore
self.assertFalse(hasattr(sample_dict, "x"))
keys_to_assert = ["x", "y", "z", "z.x", "z.y"]
all_keys = True
for key in keys_to_assert:
current = sample_dict
if "." in key:
sub_keys = key.split(".")
for sub_key in sub_keys:
all_keys = all_keys and sub_key in current
current = current[sub_key]
else:
all_keys = all_keys and key in current
self.assertTrue(all_keys)
self.assertTrue(isinstance(sample_dict, dict))
class TestFunctions(unittest.TestCase):
def test_to_device(self):
sample_list = test_utils.build_random_sample_list()
modified = to_device(sample_list, "cpu")
self.assertEqual(modified.get_device(), torch.device("cpu"))
modified = to_device(sample_list, torch.device("cpu"))
self.assertEqual(modified.get_device(), torch.device("cpu"))
modified = to_device(sample_list, "cuda")
if torch.cuda.is_available():
self.assertEqual(modified.get_device(), torch.device("cuda:0"))
else:
self.assertEqual(modified.get_device(), torch.device("cpu"))
double_modified = to_device(modified, modified.get_device())
self.assertTrue(double_modified is modified)
custom_batch = [{"a": 1}]
self.assertEqual(to_device(custom_batch), custom_batch)
def test_convert_batch_to_sample_list(self):
# Test list conversion
batch = [{"a": torch.tensor([1.0, 1.0])}, {"a": torch.tensor([2.0, 2.0])}]
sample_list = convert_batch_to_sample_list(batch)
expected_a = torch.tensor([[1.0, 1.0], [2.0, 2.0]])
self.assertTrue(torch.equal(expected_a, sample_list.a))
# Test single element list, samplelist
sample_list = SampleList()
sample_list.add_field("a", expected_a)
parsed_sample_list = convert_batch_to_sample_list([sample_list])
self.assertTrue(isinstance(parsed_sample_list, SampleList))
self.assertTrue("a" in parsed_sample_list)
self.assertTrue(torch.equal(expected_a, parsed_sample_list.a))
# Test no tensor field
batch = [{"a": [1]}, {"a": [2]}]
sample_list = convert_batch_to_sample_list(batch)
self.assertTrue(sample_list.a, [[1], [2]])
| EXA-1-master | exa/models/mmf-main/tests/common/test_sample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.batch_collator import BatchCollator
from mmf.common.sample import Sample
class TestBatchCollator(unittest.TestCase):
def test_call(self):
batch_collator = BatchCollator("vqa2", "train")
sample_list = test_utils.build_random_sample_list()
sample_list = batch_collator(sample_list)
# Test already build sample list
self.assertEqual(sample_list.dataset_name, "vqa2")
self.assertEqual(sample_list.dataset_type, "train")
sample = Sample()
sample.a = torch.tensor([1, 2], dtype=torch.int)
# Test list of samples
sample_list = batch_collator([sample, sample])
self.assertTrue(
test_utils.compare_tensors(
sample_list.a, torch.tensor([[1, 2], [1, 2]], dtype=torch.int)
)
)
# Test IterableDataset case
sample_list = test_utils.build_random_sample_list()
new_sample_list = batch_collator([sample_list])
self.assertEqual(new_sample_list, sample_list)
| EXA-1-master | exa/models/mmf-main/tests/common/test_batch_collator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/common/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import tests.test_utils as test_utils
import torch
from mmf.common.report import Report
from mmf.common.sample import SampleList
class TestReport(unittest.TestCase):
def _build_report(self):
tensor_a = torch.tensor([[1, 2, 3, 4], [2, 3, 4, 5]])
sample_list = SampleList()
sample_list.add_field("a", tensor_a)
model_output = {"scores": torch.rand(2, 2)}
report = Report(sample_list, model_output)
return report
def test_report_copy(self):
original_report = self._build_report()
report_copy = original_report.copy()
report_copy["scores"].zero_()
self.assertFalse(
test_utils.compare_tensors(report_copy["scores"], original_report["scores"])
)
def test_report_detach(self):
report = self._build_report()
report.a = report.a.float()
report.a.requires_grad = True
report.scores = report.a * 2
self.assertTrue(report.scores.requires_grad)
self.assertTrue(report.a.requires_grad)
self.assertFalse(report.scores.is_leaf)
self.assertTrue(report.a.is_leaf)
report = report.detach()
self.assertFalse(report.scores.requires_grad)
self.assertFalse(report.a.requires_grad)
self.assertTrue(report.scores.is_leaf)
self.assertTrue(report.a.is_leaf)
@test_utils.skip_if_no_cuda
def test_to_device(self):
report = self._build_report()
self.assertFalse(report.a.is_cuda)
self.assertFalse(report.scores.is_cuda)
report = report.to("cuda")
self.assertTrue(report.a.is_cuda)
self.assertTrue(report.scores.is_cuda)
report = report.to("cpu", non_blocking=False)
self.assertFalse(report.a.is_cuda)
self.assertFalse(report.scores.is_cuda)
report = report.to("cuda", fields=["scores"])
self.assertFalse(report.a.is_cuda)
self.assertTrue(report.scores.is_cuda)
| EXA-1-master | exa/models/mmf-main/tests/common/test_report.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.common.meter import Meter
from mmf.common.report import Report
from mmf.common.sample import SampleList
class TestMeter(unittest.TestCase):
def test_meter_update_from_report(self):
meter = Meter()
prepared_batch = SampleList(
{"targets": torch.tensor([1, 2, 3, 4]), "dataset_type": "val"}
)
for idx in range(5):
model_output = {
"scores": torch.tensor([0, 1, 2, 3]),
"losses": {"loss": float(idx)},
}
report = Report(prepared_batch, model_output)
meter.update_from_report(report)
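# losses 0, 1, 2, 3, 4 average to 2.0, so both the running and global
# averages reported by the meter should be 2.0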
self.assertEqual(meter.loss.global_avg, 2.0)
self.assertEqual(meter.loss.avg, 2.0)
| EXA-1-master | exa/models/mmf-main/tests/common/test_meter.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import gc
import unittest
import warnings
from io import StringIO
from mmf.common.registry import registry
from mmf.utils.configuration import Configuration
from mmf.utils.env import setup_imports, teardown_imports
from tests.test_utils import dummy_args
class TestConfigsForKeys(unittest.TestCase):
def setUp(self):
setup_imports()
def tearDown(self):
teardown_imports()
gc.collect()
def test_model_configs_for_keys(self):
models_mapping = registry.mapping["model_name_mapping"]
for model_key, model_cls in models_mapping.items():
if model_cls.config_path() is None:
warnings.warn(
(
"Model {} has no default configuration defined. "
+ "Skipping it. Make sure it is intentional"
).format(model_key)
)
continue
with contextlib.redirect_stdout(StringIO()):
args = dummy_args(model=model_key)
configuration = Configuration(args)
configuration.freeze()
config = configuration.get_config()
if model_key == "mmft":
continue
self.assertTrue(
model_key in config.model_config,
"Key for model {} doesn't exists in its configuration".format(
model_key
),
)
def test_dataset_configs_for_keys(self):
builder_name = registry.mapping["builder_name_mapping"]
for builder_key, builder_cls in builder_name.items():
if builder_cls.config_path() is None:
warnings.warn(
(
"Dataset {} has no default configuration defined. "
+ "Skipping it. Make sure it is intentional"
).format(builder_key)
)
continue
with contextlib.redirect_stdout(StringIO()):
args = dummy_args(dataset=builder_key)
configuration = Configuration(args)
configuration.freeze()
config = configuration.get_config()
self.assertTrue(
builder_key in config.dataset_config,
"Key for dataset {} doesn't exists in its configuration".format(
builder_key
),
)
| EXA-1-master | exa/models/mmf-main/tests/configs/test_configs_for_keys.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tests/configs/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import re
import time
import typing
import unittest
from mmf.utils.configuration import load_yaml
from mmf.utils.download import check_header, DownloadableFile
from omegaconf import DictConfig, OmegaConf
from tests.test_utils import skip_if_macos, skip_if_no_network
class TestConfigsForKeys(unittest.TestCase):
def _recurse_on_config(self, config: DictConfig, callback: typing.Callable):
if OmegaConf.is_list(config) and len(config) > 0 and "url" in config[0]:
# Found the urls, let's test them
for item in config:
# flickr30 download source is down, ignore dataset until a
# mirror can be found
if getattr(item, "file_name", "") == "flickr30_images.tar.gz":
continue
# First try making the DownloadableFile class to make sure
# everything is fine
download = DownloadableFile(**item)
# Now, call the actual callback which will test specific scenarios
callback(download)
elif OmegaConf.is_dict(config):
# Both version and resources should be present
if "version" in config:
self.assertIn("resources", config)
if "resources" in config:
self.assertIn("version", config)
# Let's continue recursing
for item in config:
self._recurse_on_config(config[item], callback=callback)
def _check_download(self, download: DownloadableFile):
# Check the actual header 3 times before failing
for i in range(3):
try:
check_header(download._url, from_google=download._from_google)
break
except AssertionError:
if i == 2:
raise
else:
# If failed, add a sleep of 2 seconds before retrying
time.sleep(2)
def _check_sha256sum(self, download: DownloadableFile):
if download._hashcode is not None:
matches = re.findall(r"^[A-Fa-f0-9]{64}$", download._hashcode)
assert len(matches) == 1, f"{download._url} doesn't have a valid sha256sum"
def _test_zoo(self, path: str, callback: typing.Callable):
zoo_config = load_yaml(path)
self._recurse_on_config(zoo_config, callback=callback)
def _test_all_zoos(self, callback: typing.Callable):
self._test_zoo("configs/zoo/datasets.yaml", callback=callback)
self._test_zoo("configs/zoo/models.yaml", callback=callback)
@skip_if_no_network
@skip_if_macos
def test_zoos(self):
self._test_all_zoos(callback=self._check_download)
def test_sha256sums(self):
self._test_all_zoos(callback=self._check_sha256sum)
| EXA-1-master | exa/models/mmf-main/tests/configs/test_zoo_urls.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import mmf.modules.poolers as poolers
import torch
from mmf.utils.general import get_current_device
class TestModulePoolers(unittest.TestCase):
def setUp(self):
self.k = 2
self.batch_size = 64
self.num_tokens = 10
self.embedding_size = 768
self.token_len = 10
self.device = get_current_device()
self.encoded_layers = [
torch.randn(self.batch_size, self.token_len, self.embedding_size).to(
self.device
)
for _ in range(3)
]
self.pad_mask = torch.randn(self.batch_size, self.token_len).to(self.device)
def test_average_concat(self):
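# AverageConcatLastN averages each of the last k encoded layers and
# concatenates them, so the output feature dim is embedding_size * k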
pool_fn = poolers.AverageConcatLastN(self.k).to(self.device)
out = pool_fn(self.encoded_layers, self.pad_mask)
assert torch.Size([self.batch_size, self.embedding_size * self.k]) == out.shape
def test_average_k_from_last(self):
pool_fn = poolers.AverageKFromLast(self.k).to(self.device)
out = pool_fn(self.encoded_layers, self.pad_mask)
assert torch.Size([self.batch_size, self.embedding_size]) == out.shape
def test_average_sum_last_k(self):
pool_fn = poolers.AverageSumLastK(self.k).to(self.device)
out = pool_fn(self.encoded_layers, self.pad_mask)
assert torch.Size([self.batch_size, self.embedding_size]) == out.shape
def test_identity(self):
pool_fn = poolers.IdentityPooler().to(self.device)
out = pool_fn(self.encoded_layers[-1])
assert (
torch.Size([self.batch_size, self.token_len, self.embedding_size])
== out.shape
)
def test_cls(self):
pool_fn = poolers.ClsPooler().to(self.device)
out = pool_fn(self.encoded_layers[-1])
assert torch.Size([self.batch_size, self.embedding_size]) == out.shape
def test_average(self):
pool_fn = poolers.MeanPooler().to(self.device)
out = pool_fn(self.encoded_layers[-1])
assert torch.Size([self.batch_size, self.embedding_size]) == out.shape
| EXA-1-master | exa/models/mmf-main/tests/modules/test_poolers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import tempfile
import unittest
import torch
from mmf.modules import encoders
from omegaconf import OmegaConf
from tests.test_utils import (
setup_proxy,
skip_if_no_pytorchvideo,
skip_if_old_transformers,
)
from torch import nn
class TestEncoders(unittest.TestCase):
def setUp(self):
setup_proxy()
def _test_init(self, cls, **params):
encoder = cls.from_params(**params)
self.assertTrue(isinstance(encoder, nn.Module))
def test_finetune_faster_rcnn_fpn_fc7(self):
# Add tempfile dir so that the encoder downloads data automatically for testing
self._test_init(
encoders.FinetuneFasterRcnnFpnFc7,
in_dim=2048,
model_data_dir=tempfile.TemporaryDirectory().name,
)
def test_resnet152_image_encoder(self):
self._test_init(encoders.ResNet152ImageEncoder)
def test_text_embedding_encoder(self):
embedding_params = {
"type": "projection",
"params": {"module": "linear", "in_dim": 756, "out_dim": 756},
}
self._test_init(
encoders.TextEmbeddingEncoder,
operator="sum",
embedding_params=embedding_params,
)
def test_transformer_encoder(self):
self._test_init(encoders.TransformerEncoder)
def test_multimodal_encoder_base(self):
self._test_init(encoders.MultiModalEncoderBase)
def test_identity(self):
self._test_init(encoders.IdentityEncoder, in_dim=256)
def test_transformer_encoder_forward(self):
encoder = encoders.TransformerEncoder.from_params()
self.assertEqual(encoder.embeddings.word_embeddings.weight.size(1), 768)
self.assertEqual(encoder.embeddings.word_embeddings.weight.size(0), 30522)
text_ids = torch.randint(
encoder.embeddings.word_embeddings.weight.size(0), (2, 16)
)
text_embeddings = encoder(text_ids)
text_embeddings_cls = encoder(text_ids)
self.assertEqual(text_embeddings_cls.dim(), 2)
self.assertEqual(list(text_embeddings_cls.size()), [2, 768])
text_embeddings = encoder(text_ids, return_sequence=True)
self.assertEqual(text_embeddings.dim(), 3)
self.assertEqual(list(text_embeddings.size()), [2, 16, 768])
def test_r2plus1d18_video_encoder(self):
config = OmegaConf.structured(
encoders.R2Plus1D18VideoEncoder.Config(pretrained=False)
)
encoder = encoders.R2Plus1D18VideoEncoder(config)
x = torch.rand((1, 3, 16, 112, 112))
output = encoder(x)
self.assertEqual(output.size(-1), config.out_dim)
def test_resnet18_audio_encoder(self):
config = OmegaConf.structured(encoders.ResNet18AudioEncoder.Config())
encoder = encoders.ResNet18AudioEncoder(config)
x = torch.rand((1, 1, 4778, 224))
output = encoder(x)
self.assertEqual(output.size(-1), config.out_dim)
@skip_if_old_transformers(min_version="4.5.0")
def test_vit_encoder(self):
from omegaconf import open_dict
config = OmegaConf.structured(encoders.ViTEncoder.Config())
with open_dict(config):
config.update(
{
"layer_norm_eps": 0.0001,
"hidden_size": 768,
"num_hidden_layers": 2,
"do_patch_embeddings": False,
"add_pooling_layer": False,
"out_dim": 768,
}
)
encoder = encoders.ViTEncoder(config)
x = torch.rand(32, 197, 768)
output, _ = encoder(x)
self.assertEqual(output.size(-1), config.out_dim)
@skip_if_no_pytorchvideo
def test_pytorchvideo_slowfast_r50_encoder(self):
# instantiate video encoder from pytorchvideo
# default model is slowfast_r50
config = OmegaConf.structured(encoders.PytorchVideoEncoder.Config())
encoder = encoders.PytorchVideoEncoder(config)
fast = torch.rand((1, 3, 32, 224, 224))
slow = torch.rand((1, 3, 8, 224, 224))
output = encoder([slow, fast])
# check output tensor is the expected feature dim size
# (bs, feature_dim)
self.assertEqual(output.size(1), 2304)
@skip_if_no_pytorchvideo
def test_mvit_encoder(self):
config = {
"name": "pytorchvideo",
"model_name": "mvit_base_32x3",
"random_init": True,
"drop_last_n_layers": 0,
"pooler_name": "cls",
"spatial_size": 224,
"temporal_size": 8,
"head": None,
"embed_dim_mul": [[1, 2.0], [3, 2.0], [14, 2.0]],
"atten_head_mul": [[1, 2.0], [3, 2.0], [14, 2.0]],
"pool_q_stride_size": [[1, 1, 2, 2], [3, 1, 2, 2], [14, 1, 2, 2]],
"pool_kv_stride_adaptive": [1, 8, 8],
"pool_kvq_kernel": [3, 3, 3],
}
# test bert cls pooler
encoder = encoders.PytorchVideoEncoder(OmegaConf.create(config))
x = torch.rand((1, 3, 8, 224, 224))
output = encoder(x)
# check output tensor is the expected feature dim size
# based on pooled attention configs
# for more details consult https://arxiv.org/pdf/2104.11227
# and https://github.com/facebookresearch/pytorchvideo/
# (bs, num_features, feature_dim)
self.assertEqual(output.shape, torch.Size([1, 768]))
# test avg pooler
encoder = encoders.PytorchVideoEncoder(
OmegaConf.create(dict(config, pooler_name="avg"))
)
output = encoder(x)
self.assertEqual(output.shape, torch.Size([1, 768]))
# test no pooling
encoder = encoders.PytorchVideoEncoder(
OmegaConf.create(dict(config, pooler_name="identity"))
)
output = encoder(x)
# (bs, num_features, feature_dim)
self.assertEqual(output.shape, torch.Size([1, 197, 768]))
| EXA-1-master | exa/models/mmf-main/tests/modules/test_encoders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import unittest
import mmf.modules.metrics as metrics
import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample
from mmf.datasets.processors import CaptionProcessor
from mmf.utils.configuration import load_yaml
class TestModuleMetrics(unittest.TestCase):
def test_caption_bleu4(self):
path = os.path.join(
os.path.abspath(__file__),
"../../../mmf/configs/datasets/coco/defaults.yaml",
)
config = load_yaml(os.path.abspath(path))
captioning_config = config.dataset_config.coco
caption_processor_config = captioning_config.processors.caption_processor
vocab_path = os.path.join(
os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
)
caption_processor_config.params.vocab.type = "random"
caption_processor_config.params.vocab.vocab_file = os.path.abspath(vocab_path)
caption_processor = CaptionProcessor(caption_processor_config.params)
registry.register("coco_caption_processor", caption_processor)
caption_bleu4 = metrics.CaptionBleu4Metric()
expected = Sample()
predicted = dict()
# Test complete match
expected.answers = torch.empty((5, 5, 10))
expected.answers.fill_(4)
predicted["scores"] = torch.zeros((5, 10, 19))
predicted["scores"][:, :, 4] = 1.0
self.assertEqual(caption_bleu4.calculate(expected, predicted).item(), 1.0)
# Test partial match
expected.answers = torch.empty((5, 5, 10))
expected.answers.fill_(4)
predicted["scores"] = torch.zeros((5, 10, 19))
predicted["scores"][:, 0:5, 4] = 1.0
predicted["scores"][:, 5:, 18] = 1.0
self.assertAlmostEqual(
caption_bleu4.calculate(expected, predicted).item(), 0.3928, 4
)
def _test_binary_metric(self, metric, value):
sample = Sample()
predicted = dict()
sample.targets = torch.tensor(
[[0, 1], [1, 0], [1, 0], [0, 1]], dtype=torch.float
)
predicted["scores"] = torch.tensor(
[
[-0.9332, 0.8149],
[-0.8391, 0.6797],
[-0.7235, 0.7220],
[-0.9043, 0.3078],
],
dtype=torch.float,
)
self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)
sample.targets = torch.tensor([1, 0, 0, 1], dtype=torch.long)
self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)
def _test_multiclass_metric(self, metric, value):
sample = Sample()
predicted = dict()
sample.targets = torch.tensor(
[[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 0, 1]], dtype=torch.float
)
predicted["scores"] = torch.tensor(
[
[-0.9332, 0.8149, 0.3491],
[-0.8391, 0.6797, -0.3410],
[-0.7235, 0.7220, 0.9104],
[0.9043, 0.3078, -0.4210],
],
dtype=torch.float,
)
self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)
sample.targets = torch.tensor([1, 2, 0, 2], dtype=torch.long)
self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)
def _test_multilabel_metric(self, metric, value):
sample = Sample()
predicted = dict()
sample.targets = torch.tensor(
[[0, 1, 1], [1, 0, 1], [1, 0, 1], [0, 0, 1]], dtype=torch.float
)
predicted["scores"] = torch.tensor(
[
[-0.9332, 0.8149, 0.3491],
[-0.8391, 0.6797, -0.3410],
[-0.7235, 0.7220, 0.9104],
[0.9043, 0.3078, -0.4210],
],
dtype=torch.float,
)
self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)
def _test_recall_at_k_metric(self, metric, value):
sample = Sample()
predicted = dict()
first_dimension = 10
second_dimension = 100 # second dim MUST be 100
sample.targets = torch.ones(first_dimension, second_dimension)
predicted["scores"] = torch.ones(first_dimension, second_dimension)
for i in range(first_dimension):
for j in range(second_dimension):
# sample = [[0, 1, 2, ..., 99], [0, 1, ..., 99], ...]
sample.targets[i][j] = j
if j == second_dimension - 1 and i != 0:
# changes last value or 'chosen candidate'
# to a lower rank as i increases
# predicted = [[0, 2, 4, ..., 198], [0, 2, ..., 196, 191],
# [0, ..., 196, 189], [0, ..., 196, 187], ...]
predicted["scores"][i][j] = j * 2 - 1 - (i + 2) * 2
else:
# predicted = [[0, 2, 4, ..., 198], [0, 2, ...], ...]
predicted["scores"][i][j] = j * 2
self.assertAlmostEqual(metric.calculate(sample, predicted), value)
def _test_retrieval_recall_at_k_metric(self, metric, value):
sample = Sample()
predicted = dict()
torch.manual_seed(1234)
predicted["targets"] = torch.rand((10, 4))
predicted["scores"] = torch.rand((10, 4))
self.assertAlmostEqual(float(metric.calculate(sample, predicted)), value)
def _test_binary_dict_metric(self, metric, value_dict):
sample = Sample()
predicted = dict()
sample.targets = torch.tensor(
[[0, 1], [1, 0], [1, 0], [0, 1]], dtype=torch.float
)
predicted["scores"] = torch.tensor(
[
[-0.9332, 0.8149],
[-0.8391, 0.6797],
[-0.7235, 0.7220],
[-0.9043, 0.3078],
],
dtype=torch.float,
)
metric_result = metric.calculate(sample, predicted)
for key, val in value_dict.items():
self.assertAlmostEqual(metric_result[key].item(), val, 4)
sample.targets = torch.tensor([1, 0, 0, 1], dtype=torch.long)
metric_result = metric.calculate(sample, predicted)
for key, val in value_dict.items():
self.assertAlmostEqual(metric_result[key].item(), val, 4)
def test_micro_f1(self):
metric = metrics.MicroF1()
self._test_binary_metric(metric, 0.5)
self._test_multiclass_metric(metric, 0.25)
def test_macro_f1(self):
metric = metrics.MacroF1()
self._test_binary_metric(metric, 0.3333)
self._test_multiclass_metric(metric, 0.2222)
def test_binary_f1(self):
metric = metrics.BinaryF1()
self._test_binary_metric(metric, 0.66666666)
def test_multilabel_micro_f1(self):
metric = metrics.MultiLabelMicroF1()
self._test_binary_metric(metric, 0.5)
def test_multilabel_macro_f1(self):
metric = metrics.MultiLabelMacroF1()
self._test_multilabel_metric(metric, 0.355555)
def test_micro_f1_precision_recall(self):
metric = metrics.MicroF1PrecisionRecall()
self._test_binary_dict_metric(
metric, {"f1": 0.5, "precision": 0.5, "recall": 0.5}
)
def test_macro_f1_precision_recall(self):
metric = metrics.MacroF1PrecisionRecall()
self._test_binary_dict_metric(
metric, {"f1": 0.3333, "precision": 0.25, "recall": 0.5}
)
def test_binary_f1_precision_recall(self):
metric = metrics.BinaryF1PrecisionRecall()
self._test_binary_dict_metric(
metric, {"f1": 0.66666666, "precision": 0.5, "recall": 1.0}
)
def test_macro_roc_auc(self):
metric = metrics.MacroROC_AUC()
self._test_binary_metric(metric, 0.5)
self._test_multiclass_metric(metric, 0.2222)
def test_micro_roc_auc(self):
metric = metrics.MicroROC_AUC()
self._test_binary_metric(metric, 0.5)
self._test_multiclass_metric(metric, 0.34375)
def test_binary_ap(self):
metric = metrics.BinaryAP()
self._test_binary_metric(metric, 0.75)
def test_recall_at_precision_k(self):
metric = metrics.RecallAtPrecisionK(50)
self._test_binary_metric(metric, 1.0)
metric = metrics.RecallAtPrecisionK(90)
self._test_binary_metric(metric, 0.5)
metric = metrics.RecallAtPrecisionK(110)
self._test_binary_metric(metric, 0)
def test_micro_ap(self):
metric = metrics.MicroAP()
self._test_binary_metric(metric, 0.642857)
self._test_multiclass_metric(metric, 0.354166)
def test_macro_ap(self):
metric = metrics.MacroAP()
self._test_binary_metric(metric, 0.6666666)
self._test_multiclass_metric(metric, 0.3888888)
def test_recall_at_1(self):
metric = metrics.RecallAt1()
self._test_recall_at_k_metric(metric, 0.1)
def test_recall_at_5(self):
metric = metrics.RecallAt5()
self._test_recall_at_k_metric(metric, 0.3)
def test_recall_at_10(self):
metric = metrics.RecallAt10()
self._test_recall_at_k_metric(metric, 0.8)
def test_retrieval_recall_at_1(self):
metric = metrics.RecallAt1_ret()
self._test_retrieval_recall_at_k_metric(metric, 0.1)
def test_retrieval_recall_at_5(self):
metric = metrics.RecallAt5_ret()
self._test_retrieval_recall_at_k_metric(metric, 0.4)
def test_retrieval_recall_at_10(self):
metric = metrics.RecallAt10_ret()
self._test_retrieval_recall_at_k_metric(metric, 1.0)
def test_accuracy_base(self):
metric = metrics.Accuracy()
torch.manual_seed(2)
targets = torch.rand((25, 10))
scores = torch.rand((25, 10))
acc = metric.calculate({"targets": targets}, {"scores": scores})
self.assertAlmostEqual(0.04, acc.item())
def test_accuracy_base2(self):
metric = metrics.Accuracy()
torch.manual_seed(2)
targets = torch.rand((25, 10))
scores = torch.rand((25, 10))
scores = torch.max(scores, 1)[1]
acc = metric.calculate({"targets": targets}, {"scores": scores})
self.assertAlmostEqual(0.04, acc.item())
def test_accuracy_base3(self):
metric = metrics.Accuracy()
torch.manual_seed(2)
targets = torch.rand((25, 10))
targets = torch.max(targets, 1)[1]
scores = torch.rand((25, 10))
acc = metric.calculate({"targets": targets}, {"scores": scores})
self.assertAlmostEqual(0.04, acc.item())
def test_accuracy_top1(self):
metric = metrics.TopKAccuracy(score_key="scores", k=1)
torch.manual_seed(2)
targets = torch.rand((25, 10))
scores = torch.rand((25, 10))
targets = targets.topk(1, 1, True, True)[1].t().squeeze()
acc = metric.calculate({"targets": targets}, {"scores": scores})
self.assertAlmostEqual(0.04, acc.item(), 1)
def test_accuracy_top1_with_max(self):
metric = metrics.TopKAccuracy(score_key="scores", k=1)
torch.manual_seed(2)
targets = torch.rand((25, 10))
targets = torch.max(targets, 1)[1]
scores = torch.rand((25, 10))
acc = metric.calculate({"targets": targets}, {"scores": scores})
self.assertAlmostEqual(0.04, acc.item(), 1)
def test_accuracy_top5(self):
metric = metrics.TopKAccuracy(score_key="scores", k=5)
torch.manual_seed(2)
targets = torch.rand((25, 10))
targets = torch.max(targets, 1)[1]
scores = torch.rand((25, 10))
acc = metric.calculate({"targets": targets}, {"scores": scores})
self.assertAlmostEqual(0.48, acc.item(), 1)
| EXA-1-master | exa/models/mmf-main/tests/modules/test_metrics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import unittest
from unittest.mock import MagicMock
import mmf.modules.losses as losses
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.common.sample import SampleList
RETURN_VALUE = torch.tensor(1.0)
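# Builds a side effect for mocking registry.get_loss_class: for names in
# valid_losses it returns a mock loss class whose instances always return
# return_value; for anything else it returns None.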
def build_loss_side_effect(return_value=RETURN_VALUE):
def loss_side_effect(item):
loss_object_mock = MagicMock(return_value=return_value)
loss_class_mock = MagicMock(return_value=loss_object_mock)
valid_losses = ["cross_entropy", "multi"]
if isinstance(item, collections.abc.MutableMapping):
if item["type"] not in valid_losses:
return None
elif item not in valid_losses:
return None
else:
return loss_class_mock
return loss_side_effect
@registry.register_loss("mse_mae")
class TestMSEAndMAELoss(nn.Module):
"""Mean squared, absolute error loss.
Calculates both losses and returns a dict with string keys.
"""
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
targets = sample_list["targets"]
scores = model_output["scores"]
loss = {"mse": F.mse_loss(scores, targets), "mae": F.l1_loss(scores, targets)}
return loss
class TestModuleLosses(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
self.registry_loss_class = registry.get_loss_class
def tearDown(self):
registry.get_loss_class = self.registry_loss_class
def test_mmf_loss(self):
get_loss_class_mock = MagicMock(side_effect=build_loss_side_effect())
registry.get_loss_class = get_loss_class_mock
# Test if MMFLoss accepts empty parameters
self.assertRaises(ValueError, losses.MMFLoss)
self.assertTrue(losses.MMFLoss({"type": "cross_entropy"}).name, "cross_entropy")
self.assertTrue(losses.MMFLoss("cross_entropy").name, "cross_entropy")
self.assertRaises(AssertionError, losses.MMFLoss, [])
# Multi requires dict
self.assertRaises(AssertionError, losses.MMFLoss, "multi")
cross_entropy = losses.MMFLoss("cross_entropy")
cross_entropy_from_dict = losses.MMFLoss({"type": "cross_entropy"})
sample_list = SampleList()
sample_list.dataset_type = "val"
sample_list.dataset_name = "vqa2"
output = cross_entropy(sample_list, {})
output_from_dict = cross_entropy_from_dict(sample_list, {})
self.assertEqual(output, {"val/vqa2/cross_entropy": torch.tensor(1.0)})
self.assertEqual(output_from_dict, output)
get_loss_class_mock.side_effect = build_loss_side_effect(1.0)
output = cross_entropy(sample_list, {})
self.assertEqual(output, {"val/vqa2/cross_entropy": torch.tensor(1.0)})
self.assertEqual(output_from_dict, output)
self.assertTrue(get_loss_class_mock.called)
self.assertEqual(get_loss_class_mock.call_count, 5)
def test_mmf_dict_loss(self):
mse_mae_loss = losses.MMFLoss("mse_mae")
torch.manual_seed(1234)
random_tensor = torch.rand((1, 768))
sample_list = SampleList()
sample_list.dataset_type = "val"
sample_list.dataset_name = "vqa2"
sample_list["targets"] = random_tensor
model_output = {"scores": random_tensor}
output = mse_mae_loss(sample_list, model_output)
self.assertEqual(output["val/vqa2/mse_mae/mse"].item(), 0.0)
self.assertEqual(output["val/vqa2/mse_mae/mae"].item(), 0.0)
def test_caption_cross_entropy(self):
caption_ce_loss = losses.CaptionCrossEntropyLoss()
expected = dict()
predicted = dict()
# Test complete match
expected["targets"] = torch.empty((1, 10), dtype=torch.long)
expected["targets"].fill_(4)
predicted["scores"] = torch.zeros((1, 10, 10))
predicted["scores"][:, :, 4] = 100.0
self.assertEqual(caption_ce_loss(expected, predicted).item(), 0.0)
# Test random initialized
torch.manual_seed(1234)
expected["targets"] = torch.randint(0, 9491, (5, 10))
predicted["scores"] = torch.rand((5, 10, 9491))
self.assertAlmostEqual(caption_ce_loss(expected, predicted).item(), 9.2507, 4)
def test_in_batch_hinge(self):
in_batch_hinge_loss = losses.InBatchHinge(0.2, True)
sample_list_input = dict()
predicted = dict()
# Test when the image and text have the same embeddings
predicted["targets"] = torch.Tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
predicted["scores"] = predicted["targets"]
self.assertEqual(in_batch_hinge_loss(sample_list_input, predicted).item(), 0.0)
# Test random initialized
torch.manual_seed(1234)
predicted["targets"] = torch.rand((5, 10))
predicted["scores"] = torch.rand((5, 10))
self.assertAlmostEqual(
in_batch_hinge_loss(sample_list_input, predicted).item(), 6.5529985, 4
)
def test_mse_loss(self):
mse_loss = losses.MSELoss()
# Test random tensor but the same targets and scores
torch.manual_seed(1234)
random_tensor = torch.rand((1, 768))
sample_list = {"targets": random_tensor}
model_output = {"scores": random_tensor}
self.assertEqual(mse_loss(sample_list, model_output).item(), 0.0)
def test_cosine_embedding_loss(self):
cos_emb_loss = losses.CosineEmbeddingLoss()
# Test random tensor but the same targets and scores
torch.manual_seed(1234)
random_tensor = torch.rand((1, 768))
sample_list = {"targets": random_tensor}
model_output = {"scores": random_tensor}
self.assertEqual(cos_emb_loss(sample_list, model_output).item(), 0.0)
def test_bce_kl_loss(self):
combined_loss = losses.BCEAndKLLoss(0.5)
# Test random tensor but the same targets and scores
torch.manual_seed(1234)
random_tensor = torch.rand((1, 768))
sample_list = {"targets": random_tensor}
model_output = {"scores": random_tensor}
loss_result = combined_loss(sample_list, model_output)
self.assertAlmostEqual(loss_result["bce"].item(), 504.22253418, 4)
self.assertAlmostEqual(loss_result["kl"].item(), 0.031847, 4)
def test_refiner_ms_loss(self):
refiner_ms_loss = losses.RefinerMSLoss(
alpha=50, beta=2, base=0.5, margin=0.1, epsilon=1e-16
)
torch.manual_seed(1234)
random_tensor = torch.rand((1, 768))
sample_list = {"targets": random_tensor}
model_output = {"scores": random_tensor}
loss_result = refiner_ms_loss(sample_list, model_output)
self.assertEqual(loss_result, 0.0)
def test_ms_loss(self):
ms_loss = losses.MSLoss(
alpha=50, beta=2, margin=0.5, hard_mining=True, is_multilabel=False
)
torch.manual_seed(1234)
label_tensor = torch.Tensor([0, 0, 0, 0, 0])
fused_tensor = torch.randn(5, 768)
sample_list = {"targets": label_tensor}
model_output = {"fused_embedding": fused_tensor}
loss_result = ms_loss(sample_list, model_output)
self.assertEqual(loss_result, 0.0)
label_tensor = torch.Tensor([1, 1, 1, 1, 1])
loss_result = ms_loss(sample_list, model_output)
self.assertEqual(loss_result, 0.0)
label_tensor = torch.Tensor([1, 1, 1, 1, 1])
loss_result = ms_loss(sample_list, model_output)
self.assertEqual(loss_result, 0.0)
def test_refiner_contrastive_loss(self):
refiner_contrastive_loss = losses.RefinerContrastiveLoss(
sim_thresh=0.1, epsilon=1e-16
)
inputs = torch.rand((10, 768))
targets = inputs
sample_list = {"targets": targets}
model_output = {"scores": inputs}
loss_result = refiner_contrastive_loss(sample_list, model_output)
self.assertEqual(loss_result, 0.0)
| EXA-1-master | exa/models/mmf-main/tests/modules/test_losses.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import operator
import random
import unittest
import mmf.modules.layers as layers
import numpy as np
import torch
from omegaconf import OmegaConf
class TestModuleLayers(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
def test_conv_net(self):
conv_net = layers.ConvNet(150, 75, 3)
input_tensor = torch.randn(4, 150, 64, 64)
output = conv_net(input_tensor)
expected_size = torch.Size((4, 75, 32, 32))
self.assertEqual(output.size(), expected_size)
# Since seed is fix we can check some of tensor values
np.testing.assert_almost_equal(output[0][0][0][0].item(), 0.149190, decimal=5)
np.testing.assert_almost_equal(
output[3][74][31][31].item(), -0.25199, decimal=5
)
def test_flatten(self):
flatten = layers.Flatten()
# Test 3 dim
input_tensor = torch.randn(5, 6, 10)
expected_size = torch.Size((5, 60))
actual_size = flatten(input_tensor).size()
self.assertEqual(actual_size, expected_size)
# Test 1 dim
input_tensor = torch.randn(5)
expected_size = torch.Size((5,))
actual_size = flatten(input_tensor).size()
self.assertEqual(actual_size, expected_size)
# Test 6 dim
size_list = [random.randint(2, 4) for _ in range(7)]
expected_size = torch.Size(
(size_list[0], functools.reduce(operator.mul, size_list[1:]))
)
input_tensor = torch.randn(*size_list)
actual_size = flatten(input_tensor).size()
self.assertEqual(actual_size, expected_size)
def test_unflatten(self):
unflatten = layers.UnFlatten()
# Test 2 dim to 3 dim
input_tensor = torch.randn(5, 60)
expected_size = torch.Size((5, 6, 10))
actual_size = unflatten(input_tensor, sizes=[6, 10]).size()
self.assertEqual(actual_size, expected_size)
# Test 1 dim
input_tensor = torch.randn(5)
expected_size = torch.Size((5,))
actual_size = unflatten(input_tensor, sizes=[]).size()
self.assertEqual(expected_size, actual_size)
def test_mlp(self):
mlp = layers.ClassifierLayer("mlp", in_dim=300, out_dim=1)
self.assertEqual(len(list(mlp.module.layers.children())), 1)
self.assertEqual(len(list(mlp.parameters())), 2)
inp = torch.rand(3, 300)
output = mlp(inp)
self.assertEqual(output.size(), torch.Size((3, 1)))
np.testing.assert_almost_equal(
output.squeeze().tolist(), [0.1949174, 0.4030975, -0.0109139]
)
mlp = layers.ClassifierLayer(
"mlp", in_dim=300, out_dim=1, hidden_dim=150, num_layers=1
)
self.assertEqual(len(list(mlp.module.layers.children())), 5)
self.assertEqual(len(list(mlp.parameters())), 6)
inp = torch.rand(3, 300)
output = mlp(inp)
self.assertEqual(output.size(), torch.Size((3, 1)))
np.testing.assert_almost_equal(
output.squeeze().tolist(), [-0.503411, 0.1725615, -0.6833304], decimal=3
)
def test_bert_classifier_head(self):
config = {}
config["hidden_size"] = 768
config["hidden_act"] = "gelu"
config["layer_norm_eps"] = 1e-12
config["hidden_dropout_prob"] = 0.1
config = OmegaConf.create(config)
clf = layers.ClassifierLayer("bert", 768, 1, config=config)
self.assertEqual(len(list(clf.module.children())), 3)
self.assertEqual(len(list(clf.parameters())), 6)
inp = torch.rand(3, 768)
output = clf(inp)
self.assertEqual(output.size(), torch.Size((3, 1)))
np.testing.assert_almost_equal(
output.squeeze().tolist(), [0.5452202, -0.0437842, -0.377468], decimal=3
)
| EXA-1-master | exa/models/mmf-main/tests/modules/test_layers.py |