python_code (string, 0-992k chars) | repo_name (string, 8-46 chars) | file_path (string, 5-162 chars)
---|---|---|
import os
import PIL
import torch
from mmf.common.sample import Sample
from mmf.datasets.base_dataset import BaseDataset
from mmf.datasets.builders.charades._utils import (
CharadesVideoClips,
img2gif,
make_charades_df,
)
from mmf.utils.distributed import byte_tensor_to_object, object_to_byte_tensor
from mmf.utils.file_io import PathManager
class CharadesDataset(BaseDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__("charades", config, dataset_type)
self.imdb_file_index = imdb_file_index
self.load_df()
self.length = len(self.video_clips)
self.audio_processor = None
self.video_processor = None
self.video_train_processor = None
self.video_test_processor = None
self.prediction_threshold = self.config.get("prediction_threshold", 0.5)
# Some pickling-related issues can be resolved by loading video_clips at
# runtime; optionally, uncomment the next line if you face those issues.
# self.video_clips = []
def init_processors(self):
super().init_processors()
self.set_processors()
def load_df(self):
dataset_type = self.dataset_type
imdb_file_index = self.imdb_file_index
config = self.config
csv_path = self.get_resource_path(
config, config.annotations.get(dataset_type)[imdb_file_index]
)
video_dir = self.get_resource_path(
config, config.videos.get(dataset_type)[imdb_file_index]
)
classes_file = self.get_resource_path(config, config.classes_file)
df = make_charades_df(
csv_path=csv_path, video_dir=video_dir, classes_file=classes_file
)
precomputed_metadata = None
pkl_path = os.path.join("charades", "defaults", f"metadata_{dataset_type}.pt")
pkl_path = self.get_resource_path(config, pkl_path)
if PathManager.exists(pkl_path):
local_path = PathManager.get_local_path(pkl_path)
with PathManager.open(local_path, "rb") as f:
precomputed_metadata = torch.load(f)
self.process_df(
df,
frames_per_clip=16,
column_map={
"labels": "action_labels",
"video": "path",
"text": "script",
"id": "id",
},
num_workers=10,
_precomputed_metadata=precomputed_metadata,
)
if not PathManager.exists(pkl_path):
with PathManager.open(pkl_path, "wb") as f:
torch.save(self.metadata, f)
def get_resource_path(self, config, path):
return os.path.join(config.data_dir, path)
def process_df(
self,
df,
frames_per_clip=16,
column_map={},
num_workers=1,
_precomputed_metadata=None,
**kwargs,
):
self.labels = df[column_map.get("labels", "labels")].tolist()
self.idx_to_class = sorted(
list(set([item for sublist in self.labels for item in sublist]))
)
self.classes = self.idx_to_class
self.class_to_idx = {self.classes[i]: i for i in range(len(self.classes))}
self.text_list = df[column_map.get("text", "text")].tolist()
self.ids_list = df[column_map.get("id", "id")].tolist()
video_list = df[column_map.get("video", "video")].tolist()
self.video_clips = CharadesVideoClips(
video_list,
clip_length_in_frames=frames_per_clip,
_precomputed_metadata=_precomputed_metadata,
num_workers=num_workers,
)
@property
def metadata(self):
return self.video_clips.metadata
def set_processors(self):
if self.dataset_type == "train":
self.video_processor = self.video_train_processor
else:
self.video_processor = self.video_test_processor
def format_for_prediction(self, report):
scores = torch.sigmoid(report.scores)
binary_scores = scores > self.prediction_threshold
predictions = []
for idx, item_id in enumerate(report.id):
item_id = byte_tensor_to_object(item_id)
score = binary_scores[idx]
labels = []
score = score.nonzero(as_tuple=False)
for item in score:
labels.append(self.idx_to_class[item.item()])
predictions.append({"id": item_id, "labels": labels})
return predictions
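# A minimal worked example (hypothetical numbers) for the thresholding above:
# with prediction_threshold=0.5, logits [2.0, -1.0, 0.2] give sigmoid scores of
# roughly [0.88, 0.27, 0.55], so classes 0 and 2 (scores above the threshold)
# end up in the predicted label list for that item.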
def __len__(self):
return self.length
def __getitem__(self, idx):
if len(self.video_clips) == 0:
self.load_df()
video, audio, info = self.video_clips.get_clip(idx)
text = self.text_list[idx]
actual_idx = self.ids_list[idx]
label = [self.class_to_idx[class_name] for class_name in self.labels[idx]]
one_hot_label = torch.zeros(len(self.class_to_idx))
one_hot_label[label] = 1
if self.video_processor is not None:
video = self.video_processor(video)
if self.audio_processor is not None:
audio = self.audio_processor(audio)
sample = Sample()
sample.id = object_to_byte_tensor(actual_idx)
sample.video = video
sample.audio = audio
sample.update(self.text_processor({"text": text}))
sample.targets = one_hot_label
return sample
def show_clip(self, idx):
from IPython.display import Audio, display, Image
video, audio, text, one_hot = self[idx]
# one hot to label index
label = (
torch.arange(one_hot.shape[0])[one_hot == 1].numpy().astype(int).tolist()
)
image_list = [PIL.Image.fromarray(frame.numpy()) for frame in video]
audio_list = audio.numpy()
path_to_gif = img2gif(image_list)
display(
"Labels: {}".format(str([self.classes[label_id] for label_id in label]))
)
display(Image(str(path_to_gif), format="png"))
display(Audio(audio_list, rate=48000))
display(text)
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/charades/dataset.py |
import logging
import tempfile
from pathlib import Path
import pandas as pd
import torch
from torchvision.datasets.video_utils import VideoClips
from torchvision.io import read_video
logger = logging.getLogger(__name__)
def make_charades_df(csv_path, video_dir, classes_file):
# load the csv
logger.info(f"Reading from {csv_path}")
df = pd.read_csv(csv_path)
# transform the id to a pathname
df["path"] = df["id"].map(lambda x: "{}/{}.mp4".format(video_dir, x))
# parse action labels
df["action_labels"] = df["actions"].map(
lambda x: [label.split(" ")[0] for label in x.split(";")]
if pd.notnull(x)
else []
)
# load id to class map
with open(classes_file, "r") as f:
class_names = f.readlines()
id2classname = {}
for c in class_names:
c_split = c.split(" ")
assert len(c_split) > 1
class_id = c_split[0]
class_name = " ".join(c_split[1:]).strip("\n")
id2classname[class_id] = class_name
# transform label ids to names
df["action_labels"] = df["action_labels"].map(
lambda x: [id2classname[class_id] for class_id in x]
)
# keep only those videos that actually exist
df_exists = df[df["path"].map(lambda x: Path(x).exists())]
return df_exists
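# A minimal sketch (hypothetical row) of the label parsing above: a Charades
# "actions" field such as "c092 11.90 21.20;c147 0.00 12.60" is split on ";",
# only the class id before the first space of each segment is kept, giving
# ["c092", "c147"], and those ids are then mapped to class names via the
# classes file.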
def img2gif(image_list, temporal_path="./tmp/"):
tmp_file = Path(tempfile.NamedTemporaryFile(suffix=".gif").name).name
tmp_file = Path(temporal_path) / tmp_file
Path(tmp_file.parent).mkdir(exist_ok=True)
print("Write to {}".format(tmp_file))
with open(tmp_file, "wb") as tmp:
image_list[0].save(
tmp,
format="GIF",
append_images=image_list[1:],
save_all=True,
duration=3,
loop=0,
)
return tmp_file
class CharadesVideoClips(VideoClips):
@staticmethod
def select_clips_from_video(video_pts, num_frames, fps, frame_rate):
# This function replaces compute_clips_for_video from the original Kinetics400
# dataset; it yields a single clip of num_frames evenly spaced frames per video
if fps is None:
# if for some reason the video doesn't have an fps
# (because it doesn't have a video stream), set the fps to 1.
# The value doesn't matter, because video_pts is empty anyway
fps = 1
if frame_rate is None:
frame_rate = fps
idxs = torch.round(torch.linspace(0, len(video_pts) - 1, num_frames)).type(
torch.LongTensor
)
video_pts = video_pts[idxs]
return video_pts, idxs
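# A minimal worked example of the frame selection above: for a 100-frame video
# and num_frames=16,
#   torch.round(torch.linspace(0, 99, 16)).long()
# gives (up to floating-point rounding)
#   tensor([ 0,  7, 13, 20, 26, 33, 40, 46, 53, 59, 66, 73, 79, 86, 92, 99])
# i.e. a single clip of 16 evenly spaced frame indices spanning the whole video.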
def compute_clips(self, num_frames, step, frame_rate=None):
self.num_frames = num_frames
self.step = step
self.frame_rate = frame_rate
self.clips = []
self.resampling_idxs = []
for video_pts, fps in zip(self.video_pts, self.video_fps):
clips, idxs = self.select_clips_from_video(
video_pts, num_frames, fps, frame_rate
)
self.clips.append(clips)
self.resampling_idxs.append(idxs)
def __len__(self):
return self.num_clips()
def num_videos(self):
return len(self.video_paths)
def num_clips(self):
return len(self.clips)
def get_clip(self, idx):
"""
Gets a subclip from a list of videos.
Arguments:
idx (int): index of the subclip. Must be between 0 and num_clips().
Returns:
video (Tensor)
audio (Tensor)
info (Dict)
video_idx (int): index of the video in `video_paths`
"""
if idx >= self.num_clips():
raise IndexError(
"Index {} out of range "
"({} number of clips)".format(idx, self.num_clips())
)
video_path = self.video_paths[idx]
clip_pts = self.clips[idx]
from torchvision import get_video_backend
backend = get_video_backend()
if backend == "pyav":
# check for invalid options
if self._video_width != 0:
raise ValueError("pyav backend doesn't support _video_width != 0")
if self._video_height != 0:
raise ValueError("pyav backend doesn't support _video_height != 0")
if self._video_min_dimension != 0:
raise ValueError(
"pyav backend doesn't support _video_min_dimension != 0"
)
if self._video_max_dimension != 0:
raise ValueError(
"pyav backend doesn't support _video_max_dimension != 0"
)
if self._audio_samples != 0:
raise ValueError("pyav backend doesn't support _audio_samples != 0")
if backend == "pyav":
assert len(clip_pts) > 0
start_pts = clip_pts[0].item()
end_pts = clip_pts[-1].item()
video, audio, info = read_video(video_path, start_pts, end_pts)
else:
raise NotImplementedError(f"backend {backend} is not implemented.")
resampling_idx = self.resampling_idxs[idx]
if isinstance(resampling_idx, torch.Tensor):
resampling_idx = resampling_idx - resampling_idx[0]
video = video[resampling_idx]
info["video_fps"] = self.frame_rate
assert len(video) == self.num_frames, "{} x {}".format(
video.shape, self.num_frames
)
return video, audio, info
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/charades/_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
from mmf.datasets.databases.annotation_database import AnnotationDatabase
from mmf.utils.general import get_absolute_path
class OKVQAAnnotationDatabase(AnnotationDatabase):
def __init__(self, config, path, *args, **kwargs):
path = path.split(",")
super().__init__(config, path, *args, **kwargs)
def load_annotation_db(self, path):
# Expect two paths, one to questions and one to annotations
assert (
len(path) == 2
), "OKVQA requires 2 paths; one to questions and one to annotations"
with open(path[0]) as f:
path_0 = json.load(f)
with open(path[1]) as f:
path_1 = json.load(f)
if "annotations" in path_0:
annotations = path_0
questions = path_1
else:
annotations = path_1
questions = path_0
# Convert to linear format
data = []
question_dict = {}
for question in questions["questions"]:
question_dict[question["question_id"]] = question["question"]
for annotation in annotations["annotations"]:
annotation["question"] = question_dict[annotation["question_id"]]
answers = []
for answer in annotation["answers"]:
answers.append(answer["answer"])
annotation["answers"] = answers
data.append(copy.deepcopy(annotation))
self.data = data
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/okvqa/database.py |
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/okvqa/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.okvqa.dataset import OKVQADataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
@registry.register_builder("okvqa")
class OKVQABuilder(MMFDatasetBuilder):
def __init__(
self, dataset_name="okvqa", dataset_class=OKVQADataset, *args, **kwargs
):
super().__init__(dataset_name, dataset_class, *args, **kwargs)
@classmethod
def config_path(cls):
return "configs/datasets/okvqa/defaults.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/okvqa/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Type, Union
import torch
from mmf.common.sample import Sample
from mmf.common.typings import MMFDatasetConfigType
from mmf.datasets.builders.okvqa.database import OKVQAAnnotationDatabase
from mmf.datasets.mmf_dataset import MMFDataset
from mmf.datasets.processors import GraphVQAAnswerProcessor
class OKVQADataset(MMFDataset):
def __init__(
self,
config: MMFDatasetConfigType,
dataset_type: str,
index: int,
*args,
**kwargs,
):
super().__init__("okvqa", config, dataset_type, index, *args, **kwargs)
"""def build_annotation_db(self) -> Type[OKVQAAnnotationDatabase]:
annotation_path = self._get_path_based_on_index(
self.config, "annotations", self._index
)
return OKVQAAnnotationDatabase(self.config, annotation_path)
"""
def get_image_path(self, image_id: Union[str, int]) -> str:
if self.dataset_type == "train":
image_path = f"COCO_train2014_{str(image_id).zfill(12)}.jpg"
else:
image_path = f"COCO_val2014_{str(image_id).zfill(12)}.jpg"
return image_path
def init_processors(self):
super().init_processors()
if hasattr(self, "image_db"):
self.image_db.transform = self.image_processor
def __getitem__(self, idx: int) -> Type[Sample]:
sample_info = self.annotation_db[idx]
current_sample = Sample()
if "question_tokens" in sample_info:
text_processor_argument = {
"tokens": sample_info["question_tokens"],
"text": sample_info["question_str"],
}
else:
text_processor_argument = {"text": sample_info["question"]}
processed_question = self.text_processor(text_processor_argument)
current_sample.update(processed_question)
current_sample.id = torch.tensor(
int(sample_info["question_id"]), dtype=torch.int
)
if self._use_features:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.update(features)
else:
image_path = sample_info["image_name"] + ".jpg"
current_sample.image = self.image_db.from_path(image_path)["images"][0]
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
def add_answer_info(self, sample_info, sample):
if "answers" in sample_info:
answers = sample_info["answers"]
answer_processor_arg = {"answers": answers}
processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
sample.targets = processed_soft_copy_answers["answers_scores"]
return sample
def idx_to_answer(self, idx):
return self.answer_processor.convert_idx_to_answer(idx)
def format_for_prediction(self, report):
# Check for case of scores coming from graph
reg_vocab_sz = self.answer_processor.get_true_vocab_size()
if report.scores.size(1) > reg_vocab_sz:
# Should actually have the graph_vqa_answer
assert type(self.answer_processor.processor) is GraphVQAAnswerProcessor
# Collapse into one set of confidences (i.e. copy the graph ones over
# if their confidence is greater)
# Again, this assumes the graph answers are a subset of all answers
scores = torch.Tensor(report.scores.shape).copy_(report.scores)
for batch_ind in range(report.scores.size(0)):
for graph_ind, graph_ans in enumerate(
self.answer_processor.graph_vocab
):
# Get graph conf
graph_conf = scores[batch_ind, reg_vocab_sz + graph_ind].item()
# Get non-graph conf
reg_idx = self.answer_processor.answer_vocab.word2idx(graph_ans)
assert (
reg_idx != self.answer_processor.answer_vocab.UNK_INDEX
and reg_idx < reg_vocab_sz
)
reg_conf = scores[batch_ind, reg_idx].item()
# Set to max, zero out graph ind
scores[batch_ind, reg_idx] = max(graph_conf, reg_conf)
scores[batch_ind, reg_vocab_sz + graph_ind] = -float("Inf")
else:
scores = report.scores
# Get top 5 answers and scores
topkscores, topkinds = torch.topk(scores, 5, dim=1)
answers = scores.argmax(dim=1)
predictions = []
answer_space_size = self.answer_processor.get_true_vocab_size()
for idx, question_id in enumerate(report.id):
# Dictionary to append for prediction
pred_dict = {}
pred_dict["question_id"] = question_id.item()
# Get top-k answers
assert (
len(topkscores[idx]) == len(topkinds[idx]) and len(topkscores[idx]) == 5
)
topk_ans_scores = []
for score, aid in zip(topkscores[idx], topkinds[idx]):
score = score.item()
kaid = aid.item()
if kaid >= answer_space_size:
kaid -= answer_space_size
kanswer = report.context_tokens[idx][kaid]
if kanswer == self.context_processor.PAD_TOKEN:
kanswer = "unanswerable"
else:
kanswer = self.answer_processor.idx2word(kaid)
kanswer = kanswer.replace(" 's", "'s")
topk_ans_scores.append((kanswer, score))
pred_dict["topk"] = topk_ans_scores
# Now get regular answer
answer_id = answers[idx].item()
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = report.context_tokens[idx][answer_id]
if answer == self.context_processor.PAD_TOKEN:
answer = "unanswerable"
else:
answer = self.answer_processor.idx2word(answer_id)
answer = answer.replace(" 's", "'s")
pred_dict["answer"] = answer
predictions.append(pred_dict)
# Dump the info
info = {}
info["scores"] = report.scores[idx].cpu()
return predictions
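# A minimal sketch (hypothetical sizes) of the score-collapse step above: with
# a regular vocab of 3 answers plus a 1-answer graph vocab appended at the end,
# and "dog" living at regular index 1 and graph index 3,
#   scores = torch.tensor([[0.1, 0.4, 0.2, 0.9]])
#   scores[0, 1] = max(scores[0, 1].item(), scores[0, 3].item())  # -> 0.9
#   scores[0, 3] = -float("inf")                                  # disable graph slot
# so the graph confidence is folded into the regular slot before the top-k/argmax.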
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/okvqa/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/nlvr2/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from mmf.common.registry import registry
from mmf.datasets.builders.nlvr2.dataset import NLVR2Dataset
from mmf.datasets.builders.vqa2.builder import VQA2Builder
@registry.register_builder("nlvr2")
class NLVR2Builder(VQA2Builder):
def __init__(self):
super().__init__()
self.dataset_name = "nlvr2"
self.dataset_class = NLVR2Dataset
@classmethod
def config_path(cls):
return "configs/datasets/nlvr2/defaults.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/nlvr2/builder.py |
import copy
import json
import torch
from mmf.common.sample import Sample
from mmf.datasets.builders.vqa2 import VQA2Dataset
class NLVR2Dataset(VQA2Dataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__(
config, dataset_type, imdb_file_index, dataset_name="nlvr2", *args, **kwargs
)
def load_item(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
processed_sentence = self.text_processor({"text": sample_info["sentence"]})
current_sample.text = processed_sentence["text"]
if "input_ids" in processed_sentence:
current_sample.update(processed_sentence)
if self._use_features is True:
# Remove sentence id from end
identifier = "-".join(sample_info["identifier"].split("-")[:-1])
# Load img0 and img1 features
sample_info["feature_path"] = "{}-img0.npy".format(identifier)
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.img0 = Sample()
current_sample.img0.update(features)
sample_info["feature_path"] = "{}-img1.npy".format(identifier)
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.img1 = Sample()
current_sample.img1.update(features)
is_correct = 1 if sample_info["label"] == "True" else 0
current_sample.targets = torch.tensor(is_correct, dtype=torch.long)
return current_sample
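# A minimal sketch (hypothetical identifier) of the image pairing above: for an
# identifier like "dev-850-0-1", the trailing sentence id is dropped to get
# "dev-850-0", and features are then read from "dev-850-0-img0.npy" and
# "dev-850-0-img1.npy" into current_sample.img0 and current_sample.img1.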
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/nlvr2/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.gqa.builder import GQABuilder
from mmf.datasets.builders.gqa.masked_dataset import MaskedGQADataset
@registry.register_builder("masked_gqa")
class MaskedGQABuilder(GQABuilder):
def __init__(self):
super().__init__()
self.dataset_name = "masked_gqa"
self.dataset_class = MaskedGQADataset
@classmethod
def config_path(cls):
return "configs/datasets/gqa/masked.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/gqa/masked_builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset
class MaskedGQADataset(MMFDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__(
"masked_gqa", config, dataset_type, imdb_file_index, *args, **kwargs
)
self._add_answer = config.get("add_answer", True)
def __getitem__(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
if self._use_features is True:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
if self.config.get("use_image_feature_masks", False):
current_sample.update(
{
"image_labels": self.masked_region_processor(
features["image_feature_0"]
)
}
)
current_sample.update(features)
current_sample = self._add_masked_question(sample_info, current_sample)
return current_sample
def _add_masked_question(self, sample_info, current_sample):
question = sample_info["question_str"]
random_answer = random.choice(sample_info["all_answers"])
processed = self.masked_token_processor(
{"text_a": question, "text_b": random_answer, "is_correct": -1}
)
processed.pop("tokens")
current_sample.update(processed)
return current_sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/gqa/masked_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
__all__ = ["GQABuilder", "GQADataset", "MaskedGQABuilder", "MaskedGQADataset"]
from .builder import GQABuilder
from .dataset import GQADataset
from .masked_builder import MaskedGQABuilder
from .masked_dataset import MaskedGQADataset
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/gqa/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.gqa.dataset import GQADataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
@registry.register_builder("gqa")
class GQABuilder(MMFDatasetBuilder):
def __init__(self, dataset_name="gqa", dataset_class=GQADataset, *args, **kwargs):
super().__init__(dataset_name, dataset_class)
self.dataset_class = GQADataset
@classmethod
def config_path(cls):
return "configs/datasets/gqa/defaults.yaml"
# TODO: Deprecate this method and move configuration updates directly to processors
def update_registry_for_model(self, config):
if hasattr(self.dataset, "text_processor"):
registry.register(
self.dataset_name + "_text_vocab_size",
self.dataset.text_processor.get_vocab_size(),
)
if hasattr(self.dataset, "answer_processor"):
registry.register(
self.dataset_name + "_num_final_outputs",
self.dataset.answer_processor.get_vocab_size(),
)
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/gqa/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset
class GQADataset(MMFDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__("gqa", config, dataset_type, imdb_file_index, *args, **kwargs)
def __getitem__(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
text_processor_argument = {"text": sample_info["question_str"]}
processed_question = self.text_processor(text_processor_argument)
current_sample.text = processed_question["text"]
if "input_ids" in processed_question:
current_sample.update(processed_question)
current_sample.question_id = torch.tensor(
sample_info["question_id"], dtype=torch.int
)
if isinstance(sample_info["image_id"], int):
current_sample.image_id = torch.tensor(
sample_info["image_id"], dtype=torch.int
)
else:
current_sample.image_id = sample_info["image_id"]
if self._use_features is True:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.update(features)
# Depending on whether we are using soft copy this can add
# dynamic answer space
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
def add_answer_info(self, sample_info, sample):
if "answers" in sample_info:
answers = sample_info["answers"]
answer_processor_arg = {"answers": answers}
processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
sample.targets = processed_soft_copy_answers["answers_scores"]
return sample
def format_for_prediction(self, report):
answers = report.scores.argmax(dim=1)
predictions = []
answer_space_size = self.answer_processor.get_true_vocab_size()
for idx, question_id in enumerate(report.question_id):
answer_id = answers[idx].item()
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = report.context_tokens[idx][answer_id]
if answer == self.context_processor.PAD_TOKEN:
answer = "unanswerable"
else:
answer = self.answer_processor.idx2word(answer_id)
predictions.append({"questionId": question_id.item(), "prediction": answer})
return predictions
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/gqa/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .builder import VizWizBuilder
from .dataset import VizWizDataset
__all__ = ["VizWizBuilder", "VizWizDataset"]
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vizwiz/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.vizwiz.dataset import VizWizDataset
from mmf.datasets.builders.vqa2 import VQA2Builder
@registry.register_builder("vizwiz")
class VizWizBuilder(VQA2Builder):
def __init__(self):
super().__init__()
self.dataset_name = "vizwiz"
self.set_dataset_class(VizWizDataset)
@classmethod
def config_path(cls):
return "configs/datasets/vizwiz/defaults.yaml"
def update_registry_for_model(self, config):
super().update_registry_for_model(config)
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vizwiz/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.sample import Sample
from mmf.datasets.builders.vqa2 import VQA2Dataset
class VizWizDataset(VQA2Dataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__(
config,
dataset_type,
imdb_file_index,
dataset_name="vizwiz",
*args,
**kwargs,
)
def load_item(self, idx):
sample = super().load_item(idx)
# sample_info = self.annotation_db[idx]
# if "image_name" in sample_info:
# sample.image_id = sample_info["image_name"]
return sample
def format_for_prediction(self, report):
answers = report.scores.argmax(dim=1)
predictions = []
answer_space_size = self.answer_processor.get_true_vocab_size()
for idx, image_id in enumerate(report.image_id):
answer_id = answers[idx].item()
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = report.context_tokens[idx][answer_id]
else:
answer = self.answer_processor.idx2word(answer_id)
# if answer == self.context_processor.PAD_TOKEN:
# answer = "unanswerable"
if answer == "<unk>" or answer == "<pad>":
answer = "unanswerable"
predictions.append(
{
# "image": "_".join(["VizWiz"] + image_id.split("_")[2:]) + ".jpg",
"image": "VizWiz_"
+ self._dataset_type
+ "_"
+ str(image_id.item()).zfill(12)
+ ".jpg",
"answer": answer,
}
)
return predictions
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vizwiz/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.utils.env import import_files
import_files(__file__, "mmf.datasets.builders.airstore")
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/airstore/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.airstore.dataset import AirstoreDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
@registry.register_builder("airstore")
class AirstoreDatasetBuilder(MMFDatasetBuilder):
def __init__(
self, dataset_name="airstore", dataset_class=AirstoreDataset, *args, **kwargs
):
super().__init__(dataset_name)
self.dataset_class = AirstoreDataset
@classmethod
def config_path(cls):
return "configs/datasets/airstore/defaults.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/airstore/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
from io import BytesIO
from typing import Any, Iterable
import torch
from iopath.common.file_io import PathManager
from mmf.common.sample import Sample
from mmf.datasets.base_dataset import BaseDataset
from mmf.utils.general import get_batch_size
from PIL import Image, ImageFile
logger = logging.getLogger(__name__)
def create_path_manager() -> PathManager:
# TODO: move this inline import out once AIRStore OSS is publicly released
from airstore.client.airstore_tabular import AIRStorePathHandler
pathmanager = PathManager()
pathmanager.register_handler(AIRStorePathHandler())
pathmanager.set_strict_kwargs_checking(False)
return pathmanager
class AirstoreDataset(BaseDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__("airstore", config, dataset_type)
self.pathmanager = create_path_manager()
self.config = config
self.batch_size = get_batch_size()
self.airstore_uri = config.annotations.get(dataset_type)[imdb_file_index]
self.split = dataset_type
self.epoch = 0
self.start_iter = 0
self.global_rank = torch.distributed.get_rank()
self.global_world_size = torch.distributed.get_world_size()
self._iterator = None
def set_epoch(self, epoch: int):
# TODO : Currently sets the same seed every epoch, set this from MultiDataLoader
logger.info(f"set epoch to {epoch} in airstore dataset")
self.epoch = epoch
def _open_iterator(self) -> Iterable[Any]:
# Iterator from AIRStore for the current data split. Data are sharded across
# the global total number of workers after shuffling.
# Extract the number of dataloading workers and the current worker id (ranging
# from 0 to num_workers - 1) from torch.utils.data. If we can't get worker_info,
# we assume the current process is the only dataloading worker.
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
num_workers = 1
worker_id = 0
else:
num_workers = worker_info.num_workers
worker_id = worker_info.id
# split the dataset for each worker
airstore_world_size = self.global_world_size * num_workers
# each worker takes its split based on its parent process rank and worker id
airstore_rank = self.global_rank * num_workers + worker_id
shuffle = self.split == "train" and self.config.get("enable_shuffle", True)
return self.pathmanager.opent(
self.airstore_uri,
"r",
enable_shuffle=shuffle,
shuffle_window=self.config.get("shuffle_window", 128),
seed=self.epoch,
world_size=airstore_world_size,
rank=airstore_rank,
limit=self.config.get("data_limit", -1),
offset=self.config.get("data_offset", 0),
num_of_threads=self.config.get("num_of_threads", 2),
prefetch=self.config.get("prefetch", 1),
max_holding_bundles=self.config.get("max_holding_bundles", 5),
bundle_download_timeout_ms=self.config.get(
"bundle_download_timeout_ms", 30000
),
max_retries=self.config.get("max_retries", 5),
env=self.config.get(
"env", "OSS"
), # Set to "FB" if run in FB, "RSC" for RSC, otherwise set to "OSS"
)
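# A minimal worked example (hypothetical setup) of the sharding above: with
# 2 training processes (global_world_size=2) and 3 dataloader workers each,
# airstore_world_size = 2 * 3 = 6 and
#   process 0, workers 0/1/2 -> airstore_rank 0/1/2
#   process 1, workers 0/1/2 -> airstore_rank 3/4/5
# so every (process, worker) pair reads a disjoint shard of the stream.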
def __len__(self) -> int:
return self._open_iterator().total_size
def __getitem__(self, idx):
if self._iterator is None:
self._iterator = self._open_iterator()
sample_info = next(self._iterator)
current_sample = Sample()
processed_text = self.text_processor({"text": sample_info["caption"]})
current_sample.text = processed_text["text"]
if "input_ids" in processed_text:
current_sample.update(processed_text)
ImageFile.LOAD_TRUNCATED_IMAGES = True
with Image.open(BytesIO(sample_info["image"]), mode="r") as pil_img:
image = pil_img.convert("RGB")
current_sample.image = self.image_processor(image)
return current_sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/airstore/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from mmf.common.registry import registry
from mmf.datasets.builders.vqa2.builder import VQA2Builder
from mmf.datasets.builders.vqa2.masked_dataset import MaskedVQA2Dataset
@registry.register_builder("masked_vqa2")
class MaskedVQA2Builder(VQA2Builder):
def __init__(self):
super().__init__()
self.dataset_name = "masked_vqa2"
self.dataset_class = MaskedVQA2Dataset
@classmethod
def config_path(cls):
return "configs/datasets/vqa2/masked.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/masked_builder.py |
import random
from mmf.common.sample import Sample
from mmf.datasets.builders.vqa2.dataset import VQA2Dataset
class MaskedVQA2Dataset(VQA2Dataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__(
config,
dataset_type,
imdb_file_index,
dataset_name="masked_vqa2",
*args,
**kwargs,
)
self._add_answer = config.get("add_answer", False)
def __getitem__(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
if self._use_features:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
if self.config.get("use_image_feature_masks", False):
current_sample.update(
{
"image_labels": self.masked_region_processor(
features["image_feature_0"]
)
}
)
current_sample.update(features)
else:
image_path = str(sample_info["image_name"]) + ".jpg"
current_sample.image = self.image_db.from_path(image_path)["images"][0]
current_sample = self._add_masked_question(sample_info, current_sample)
if self._add_answer:
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
def _add_masked_question(self, sample_info, current_sample):
question = sample_info["question_str"]
random_answer = random.choice(sample_info["all_answers"])
processed = self.masked_token_processor(
{"text_a": question, "text_b": random_answer, "is_correct": -1}
)
processed.pop("tokens")
current_sample.update(processed)
return current_sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/masked_dataset.py |
import random
from mmf.datasets.builders.vqa2.dataset import VQA2Dataset
class MaskedQVQA2Dataset(VQA2Dataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_name = "masked_q_vqa2"
def add_answer_info(self, sample_info, current_sample):
length = min(len(current_sample.text), current_sample.text_len)
index = random.randint(0, length - 1)
word = self.text_processor.vocab.get_itos()[current_sample.text[index].item()]
current_sample.text[index] = self.text_processor.vocab.get_stoi()["<mask>"]
answer_processor_arg = {"answers": [word]}
processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
current_sample.answers = processed_soft_copy_answers["answers"]
current_sample.targets = processed_soft_copy_answers["answers_scores"]
if self.answer_processor.word2idx(word) == self.answer_processor.word2idx(
"<unk>"
):
current_sample.targets.zero_()
return current_sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/masked_q_vqa2_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import warnings
from mmf.common.registry import registry
from mmf.datasets.builders.vqa2.builder import VQA2Builder
from mmf.datasets.builders.vqa2.masked_q_vqa2_dataset import MaskedQVQA2Dataset
from mmf.datasets.concat_dataset import MMFConcatDataset
@registry.register_builder("masked_q_vqa2")
class MaskedQVQA2Builder(VQA2Builder):
def __init__(self):
super().__init__()
self.dataset_name = "masked_q_vqa2"
self.dataset_class = MaskedQVQA2Dataset
@classmethod
def config_path(cls):
return "configs/datasets/vqa2/masked_q.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/masked_q_vqa2_builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import Registry
from mmf.datasets.builders.vizwiz import VizWizBuilder
from mmf.datasets.builders.vqa2.ocr_dataset import VQA2OCRDataset
@Registry.register_builder("vqa2_ocr")
class TextVQABuilder(VizWizBuilder):
def __init__(self):
super().__init__()
self.dataset_name = "VQA2_OCR"
self.set_dataset_class(VQA2OCRDataset)
@classmethod
def config_path(self):
return None
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/ocr_builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.builders.vizwiz import VizWizDataset
from mmf.utils.text import word_tokenize
class VQA2OCRDataset(VizWizDataset):
def __init__(self, imdb_file, image_feat_directories, verbose=False, **data_params):
super(VQA2OCRDataset, self).__init__(
imdb_file, image_feat_directories, verbose, **data_params
)
self.name = "vqa2_ocr"
def format_for_prediction(self, batch, answers):
answers = answers.argmax(dim=1)
predictions = []
for idx, question_id in enumerate(batch["question_id"]):
answer_id = answers[idx]
if answer_id >= self.answer_space_size:
answer_id -= self.answer_space_size
answer = word_tokenize(batch["ocr_tokens"][answer_id][idx])
else:
answer = self.answer_dict.idx2word(answer_id)
predictions.append({"question_id": question_id.item(), "answer": answer})
return predictions
def __getitem__(self, idx):
sample = super(VQA2OCRDataset, self).__getitem__(idx)
if sample["question_id"] is None:
sample["question_id"] = -1
return sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/ocr_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
__all__ = ["VQA2Builder", "VQA2Dataset"]
from .builder import VQA2Builder
from .dataset import VQA2Dataset
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from mmf.common.registry import registry
from mmf.datasets.builders.vqa2.dataset import VQA2Dataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
@registry.register_builder("vqa2")
class VQA2Builder(MMFDatasetBuilder):
def __init__(self, dataset_name="vqa2", dataset_class=VQA2Dataset, *args, **kwargs):
super().__init__(dataset_name, dataset_class)
self.dataset_class = VQA2Dataset
@classmethod
def config_path(cls):
return "configs/datasets/vqa2/defaults.yaml"
def load(self, *args, **kwargs):
dataset = super().load(*args, **kwargs)
if dataset is not None and hasattr(dataset, "try_fast_read"):
dataset.try_fast_read()
return dataset
# TODO: Deprecate this method and move configuration updates directly to processors
def update_registry_for_model(self, config):
if hasattr(self.dataset, "text_processor"):
registry.register(
self.dataset_name + "_text_vocab_size",
self.dataset.text_processor.get_vocab_size(),
)
if hasattr(self.dataset, "answer_processor"):
registry.register(
self.dataset_name + "_num_final_outputs",
self.dataset.answer_processor.get_vocab_size(),
)
@registry.register_builder("vqa2_train_val")
class VQA2TrainValBuilder(VQA2Builder):
def __init__(self, dataset_name="vqa2_train_val"):
super().__init__(dataset_name)
@classmethod
def config_path(self):
return "configs/datasets/vqa2/train_val.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import torch
import tqdm
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset
from mmf.utils.distributed import is_main
logger = logging.getLogger(__name__)
class VQA2Dataset(MMFDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
if "name" in kwargs:
name = kwargs["name"]
elif "dataset_name" in kwargs:
name = kwargs["dataset_name"]
else:
name = "vqa2"
super().__init__(name, config, dataset_type, index=imdb_file_index)
self._should_fast_read = self.config.get("fast_read", False)
self.use_ocr = self.config.use_ocr
self.use_ocr_info = self.config.use_ocr_info
def init_processors(self):
super().init_processors()
if not self._use_features:
self.image_db.transform = self.image_processor
def try_fast_read(self):
# Don't fast read in case of test set.
if self._dataset_type == "test":
return
if hasattr(self, "_should_fast_read") and self._should_fast_read is True:
logger.info(
f"Starting to fast read {self.dataset_name} {self.dataset_type} "
+ "dataset"
)
self.cache = {}
for idx in tqdm.tqdm(
range(len(self.annotation_db)), miniters=100, disable=not is_main()
):
self.cache[idx] = self.load_item(idx)
def __getitem__(self, idx):
if self._should_fast_read is True and self._dataset_type != "test":
return self.cache[idx]
else:
return self.load_item(idx)
def load_item(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
if "question_tokens" in sample_info:
text_processor_argument = {
"tokens": sample_info["question_tokens"],
"text": sample_info["question_str"],
}
else:
text_processor_argument = {"text": sample_info["question"]}
processed_question = self.text_processor(text_processor_argument)
current_sample.text = processed_question["text"]
if "input_ids" in processed_question:
current_sample.update(processed_question)
current_sample.question_id = torch.tensor(
sample_info["question_id"], dtype=torch.int
)
if isinstance(sample_info["image_id"], int):
current_sample.image_id = torch.tensor(
sample_info["image_id"], dtype=torch.int
)
else:
current_sample.image_id = sample_info["image_id"]
if "question_tokens" in sample_info:
current_sample.text_len = torch.tensor(
len(sample_info["question_tokens"]), dtype=torch.int
)
if self._use_features:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.update(features)
else:
image_path = sample_info["image_name"] + ".jpg"
current_sample.image = self.image_db.from_path(image_path)["images"][0]
# Add details for OCR like OCR bbox, vectors, tokens here
current_sample = self.add_ocr_details(sample_info, current_sample)
# Depending on whether we are using soft copy this can add
# dynamic answer space
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
def add_ocr_details(self, sample_info, sample):
if self.use_ocr:
# Preprocess OCR tokens
ocr_tokens = [
self.ocr_token_processor({"text": token})["text"]
for token in sample_info["ocr_tokens"]
]
# Get embeddings for tokens
context = self.context_processor({"tokens": ocr_tokens})
sample.context = context["text"]
sample.context_tokens = context["tokens"]
sample.context_feature_0 = context["text"]
sample.context_info_0 = Sample()
sample.context_info_0.max_features = context["length"]
order_vectors = torch.eye(len(sample.context_tokens))
order_vectors[context["length"] :] = 0
sample.order_vectors = order_vectors
if self.use_ocr_info and "ocr_info" in sample_info:
sample.ocr_bbox = self.bbox_processor({"info": sample_info["ocr_info"]})[
"bbox"
]
return sample
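# A minimal sketch (hypothetical sizes) of the order vectors above: with room
# for 4 OCR tokens but only 2 actually present (context["length"] == 2),
# order_vectors is a 4x4 identity matrix with rows 2 and 3 zeroed out, marking
# which OCR slots hold real tokens.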
def add_answer_info(self, sample_info, sample):
if "answers" in sample_info:
answers = sample_info["answers"]
answer_processor_arg = {"answers": answers}
if self.use_ocr:
answer_processor_arg["tokens"] = sample_info["ocr_tokens"]
processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
# sample.answers = processed_soft_copy_answers["answers"]
sample.targets = processed_soft_copy_answers["answers_scores"]
return sample
def idx_to_answer(self, idx):
return self.answer_processor.convert_idx_to_answer(idx)
def format_for_prediction(self, report):
answers = report.scores.argmax(dim=1)
predictions = []
answer_space_size = self.answer_processor.get_true_vocab_size()
for idx, question_id in enumerate(report.question_id):
answer_id = answers[idx].item()
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = report.context_tokens[idx][answer_id]
if answer == self.context_processor.PAD_TOKEN:
answer = "unanswerable"
else:
answer = self.answer_processor.idx2word(answer_id)
# actual_answer = report.answers[idx]
predictions.append(
{
"question_id": question_id.item(),
"answer": answer,
# "actual_answers": actual_answer,
# "question_tokens": report.question_tokens[idx],
# "image_id": report.image_id[idx].item()
}
)
return predictions
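# A minimal sketch (hypothetical sizes) of the soft-copy decoding above: with a
# fixed answer vocab of size 3000, a predicted index of 3005 falls outside the
# vocab, so it is mapped to OCR slot 3005 - 3000 = 5 and the answer is copied
# from report.context_tokens[idx][5]; indices below 3000 are looked up directly
# in the answer vocab via idx2word.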
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqa2/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.flickr30k.masked_dataset import MaskedFlickr30kDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
@registry.register_builder("masked_flickr30k")
class MaskedFlickr30kBuilder(MMFDatasetBuilder):
def __init__(
self,
dataset_name="masked_flickr30k",
dataset_class=MaskedFlickr30kDataset,
*args,
**kwargs,
):
super().__init__(dataset_name, dataset_class, *args, **kwargs)
@classmethod
def config_path(cls):
return "configs/datasets/flickr30k/masked.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/flickr30k/masked_builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.typings import MMFDatasetConfigType
from mmf.datasets.builders.localized_narratives.masked_dataset import (
MaskedLocalizedNarrativesDatasetMixin,
)
from mmf.datasets.mmf_dataset import MMFDataset
class MaskedFlickr30kDataset(MaskedLocalizedNarrativesDatasetMixin, MMFDataset):
def __init__(
self,
config: MMFDatasetConfigType,
dataset_type: str,
index: int,
*args,
**kwargs,
):
super().__init__(
"masked_flickr30k", config, dataset_type, index, *args, **kwargs
)
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/flickr30k/masked_dataset.py |
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/flickr30k/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
from mmf.datasets.builders.okvqa.database import OKVQAAnnotationDatabase
from mmf.utils.file_io import PathManager
class VQACPv2AnnotationDatabase(OKVQAAnnotationDatabase):
def __init__(self, config, path, *args, **kwargs):
super().__init__(config, path, *args, **kwargs)
def load_annotation_db(self, path):
# Expect two paths, one to questions and one to annotations
assert (
len(path) == 2
), "VQACPv2 requires 2 paths; one to questions and one to annotations"
with PathManager.open(path[0]) as f:
path_0 = json.load(f)
with PathManager.open(path[1]) as f:
path_1 = json.load(f)
if "annotations" in path[0]:
annotations = path_0
questions = path_1
else:
annotations = path_1
questions = path_0
# Convert to linear format
data = []
question_dict = {}
for question in questions:
question_dict[question["question_id"]] = question["question"]
for annotation in annotations:
annotation["question"] = question_dict[annotation["question_id"]]
answers = []
for answer in annotation["answers"]:
answers.append(answer["answer"])
annotation["answers"] = answers
data.append(copy.deepcopy(annotation))
self.data = data
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqacp_v2/database.py |
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqacp_v2/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.vqacp_v2.dataset import VQACPv2Dataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
@registry.register_builder("vqacp_v2")
class VQACPv2Builder(MMFDatasetBuilder):
def __init__(
self, dataset_name="vqacp_v2", dataset_class=VQACPv2Dataset, *args, **kwargs
):
super().__init__(dataset_name, dataset_class, *args, **kwargs)
@classmethod
def config_path(cls):
return "configs/datasets/vqacp_v2/defaults.yaml"
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqacp_v2/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Type, Union
import torch
from mmf.common.sample import Sample
from mmf.common.typings import MMFDatasetConfigType
from mmf.datasets.builders.okvqa.dataset import OKVQADataset
from mmf.datasets.builders.vqacp_v2.database import VQACPv2AnnotationDatabase
class VQACPv2Dataset(OKVQADataset):
def __init__(
self,
config: MMFDatasetConfigType,
dataset_type: str,
index: int,
*args,
**kwargs,
):
super().__init__(config, dataset_type, index, *args, **kwargs)
def build_annotation_db(self) -> Type[VQACPv2AnnotationDatabase]:
annotation_path = self._get_path_based_on_index(
self.config, "annotations", self._index
)
return VQACPv2AnnotationDatabase(self.config, annotation_path)
def get_image_path(self, image_id: Union[str, int], coco_split: str) -> str:
base_paths = self._get_path_based_on_index(self.config, "images", self._index)
base_paths = base_paths.split(",")
if "train" in base_paths[0]:
train_path = base_paths[0]
val_path = base_paths[1]
else:
train_path = base_paths[1]
val_path = base_paths[0]
# coco_split indicates whether the image is from the train or val split of COCO
if "train" in coco_split:
image_path = f"COCO_train2014_{str(image_id).zfill(12)}.jpg"
image_path = os.path.join(train_path, image_path)
else:
image_path = f"COCO_val2014_{str(image_id).zfill(12)}.jpg"
image_path = os.path.join(val_path, image_path)
return image_path
def __getitem__(self, idx: int) -> Type[Sample]:
sample_info = self.annotation_db[idx]
current_sample = Sample()
processed_question = self.text_processor({"text": sample_info["question"]})
current_sample.update(processed_question)
current_sample.id = torch.tensor(
int(sample_info["question_id"]), dtype=torch.int
)
image_path = self.get_image_path(
sample_info["image_id"], sample_info["coco_split"]
)
current_sample.image = self.image_db.from_path(image_path)["images"][0]
if "answers" in sample_info:
answers = self.answer_processor({"answers": sample_info["answers"]})
current_sample.targets = answers["answers_scores"]
return current_sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/vqacp_v2/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/textcaps/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import Registry
from mmf.datasets.builders.coco.dataset import COCODataset
from mmf.datasets.builders.textcaps.dataset import TextCapsDataset
from mmf.datasets.builders.textvqa.builder import TextVQABuilder
@Registry.register_builder("textcaps")
class TextCapsBuilder(TextVQABuilder):
def __init__(
self, dataset_name="textcaps", dataset_class=TextCapsDataset, *args, **kwargs
):
super().__init__(dataset_name, dataset_class, *args, **kwargs)
@classmethod
def config_path(cls):
return "configs/datasets/textcaps/defaults.yaml"
def load(self, config, *args, **kwargs):
annotation_style = config.get("annotation_style", self.dataset_name)
if annotation_style == "coco":
self.dataset_class = COCODataset
dataset = super().load(config, *args, **kwargs)
dataset.dataset_name = self.dataset_name
return dataset
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/textcaps/builder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.builders.textvqa.dataset import TextVQADataset
from mmf.utils.distributed import object_to_byte_tensor
class TextCapsDataset(TextVQADataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
self.dataset_name = "textcaps"
def preprocess_sample_info(self, sample_info):
sample_info = super().preprocess_sample_info(sample_info)
# add dummy questions to train with M4C (for TextVQA)
sample_info["question_str"] = "" # empty question
sample_info["question_id"] = sample_info["caption_id"]
return sample_info
def postprocess_evalai_entry(self, entry):
new_entry = {
"caption_id": entry["question_id"],
"image_id": entry["image_id"],
"caption": entry["answer"],
"pred_source": entry["pred_source"],
}
return new_entry
def add_answer_info(self, sample_info, sample):
sample_has_caption = "caption_str" in sample_info
if sample_has_caption:
sample_info["answers"] = [sample_info["caption_str"]]
sample = super().add_answer_info(sample_info, sample)
if sample_has_caption:
sample.caption_str = object_to_byte_tensor(sample_info["caption_str"])
sample.ref_strs = object_to_byte_tensor(sample_info["reference_strs"])
sample.pop("answers")
return sample
| EXA-1-master | exa/models/mmf-main/mmf/datasets/builders/textcaps/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Any, Dict, List, Type
from mmf.common.registry import registry
from mmf.common.report import Report
from mmf.datasets.processors.processors import BatchProcessor, BatchProcessorConfigType
@dataclass
class ArgMaxPredictionProcessorConfig(BatchProcessorConfigType):
# Key that will be used for id in report
id_key: str = "id"
# Key that will be used for result in report
result_key: str = "answer"
@registry.register_processor("prediction.argmax")
class ArgMaxPredictionProcessor(BatchProcessor):
"""This prediction processor returns the index with maximum score for each
id as the answer. Expects report to have scores and id keys.
"""
def __init__(self, config: ArgMaxPredictionProcessorConfig, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._id_key = config.get("id_key", "id")
self._result_key = config.get("result_key", "answer")
def __call__(self, report: Type[Report], *args, **kwargs) -> List[Dict[str, Any]]:
answers = report.scores.argmax(dim=1)
predictions = []
for idx, item_id in enumerate(report.id):
answer = answers[idx]
predictions.append(
{self._id_key: item_id.item(), self._result_key: answer.item()}
)
return predictions
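# A minimal worked example (hypothetical report) of the processor above: for
#   report.scores = torch.tensor([[0.1, 0.7, 0.2]])
#   report.id = torch.tensor([42])
# the default keys produce [{"id": 42, "answer": 1}].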
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/prediction_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import importlib
import logging
import random
import mmf.datasets.processors.functional as F
import torch
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
from omegaconf import OmegaConf
from torchvision import transforms as img_transforms
logger = logging.getLogger()
@registry.register_processor("video_random_crop")
class VideoRandomCrop(BaseProcessor):
def __init__(self, *args, size=None, **kwargs):
super().__init__()
if size is None:
raise TypeError("Parameter 'size' is required")
self.size = size
@staticmethod
def get_params(vid, output_size):
"""Get parameters for ``crop`` for a random crop."""
h, w = vid.shape[-2:]
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, vid):
i, j, h, w = self.get_params(vid, self.size)
return F.video_crop(vid, i, j, h, w)
@registry.register_processor("video_center_crop")
class VideoCenterCrop(BaseProcessor):
def __init__(self, *args, size=None, **kwargs):
super().__init__()
if size is None:
raise TypeError("Parameter 'size' is required")
self.size = size
def __call__(self, vid):
return F.video_center_crop(vid, self.size)
@registry.register_processor("video_resize")
class VideoResize(BaseProcessor):
def __init__(self, *args, size=None, **kwargs):
if size is None:
raise TypeError("Parameter 'size' is required")
self.size = size
def __call__(self, vid):
return F.video_resize(vid, self.size)
# This does the same thing as 'VideoToTensor'
@registry.register_processor("permute_and_rescale")
class PermuteAndRescale(BaseProcessor):
def __init__(self, *args, **kwargs):
super().__init__()
from pytorchvideo import transforms as ptv_transforms
self.transform = img_transforms.Compose(
[
ptv_transforms.Permute((3, 0, 1, 2)),
ptv_transforms.Div255(),
]
)
def __call__(self, vid):
return self.transform(vid)
@registry.register_processor("video_to_tensor")
class VideoToTensor(BaseProcessor):
def __init__(self, *args, **kwargs):
super().__init__()
pass
def __call__(self, vid):
return F.video_to_normalized_float_tensor(vid)
@registry.register_processor("video_normalize")
class VideoNormalize(BaseProcessor):
def __init__(self, mean=None, std=None, **kwargs):
super().__init__()
if mean is None and std is None:
raise TypeError("'mean' and 'std' params are required")
self.mean = mean
self.std = std
def __call__(self, vid):
return F.video_normalize(vid, self.mean, self.std)
@registry.register_processor("video_random_horizontal_flip")
class VideoRandomHorizontalFlip(BaseProcessor):
def __init__(self, p=0.5, **kwargs):
super().__init__()
self.p = p
def __call__(self, vid):
if random.random() < self.p:
return F.video_hflip(vid)
return vid
@registry.register_processor("video_pad")
class Pad(BaseProcessor):
def __init__(self, padding=None, fill=0, **kwargs):
super().__init__()
if padding is None:
raise TypeError("Parameter 'padding' is required")
self.padding = padding
self.fill = fill
def __call__(self, vid):
return F.video_pad(vid, self.padding, fill=self.fill)
@registry.register_processor("truncate_or_pad")
class TruncateOrPad(BaseProcessor):
# truncate or zero-pad to the desired output size
def __init__(self, output_size=None, **kwargs):
super().__init__()
if output_size is None:
raise TypeError("Parameter 'output_size' is required")
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
if sample.shape[1] >= self.output_size:
return sample[0, : self.output_size]
else:
return torch.cat(
(sample[0, :], torch.zeros(1, self.output_size - sample.shape[1])),
axis=1,
)
@registry.register_processor("video_transforms")
class VideoTransforms(BaseProcessor):
def __init__(self, config, *args, **kwargs):
transform_params = config.transforms
assert OmegaConf.is_dict(transform_params) or OmegaConf.is_list(
transform_params
)
if OmegaConf.is_dict(transform_params):
transform_params = [transform_params]
pytorchvideo_spec = importlib.util.find_spec("pytorchvideo")
assert (
pytorchvideo_spec is not None
), "Must have pytorchvideo installed to use VideoTransforms"
transforms_list = []
for param in transform_params:
if OmegaConf.is_dict(param):
# This will throw config error if missing
transform_type = param.type
transform_param = param.get("params", OmegaConf.create({}))
else:
assert isinstance(param, str), (
"Each transform should either be str or dict containing "
"type and params"
)
transform_type = param
transform_param = OmegaConf.create([])
transforms_list.append(
self.get_transform_object(transform_type, transform_param)
)
self.transform = img_transforms.Compose(transforms_list)
def get_transform_object(self, transform_type, transform_params):
from pytorchvideo import transforms as ptv_transforms
# Look for the transform in:
# 1) pytorchvideo.transforms
transform = getattr(ptv_transforms, transform_type, None)
if transform is None:
# 2) processor registry
transform = registry.get_processor_class(transform_type)
if transform is not None:
return self.instantiate_transform(transform, transform_params)
# 3) torchvision.transforms
img_transform = getattr(img_transforms, transform_type, None)
assert img_transform is not None, (
f"transform {transform_type} is not found in pytorchvideo "
"transforms, processor registry, or torchvision transforms"
)
# To use the image transform on a video, we need to permute the axes
# to (T,C,H,W) and back
return img_transforms.Compose(
[
ptv_transforms.Permute((1, 0, 2, 3)),
self.instantiate_transform(img_transform, transform_params),
ptv_transforms.Permute((1, 0, 2, 3)),
]
)
@staticmethod
def instantiate_transform(transform, params):
# https://github.com/omry/omegaconf/issues/248
transform_params = OmegaConf.to_container(params)
# If a dict, it will be passed as **kwargs, else a list is *args
if isinstance(transform_params, collections.abc.Mapping):
return transform(**transform_params)
return transform(*transform_params)
def __call__(self, x):
# Support both dict and normal mode
if isinstance(x, collections.abc.Mapping):
x = x["video"]
return {"video": self.transform(x)}
else:
return self.transform(x)
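    # Hypothetical config sketch for this processor (transform names below are
    # illustrative, not prescribed by MMF); lookup order is pytorchvideo ->
    # processor registry -> torchvision, as implemented in get_transform_object:
    #
    #   video_processor:
    #     type: video_transforms
    #     params:
    #       transforms:
    #         - permute_and_rescale
    #         - type: Resize
    #           params:
    #             size: [128, 171]
    #         - type: CenterCrop
    #           params:
    #             size: 112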
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/video_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.processors.bert_processors import MaskedTokenProcessor
from mmf.datasets.processors.frcnn_processor import FRCNNPreprocess
from mmf.datasets.processors.image_processors import TorchvisionTransforms
from mmf.datasets.processors.prediction_processors import ArgMaxPredictionProcessor
from mmf.datasets.processors.processors import (
BaseProcessor,
BBoxProcessor,
CaptionProcessor,
FastTextProcessor,
GloVeProcessor,
GraphVQAAnswerProcessor,
MultiHotAnswerFromVocabProcessor,
Processor,
SimpleSentenceProcessor,
SimpleWordProcessor,
SoftCopyAnswerProcessor,
VocabProcessor,
VQAAnswerProcessor,
)
__all__ = [
"BaseProcessor",
"Processor",
"VocabProcessor",
"GloVeProcessor",
"FastTextProcessor",
"VQAAnswerProcessor",
"GraphVQAAnswerProcessor",
"MultiHotAnswerFromVocabProcessor",
"SoftCopyAnswerProcessor",
"SimpleWordProcessor",
"SimpleSentenceProcessor",
"BBoxProcessor",
"CaptionProcessor",
"MaskedTokenProcessor",
"TorchvisionTransforms",
"FRCNNPreprocess",
"ArgMaxPredictionProcessor",
]
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Tuple, Union
import torch
# Functional file similar to torch.nn.functional
def video_crop(vid: torch.tensor, i: int, j: int, h: int, w: int) -> torch.Tensor:
return vid[..., i : (i + h), j : (j + w)]
def video_center_crop(vid: torch.Tensor, output_size: Tuple[int, int]) -> torch.Tensor:
h, w = vid.shape[-2:]
th, tw = output_size
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return video_crop(vid, i, j, th, tw)
def video_hflip(vid: torch.Tensor) -> torch.Tensor:
return vid.flip(dims=(-1,))
# NOTE: for those functions which generally expect mini-batches, we keep them
# as non-minibatch so that they are applied as if they were 4d (thus image).
# This way, we only apply the transformation in the spatial domain.
def video_resize(
vid: torch.Tensor,
size: Union[int, Tuple[int, int]],
interpolation: str = "bilinear",
) -> torch.Tensor:
# NOTE: using bilinear interpolation because we don't work on minibatches
# at this level
scale = None
if isinstance(size, int):
scale = float(size) / min(vid.shape[-2:])
size = None
return torch.nn.functional.interpolate(
vid, size=size, scale_factor=scale, mode=interpolation, align_corners=False
)
def video_pad(
vid: torch.Tensor,
padding: List[int],
fill: float = 0,
padding_mode: str = "constant",
) -> torch.Tensor:
    # NOTE: we don't want to pad on the temporal dimension, so leave as non-batch
    # (4d) before padding. This works as expected.
return torch.nn.functional.pad(vid, padding, value=fill, mode=padding_mode)
def video_to_normalized_float_tensor(vid: torch.Tensor) -> torch.Tensor:
return vid.permute(3, 0, 1, 2).to(torch.float32) / 255
def video_normalize(
vid: torch.Tensor, mean: List[float], std: List[float]
) -> torch.Tensor:
shape = (-1,) + (1,) * (vid.dim() - 1)
mean = torch.as_tensor(mean).reshape(shape)
std = torch.as_tensor(std).reshape(shape)
return (vid - mean) / std
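# Illustrative summary of the shape conventions used above (not from the source):
# raw clips arrive as (T, H, W, C) uint8 tensors; video_to_normalized_float_tensor
# permutes them to (C, T, H, W) floats in [0, 1]; the crop/resize/pad helpers then
# act only on the trailing (H, W) dimensions, e.g.:
#
#     vid = torch.randint(0, 256, (8, 120, 160, 3), dtype=torch.uint8)
#     vid = video_to_normalized_float_tensor(vid)   # -> (3, 8, 120, 160)
#     vid = video_resize(vid, (112, 112))           # -> (3, 8, 112, 112)
#     vid = video_normalize(vid, mean=[0.45] * 3, std=[0.225] * 3)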
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/functional.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from dataclasses import dataclass
from typing import List
import numpy as np
import omegaconf
import torch
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.datasets.processors.processors import BaseProcessor
from mmf.utils.download import get_image_from_url
from PIL import Image
class ResizeShortestEdge:
def __init__(self, short_edge_length: List[int], max_size: int = sys.maxsize):
"""
Args:
            short_edge_length (list[min, max]): range from which the target
                short-edge length is randomly sampled.
max_size (int): maximum allowed longest edge length.
"""
self.interp_method = "bilinear"
self.max_size = max_size
self.short_edge_length = short_edge_length
def __call__(self, imgs: List[torch.Tensor]):
img_augs = []
for img in imgs:
h, w = img.shape[:2]
# later: provide list and randomly choose index for resize
size = np.random.randint(
self.short_edge_length[0], self.short_edge_length[1] + 1
)
if size == 0:
return img
scale = size * 1.0 / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
if img.dtype == np.uint8:
pil_image = Image.fromarray(img)
pil_image = pil_image.resize((neww, newh), Image.BILINEAR)
img = np.asarray(pil_image)
else:
img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
img = F.interpolate(
img, (newh, neww), mode=self.interp_method, align_corners=False
).squeeze(0)
img_augs.append(img)
return img_augs
@registry.register_processor("frcnn_preprocess")
class FRCNNPreprocess(BaseProcessor):
@dataclass
class FRCNNPreprocessConfig:
model: omegaconf.DictConfig = omegaconf.MISSING
input: omegaconf.DictConfig = omegaconf.MISSING
size_divisibility: int = 0
pad_value: float = 0
def __init__(self, config: FRCNNPreprocessConfig, *args, **kwargs):
config_input = config.get("input", None)
assert config_input is not None
min_size_test = config_input.get("min_size_test", 800)
max_size_test = config_input.get("max_size_test", 1333)
self.aug = ResizeShortestEdge([min_size_test, min_size_test], max_size_test)
self.input_format = config_input.get("format", "BGR")
self.size_divisibility = config.get("size_divisibility", 0)
self.pad_value = config.get("pad_value", 0)
self.max_image_size = max_size_test
config_model = config.get("model", None)
assert config_model is not None
self.device = config_model.get("device", "cpu")
config_pixel_std = config_model.get("pixel_std", [1.0, 1.0, 1.0])
self.pixel_std = (
torch.tensor(config_pixel_std)
.to(self.device)
.view(len(config_pixel_std), 1, 1)
)
config_pixel_mean = config_model.get(
"pixel_mean", [102.9801, 115.9465, 122.7717]
)
self.pixel_mean = (
torch.tensor(config_pixel_mean)
.to(self.device)
.view(len(config_pixel_std), 1, 1)
)
self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
def pad(self, images: List[torch.Tensor]):
max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
image_sizes = [im.shape[-2:] for im in images]
images = [
F.pad(
im,
[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
value=self.pad_value,
)
for size, im in zip(image_sizes, images)
]
return torch.stack(images), torch.tensor(image_sizes)
def __call__(self, images: torch.Tensor, single_image: bool = False):
"""
        Takes images of variable sizes and returns preprocessed versions
        (resized on the shortest edge, flipped to BGR, normalized, and padded)
        based on the config, along with each image's resized dimensions and
        y/x scale factors.
"""
with torch.no_grad():
if not isinstance(images, list):
images = [images]
if single_image:
assert len(images) == 1
for i in range(len(images)):
if isinstance(images[i], torch.Tensor):
images.insert(i, images.pop(i).to(self.device).float())
elif not isinstance(images[i], torch.Tensor):
images.insert(
i,
torch.as_tensor(img_tensorize(images.pop(i)))
.to(self.device)
.float(),
)
# resize smallest edge
raw_sizes = torch.tensor([im.shape[:2] for im in images])
images = self.aug(images)
# flip rgb to bgr
for idx in range(len(images)):
images[idx] = torch.flip(images[idx], [0])
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32"))
# .permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
images = [self.normalizer(x) for x in images]
# now pad them to do the following operations
images, sizes = self.pad(images)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
scales_yx = torch.true_divide(raw_sizes, sizes)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def img_tensorize(im: str):
assert isinstance(im, str)
if os.path.isfile(im):
img = np.array(Image.open(im).convert("RGB"))
else:
img = get_image_from_url(im)
assert img is not None, f"could not connect to: {im}"
return img
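# Illustrative usage sketch of FRCNNPreprocess (paths and config values below
# are hypothetical):
#
#     from omegaconf import OmegaConf
#     preprocess = FRCNNPreprocess(
#         OmegaConf.create(
#             {
#                 "input": {"min_size_test": 800, "max_size_test": 1333},
#                 "model": {"device": "cpu"},
#             }
#         )
#     )
#     images, sizes, scales_yx = preprocess("path/to/image.jpg", single_image=True)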
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/frcnn_processor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import random
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample, SampleList
from mmf.datasets.processors.processors import BaseProcessor
try:
from transformers3.tokenization_auto import AutoTokenizer
except ImportError:
from transformers.tokenization_auto import AutoTokenizer
@registry.register_processor("masked_token")
class MaskedTokenProcessor(BaseProcessor):
_CLS_TOKEN = "[CLS]"
_SEP_TOKEN = "[SEP]"
_MASK_TOKEN = "[MASK]"
_PAD_TOKEN_ID = 0
def __init__(self, config, *args, **kwargs):
tokenizer_config = config.tokenizer_config
self._tokenizer = AutoTokenizer.from_pretrained(
tokenizer_config.type, **tokenizer_config.params
)
self._max_seq_length = config.max_seq_length
self._probability = config.get("mask_probability", 0.15)
def get_vocab_size(self) -> int:
return len(self._tokenizer)
def tokenize(self, tokens: Union[str, List[str]]) -> List[str]:
return self._tokenizer.tokenize(tokens)
def _convert_tokens_to_ids(
self, tokens: Union[str, List[str]]
) -> Union[int, List[int]]:
return self._tokenizer.convert_tokens_to_ids(tokens)
def _convert_ids_to_tokens(
self, ids: Union[int, List[int]]
) -> Union[str, List[str]]:
return self._tokenizer.convert_ids_to_tokens(ids)
def _random_word(
self, tokens: List[str], probability: float = 0.15
) -> Tuple[List[str], List[int]]:
labels = []
for idx, token in enumerate(tokens):
prob = random.random()
if prob < probability:
prob /= probability
# 80% randomly change token to mask token
if prob < 0.8:
tokens[idx] = self._MASK_TOKEN
# 10% randomly change token to random token
elif prob < 0.9:
tokens[idx] = self._convert_ids_to_tokens(
torch.randint(self.get_vocab_size(), (1,), dtype=torch.long)
)[0]
# rest 10% keep the original token as it is
labels.append(self._convert_tokens_to_ids(token))
else:
labels.append(-1)
return tokens, labels
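    # Illustrative example (not from the source): with the default
    # probability=0.15, tokens ["the", "cat", "sat"] might come back as
    # ["the", "[MASK]", "sat"] with labels [-1, id_of("cat"), -1]; only selected
    # positions contribute to the MLM loss, and a selected token is replaced by
    # a random vocabulary token 10% of the time, or kept unchanged another 10%
    # of the time, instead of being masked.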
def _truncate_seq_pair(
self, tokens_a: List[str], tokens_b: List[str], max_length: int
):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
if tokens_b is None:
tokens_b = []
max_length -= 2
else:
# _convert_to_indices does [CLS] tokens_a [SEP] tokens_b [SEP]
max_length -= 3
assert max_length >= 0, (
"Max length should be minimum 2 in case of single sentence"
+ " and 3 in case of two sentences."
)
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _convert_to_indices(
self,
tokens_a: List[str],
tokens_b: Optional[List[str]] = None,
probability: float = 0.15,
) -> Dict[str, torch.Tensor]:
"""
BERT encodes
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
"""
tokens_a, label_a = self._random_word(tokens_a, probability=probability)
tokens = [self._CLS_TOKEN] + tokens_a + [self._SEP_TOKEN]
segment_ids = [0] + [0] * len(tokens_a) + [0]
if tokens_b:
tokens_b, label_b = self._random_word(tokens_b, probability=probability)
lm_label_ids = [-1] + label_a + [-1] + label_b + [-1]
assert len(tokens_b) > 0
tokens += tokens_b + [self._SEP_TOKEN]
segment_ids += [1] * len(tokens_b) + [1]
else:
lm_label_ids = [-1] + label_a + [-1]
input_ids = self._convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self._max_seq_length:
input_ids.append(self._PAD_TOKEN_ID)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == self._max_seq_length
assert len(input_mask) == self._max_seq_length
assert len(segment_ids) == self._max_seq_length
assert len(lm_label_ids) == self._max_seq_length
input_ids = torch.tensor(input_ids, dtype=torch.long)
input_mask = torch.tensor(input_mask, dtype=torch.long)
segment_ids = torch.tensor(segment_ids, dtype=torch.long)
lm_label_ids = torch.tensor(lm_label_ids, dtype=torch.long)
return {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"lm_label_ids": lm_label_ids,
"tokens": tokens,
}
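    # Illustrative layout (not from the source) for tokens_a = [a, b],
    # tokens_b = [c, d], _max_seq_length = 8 and no positions selected for masking:
    #
    #   input_ids:    [CLS]  a    b   [SEP]  c    d   [SEP] [PAD]
    #   segment_ids:    0    0    0     0    1    1     1     0
    #   input_mask:     1    1    1     1    1    1     1     0
    #   lm_label_ids:  -1   -1   -1    -1   -1   -1    -1    -1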
def __call__(self, item: Dict[str, Any]):
text_a = item["text_a"]
text_b = item.get("text_b", None)
tokens_a = self.tokenize(text_a)
tokens_b = None
if text_b:
tokens_b = self.tokenize(text_b)
self._truncate_seq_pair(tokens_a, tokens_b, self._max_seq_length)
output = self._convert_to_indices(
tokens_a, tokens_b, probability=self._probability
)
output["is_correct"] = torch.tensor(
item.get("is_correct", True), dtype=torch.long
)
return output
@registry.register_processor("bert_tokenizer")
class BertTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._probability = config.get("mask_probability", 0)
def __call__(self, item: Dict[str, Any]):
if "text" in item:
text_a = item["text"]
elif "text_a" in item:
text_a = item["text_a"]
else:
text_a = " ".join(item["tokens"])
if isinstance(text_a, list):
text_a = " ".join(text_a)
tokens_a = self.tokenize(text_a)
# 'text_b' can be defined in the dataset preparation
tokens_b = None
if "text_b" in item:
text_b = item["text_b"]
if text_b:
tokens_b = self.tokenize(text_b)
self._truncate_seq_pair(tokens_a, tokens_b, self._max_seq_length)
output = self._convert_to_indices(
tokens_a, tokens_b, probability=self._probability
)
output["text"] = output["tokens"]
return output
@registry.register_processor("multi_sentence_bert_tokenizer")
class MultiSentenceBertTokenizer(BaseProcessor):
"""Extension of BertTokenizer which supports multiple sentences.
    Unlike the normal use case, each sentence is passed through the
    bert tokenizer separately and the indices are reshaped into a single
    tensor. Segment ids increase with each sentence.
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.fusion_strategy = config.get("fusion", "concat")
self._probability = config.get("mask_probability", 0)
self.tokenizer = BertTokenizer(config)
def __call__(self, item: Dict[str, Any]):
texts = item["text"]
if not isinstance(texts, list):
texts = [texts]
processed = []
for idx, text in enumerate(texts):
sample = Sample()
processed_text = self.tokenizer({"text": text})
sample.update(processed_text)
sample.segment_ids.fill_(idx)
processed.append(sample)
# Use SampleList to convert list of tensors to stacked tensors
processed = SampleList(processed)
if self.fusion_strategy == "concat":
processed.input_ids = processed.input_ids.view(-1)
processed.input_mask = processed.input_mask.view(-1)
processed.segment_ids = processed.segment_ids.view(-1)
processed.lm_label_ids = processed.lm_label_ids.view(-1)
return processed.to_dict()
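    # Illustrative sketch (not from the source): for item = {"text": [s1, s2]}
    # and max_seq_length = 16, each sentence is tokenized separately into (16,)
    # tensors, stacked by SampleList into shape (2, 16), and, under the default
    # "concat" fusion, flattened into a single (32,) tensor; segment_ids are 0
    # for positions coming from s1 and 1 for positions coming from s2.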
@registry.register_processor("masked_roberta_tokenizer")
class MaskedRobertaTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
# https://huggingface.co/transformers/model_doc/xlmroberta.html
# roberta is with different tokenization of above default (bert)
tokenizer_config = config.tokenizer_config
self._tokenizer = AutoTokenizer.from_pretrained(
tokenizer_config.type, **tokenizer_config.params
)
self._CLS_TOKEN = self._tokenizer.bos_token # <s>
self._SEP_TOKEN = self._tokenizer.sep_token # </s>
self._MASK_TOKEN = self._tokenizer.mask_token # <mask>
self._PAD_TOKEN_ID = self._tokenizer.pad_token_id # 1
self._max_seq_length = config.max_seq_length
self._probability = getattr(config, "mask_probability", 0.15)
def _truncate_seq_pair(
self, tokens_a: List[str], tokens_b: List[str], max_length: int
):
"""Truncates a sequence pair in place to the maximum length."""
if tokens_b is None:
tokens_b = []
max_length -= 2
else:
# _convert_to_indices does <s> tokens_a </s> </s> tokens_b </s>
max_length -= 4
assert max_length >= 0, (
"Max length should be minimum 2 in case of single sentence"
+ " and 4 in case of two sentences."
)
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _convert_to_indices(
self,
tokens_a: List[str],
tokens_b: Optional[List[str]] = None,
probability: float = 0.15,
) -> Dict[str, torch.Tensor]:
"""
Roberta encodes
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s> </s> B </s>``
"""
tokens_a, label_a = self._random_word(tokens_a, probability=probability)
tokens = [self._CLS_TOKEN] + tokens_a + [self._SEP_TOKEN]
segment_ids = [0] + [0] * len(tokens_a) + [0]
lm_label_ids = [-1] + label_a + [-1]
if tokens_b:
tokens_b, label_b = self._random_word(tokens_b, probability=probability)
assert len(tokens_b) > 0
# ``<s> A </s> </s> B </s>``
tokens += [self._SEP_TOKEN] + tokens_b + [self._SEP_TOKEN]
# RoBERTA and XLM-R don't use segment_ids, segment_ids are all 0's
segment_ids += [0] + [0] * len(tokens_b) + [0]
lm_label_ids += [-1] + label_b + [-1]
input_ids = self._convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self._max_seq_length:
input_ids.append(self._PAD_TOKEN_ID)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == self._max_seq_length
assert len(input_mask) == self._max_seq_length
assert len(segment_ids) == self._max_seq_length
assert len(lm_label_ids) == self._max_seq_length
input_ids = torch.tensor(input_ids, dtype=torch.long)
input_mask = torch.tensor(input_mask, dtype=torch.long)
segment_ids = torch.tensor(segment_ids, dtype=torch.long)
lm_label_ids = torch.tensor(lm_label_ids, dtype=torch.long)
return {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"lm_label_ids": lm_label_ids,
"tokens": tokens,
}
@registry.register_processor("roberta_tokenizer")
class RobertaTokenizer(BertTokenizer, MaskedRobertaTokenizer):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._probability = config.get("mask_probability", 0)
@registry.register_processor("multi_sentence_roberta_tokenizer")
class MultiSentenceRobertaTokenizer(MultiSentenceBertTokenizer):
"""Extension of SPMTokenizer which supports multiple sentences.
Similar to MultiSentenceBertTokenizer.
"""
def __init__(self, config, *args, **kwargs):
self.fusion_strategy = config.get("fusion", "concat")
self.tokenizer = RobertaTokenizer(config, *args, **kwargs)
self._probability = config.get("mask_probability", 0)
def get_pair_text_tokens(
item: Dict[str, Any], masked_token_processor: MaskedTokenProcessor
) -> Dict[str, torch.Tensor]:
"""Given an item Dict with either 1 or 2 text sentences,
    tokenize and concat them, returning a Dict that contains at least
    "input_ids", "input_mask", and "segment_ids".
Args:
item (Dict[str, Any]):
A Dict containing keys
"text" or "tokens", and optionally "text_b"
masked_token_processor (MaskedTokenProcessor):
A processor used to tokenize the texts.
Returns:
[Dict[str, torch.Tensor]]:
A Dict containing tokenized texts and
related tensors.
"""
if "text" in item:
text_a = item["text"]
elif "text_a" in item:
text_a = item["text_a"]
else:
text_a = " ".join(item["tokens"])
if isinstance(text_a, list):
text_a = " ".join(text_a)
tokens_a = masked_token_processor.tokenize(text_a)
# 'text_b' can be defined in the dataset preparation
tokens_b = None
if "text_b" in item:
text_b = item["text_b"]
if text_b:
tokens_b = masked_token_processor.tokenize(text_b)
masked_token_processor._truncate_seq_pair(
tokens_a, tokens_b, masked_token_processor._max_seq_length
)
output = masked_token_processor._convert_to_indices(
tokens_a, tokens_b, probability=masked_token_processor._probability
)
return output
@registry.register_processor("vilt_text_tokenizer")
class VILTTextTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
try:
from transformers3 import BertTokenizer
except ImportError:
from transformers import BertTokenizer
if isinstance(config, str):
config = {"from_pretrained": config}
from_pretrained_name = config.get("from_pretrained", "bert-base-uncased")
kwargs_dict = dict(kwargs, do_lower_case="uncased" in from_pretrained_name)
self._tokenizer = BertTokenizer.from_pretrained(
from_pretrained_name, **kwargs_dict
)
self._max_seq_length = config.get("max_seq_length", 25)
self._probability = config.get("mask_probability", 0)
def __call__(self, item):
output = get_pair_text_tokens(item, self)
output["text"] = output["tokens"]
return output
@registry.register_processor("uniter_text_tokenizer")
class UNITERTextTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
try:
from transformers3 import BertTokenizer
except ImportError:
from transformers import BertTokenizer
if isinstance(config, str):
config = {"from_pretrained": config}
from_pretrained_name = config.get("from_pretrained", "bert-base-uncased")
kwargs_dict = dict(kwargs, do_lower_case="uncased" in from_pretrained_name)
self._tokenizer = BertTokenizer.from_pretrained(
from_pretrained_name, **kwargs_dict
)
self._max_seq_length = config.get("max_seq_length", 25)
self._probability = config.get("mask_probability", 0)
def __call__(self, item: Dict[str, Any]):
output = get_pair_text_tokens(item, self)
output["text"] = output["tokens_masked"]
output["tokens"] = output["tokens_masked"]
if "is_correct" in item:
output["is_correct"] = torch.tensor(
item.get("is_correct", True), dtype=torch.long
)
return output
def _token_transform(
self, tokens: List[str], tokens_b: Optional[List[str]] = None
) -> Tuple[torch.Tensor, int, int, List[str]]:
tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOKEN]
if tokens_b:
tokens += tokens_b + [self._SEP_TOKEN]
input_ids = self._convert_tokens_to_ids(tokens)
token_len = len(input_ids)
token_pad = self._max_seq_length - token_len
# Zero-pad up to the sequence length.
input_ids += [self._PAD_TOKEN_ID] * token_pad
input_ids_tensor = torch.tensor(input_ids, dtype=torch.long)
return input_ids_tensor, token_len, token_pad, tokens
def _convert_to_indices(
self,
tokens_a: List[str],
tokens_b: Optional[List[str]] = None,
probability: float = 0.15,
) -> Dict[str, torch.Tensor]:
"""
BERT encodes
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
"""
input_ids_original, _, _, _ = self._token_transform(tokens_a, tokens_b)
tokens_a, label_a = self._random_word(tokens_a, probability=probability)
segment_ids = [0] * (len(tokens_a) + 2)
if tokens_b:
tokens_b, label_b = self._random_word(tokens_b, probability=probability)
lm_label_ids = [-1] + label_a + [-1] + label_b + [-1]
assert len(tokens_b) > 0
segment_ids += [1] * (len(tokens_b) + 1)
else:
lm_label_ids = [-1] + label_a + [-1]
input_ids_masked, token_len, token_pad, tokens_masked = self._token_transform(
tokens_a, tokens_b
)
input_mask = [1] * token_len + [0] * token_pad
segment_ids += [0] * token_pad
lm_label_ids += [-1] * token_pad
input_mask = torch.tensor(input_mask, dtype=torch.long)
segment_ids = torch.tensor(segment_ids, dtype=torch.long)
lm_label_ids = torch.tensor(lm_label_ids, dtype=torch.long)
return {
"input_ids_masked": input_ids_masked, # specifically for MLM heads
"input_ids": input_ids_original, # unmasked tokens for CLIP heads
# input_mask is non-padding (1) vs padding (0) mask (not MLM token masking)
"input_mask": input_mask,
"segment_ids": segment_ids,
"lm_label_ids": lm_label_ids,
"tokens_masked": tokens_masked,
}
@registry.register_processor("vinvl_text_tokenizer")
class VinVLTextTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
try:
from transformers3 import BertTokenizer
except ImportError:
from transformers import BertTokenizer
if isinstance(config, str):
config = {"from_pretrained": config}
from_pretrained_name = config.get("from_pretrained", "bert-base-uncased")
kwargs_dict = dict(kwargs, do_lower_case="uncased" in from_pretrained_name)
self._tokenizer = BertTokenizer.from_pretrained(
from_pretrained_name, **kwargs_dict
)
self._max_seq_length = config.get("max_seq_length", 70)
self._probability = config.get("mask_probability", 0)
self._corrupt_prob = config.get("corrupt_probability", 0)
self._corrupt_caption_prob = config.get("corrupt_caption_probability", 0)
def __call__(self, item: Dict[str, Any]) -> Dict[str, torch.Tensor]:
output = get_pair_text_tokens(item, self)
output["text"] = output["tokens_masked"]
output["tokens"] = output["tokens_masked"]
if self._corrupt_prob > 0:
contrastive_label, corrupt_output = self._get_contrastive_output(item)
output["input_ids_corrupt"] = corrupt_output["input_ids"]
output["segment_ids_corrupt"] = corrupt_output["segment_ids"]
output["input_mask_corrupt"] = corrupt_output["input_mask"]
output["contrastive_label"] = contrastive_label
return output
def _get_contrastive_output(
self, item: Dict[str, Any]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
error_msg = (
"'{}' are required in the annotations for VinVL pretraining."
+ "These should be created using the MMF feature extraction script for MMF."
+ "To run finetuning or pretraining without contrastive loss,"
+ " set corrupt_probability to 0."
)
assert "random_captions" in item, error_msg.format("random_captions")
assert "random_labels" in item, error_msg.format("random_labels")
assert "text_b" in item, error_msg.format("text_b")
corrupt_item = copy.deepcopy(item)
p_match = 1 - self._corrupt_prob
p_caption = self._corrupt_prob * self._corrupt_caption_prob
p_label = self._corrupt_prob * (1 - self._corrupt_caption_prob)
contrastive_label = torch.multinomial(
torch.tensor([p_match, p_caption, p_label]), num_samples=1
).long()
if contrastive_label == 0:
pass
elif contrastive_label == 1:
num_subs = len(item["random_captions"])
neg_index = torch.randint(num_subs, (1,))
corrupt_item["text"] = item["random_captions"][neg_index]
else:
num_subs = len(item["random_labels"])
neg_index = torch.randint(num_subs, (1,))
corrupt_item["text_b"] = item["random_labels"][neg_index]
return contrastive_label, get_pair_text_tokens(corrupt_item, self)
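    # Worked example (illustrative): with corrupt_probability = 0.5 and
    # corrupt_caption_probability = 0.5, a sample keeps its matching caption and
    # labels with p = 0.5 (contrastive_label = 0), swaps in a random caption with
    # p = 0.25 (label = 1), and swaps in random region labels with p = 0.25
    # (label = 2); the corrupted copy is then tokenized exactly like the original.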
def _token_transform(
self, tokens: List[str], tokens_b: Optional[List[str]] = None
) -> Tuple[torch.Tensor, int, int, List[str]]:
tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOKEN]
if tokens_b:
tokens += tokens_b + [self._SEP_TOKEN]
input_ids = self._convert_tokens_to_ids(tokens)
token_len = len(input_ids)
token_pad = self._max_seq_length - token_len
# Zero-pad up to the sequence length.
input_ids += [self._PAD_TOKEN_ID] * token_pad
input_ids_tensor = torch.tensor(input_ids, dtype=torch.long)
return input_ids_tensor, token_len, token_pad, tokens
def _convert_to_indices(
self,
tokens_a: List[str],
tokens_b: Optional[List[str]] = None,
probability: float = 0.15,
) -> Dict[str, torch.Tensor]:
"""
BERT encodes
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
"""
tokens_a_original = tokens_a.copy()
tokens_a, label_a = self._random_word(tokens_a, probability=probability)
segment_ids = [0] * (len(tokens_a) + 2)
if tokens_b:
tokens_b_original = tokens_b.copy()
tokens_b, label_b = self._random_word(tokens_b, probability=probability)
lm_label_ids = [-1] + label_a + [-1] + label_b + [-1]
assert len(tokens_b) > 0
segment_ids += [1] * (len(tokens_b) + 1)
else:
tokens_b_original = None
lm_label_ids = [-1] + label_a + [-1]
input_ids_masked, token_len, token_pad, tokens_masked = self._token_transform(
tokens_a, tokens_b
)
input_ids_original, _, _, _ = self._token_transform(
tokens_a_original, tokens_b_original
)
input_mask = [1] * token_len + [0] * token_pad
segment_ids += [0] * token_pad
lm_label_ids += [-1] * token_pad
input_mask = torch.tensor(input_mask, dtype=torch.long)
segment_ids = torch.tensor(segment_ids, dtype=torch.long)
lm_label_ids = torch.tensor(lm_label_ids, dtype=torch.long)
return {
"input_ids_masked": input_ids_masked, # specifically for MLM heads
"input_ids": input_ids_original,
# input_mask is non-padding (1) vs padding (0) mask (not MLM token masking)
"input_mask": input_mask,
"segment_ids": segment_ids,
"lm_label_ids": lm_label_ids,
"tokens_masked": tokens_masked,
}
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/bert_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
The processors exist in MMF to make data processing pipelines in various
datasets as similar as possible while allowing code reuse.
The processors also help maintain proper abstractions to keep only what matters
inside the dataset's code. This allows us to keep the dataset ``__getitem__``
logic clean without having to maintain assumptions about the data types.
Processors can work on both images and text due to their generic structure.
To create a new processor, follow these steps:
1. Inherit the ``BaseProcessor`` class.
2. Implement the ``__call__`` function, which takes in a dict and returns a dict
with the same keys preprocessed, as well as any extra keys that need to be returned.
3. Register the processor with the registry using
``@registry.register_processor('name')``, where 'name' will be used to refer to
your processor later.
In a processor's config you can use the ``preprocessor`` option to specify a
different kind of preprocessor you want in your dataset.
Let's break down a processor's config inside a dataset (VQA 2.0) a bit to
understand the different moving parts.
Config::
dataset_config:
vqa2:
data_dir: ${env.data_dir}
processors:
text_processor:
type: vocab
params:
max_length: 14
vocab:
type: intersected
embedding_name: glove.6B.300d
vocab_file: vqa2/defaults/extras/vocabs/vocabulary_100k.txt
preprocessor:
type: simple_sentence
params: {}
``BaseDataset`` will init the processors and they will be available inside your
dataset with the same attribute name as the key name, e.g. `text_processor` will
be available as `self.text_processor` inside your dataset. As with every module
in MMF, a processor also accepts a ``DictConfig`` with `type` and `params`
attributes. `params` defines the custom parameters for each of the processors.
By default, the processor initialization process will also init a `preprocessor`
attribute, which can be a processor config in itself. `preprocessor` can then be
accessed inside the processor's functions.
Example::
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
@registry.register_processor('my_processor')
class MyProcessor(BaseProcessor):
def __init__(self, config, *args, **kwargs):
return
def __call__(self, item, *args, **kwargs):
text = item['text']
text = [t.strip() for t in text.split(" ")]
return {"text": text}
"""
import collections
import copy
import logging
import os
import random
import re
import warnings
from collections import Counter, defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Optional, Union
import numpy as np
import torch
from mmf.common.constants import IMAGE_COLOR_MEAN, IMAGE_COLOR_STD
from mmf.common.registry import registry
from mmf.common.typings import ProcessorConfigType
from mmf.utils.configuration import get_mmf_cache_dir, get_mmf_env
from mmf.utils.distributed import is_main, synchronize
from mmf.utils.file_io import PathManager
from mmf.utils.logger import log_class_usage
from mmf.utils.text import VocabDict
from mmf.utils.vocab import Vocab, WordToVectorDict
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
@dataclass
class BatchProcessorConfigType:
processors: ProcessorConfigType
class BaseProcessor:
"""Every processor in MMF needs to inherit this class for compatibility
    with MMF. The end user mainly needs to implement the ``__call__`` function.
Args:
config (DictConfig): Config for this processor, containing `type` and
`params` attributes if available.
"""
def __init__(self, *args, config: Optional[DictConfig] = None, **kwargs):
log_class_usage("Processor", self.__class__)
return
def __call__(self, item: Any, *args, **kwargs) -> Any:
"""Main function of the processor. Takes in a dict and returns back
a dict
Args:
item (Dict): Some item that needs to be processed.
Returns:
Dict: Processed dict.
"""
return item
class Processor:
"""Wrapper class used by MMF to initialized processor based on their
``type`` as passed in configuration. It retrieves the processor class
registered in registry corresponding to the ``type`` key and initializes
with ``params`` passed in configuration. All functions and attributes of
the processor initialized are directly available via this class.
Args:
config (DictConfig): DictConfig containing ``type`` of the processor to
be initialized and ``params`` of that processor.
"""
def __init__(self, config: ProcessorConfigType, *args, **kwargs):
if "type" not in config:
raise AttributeError(
"Config must have 'type' attribute to specify type of processor"
)
processor_class = registry.get_processor_class(config.type)
if processor_class is None:
raise ValueError(f"No processor class named {config.type} is defined.")
params = {}
if "params" not in config:
logger.warning(
"Config doesn't have 'params' attribute to "
"specify parameters of the processor "
f"of type {config.type}. Setting to default {{}}"
)
else:
params = config.params
self.processor = processor_class(params, *args, **kwargs)
self._dir_representation = dir(self)
def __call__(self, item, *args, **kwargs):
return self.processor(item, *args, **kwargs)
def __getattr__(self, name):
if "_dir_representation" in self.__dict__ and name in self._dir_representation:
return getattr(self, name)
elif "processor" in self.__dict__ and hasattr(self.processor, name):
return getattr(self.processor, name)
else:
raise AttributeError(f"The processor {name} doesn't exist in the registry.")
class BatchProcessor(BaseProcessor):
"""BatchProcessor is an extension of normal processor which usually are
used in cases where dataset works on full batch instead of samples.
Such cases can be observed in the case of the iterable datasets.
BatchProcessor if provided with processors key in the config, will
initialize a member variable processors_dict for you which will contain
initialization of all of the processors you specified and will need to process
your complete batch.
Rest it behaves in same way, expects an item and returns an item which can be
of any type.
"""
def __init__(self, config: BatchProcessorConfigType, *args, **kwargs):
extra_params = {"data_dir": get_mmf_env(key="data_dir")}
processors_dict = config.get("processors", {})
# Since build_processors also imports processor, import it at runtime to
# avoid circular dependencies
from mmf.utils.build import build_processors
self.processors = build_processors(processors_dict, **extra_params)
def __call__(self, item: Any) -> Any:
return item
@registry.register_processor("vocab")
class VocabProcessor(BaseProcessor):
"""Use VocabProcessor when you have vocab file and you want to process
words to indices. Expects UNK token as "<unk>" and pads sentences using
"<pad>" token. Config parameters can have ``preprocessor`` property which
is used to preprocess the item passed and ``max_length`` property which
points to maximum length of the sentence/tokens which can be convert to
indices. If the length is smaller, the sentence will be padded. Parameters
for "vocab" are necessary to be passed.
**Key**: vocab
Example Config::
dataset_config:
vqa2:
data_dir: ${env.data_dir}
processors:
text_processor:
type: vocab
params:
max_length: 14
vocab:
type: intersected
embedding_name: glove.6B.300d
vocab_file: vqa2/defaults/extras/vocabs/vocabulary_100k.txt
Args:
config (DictConfig): node containing configuration parameters of
the processor
Attributes:
vocab (Vocab): Vocab class object which is abstraction over the vocab
file passed.
"""
MAX_LENGTH_DEFAULT = 50
PAD_TOKEN = "<pad>"
PAD_INDEX = 0
def __init__(self, config, *args, **kwargs):
if not hasattr(config, "vocab"):
raise AttributeError(
"config passed to the processor has no attribute vocab"
)
self.vocab = Vocab(*args, **config.vocab, **kwargs)
self._init_extras(config)
def _init_extras(self, config, *args, **kwargs):
self.preprocessor = None
if hasattr(config, "max_length"):
self.max_length = config.max_length
else:
warnings.warn(
"No 'max_length' parameter in Processor's "
"configuration. Setting to {}.".format(self.MAX_LENGTH_DEFAULT)
)
self.max_length = self.MAX_LENGTH_DEFAULT
if "preprocessor" in config:
self.preprocessor = Processor(config.preprocessor, *args, **kwargs)
if self.preprocessor is None:
raise ValueError(
f"No text processor named {config.preprocessor} is defined."
)
def __call__(self, item):
"""Call requires item to have either "tokens" attribute or either
"text" attribute. If "text" is present, it will tokenized using
the preprocessor.
Args:
item (Dict): Dict containing the "text" or "tokens".
Returns:
Dict: Dict containing indices in "text" key, "tokens" in "tokens"
key and "length" of the string in "length" key.
"""
indices = None
if not isinstance(item, dict):
raise TypeError(
"Argument passed to the processor must be "
"a dict with either 'text' or 'tokens' as "
"keys"
)
if "tokens" in item:
tokens = item["tokens"]
indices = self._map_strings_to_indices(item["tokens"])
elif "text" in item:
if self.preprocessor is None:
raise AssertionError(
"If tokens are not provided, a text "
"processor must be defined in the config"
)
tokens = self.preprocessor({"text": item["text"]})["text"]
indices = self._map_strings_to_indices(tokens)
else:
raise AssertionError(
"A dict with either 'text' or 'tokens' keys "
"must be passed to the processor"
)
tokens, length = self._pad_tokens(tokens)
return {"text": indices, "tokens": tokens, "length": length}
def _pad_tokens(self, tokens):
padded_tokens = [self.PAD_TOKEN] * self.max_length
token_length = min(len(tokens), self.max_length)
padded_tokens[:token_length] = tokens[:token_length]
token_length = torch.tensor(token_length, dtype=torch.long)
return padded_tokens, token_length
def get_pad_index(self):
"""Get index of padding <pad> token in vocabulary.
Returns:
int: index of the padding token.
"""
return self.vocab.get_pad_index()
def get_vocab_size(self):
"""Get size of the vocabulary.
Returns:
int: size of the vocabulary.
"""
return self.vocab.get_size()
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
output = torch.zeros(self.max_length, dtype=torch.long)
output.fill_(self.vocab.get_pad_index())
for idx, token in enumerate(tokens):
output[idx] = self.vocab.stoi[token]
return output
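    # Illustrative call (vocabulary indices below are hypothetical), assuming
    # max_length = 5 and a vocab where "what" -> 51, "is" -> 7, "this" -> 33
    # and "<pad>" -> 0:
    #
    #     out = vocab_processor({"tokens": ["what", "is", "this"]})
    #     # out["text"]   -> tensor([51,  7, 33,  0,  0])
    #     # out["tokens"] -> ["what", "is", "this", "<pad>", "<pad>"]
    #     # out["length"] -> tensor(3)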
@registry.register_processor("glove")
class GloVeProcessor(VocabProcessor):
"""Inherits VocabProcessor, and returns GloVe vectors for each of the
    words. Maps them to indices using the vocab processor, and then gets the
    GloVe vectors corresponding to those indices.
Args:
config (DictConfig): Configuration parameters for GloVe same as
:func:`~VocabProcessor`.
"""
def __init__(self, config, *args, **kwargs):
if not hasattr(config, "vocab"):
raise AttributeError(
"Config passed to the processor has no attribute vocab"
)
vocab_processor_config = copy.deepcopy(config)
# GloVeProcessor needs vocab type to be "intersected"
vocab_processor_config.vocab.type = "intersected"
if "vocab_file" not in vocab_processor_config.vocab:
warnings.warn(
"'vocab_file' key is not present in the config."
" Switching to pretrained vocab."
)
vocab_processor_config.vocab.type = "pretrained"
self._init_extras(vocab_processor_config)
self.config = vocab_processor_config
self._already_downloaded = False
self._args = args
self._kwargs = kwargs
def __call__(self, item):
if not self._already_downloaded:
self.vocab = Vocab(*self._args, **self.config.vocab, **self._kwargs)
self._already_downloaded = True
indices = super().__call__(item)["text"]
embeddings = torch.zeros(
(len(indices), self.vocab.get_embedding_dim()), dtype=torch.float
)
for idx, index in enumerate(indices):
embeddings[idx] = self.vocab.vectors[index]
return {"text": embeddings}
@registry.register_processor("fasttext")
class FastTextProcessor(VocabProcessor):
"""FastText processor, similar to GloVe processor but returns FastText vectors.
Args:
config (DictConfig): Configuration values for the processor.
"""
def __init__(self, config, *args, **kwargs):
self._init_extras(config)
self.config = config
self._download_initially = config.get("download_initially", True)
self._already_downloaded = False
self._already_loaded = False
if self._download_initially:
self._try_download()
def _try_download(self):
_is_main = is_main()
if self._already_downloaded:
return
needs_download = False
if not hasattr(self.config, "model_file"):
if _is_main:
warnings.warn(
"'model_file' key is required but missing "
"from FastTextProcessor's config."
)
needs_download = True
model_file = self.config.model_file
# If model_file is already an existing path don't join to cache dir
if not PathManager.exists(model_file):
model_file = os.path.join(get_mmf_cache_dir(), model_file)
if not PathManager.exists(model_file):
if _is_main:
warnings.warn(f"No model file present at {model_file}.")
needs_download = True
if needs_download:
logger.info("Downloading FastText bin")
model_file = self._download_model()
self.model_file = model_file
self._already_downloaded = True
synchronize()
def _download_model(self):
_is_main = is_main()
model_file_path = os.path.join(get_mmf_cache_dir(), "wiki.en.bin")
if not _is_main:
return model_file_path
if PathManager.exists(model_file_path):
logger.info(f"Vectors already present at {model_file_path}.")
return model_file_path
import requests
from mmf.common.constants import FASTTEXT_WIKI_URL
from tqdm import tqdm
PathManager.mkdirs(os.path.dirname(model_file_path))
response = requests.get(FASTTEXT_WIKI_URL, stream=True)
with PathManager.open(model_file_path, "wb") as f:
pbar = tqdm(
total=int(response.headers["Content-Length"]) / 4096,
miniters=50,
disable=not _is_main,
)
idx = 0
for data in response.iter_content(chunk_size=4096):
if data:
if idx % 50 == 0:
pbar.update(len(data))
f.write(data)
idx += 1
pbar.close()
logger.info(f"fastText bin downloaded at {model_file_path}.")
return model_file_path
def _load_fasttext_model(self, model_file):
if self._already_loaded:
return
from fasttext import load_model
logger.info(f"Loading fasttext model now from {model_file}")
self.model = load_model(model_file)
# String to Vector
self.stov = WordToVectorDict(self.model)
logger.info("Finished loading fasttext model")
self._already_loaded = True
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
output = torch.full(
(self.max_length, self.model.get_dimension()),
fill_value=self.PAD_INDEX,
dtype=torch.float,
)
for idx, token in enumerate(tokens):
output[idx] = torch.from_numpy(self.stov[token])
return output
def __call__(self, item):
self._load_fasttext_model(self.model_file)
return super().__call__(item)
@registry.register_processor("vqa_answer")
class VQAAnswerProcessor(BaseProcessor):
"""Processor for generating answer scores for answers passed using VQA
accuracy formula. Using VocabDict class to represent answer vocabulary,
so parameters must specify "vocab_file". "num_answers" in parameter config
specify the max number of answers possible. Takes in dict containing
"answers" or "answers_tokens". "answers" are preprocessed to generate
"answers_tokens" if passed.
Args:
config (DictConfig): Configuration for the processor
Attributes:
answer_vocab (VocabDict): Class representing answer vocabulary
"""
DEFAULT_NUM_ANSWERS = 10
def __init__(self, config, *args, **kwargs):
if not hasattr(config, "vocab_file"):
raise AttributeError(
"'vocab_file' argument required, but not "
"present in AnswerProcessor's config"
)
self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs)
self.PAD_IDX = self.answer_vocab.word2idx("<pad>")
self.BOS_IDX = self.answer_vocab.word2idx("<s>")
self.EOS_IDX = self.answer_vocab.word2idx("</s>")
self.UNK_IDX = self.answer_vocab.UNK_INDEX
# Set EOS to something not achievable if it is not there
if self.EOS_IDX == self.UNK_IDX:
self.EOS_IDX = len(self.answer_vocab)
self.preprocessor = None
if hasattr(config, "preprocessor"):
self.preprocessor = Processor(config.preprocessor)
if self.preprocessor is None:
raise ValueError(
f"No processor named {config.preprocessor} is defined."
)
if hasattr(config, "num_answers"):
self.num_answers = config.num_answers
else:
self.num_answers = self.DEFAULT_NUM_ANSWERS
warnings.warn(
"'num_answers' not defined in the config. "
"Setting to default of {}".format(self.DEFAULT_NUM_ANSWERS)
)
def __call__(self, item):
"""Takes in dict with answers or answers_tokens, and returns back
a dict with answers (processed), "answers_indices" which point to
indices of the answers if present and "answers_scores" which represent
VQA style scores for the answers.
Args:
item (Dict): Dict containing answers or answers_tokens
Returns:
Dict: Processed answers, indices and scores.
"""
tokens = []
if not isinstance(item, dict):
raise TypeError("'item' passed to processor must be a dict")
if "answer_tokens" in item:
tokens = item["answer_tokens"]
elif "answers" in item and item["answers"] is not None:
if self.preprocessor is None:
raise AssertionError(
"'preprocessor' must be defined if you "
"don't pass 'answer_tokens'"
)
tokens = [
self.preprocessor({"text": answer})["text"]
for answer in item["answers"]
]
else:
raise AssertionError(
"'answers' or 'answer_tokens' must be passed"
" to answer processor in a dict"
)
if len(tokens) != 0:
tokens = self._increase_to_ten(tokens)
answers_indices = torch.zeros(self.DEFAULT_NUM_ANSWERS, dtype=torch.long)
answers_indices.fill_(self.answer_vocab.get_unk_index())
for idx, token in enumerate(tokens):
answers_indices[idx] = self.answer_vocab.word2idx(token)
answers_scores = self.compute_answers_scores(answers_indices)
return {
"answers": tokens,
"answers_indices": answers_indices,
"answers_scores": answers_scores,
}
def get_vocab_size(self):
"""Get vocab size of the answer vocabulary. Can also include
soft copy dynamic answer space size.
Returns:
int: size of the answer vocabulary
"""
return self.answer_vocab.num_vocab
def get_true_vocab_size(self):
"""True vocab size can be different from normal vocab size in some cases
such as soft copy where dynamic answer space is added.
Returns:
int: True vocab size.
"""
return self.answer_vocab.num_vocab
def word2idx(self, word):
"""Convert a word to its index according to vocabulary
Args:
word (str): Word to be converted to index.
Returns:
int: Index of the word.
"""
return self.answer_vocab.word2idx(word)
def idx2word(self, idx):
"""Index to word according to the vocabulary.
Args:
idx (int): Index to be converted to the word.
Returns:
str: Word corresponding to the index.
"""
return self.answer_vocab.idx2word(idx)
def compute_answers_scores(self, answers_indices):
"""Generate VQA based answer scores for answers_indices.
Args:
answers_indices (torch.LongTensor): tensor containing indices of the answers
Returns:
torch.FloatTensor: tensor containing scores.
"""
scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)
gt_answers = list(enumerate(answers_indices))
unique_answers = set(answers_indices.tolist())
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.UNK_INDEX:
scores[answer] = avg_acc
return scores
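    # Worked example (illustrative): if 2 of the 10 ground-truth answers are
    # "cat", leaving each annotator out in turn gives min(1, 1/3) for the two
    # "cat" annotators and min(1, 2/3) for the other eight, so
    # scores[word2idx("cat")] = (2 * 1/3 + 8 * 2/3) / 10 = 0.6; an answer given
    # by four or more annotators scores 1.0.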
def _increase_to_ten(self, tokens):
while len(tokens) < self.DEFAULT_NUM_ANSWERS:
tokens += tokens[: self.DEFAULT_NUM_ANSWERS - len(tokens)]
return tokens
@registry.register_processor("graph_vqa_answer")
class GraphVQAAnswerProcessor(BaseProcessor):
"""Processor for generating answer scores for answers passed using VQA
accuracy formula. Using VocabDict class to represent answer vocabulary,
so parameters must specify "vocab_file". "num_answers" in parameter config
specify the max number of answers possible. Takes in dict containing
"answers" or "answers_tokens". "answers" are preprocessed to generate
"answers_tokens" if passed.
This version also takes a graph vocab and predicts a main and graph
stream simultanously
Args:
config (DictConfig): Configuration for the processor
Attributes:
answer_vocab (VocabDict): Class representing answer vocabulary
"""
DEFAULT_NUM_ANSWERS = 10
def __init__(self, config, *args, **kwargs):
if not hasattr(config, "vocab_file"):
raise AttributeError(
"'vocab_file' argument required, but not "
"present in AnswerProcessor's config"
)
self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs)
self.PAD_IDX = self.answer_vocab.word2idx("<pad>")
self.BOS_IDX = self.answer_vocab.word2idx("<s>")
self.EOS_IDX = self.answer_vocab.word2idx("</s>")
self.UNK_IDX = self.answer_vocab.UNK_INDEX
# Set EOS to something not achievable if it is not there
if self.EOS_IDX == self.UNK_IDX:
self.EOS_IDX = len(self.answer_vocab)
self.preprocessor = None
if hasattr(config, "preprocessor"):
self.preprocessor = Processor(config.preprocessor)
if self.preprocessor is None:
raise ValueError(
f"No processor named {config.preprocessor} is defined."
)
if hasattr(config, "num_answers"):
self.num_answers = config.num_answers
else:
self.num_answers = self.DEFAULT_NUM_ANSWERS
warnings.warn(
"'num_answers' not defined in the config. "
"Setting to default of {}".format(self.DEFAULT_NUM_ANSWERS)
)
# Load the graph answer vocab file
if os.path.exists(config.graph_vocab_file):
graph_vocab_file = config.graph_vocab_file
else:
graph_vocab_file = os.path.join(
os.getenv("MMF_DATA_DIR"), "datasets", config.graph_vocab_file
)
self.graph_vocab = sorted(list(torch.load(graph_vocab_file)))
self.ans2graphvocabidx = {}
for graphvocabidx, graph_ans in enumerate(self.graph_vocab):
# Right now, this does need to overlap with the regular vocab
self.ans2graphvocabidx[graph_ans] = graphvocabidx
# Make sure graph_ans actually in vocab
assert graph_ans in self.answer_vocab.word2idx_dict
self.config = config
def __call__(self, item):
"""Takes in dict with answers or answers_tokens, and returns back
a dict with answers (processed), "answers_indices" which point to
indices of the answers if present and "answers_scores" which represent
VQA style scores for the answers.
Args:
item (Dict): Dict containing answers or answers_tokens
Returns:
Dict: Processed answers, indices and scores.
"""
tokens = []
if not isinstance(item, dict):
raise TypeError("'item' passed to processor must be a dict")
if "answer_tokens" in item:
tokens = item["answer_tokens"]
elif "answers" in item and item["answers"] is not None:
if self.preprocessor is None:
raise AssertionError(
"'preprocessor' must be defined if you "
"don't pass 'answer_tokens'"
)
tokens = [
self.preprocessor({"text": answer})["text"]
for answer in item["answers"]
]
else:
raise AssertionError(
"'answers' or 'answer_tokens' must be passed"
" to answer processor in a dict"
)
if len(tokens) != 0:
tokens = self._increase_to_ten(tokens)
answers_indices = torch.zeros(self.DEFAULT_NUM_ANSWERS, dtype=torch.long)
answers_indices.fill_(self.answer_vocab.get_unk_index())
for idx, token in enumerate(tokens):
answers_indices[idx] = self.answer_vocab.word2idx(token)
answers_scores = self.compute_answers_scores(answers_indices)
# Get answer scores for the graph vocab
if self.config.concat_scores:
answers_scores_graph = torch.zeros(len(self.graph_vocab), dtype=torch.float)
unique_answers = set(answers_indices.tolist())
for answer in unique_answers:
# Get the original score
if answer != self.answer_vocab.UNK_INDEX:
score = answers_scores[answer]
# Copy into graph scores (if it's in there)
ans_str = self.answer_vocab.idx2word(answer)
if ans_str in self.ans2graphvocabidx:
graph_idx = self.ans2graphvocabidx[ans_str]
answers_scores_graph[graph_idx] = score
# Concat scores
answers_scores = torch.cat([answers_scores, answers_scores_graph], dim=0)
return {
"answers": tokens,
"answers_indices": answers_indices,
"answers_scores": answers_scores,
}
def get_vocab_size(self):
"""Get vocab size of the answer vocabulary. Can also include
soft copy dynamic answer space size.
Returns:
int: size of the answer vocabulary
"""
return self.answer_vocab.num_vocab
def get_true_vocab_size(self):
"""True vocab size can be different from normal vocab size in some cases
such as soft copy where dynamic answer space is added.
Returns:
int: True vocab size.
"""
return self.answer_vocab.num_vocab
def word2idx(self, word):
"""Convert a word to its index according to vocabulary
Args:
word (str): Word to be converted to index.
Returns:
int: Index of the word.
"""
return self.answer_vocab.word2idx(word)
def idx2word(self, idx):
"""Index to word according to the vocabulary.
Args:
idx (int): Index to be converted to the word.
Returns:
str: Word corresponding to the index.
"""
return self.answer_vocab.idx2word(idx)
def compute_answers_scores(self, answers_indices):
"""Generate VQA based answer scores for answers_indices.
Args:
answers_indices (torch.LongTensor): tensor containing indices of the answers
Returns:
torch.FloatTensor: tensor containing scores.
"""
scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)
gt_answers = list(enumerate(answers_indices))
unique_answers = set(answers_indices.tolist())
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.UNK_INDEX:
scores[answer] = avg_acc
return scores
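    # Illustrative note (added for clarity, not in the original source): with 10
    # ground-truth answers, the leave-one-out averaging above yields the standard
    # VQA soft scores. For example, an answer given by k of the 10 annotators gets
    #   k = 1 -> 0.3,  k = 2 -> 0.6,  k = 3 -> 0.9,  k >= 4 -> 1.0
    # since each of the 10 leave-one-out subsets contributes min(1, matches / 3).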
def _increase_to_ten(self, tokens):
while len(tokens) < self.DEFAULT_NUM_ANSWERS:
tokens += tokens[: self.DEFAULT_NUM_ANSWERS - len(tokens)]
return tokens
@registry.register_processor("multi_hot_answer_from_vocab")
class MultiHotAnswerFromVocabProcessor(VQAAnswerProcessor):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
def compute_answers_scores(self, answers_indices):
scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)
scores[answers_indices] = 1
scores[self.answer_vocab.UNK_INDEX] = 0
return scores
@registry.register_processor("soft_copy_answer")
class SoftCopyAnswerProcessor(VQAAnswerProcessor):
"""Similar to Answer Processor but adds soft copy dynamic answer space to it.
Read https://arxiv.org/abs/1904.08920 for extra information on soft copy
and LoRRA.
Args:
config (DictConfig): Configuration for soft copy processor.
"""
DEFAULT_MAX_LENGTH = 50
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
if hasattr(config, "max_length"):
self.max_length = config.max_length
else:
self.max_length = self.DEFAULT_MAX_LENGTH
warnings.warn(
"'max_length' not defined in the config. "
"Setting to default of {}".format(self.DEFAULT_MAX_LENGTH)
)
self.context_preprocessor = None
if hasattr(config, "context_preprocessor"):
self.context_preprocessor = Processor(config.context_preprocessor)
def get_vocab_size(self):
"""Size of Vocab + Size of Dynamic soft-copy based answer space
Returns:
int: Size of vocab + size of dynamic soft-copy answer space.
"""
answer_vocab_nums = self.answer_vocab.num_vocab
answer_vocab_nums += self.max_length
return answer_vocab_nums
def get_true_vocab_size(self):
"""Actual vocab size which only include size of the vocabulary file.
Returns:
int: Actual size of vocabs.
"""
return self.answer_vocab.num_vocab
def __call__(self, item):
answers = item["answers"]
scores = super().__call__({"answers": answers})
indices = scores["answers_indices"]
answers = scores["answers"]
scores = scores["answers_scores"]
tokens_scores = scores.new_zeros(self.max_length)
tokens = item["tokens"]
length = min(len(tokens), self.max_length)
gt_answers = list(enumerate(answers))
if self.context_preprocessor is not None:
tokens = [
self.context_preprocessor({"text": token})["text"] for token in tokens
]
answer_counter = Counter(answers)
for idx, token in enumerate(tokens[:length]):
if answer_counter[token] == 0:
continue
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == token]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
tokens_scores[idx] = sum(accs) / len(accs)
# Scores are already proper size, see L314. Now,
# fix scores for soft copy candidates
scores[-len(tokens_scores) :] = tokens_scores
return {
"answers": answers,
"answers_indices": indices,
"answers_scores": scores,
}
@registry.register_processor("simple_word")
class SimpleWordProcessor(BaseProcessor):
"""Tokenizes a word and processes it.
Attributes:
tokenizer (function): Type of tokenizer to be used.
"""
def __init__(self, *args, **kwargs):
from mmf.utils.text import word_tokenize
self.tokenizer = word_tokenize
def __call__(self, item, *args, **kwargs):
return {"text": self.tokenizer(item["text"], *args, **kwargs)}
@registry.register_processor("simple_sentence")
class SimpleSentenceProcessor(BaseProcessor):
"""Tokenizes a sentence and processes it.
Attributes:
tokenizer (function): Type of tokenizer to be used.
"""
def __init__(self, *args, **kwargs):
from mmf.utils.text import tokenize
self.tokenizer = tokenize
def __call__(self, item, *args, **kwargs):
return {"text": self.tokenizer(item["text"], *args, **kwargs)}
@registry.register_processor("bbox")
class BBoxProcessor(VocabProcessor):
"""Generates bboxes in proper format.
    Takes in a dict which contains an "info" key, which is a list of dicts
    containing the following for each bounding box:
Example bbox input::
{
"info": [
{
"bounding_box": {
"top_left_x": 100,
"top_left_y": 100,
"width": 200,
"height": 300
}
},
...
]
}
    This will further return a Sample in a dict with key "bbox", whose last
    dimension is 4, corresponding to "xyxy". The sample will look like the following:
Example Sample::
Sample({
"coordinates": torch.Size(n, 4),
"width": List[number], # size n
"height": List[number], # size n
"bbox_types": List[str] # size n, either xyxy or xywh.
# currently only supports xyxy.
})
"""
def __init__(self, config, *args, **kwargs):
from mmf.utils.dataset import build_bbox_tensors
self.lambda_fn = build_bbox_tensors
self._init_extras(config)
def __call__(self, item):
info = item["info"]
if self.preprocessor is not None:
info = self.preprocessor(info)
return {"bbox": self.lambda_fn(info, self.max_length)}
@registry.register_processor("caption")
class CaptionProcessor(BaseProcessor):
"""Processes a caption with start, end and pad tokens and returns raw string.
Args:
config (DictConfig): Configuration for caption processor.
"""
def __init__(self, config, *args, **kwargs):
if not hasattr(config, "vocab"):
raise AttributeError(
"config passed to the processor has no " "attribute vocab"
)
self.vocab = Vocab(*args, **config.vocab, **kwargs)
def __call__(self, item):
for idx, v in enumerate(item):
if v == self.vocab.EOS_INDEX:
item = item[:idx]
break
tokens = [
self.vocab.get_itos()[w]
for w in item
if w
not in {self.vocab.SOS_INDEX, self.vocab.EOS_INDEX, self.vocab.PAD_INDEX}
]
caption = " ".join(tokens)
return {"tokens": tokens, "caption": caption}
@registry.register_processor("evalai_answer")
class EvalAIAnswerProcessor(BaseProcessor):
"""Processes an answer similar to Eval AI"""
CONTRACTIONS = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
NUMBER_MAP = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
ARTICLES = ["a", "an", "the"]
PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
PUNCTUATIONS = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def __init__(self, *args, **kwargs):
pass
def word_tokenize(self, word):
word = word.lower()
word = word.replace(",", "").replace("?", "").replace("'s", " 's")
return word.strip()
def process_punctuation(self, in_text):
out_text = in_text
for p in self.PUNCTUATIONS:
if (p + " " in in_text or " " + p in in_text) or (
re.search(self.COMMA_STRIP, in_text) is not None
):
out_text = out_text.replace(p, "")
else:
out_text = out_text.replace(p, " ")
out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
return out_text
def process_digit_article(self, in_text):
out_text = []
temp_text = in_text.lower().split()
for word in temp_text:
word = self.NUMBER_MAP.setdefault(word, word)
if word not in self.ARTICLES:
out_text.append(word)
else:
pass
for word_id, word in enumerate(out_text):
if word in self.CONTRACTIONS:
out_text[word_id] = self.CONTRACTIONS[word]
out_text = " ".join(out_text)
return out_text
def __call__(self, item):
item = self.word_tokenize(item)
item = item.replace("\n", " ").replace("\t", " ").strip()
item = self.process_punctuation(item)
item = self.process_digit_article(item)
return item
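    # Illustrative example (hypothetical input, added for clarity; not in the
    # original source):
    #   >>> processor = EvalAIAnswerProcessor()
    #   >>> processor("Two dogs, a cat!")
    #   '2 dogs cat'
    # i.e. lowercasing, punctuation stripping, number-word mapping ("two" -> "2")
    # and article removal ("a") are applied in sequence.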
@registry.register_processor("phoc")
class PhocProcessor(VocabProcessor):
"""
Compute PHOC features from text tokens
"""
def __init__(self, config, *args, **kwargs):
from mmf.utils.phoc import build_phoc
self._build_phoc = build_phoc
self._init_extras(config)
self.config = config
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
phoc_dim = 604
output = torch.full(
(self.max_length, phoc_dim), fill_value=self.PAD_INDEX, dtype=torch.float
)
for idx, token in enumerate(tokens):
output[idx] = torch.from_numpy(self._build_phoc(token))
return output
@registry.register_processor("copy")
class CopyProcessor(BaseProcessor):
"""
Copy boxes from numpy array
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
def __call__(self, item):
blob = item["blob"]
final_blob = np.zeros((self.max_length,) + blob.shape[1:], blob.dtype)
final_blob[: len(blob)] = blob[: len(final_blob)]
return {"blob": torch.from_numpy(final_blob)}
@registry.register_processor("m4c_answer")
class M4CAnswerProcessor(BaseProcessor):
"""
Process a TextVQA answer for iterative decoding in M4C
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs)
self.PAD_IDX = self.answer_vocab.word2idx("<pad>")
self.BOS_IDX = self.answer_vocab.word2idx("<s>")
self.EOS_IDX = self.answer_vocab.word2idx("</s>")
self.UNK_IDX = self.answer_vocab.UNK_INDEX
        # make sure PAD_IDX, BOS_IDX and EOS_IDX are valid (not <unk>)
assert self.PAD_IDX != self.answer_vocab.UNK_INDEX
assert self.BOS_IDX != self.answer_vocab.UNK_INDEX
assert self.EOS_IDX != self.answer_vocab.UNK_INDEX
assert self.PAD_IDX == 0
self.answer_preprocessor = Processor(config.preprocessor)
assert self.answer_preprocessor is not None
self.num_answers = config.num_answers
self.max_length = config.max_length
self.max_copy_steps = config.max_copy_steps
assert self.max_copy_steps >= 1
self.match_answer_to_unk = False
def tokenize(self, sentence):
return sentence.split()
def match_answer_to_vocab_ocr_seq(
self, answer, vocab2idx_dict, ocr2inds_dict, max_match_num=20
):
"""
        Match an answer to a list of sequences of indices.
        Each index corresponds to either a fixed vocabulary entry or an OCR token
        (in the index address space, the OCR tokens come after the fixed vocab).
"""
num_vocab = len(vocab2idx_dict)
answer_words = self.tokenize(answer)
answer_word_matches = []
for word in answer_words:
# match answer word to fixed vocabulary
matched_inds = []
if word in vocab2idx_dict:
matched_inds.append(vocab2idx_dict.get(word))
# match answer word to OCR
# we put OCR after the fixed vocabulary in the answer index space
# so add num_vocab offset to the OCR index
matched_inds.extend([num_vocab + idx for idx in ocr2inds_dict[word]])
if len(matched_inds) == 0:
if self.match_answer_to_unk:
matched_inds.append(vocab2idx_dict.get("<unk>"))
else:
return []
answer_word_matches.append(matched_inds)
# expand per-word matched indices into the list of matched sequences
if len(answer_word_matches) == 0:
return []
idx_seq_list = [()]
for matched_inds in answer_word_matches:
idx_seq_list = [
seq + (idx,) for seq in idx_seq_list for idx in matched_inds
]
if len(idx_seq_list) > max_match_num:
idx_seq_list = idx_seq_list[:max_match_num]
return idx_seq_list
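    # Illustrative example (hypothetical indices, added for clarity; not in the
    # original source): suppose the fixed vocab has "red" at index 5 (but not
    # "apple"), num_vocab = 100, and the OCR tokens are ["apple", "red", "apple"],
    # so ocr2inds_dict = {"apple": [0, 2], "red": [1]}. For the answer
    # "red apple", the per-word matches are [5, 101] for "red" and [100, 102] for
    # "apple", giving the sequences (5, 100), (5, 102), (101, 100), (101, 102).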
def get_vocab_size(self):
answer_vocab_nums = self.answer_vocab.num_vocab
answer_vocab_nums += self.max_length
return answer_vocab_nums
def get_true_vocab_size(self):
return self.answer_vocab.num_vocab
def compute_answer_scores(self, answers):
gt_answers = list(enumerate(answers))
unique_answers = sorted(set(answers))
unique_answer_scores = [0] * len(unique_answers)
for idx, unique_answer in enumerate(unique_answers):
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [
item for item in other_answers if item[1] == unique_answer
]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
unique_answer_scores[idx] = sum(accs) / len(accs)
unique_answer2score = {
a: s for a, s in zip(unique_answers, unique_answer_scores)
}
return unique_answer2score
def __call__(self, item):
answers = item["answers"]
if not answers:
return {
"sampled_idx_seq": None,
"train_prev_inds": torch.zeros(self.max_copy_steps, dtype=torch.long),
}
answers = [self.answer_preprocessor({"text": a})["text"] for a in answers]
assert len(answers) == self.num_answers
# Step 1: calculate the soft score of ground-truth answers
unique_answer2score = self.compute_answer_scores(answers)
# Step 2: fill the first step soft scores for tokens
scores = torch.zeros(
self.max_copy_steps, self.get_vocab_size(), dtype=torch.float
)
# match answers to fixed vocabularies and OCR tokens.
ocr2inds_dict = defaultdict(list)
for idx, token in enumerate(item["tokens"]):
ocr2inds_dict[token].append(idx)
answer_dec_inds = [
self.match_answer_to_vocab_ocr_seq(
a, self.answer_vocab.word2idx_dict, ocr2inds_dict
)
for a in answers
]
# Collect all the valid decoding sequences for each answer.
# This part (idx_seq_list) was pre-computed in imdb (instead of online)
# to save time
all_idx_seq_list = []
for answer, idx_seq_list in zip(answers, answer_dec_inds):
all_idx_seq_list.extend(idx_seq_list)
# fill in the soft score for the first decoding step
score = unique_answer2score[answer]
for idx_seq in idx_seq_list:
score_idx = idx_seq[0]
# the scores for the decoding Step 0 will be the maximum
# among all answers starting with that vocab
# for example:
# if "red apple" has score 0.7 and "red flag" has score 0.8
# the score for "red" at Step 0 will be max(0.7, 0.8) = 0.8
scores[0, score_idx] = max(scores[0, score_idx], score)
# train_prev_inds is the previous prediction indices in auto-regressive
# decoding
train_prev_inds = torch.zeros(self.max_copy_steps, dtype=torch.long)
# train_loss_mask records the decoding steps where losses are applied
train_loss_mask = torch.zeros(self.max_copy_steps, dtype=torch.float)
if len(all_idx_seq_list) > 0:
# sample a random decoding answer sequence for teacher-forcing
idx_seq = all_idx_seq_list[np.random.choice(len(all_idx_seq_list))]
dec_step_num = min(1 + len(idx_seq), self.max_copy_steps)
train_loss_mask[:dec_step_num] = 1.0
train_prev_inds[0] = self.BOS_IDX
for t in range(1, dec_step_num):
train_prev_inds[t] = idx_seq[t - 1]
score_idx = idx_seq[t] if t < len(idx_seq) else self.EOS_IDX
scores[t, score_idx] = 1.0
else:
idx_seq = ()
answer_info = {
"answers": answers,
"answers_scores": scores,
"sampled_idx_seq": idx_seq,
"train_prev_inds": train_prev_inds,
"train_loss_mask": train_loss_mask,
}
return answer_info
@registry.register_processor("m4c_caption")
class M4CCaptionProcessor(M4CAnswerProcessor):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
import re
self.SENTENCE_SPLIT_REGEX = re.compile(r"(\W+)")
self.match_answer_to_unk = True
def tokenize(self, sentence):
sentence = sentence.lower()
sentence = (
sentence.replace(",", "")
.replace("?", "")
.replace(".", "")
.replace("'s", " 's")
)
tokens = self.SENTENCE_SPLIT_REGEX.split(sentence)
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
def compute_answer_scores(self, answers):
unique_answer2score = {a: 1.0 for a in answers}
return unique_answer2score
@registry.register_processor("masked_region")
class MaskedRegionProcessor(BaseProcessor):
"""
Masks a region with probability `mask_probability`
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.mask_prob = config.get("mask_probability", 0.15)
self.mask_region_prob = config.get("mask_region_probability", 0.9)
def __call__(self, item):
image_labels = []
for i in range(item.shape[0]):
prob = random.random()
# mask token with 15% probability
if prob < self.mask_prob:
prob /= self.mask_prob
if prob < self.mask_region_prob:
item[i] = 0
image_labels.append(1)
else:
# no masking token (will be ignored by loss function later)
image_labels.append(-1)
return torch.tensor(image_labels, dtype=torch.long)
@registry.register_processor("transformer_bbox")
class TransformerBboxProcessor(BaseProcessor):
"""
    Processes a bounding box and returns an array of normalized bbox positions and area
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.bbox_key = config.get("bbox_key", "bbox")
self.image_width_key = config.get("image_width_key", "image_width")
self.image_height_key = config.get("image_height_key", "image_height")
def __call__(self, item):
bbox = item[self.bbox_key]
image_w = item[self.image_width_key]
image_h = item[self.image_height_key]
image_location = torch.zeros((bbox.shape[0], 5), dtype=torch.float)
image_location[:, :4] = torch.from_numpy(bbox[:, :4])
image_location[:, 4] = (
(image_location[:, 3] - image_location[:, 1])
* (image_location[:, 2] - image_location[:, 0])
/ (image_w * image_h)
)
image_location[:, 0] = image_location[:, 0] / image_w
image_location[:, 1] = image_location[:, 1] / image_h
image_location[:, 2] = image_location[:, 2] / image_w
image_location[:, 3] = image_location[:, 3] / image_h
item["bbox"] = image_location
return item
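    # Illustrative example (hypothetical numbers, added for clarity; not in the
    # original source): for a bbox [10, 20, 110, 220] in a 200 x 400 (w x h)
    # image, the 5th column is the area fraction
    # (220 - 20) * (110 - 10) / (200 * 400) = 0.25, and the normalized
    # coordinates become [0.05, 0.05, 0.55, 0.55].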
@dataclass
class MultiClassFromFileConfig:
# Vocab file containing the strings for the available classes
vocab_file: str
@registry.register_processor("multi_class_from_file")
class MultiClassFromFile(BaseProcessor):
"""Label processor for multi class cases where the labels are
saved in a file.
"""
def __init__(self, config: MultiClassFromFileConfig, *args, **kwargs):
self.label_vocab = VocabDict(config.vocab_file, *args, **kwargs)
def __call__(self, item: Union[Dict[str, Any], str]) -> Dict[str, Any]:
if isinstance(item, collections.abc.Mapping):
label = item["label"]
else:
label = item
# Remove UNK by subtracting 1 from output
# UNK will always be at 0 even if it is not in vocab as it is automatically
# always added by vocab dict
class_index = self.label_vocab.word2idx(label) - 1
assert class_index != -1, f"{label} is not present in vocab file"
return {"class_index": torch.tensor(class_index, dtype=torch.long)}
@registry.register_processor("detr_image_and_target")
class DETRImageAndTargetProcessor(BaseProcessor):
"""Process a detection image and target in consistent with DETR. At training time,
random crop is done. At test time, an image is deterministically resized with short
side equal to `image_size` (while ensuring its long side no larger than `max_size`)
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
from mmf.datasets.processors import detection_transforms as T
train_image_sizes = list(config.train_image_sizes)
self.training_transform = T.Compose(
[
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(train_image_sizes, max_size=config.max_size),
T.Compose(
[
T.RandomResize(list(config.train_resize_random_sizes)),
T.RandomSizeCrop(*config.train_crop_size),
T.RandomResize(train_image_sizes, max_size=config.max_size),
]
),
),
T.ToTensor(),
T.Normalize(IMAGE_COLOR_MEAN, IMAGE_COLOR_STD),
]
)
self.inference_transform = T.Compose(
[
T.RandomResize([config.test_image_size], max_size=config.max_size),
T.ToTensor(),
T.Normalize(IMAGE_COLOR_MEAN, IMAGE_COLOR_STD),
]
)
def __call__(self, item):
dataset_type = item["dataset_type"]
img = item["img"]
target = item["target"]
if dataset_type == "train":
img, target = self.training_transform(img, target)
elif dataset_type == "val" or dataset_type == "test":
img, target = self.inference_transform(img, target)
else:
raise Exception(f"unknown dataset_type: {dataset_type}")
return {"img": img, "target": target}
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/datasets/transforms.py
import random
from typing import List, Optional, Union
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from mmf.common.registry import registry
from mmf.datasets.processors.processors import BaseProcessor
from mmf.utils.box_ops import box_xyxy_to_cxcywh
from torch import Tensor
def crop(image: Tensor, target: dict, region: List[int]):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "attributes" in target:
fields.append("attributes")
# remove elements for which the boxes have zero area
if "boxes" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
cropped_boxes = target["boxes"].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image: Tensor, target: dict):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
[-1, 1, -1, 1]
) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
return flipped_image, target
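# Illustrative example (hypothetical numbers, added for clarity; not in the
# original source): for an xyxy box [10, 20, 50, 60] in an image of width 100,
# hflip maps it to [100 - 50, 20, 100 - 10, 60] = [50, 20, 90, 60], i.e. the
# x-coordinates are mirrored and swapped so that x1 <= x2 still holds.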
def get_size_with_aspect_ratio(
image_size: List[int], size: int, max_size: Optional[int] = None
):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
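# Illustrative example (hypothetical numbers, added for clarity; not in the
# original source): get_size_with_aspect_ratio((640, 480), size=800,
# max_size=1333) returns (800, 1066): the shorter side (h = 480) is scaled to
# 800 and the longer side is scaled by the same ratio. For a very wide image
# such as (1280, 480), the requested size is first capped so that the longer
# side does not exceed max_size, giving (500, 1333).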
def get_size(
image_size: List[int], size: Union[int, List[int]], max_size: Optional[int] = None
):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
def resize(
image: Tensor,
target: dict,
size: Union[int, List[int]],
max_size: Optional[int] = None,
):
# size can be min_size (scalar) or (w, h) tuple
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(
float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)
)
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height]
)
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
return rescaled_image, target
def pad(image: Tensor, target: dict, padding: List[int]):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image[::-1])
return padded_image, target
@registry.register_processor("detection_random_size_crop")
class RandomSizeCrop(BaseProcessor):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: Tensor, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, target, region)
@registry.register_processor("detection_random_horizontal_flip")
class RandomHorizontalFlip(BaseProcessor):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img: Tensor, target: dict):
if random.random() < self.p:
return hflip(img, target)
return img, target
@registry.register_processor("detection_random_resize")
class RandomResize(BaseProcessor):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img: Tensor, target: Optional[dict] = None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
@registry.register_processor("detection_random_select")
class RandomSelect(BaseProcessor):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img: Tensor, target: dict):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
@registry.register_processor("detection_to_tensor")
class ToTensor(BaseProcessor):
def __init__(self):
pass
def __call__(self, img: Tensor, target: dict):
return F.to_tensor(img), target
@registry.register_processor("detection_normalize")
class Normalize(BaseProcessor):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image: Tensor, target: Optional[dict] = None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
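    # Illustrative example (hypothetical numbers, added for clarity; not in the
    # original source, and assuming box_xyxy_to_cxcywh does the standard
    # conversion): an xyxy box [10, 20, 50, 60] in a 100 x 200 (w x h) image
    # becomes cxcywh [30, 40, 40, 40] and, after dividing by [w, h, w, h],
    # [0.3, 0.2, 0.4, 0.2].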
@registry.register_processor("detection_compose")
class Compose(BaseProcessor):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image: Tensor, target: dict):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/detection_transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import math
import random
import warnings
import torch
from mmf.common.constants import INCEPTION_IMAGE_NORMALIZE
from mmf.common.registry import registry
from mmf.datasets.processors.processors import BaseProcessor
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
@registry.register_processor("torchvision_transforms")
class TorchvisionTransforms(BaseProcessor):
def __init__(self, config, *args, **kwargs):
transform_params = config.transforms
assert OmegaConf.is_dict(transform_params) or OmegaConf.is_list(
transform_params
)
if OmegaConf.is_dict(transform_params):
transform_params = [transform_params]
transforms_list = []
for param in transform_params:
if OmegaConf.is_dict(param):
# This will throw config error if missing
transform_type = param.type
transform_param = param.get("params", OmegaConf.create({}))
else:
assert isinstance(param, str), (
"Each transform should either be str or dict containing "
+ "type and params"
)
transform_type = param
transform_param = OmegaConf.create([])
transform = getattr(transforms, transform_type, None)
if transform is None:
from mmf.utils.env import setup_torchaudio
setup_torchaudio()
from torchaudio import transforms as torchaudio_transforms
transform = getattr(torchaudio_transforms, transform_type, None)
            # If torchvision or torchaudio doesn't contain this, check our registry
# if we implemented a custom transform as processor
if transform is None:
transform = registry.get_processor_class(transform_type)
assert transform is not None, (
f"transform {transform_type} is not present in torchvision, "
+ "torchaudio or processor registry"
)
# https://github.com/omry/omegaconf/issues/248
transform_param = OmegaConf.to_container(transform_param)
# If a dict, it will be passed as **kwargs, else a list is *args
if isinstance(transform_param, collections.abc.Mapping):
transform_object = transform(**transform_param)
else:
transform_object = transform(*transform_param)
transforms_list.append(transform_object)
self.transform = transforms.Compose(transforms_list)
def __call__(self, x):
# Support both dict and normal mode
if isinstance(x, collections.abc.Mapping):
x = x["image"]
return {"image": self.transform(x)}
else:
return self.transform(x)
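    # A minimal config sketch (hypothetical values, added for clarity; not in the
    # original source) showing both the dict and plain-string forms this
    # processor accepts:
    #
    #   image_processor:
    #     type: torchvision_transforms
    #     params:
    #       transforms:
    #         - type: Resize
    #           params: [256]
    #         - RandomHorizontalFlip   # plain string, no params
    #         - type: ToTensor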
@registry.register_processor("GrayScaleTo3Channels")
class GrayScaleTo3Channels(BaseProcessor):
def __init__(self, *args, **kwargs):
return
def __call__(self, x):
if isinstance(x, collections.abc.Mapping):
x = x["image"]
return {"image": self.transform(x)}
else:
return self.transform(x)
def transform(self, x):
assert isinstance(x, torch.Tensor)
# Handle grayscale, tile 3 times
if x.size(0) == 1:
x = torch.cat([x] * 3, dim=0)
return x
@registry.register_processor("ResizeShortest")
class ResizeShortest(BaseProcessor):
def __init__(self, *args, **kwargs):
min_size = kwargs["min_size"]
max_size = kwargs["max_size"]
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(math.floor(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image):
size = self.get_size(image.size)
image = transforms.functional.resize(image, size)
return image
@registry.register_processor("NormalizeBGR255")
class NormalizeBGR255(BaseProcessor):
def __init__(self, *args, **kwargs):
self.mean = kwargs["mean"]
self.std = kwargs["std"]
self.to_bgr255 = kwargs["to_bgr255"]
self.pad_size = kwargs["pad_size"]
if self.pad_size > 0:
warnings.warn(
f"You are setting pad_size > 0, tensor will be padded to a fix size of"
f"{self.pad_size}. "
f"The image_mask will cover the pad_size of {self.pad_size} instead of"
"the original size."
)
def __call__(self, image):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = transforms.functional.normalize(image, mean=self.mean, std=self.std)
if self.pad_size > 0:
assert (
self.pad_size >= image.shape[1] and self.pad_size >= image.shape[2]
), f"image size: {image.shape}"
padded_image = image.new_zeros(3, self.pad_size, self.pad_size)
padded_image[:, : image.shape[1], : image.shape[2]] = image.clone()
return padded_image
return image
@registry.register_processor("vilt_image_processor")
class VILTImageProcessor(BaseProcessor):
def __init__(self, config, *args, **kwargs):
image_size = config.get("size", [224, 224])
transforms_list = []
transforms_list.append(Resize(image_size))
transforms_list.append(ToTensor())
transforms_list.append(GrayScaleTo3Channels())
transforms_list.append(
Normalize(INCEPTION_IMAGE_NORMALIZE, INCEPTION_IMAGE_NORMALIZE)
)
self.transform = Compose(transforms_list)
def __call__(self, x):
# Support both dict and normal mode
if isinstance(x, collections.abc.Mapping):
x = x["image"]
return {"image": self.transform(x)}
else:
return self.transform(x)
| EXA-1-master | exa/models/mmf-main/mmf/datasets/processors/image_processors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import importlib
import logging
import sys
from mmf.common.registry import registry
from packaging import version
logger = logging.getLogger(__name__)
ORIGINAL_PATCH_FUNCTIONS_KEY = "original_patch_functions"
registry.register(ORIGINAL_PATCH_FUNCTIONS_KEY, {})
def patch_transformers(log_incompatible=False):
"""
Patches transformers version > 4.x to work with code that
was written for version < 4.x. Specifically, before you
could do something like `from transformers.modeling_bert import x`
but this was moved to
`from transformers.models.bert.modeling_bert import x`
in newer versions. This functions fixes this discrepancy by adding
these modules back to path.
Another thing this function fixes is the conflict with local
datasets folder vs huggingface datasets library in loading
of transformers > 4.x version. To achieve this we modify sys.path
to look for local folder at the last in path resolver. This is
reverted back to original behavior at the end of the function.
"""
try:
import transformers3 as transformers
except ImportError:
import transformers
    # pl uses importlib to find the transformers spec and throws if it is None;
    # this prevents mmf/__init__() from raising a ValueError
if transformers.__spec__ is None:
transformers.__spec__ = "MISSING"
if version.parse(transformers.__version__) < version.parse("4.0.0"):
return
if not hasattr(transformers, "models"):
return
logger.info(f"Patching transformers version: {transformers.__version__}")
sys.path = sys.path[1:] + [sys.path[0]]
for key in dir(transformers.models):
if key.startswith("__"):
continue
model_lib = importlib.import_module(f"transformers.models.{key}")
if not hasattr(model_lib, "_modules"):
if log_incompatible:
logger.info(
f"transformers' patching: model {key} has no "
+ "_modules attribute. Skipping."
)
continue
for module in model_lib._modules:
if not module or module == "." or module[0] == ".":
continue
sys.modules[f"transformers.{module}"] = importlib.import_module(
f"transformers.models.{key}.{module}"
)
sys.path = [sys.path[-1]] + sys.path[:-1]
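# Illustrative note (added for clarity, not in the original source): after
# patch_transformers() runs on transformers >= 4.x, a legacy import such as
#   from transformers.modeling_bert import BertSelfAttention
# resolves through the alias registered in sys.modules, i.e.
# "transformers.modeling_bert" -> transformers.models.bert.modeling_bert.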
def safecopy_modules(module_function_names, caller_modules):
"""
Saves a reference to each module.function in list of strings module_function_names.
References are made from dict caller_modules, from module name str to
caller module obj.
module.functions can be reassigned, replacing the current functions using
restore_saved_modules(caller_modules)
Example:
from transformers.modeling_bert import BertSelfAttention
caller_modules = {'BertSelfAttention': BertSelfAttention}
original_forward = BertSelfAttention.forward
safecopy_modules(['BertSelfAttention.forward'], caller_modules)
BertSelfAttention.forward = None
restore_saved_modules(caller_modules)
assert( original_forward is BertSelfAttention.forward )
"""
original_functions = registry.get(ORIGINAL_PATCH_FUNCTIONS_KEY)
for module_function_name in module_function_names:
module_name, function_name = module_function_name.split(".")
module = caller_modules[module_name]
function = getattr(module, function_name)
# store function is nothing is stored,
# prevents multiple calls from overwriting original function
original_functions[module_function_name] = original_functions.get(
module_function_name, function
)
def restore_saved_modules(caller_globals):
"""
Restore function for safecopy_modules()
Reassigns current dictionary of 'module.function': function
saved by safecopy_modules to callers modules.
Assumes caller_globals is a dict from module name str to caller module obj.
Example:
restore_saved_modules({'BertSelfAttention': BertSelfAttention})
"""
original_functions = registry.get(ORIGINAL_PATCH_FUNCTIONS_KEY)
for module_function_name, function in original_functions.items():
module_name, function_name = module_function_name.split(".")
if module_name in caller_globals:
setattr(caller_globals[module_name], function_name, function)
registry.register(ORIGINAL_PATCH_FUNCTIONS_KEY, {})
| EXA-1-master | exa/models/mmf-main/mmf/utils/patch.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
import mmf
import pytorch_lightning as pl
import torch
from mmf.common.meter import Meter
from mmf.common.registry import registry
from mmf.datasets.iteration_strategies import (
ConstantIterationStrategy,
IterationStrategy,
SizeProportionalIterationStrategy,
)
from mmf.datasets.processors.processors import Processor
from mmf.utils.configuration import Configuration, get_global_config
from mmf.utils.distributed import is_dist_initialized, is_main, is_xla, synchronize
from mmf.utils.general import get_optimizer_parameters
from omegaconf import DictConfig, OmegaConf
from packaging import version
try:
import torch_xla.core.xla_model as xm # noqa
import torch_xla.distributed.parallel_loader as xla_pl # noqa
except ImportError:
xm = None
ProcessorDict = Dict[str, Processor]
logger = logging.getLogger(__name__)
def build_config(configuration: Configuration, *args, **kwargs) -> DictConfig:
"""Builder function for config. Freezes the configuration and registers
configuration object and config DictConfig object to registry.
Args:
configuration (Configuration): Configuration object that will be
used to create the config.
Returns:
(DictConfig): A config which is of type omegaconf.DictConfig
"""
configuration.freeze()
config = configuration.get_config()
registry.register("config", config)
registry.register("configuration", configuration)
return config
def build_trainer(config: DictConfig) -> Any:
"""Builder function for creating a trainer class. Trainer class name
is picked from the config.
Args:
config (DictConfig): Configuration that will be used to create
the trainer.
Returns:
(BaseTrainer): A trainer instance
"""
trainer_type = config.training.trainer
trainer_cls = registry.get_trainer_class(trainer_type)
trainer_obj = trainer_cls(config)
return trainer_obj
def build_lightning_model(
config: Union[DictConfig, "mmf.models.base_model.BaseModel.Config"],
checkpoint_path: str = None,
) -> "mmf.models.base_model.BaseModel":
from mmf.models.base_model import BaseModel
if not checkpoint_path:
model = build_model(config)
model.is_pl_enabled = True
return model
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, BaseModel.Config):
config = OmegaConf.structured(config)
model_name = config.model
model_class = registry.get_model_class(model_name)
if model_class is None:
raise RuntimeError(f"No model registered for name: {model_name}")
""" model.build is called inside on_load_checkpoint as suggested here:
https://github.com/PyTorchLightning/pytorch-lightning/issues/5410
"""
if is_main():
model_class.load_requirements(model_class, config=config)
model = model_class.load_from_checkpoint(
checkpoint_path, config=config, strict=False
)
synchronize()
else:
synchronize()
model = model_class.load_from_checkpoint(
checkpoint_path, config=config, strict=False
)
model.init_losses()
model.is_pl_enabled = True
return model
def build_model(
config: Union[DictConfig, "mmf.models.base_model.BaseModel.Config"],
) -> "mmf.models.base_model.BaseModel":
from mmf.models.base_model import BaseModel
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, BaseModel.Config):
config = OmegaConf.structured(config)
model_name = config.model
model_class = registry.get_model_class(model_name)
if model_class is None:
raise RuntimeError(f"No model registered for name: {model_name}")
model = model_class(config)
if hasattr(model, "build"):
"""Model build involves checkpoint loading
If the checkpoint is not available the underlying
methods try to download it.
Let master build the model (download the checkpoints) while
other ranks wait for the sync message
Once the master has downloaded the checkpoint and built the
model it sends the sync message, completing the synchronization
now other cores can proceed to build the model
using already downloaded checkpoint.
"""
if is_main():
model_class.load_requirements(model_class, config=config)
model.build()
synchronize()
else:
synchronize()
model.build()
model.init_losses()
return model
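# A minimal usage sketch (hypothetical model name and fields, added for clarity;
# not in the original source): build_model expects a config carrying a `model`
# key that matches a registered model class, e.g.
#   cfg = OmegaConf.create({"model": "my_model", "hidden_size": 512})
#   model = build_model(cfg)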
def build_dataset(
dataset_key: str, config=None, dataset_type="train"
) -> torch.utils.data.Dataset:
"""Builder function for creating a dataset. If dataset_key is passed
the dataset is created from default config of the dataset and thus is
disable config even if it is passed. Otherwise, we use MultiDatasetLoader to
build and return an instance of dataset based on the config
Args:
dataset_key (str): Key of dataset to build.
config (DictConfig, optional): Configuration that will be used to create
the dataset. If not passed, dataset's default config will be used.
Defaults to {}.
dataset_type (str, optional): Type of the dataset to build, train|val|test.
Defaults to "train".
Returns:
(torch.utils.data.Dataset): A dataset instance of type torch Dataset
"""
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from mmf.utils.configuration import load_yaml_with_defaults
datamodule_instance = build_datamodule(dataset_key)
# If config is not provided, we take it from default one
if not config:
config_path = datamodule_instance.config_path()
if config_path is None:
# If config path wasn't defined, send an empty config path
# but don't force dataset to define a config
warnings.warn(
f"Config path not defined for {dataset_key}, "
+ "continuing with empty config"
)
config = OmegaConf.create()
else:
config = load_yaml_with_defaults(config_path)
config = OmegaConf.select(config, f"dataset_config.{dataset_key}")
if config is None:
config = OmegaConf.create()
OmegaConf.set_struct(config, True)
elif dataset_key in config:
# Handle Global config
config = config[dataset_key]
datamodule_instance.build_dataset(config)
dataset = datamodule_instance.load_dataset(config, dataset_type)
if hasattr(datamodule_instance, "update_registry_for_model"):
datamodule_instance.update_registry_for_model(config)
return dataset
# TODO: move dataset_type enum to typings
def build_datasets(
dataset_list: List[str], dataset_config: DictConfig, dataset_type="train"
) -> List[torch.utils.data.Dataset]:
datasets = []
for dataset in dataset_list:
if dataset in dataset_config:
dataset_config = dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
dataset_instance = build_dataset(dataset, dataset_config, dataset_type)
if dataset_instance is None:
continue
datasets.append(dataset_instance)
return datasets
def build_datamodule(dataset_key) -> pl.LightningDataModule:
dataset_builder = registry.get_builder_class(dataset_key)
assert dataset_builder, (
f"Key {dataset_key} doesn't have a registered " + "dataset builder"
)
builder_instance: pl.LightningDataModule = dataset_builder()
return builder_instance
def build_multiple_datamodules(
dataset_list: List[str], all_dataset_config: DictConfig
) -> Dict[str, pl.LightningDataModule]:
datamodules: Dict[str, pl.LightningDataModule] = {}
for dataset in dataset_list:
datamodule_instance = build_datamodule(dataset)
if dataset in all_dataset_config:
dataset_config = all_dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
if is_main():
datamodule_instance.prepare_data(dataset_config)
synchronize()
datamodule_instance.setup(config=dataset_config)
if hasattr(datamodule_instance, "update_registry_for_model"):
datamodule_instance.update_registry_for_model(dataset_config)
datamodules[dataset] = datamodule_instance
return datamodules
def build_dataloader_and_sampler(
dataset_instance: torch.utils.data.Dataset, datamodule_config: DictConfig
) -> Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
"""Builds and returns a dataloader along with its sample
Args:
dataset_instance (torch.utils.data.Dataset): Instance of dataset for which
dataloader has to be created
datamodule_config (omegaconf.DictConfig): Datamodule configuration; required
            for inferring params for the dataloader
Returns:
Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
Tuple of Dataloader and Sampler instance
"""
from mmf.common.batch_collator import BatchCollator
training_config = get_global_config("training")
# Support params coming in from dataloader params
other_args = {
"num_workers": datamodule_config.get(
"num_workers", training_config.get("num_workers", 4)
),
"pin_memory": datamodule_config.get(
"pin_memory", training_config.get("pin_memory", False)
),
"shuffle": datamodule_config.get("shuffle", None),
"batch_size": datamodule_config.get("batch_size", None),
}
if version.parse(torch.__version__) >= version.parse("1.8"):
# only use persistent workers in PyTorch 1.8 or higher
# (PyTorch 1.7 also has this option but doesn't support it correctly due to
# https://github.com/pytorch/pytorch/issues/48370)
other_args["persistent_workers"] = (
datamodule_config.get(
"persistent_workers", training_config.get("persistent_workers", True)
),
)
if other_args["persistent_workers"] and other_args["num_workers"] == 0:
logger.warning(
"persistent_workers cannot be used together with num_workers == 0; "
"setting persistent_workers to False"
)
other_args["persistent_workers"] = False
# IterableDataset returns batches directly, so no need to add Sampler
# or batch size as user is expected to control those. This is a fine
# assumption for now to not support single item based IterableDataset
# as it will add unnecessary complexity and config parameters
# to the codebase
if not isinstance(dataset_instance, torch.utils.data.IterableDataset):
other_args = _add_extra_args_for_dataloader(dataset_instance, other_args)
else:
other_args.pop("shuffle")
# Set drop_last=True when using XLA to have constant batch size.
# In this case we also need to set drop_last=True in DistributedSampler.
loader = torch.utils.data.DataLoader(
dataset=dataset_instance,
collate_fn=BatchCollator(
dataset_instance.dataset_name, dataset_instance.dataset_type
),
drop_last=is_xla(), # see also MultiDatasetLoader.__len__
**other_args,
)
if is_xla():
device = xm.xla_device()
loader = xla_pl.MpDeviceLoader(loader, device)
if other_args["num_workers"] >= 0:
# Suppress leaking semaphore warning
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
loader.dataset_type = dataset_instance.dataset_type
return loader, other_args.get("sampler", None)
def build_test_reporter(
datamodules: List[pl.LightningDataModule],
config: DictConfig = None,
dataset_type: str = "train",
):
test_reporter_key = "default"
if config:
test_reporter_key = config.get("type", "default")
test_reporter_class = registry.get_test_rerporter_class(test_reporter_key)
assert (
test_reporter_class
), f"Key {test_reporter_key} doesn't have a registered test_reporter class"
if not config:
warnings.warn(
f"Config not provided for {test_reporter_key}, test_reporter"
+ "continuing with empty config"
)
params_config = OmegaConf.create()
else:
params_config = config.params
return test_reporter_class(datamodules, params_config, dataset_type)
def _add_extra_args_for_dataloader(
dataset_instance: torch.utils.data.Dataset, other_args: Dict[str, Any] = None
) -> Dict[str, Any]:
from mmf.utils.general import get_batch_size
dataset_type = dataset_instance.dataset_type
if other_args["shuffle"] is None:
other_args["shuffle"] = False
if dataset_type != "test":
other_args["shuffle"] = True
# In distributed mode, we use DistributedSampler from PyTorch
if is_dist_initialized():
other_args["sampler"] = torch.utils.data.DistributedSampler(
dataset_instance, shuffle=other_args["shuffle"]
)
# Shuffle is mutually exclusive with sampler, let DistributedSampler
# take care of shuffle and pop from main args
other_args.pop("shuffle")
if is_xla():
other_args["sampler"] = torch.utils.data.DistributedSampler(
dataset_instance,
num_replicas=xm.xrt_world_size(),
rank=xm.get_ordinal(),
shuffle=other_args["shuffle"],
drop_last=True,
)
other_args.pop("shuffle")
if other_args["batch_size"] is None:
other_args["batch_size"] = get_batch_size()
return other_args
def build_optimizer(model, config):
optimizer_config = config.optimizer
if "type" not in optimizer_config:
raise ValueError(
"Optimizer attributes must have a 'type' key "
"specifying the type of optimizer. "
"(Custom or PyTorch, e.g. 'adam_w' or 'SGD')"
)
optimizer_type = optimizer_config.type
if "params" not in optimizer_config:
warnings.warn("optimizer attributes has no params defined, defaulting to {}.")
params = optimizer_config.get("params", {})
if hasattr(torch.optim, optimizer_type):
optimizer_class = getattr(torch.optim, optimizer_type)
else:
optimizer_class = registry.get_optimizer_class(optimizer_type)
if optimizer_class is None:
raise ValueError(
"No optimizer class of type {} present in "
"either torch or registered to registry"
)
parameters = get_optimizer_parameters(model, config)
if optimizer_config.get("enable_state_sharding", False):
# TODO(vedanuj): Remove once OSS is moved to PT upstream
try:
from fairscale.optim.oss import OSS
except ImportError:
print(
"Optimizer state sharding requires fairscale. "
+ "Install using pip install fairscale."
)
raise
assert (
is_dist_initialized()
), "Optimizer state sharding can only be used in distributed mode."
is_fp16 = config.get("training", {}).get("fp16", False)
optimizer = OSS(
params=parameters, optim=optimizer_class, broadcast_fp16=is_fp16, **params
)
else:
optimizer = optimizer_class(parameters, **params)
return optimizer
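# A minimal config sketch (hypothetical values, added for clarity; not in the
# original source) matching what build_optimizer expects:
#   optimizer:
#     type: adam_w
#     params:
#       lr: 5.0e-5
#       eps: 1.0e-8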
def build_lightning_optimizers(model, config):
optimizer = build_optimizer(model, config)
if config.training.lr_scheduler:
lr_scheduler = build_scheduler(optimizer, config)
return {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": lr_scheduler, "interval": "step"},
}
else:
return optimizer
def build_scheduler(optimizer, config):
scheduler_config = config.get("scheduler", {})
if "type" not in scheduler_config:
warnings.warn(
"No type for scheduler specified even though lr_scheduler is True, "
"setting default to 'Pythia'"
)
scheduler_type = scheduler_config.get("type", "pythia")
if "params" not in scheduler_config:
warnings.warn("scheduler attributes has no params defined, defaulting to {}.")
params = scheduler_config.get("params", {})
scheduler_class = registry.get_scheduler_class(scheduler_type)
scheduler = scheduler_class(optimizer, **params)
return scheduler
def build_classifier_layer(config, *args, **kwargs):
from mmf.modules.layers import ClassifierLayer
classifier = ClassifierLayer(config.type, *args, **config.params, **kwargs)
return classifier.module
def build_text_encoder(config, *args, **kwargs):
"""Deprecated, please do not use"""
try:
from mmf.modules.fb.encoders import TextEncoderFactory
except ImportError:
from mmf.modules.encoders import TextEncoderFactory
text_encoder = TextEncoderFactory(config, *args, **kwargs)
return text_encoder.module
def build_image_encoder(config, direct_features=False, **kwargs):
"""Deprecated, please do not use"""
from mmf.modules.encoders import ImageEncoderFactory, ImageFeatureEncoderFactory
if direct_features:
module = ImageFeatureEncoderFactory(config)
else:
module = ImageEncoderFactory(config)
return module.module
def build_encoder(config: Union[DictConfig, "mmf.modules.encoders.Encoder.Config"]):
from mmf.modules.encoders import Encoder
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, Encoder.Config):
config = OmegaConf.structured(config)
if "type" in config:
# Support config initialization in form of
# encoder:
# type: identity # noqa
# params:
# in_dim: 256
name = config.type
if isinstance(name, Enum):
name = name.value
params = config.get("params", None)
else:
# Structured Config support
name = config.name
params = config
encoder_cls = registry.get_encoder_class(name)
# If params were not passed, try generating them from encoder
# class's default config
if params is None:
params = OmegaConf.structured(getattr(encoder_cls, "Config", {}))
return encoder_cls(params)
def build_processors(
processors_config: DictConfig, registry_key: str = None, *args, **kwargs
) -> ProcessorDict:
"""Given a processor config, builds the processors present and returns back
a dict containing processors mapped to keys as per the config
Args:
processors_config (omegaconf.DictConfig): OmegaConf DictConfig describing
the parameters and type of each processor passed here
registry_key (str, optional): If passed, function would look into registry for
this particular key and return it back. .format with processor_key will
be called on this string. Defaults to None.
Returns:
ProcessorDict: Dictionary containing key to
processor mapping
"""
from mmf.datasets.processors.processors import Processor
processor_dict = {}
for processor_key, processor_params in processors_config.items():
if not processor_params:
continue
processor_instance = None
if registry_key is not None:
full_key = registry_key.format(processor_key)
processor_instance = registry.get(full_key, no_warning=True)
if processor_instance is None:
processor_instance = Processor(processor_params, *args, **kwargs)
# We don't register back here as in case of hub interface, we
# want the processors to be instantiate every time. BaseDataset
# can register at its own end
processor_dict[processor_key] = processor_instance
return processor_dict
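# A minimal usage sketch (hypothetical names, added for clarity; not in the
# original source):
#   processors_config = OmegaConf.create(
#       {"text_processor": {"type": "simple_sentence", "params": {}}}
#   )
#   processors = build_processors(processors_config, "my_dataset_{}")
#   out = processors["text_processor"]({"text": "a red apple"})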
def build_iteration_strategy(
config: DictConfig,
dataloaders: Dict[str, torch.utils.data.DataLoader],
*args,
**kwargs,
) -> IterationStrategy:
if not config.get("enabled", True):
return ConstantIterationStrategy.from_params(dataloaders, *args, **kwargs)
else:
assert (
"type" in config
), "multitasking config must define 'type' attribute if enabled"
# This assumes all dataloaders will have same dataset type
iteration_strategy_class = registry.get_iteration_strategy_class(config.type)
config = config.get("params", {})
# val and test splits won't be affected as test reporter iterates
# over the datasets one by one without using any iteration strategy
return iteration_strategy_class(config, dataloaders, *args, **kwargs)
def build_meters(run_type: str) -> List[Meter]:
train_meter, val_meter, test_meter = None, None, None
if "train" in run_type:
train_meter = Meter()
# val_meter used for validation after training loop
val_meter = Meter()
elif "val" in run_type or "inference" in run_type:
val_meter = Meter()
if "test" in run_type:
test_meter = Meter()
return train_meter, val_meter, test_meter
| EXA-1-master | exa/models/mmf-main/mmf/utils/build.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import json
import logging
import os
import warnings
from ast import literal_eval
from typing import List
import torch
from mmf.common.registry import registry
from mmf.utils.env import import_user_module
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path, get_mmf_root
from omegaconf import DictConfig, errors as OCErrors, OmegaConf
logger = logging.getLogger(__name__)
def load_yaml(f):
# Convert to absolute path for loading includes
abs_f = get_absolute_path(f)
try:
mapping = OmegaConf.load(PathManager.get_local_path(abs_f))
f = abs_f
except FileNotFoundError as e:
# Check if this file might be relative to root?
# TODO: Later test if this can be removed
relative = os.path.abspath(os.path.join(get_mmf_root(), f))
if not PathManager.isfile(relative):
raise e
else:
f = relative
mapping = OmegaConf.load(PathManager.get_local_path(f))
if mapping is None:
mapping = OmegaConf.create()
includes = mapping.get("includes", [])
if not isinstance(includes, collections.abc.Sequence):
raise AttributeError(
"Includes must be a list, {} provided".format(type(includes))
)
include_mapping = OmegaConf.create()
mmf_root_dir = get_mmf_root()
for include in includes:
original_include_path = include
include = os.path.join(mmf_root_dir, include)
# If path doesn't exist relative to MMF root, try relative to current file
if not PathManager.exists(include):
include = os.path.join(os.path.dirname(f), original_include_path)
current_include_mapping = load_yaml(include)
include_mapping = OmegaConf.merge(include_mapping, current_include_mapping)
mapping.pop("includes", None)
mapping = OmegaConf.merge(include_mapping, mapping)
return mapping
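# Illustrative usage sketch (not part of the original file): a config loaded
# with load_yaml may pull other configs in through an ``includes`` list; keys
# in the including file override the included ones on merge. The file name and
# its contents below are assumptions.
#
#   # my_experiment.yaml
#   includes:
#   - configs/defaults.yaml
#   training:
#     batch_size: 32
def _example_load_yaml():
    config = load_yaml("my_experiment.yaml")
    return config.training.batch_size  # 32, overriding the included default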
def get_default_config_path():
directory = os.path.dirname(os.path.abspath(__file__))
configs_dir = os.path.join(directory, "..", "configs")
# Check for fb defaults
fb_defaults = os.path.join(configs_dir, "fb_defaults.yaml")
if PathManager.exists(fb_defaults):
return fb_defaults
else:
return os.path.join(configs_dir, "defaults.yaml")
def load_yaml_with_defaults(f):
default_config = get_default_config_path()
return OmegaConf.merge(load_yaml(default_config), load_yaml(f))
def get_zoo_config(
key, variation="defaults", zoo_config_path=None, zoo_type="datasets"
):
version = None
resources = None
if zoo_config_path is None:
zoo_config_path = os.path.join("configs", "zoo", f"{zoo_type}.yaml")
zoo = load_yaml(zoo_config_path)
# Set struct on zoo so that unidentified access is not allowed
OmegaConf.set_struct(zoo, True)
try:
item = OmegaConf.select(zoo, key)
except Exception:
# Key wasn't present or something else happened, return None, None
return version, resources
if not item:
return version, resources
if variation not in item:
# If variation is not present, then key value should
# be directly returned if "defaults" was selected as the variation
assert (
variation == "defaults"
), f"'{variation}' variation not present in zoo config"
return _get_version_and_resources(item)
elif "resources" in item:
# Case where full key is directly passed
return _get_version_and_resources(item)
else:
return _get_version_and_resources(item[variation])
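# Illustrative usage sketch (not part of the original file): looking up a
# dataset entry in the zoo config. The key "coco" is an assumption; a missing
# key simply yields (None, None).
def _example_get_zoo_config():
    version, resources = get_zoo_config("coco", variation="defaults", zoo_type="datasets")
    return version, resources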
def _get_version_and_resources(item):
assert "version" in item, "'version' key should be present in zoo config {}".format(
item._get_full_key("")
)
assert (
"resources" in item
), "'resources' key should be present in zoo config {}".format(
item._get_full_key("")
)
return item.version, item.resources
def get_global_config(key=None):
config = registry.get("config")
if config is None:
configuration = Configuration()
config = configuration.get_config()
registry.register("config", config)
if key:
config = OmegaConf.select(config, key)
return config
def get_mmf_cache_dir():
config = get_global_config()
cache_dir = config.env.cache_dir
# If cache_dir path exists do not join to mmf root
if not os.path.exists(cache_dir):
cache_dir = os.path.join(get_mmf_root(), cache_dir)
return cache_dir
def get_mmf_env(key=None):
config = get_global_config()
if key:
return OmegaConf.select(config.env, key)
else:
return config.env
def _merge_with_dotlist(
config: DictConfig,
opts: List[str],
skip_missing: bool = False,
log_info: bool = True,
):
# TODO: To remove technical debt, a possible solution is to use
# struct mode to update with dotlist OmegaConf node. Look into this
# in next iteration
# TODO: Simplify this function
if opts is None:
opts = []
if len(opts) == 0:
return config
# Support equal e.g. model=visual_bert for better future hydra support
has_equal = opts[0].find("=") != -1
if has_equal:
opt_values = [opt.split("=", maxsplit=1) for opt in opts]
if not all(len(opt) == 2 for opt in opt_values):
for opt in opt_values:
assert len(opt) == 2, f"{opt} has no value"
else:
assert len(opts) % 2 == 0, "Number of opts should be multiple of 2"
opt_values = zip(opts[0::2], opts[1::2])
for opt, value in opt_values:
if opt == "dataset":
opt = "datasets"
splits = opt.split(".")
current = config
for idx, field in enumerate(splits):
array_index = -1
if field.find("[") != -1 and field.find("]") != -1:
stripped_field = field[: field.find("[")]
array_index = int(field[field.find("[") + 1 : field.find("]")])
else:
stripped_field = field
if stripped_field not in current:
if skip_missing is True:
break
raise AttributeError(
"While updating configuration"
" option {} is missing from"
" configuration at field {}".format(opt, stripped_field)
)
if isinstance(current[stripped_field], collections.abc.Mapping):
current = current[stripped_field]
elif (
isinstance(current[stripped_field], collections.abc.Sequence)
and array_index != -1
):
try:
current_value = current[stripped_field][array_index]
except OCErrors.ConfigIndexError:
if skip_missing:
break
raise
# Case where array element to be updated is last element
if (
not isinstance(
current_value,
(collections.abc.Mapping, collections.abc.Sequence),
)
or idx == len(splits) - 1
):
if log_info:
logger.info(f"Overriding option {opt} to {value}")
current[stripped_field][array_index] = _decode_value(value)
else:
# Otherwise move on down the chain
current = current_value
else:
if idx == len(splits) - 1:
if log_info:
logger.info(f"Overriding option {opt} to {value}")
current[stripped_field] = _decode_value(value)
else:
if skip_missing:
break
raise AttributeError(
"While updating configuration",
"option {} is not present "
"after field {}".format(opt, stripped_field),
)
return config
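# Illustrative usage sketch (not part of the original file): overrides can be
# given as alternating "key value" opts or as "key=value" strings. Keys must
# already exist in the config unless skip_missing=True; the keys below are
# assumptions for demonstration only.
def _example_merge_with_dotlist():
    from omegaconf import OmegaConf

    config = OmegaConf.create({"training": {"batch_size": 16, "device": "cuda"}})
    config = _merge_with_dotlist(config, ["training.batch_size", "32"])
    config = _merge_with_dotlist(config, ["training.device=cpu"])
    return config  # batch_size == 32, device == "cpu"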
def _decode_value(value):
# https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L400
if not isinstance(value, str):
return value
if value == "None":
value = None
try:
value = literal_eval(value)
except ValueError:
pass
except SyntaxError:
pass
return value
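# Illustrative usage sketch (not part of the original file): override values
# are decoded with ast.literal_eval where possible, "None" becomes None, and
# anything else stays a plain string.
def _example_decode_value():
    assert _decode_value("32") == 32
    assert _decode_value("[1, 2]") == [1, 2]
    assert _decode_value("None") is None
    assert _decode_value("visual_bert") == "visual_bert"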
def resolve_cache_dir(env_variable="MMF_CACHE_DIR", default="mmf"):
    # Some of this follows what "transformers" does for its cache resolving
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME",
os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"),
)
)
default_cache_path = os.path.join(torch_cache_home, default)
cache_path = os.getenv(env_variable, default_cache_path)
if not PathManager.exists(cache_path):
try:
PathManager.mkdirs(cache_path)
except PermissionError:
cache_path = os.path.join(get_mmf_root(), ".mmf_cache")
PathManager.mkdirs(cache_path)
return cache_path
def resolve_dir(env_variable, default="data"):
default_dir = os.path.join(resolve_cache_dir(), default)
dir_path = os.getenv(env_variable, default_dir)
if not PathManager.exists(dir_path):
PathManager.mkdirs(dir_path)
return dir_path
class Configuration:
def __init__(self, args=None, default_only=False):
self.config = {}
if not args:
import argparse
args = argparse.Namespace(opts=[])
default_only = True
self.args = args
self._register_resolvers()
self._default_config = self._build_default_config()
        # Initially, silently apply opts so that command-line overrides of the
        # defaults needed for setup are honored
self._default_config = _merge_with_dotlist(
self._default_config, args.opts, skip_missing=True, log_info=False
)
# Register the config and configuration for setup
registry.register("config", self._default_config)
registry.register("configuration", self)
if default_only:
other_configs = {}
else:
other_configs = self._build_other_configs()
self.config = OmegaConf.merge(self._default_config, other_configs)
self.config = _merge_with_dotlist(self.config, args.opts)
self._update_specific(self.config)
self.upgrade(self.config)
# Resolve the config here itself after full creation so that spawned workers
# don't face any issues
self.config = OmegaConf.create(
OmegaConf.to_container(self.config, resolve=True)
)
# Update the registry with final config
registry.register("config", self.config)
def _build_default_config(self):
self.default_config_path = get_default_config_path()
default_config = load_yaml(self.default_config_path)
return default_config
def _build_other_configs(self):
opts_config = self._build_opt_list(self.args.opts)
user_config = self._build_user_config(opts_config)
self._opts_config = opts_config
self._user_config = user_config
self.import_user_dir()
model_config = self._build_model_config(opts_config)
dataset_config = self._build_dataset_config(opts_config)
args_overrides = self._build_demjson_config(self.args.config_override)
other_configs = OmegaConf.merge(
model_config, dataset_config, user_config, args_overrides
)
return other_configs
def _build_opt_list(self, opts):
opts_dot_list = self._convert_to_dot_list(opts)
return OmegaConf.from_dotlist(opts_dot_list)
def _build_user_config(self, opts):
user_config = {}
# Update user_config with opts if passed
self.config_path = opts.config
if self.config_path is not None:
user_config = load_yaml(self.config_path)
return user_config
def import_user_dir(self):
# Try user_dir options in order of MMF configuration hierarchy
# First try the default one, which can be set via environment as well
user_dir = self._default_config.env.user_dir
# Now, check user's config
user_config_user_dir = self._user_config.get("env", {}).get("user_dir", None)
if user_config_user_dir:
user_dir = user_config_user_dir
# Finally, check opts
opts_user_dir = self._opts_config.get("env", {}).get("user_dir", None)
if opts_user_dir:
user_dir = opts_user_dir
if user_dir:
import_user_module(user_dir)
def _build_model_config(self, config):
model = config.model
if model is None:
raise KeyError("Required argument 'model' not passed")
model_cls = registry.get_model_class(model)
if model_cls is None:
warning = f"No model named '{model}' has been registered"
warnings.warn(warning)
return OmegaConf.create()
default_model_config_path = model_cls.config_path()
if default_model_config_path is None:
warning = "Model {}'s class has no default configuration provided".format(
model
)
warnings.warn(warning)
return OmegaConf.create()
return load_yaml(default_model_config_path)
def _build_dataset_config(self, config):
dataset = config.get("dataset", None)
datasets = config.get("datasets", None)
if dataset is None and datasets is None:
raise KeyError("Required argument 'dataset|datasets' not passed")
if datasets is None:
config.datasets = dataset
datasets = dataset.split(",")
else:
datasets = datasets.split(",")
dataset_config = OmegaConf.create()
for dataset in datasets:
builder_cls = registry.get_builder_class(dataset)
if builder_cls is None:
warning = f"No dataset named '{dataset}' has been registered"
warnings.warn(warning)
continue
default_dataset_config_path = builder_cls.config_path()
if default_dataset_config_path is None:
warning = (
f"Dataset {dataset}'s builder class has no default configuration "
+ "provided"
)
warnings.warn(warning)
continue
dataset_config = OmegaConf.merge(
dataset_config, load_yaml(default_dataset_config_path)
)
return dataset_config
def get_config(self):
self._register_resolvers()
return self.config
def _build_demjson_config(self, demjson_string):
if demjson_string is None:
return OmegaConf.create()
try:
import demjson
except ImportError:
logger.warning("demjson is required to use config_override")
raise
demjson_dict = demjson.decode(demjson_string)
return OmegaConf.create(demjson_dict)
def _get_args_config(self, args):
args_dict = vars(args)
return OmegaConf.create(args_dict)
def _register_resolvers(self):
OmegaConf.clear_resolvers()
# Device count resolver
device_count = max(1, torch.cuda.device_count())
OmegaConf.register_new_resolver("device_count", lambda: device_count)
OmegaConf.register_new_resolver("resolve_cache_dir", resolve_cache_dir)
OmegaConf.register_new_resolver("resolve_dir", resolve_dir)
def freeze(self):
OmegaConf.set_struct(self.config, True)
def defrost(self):
OmegaConf.set_struct(self.config, False)
def _convert_to_dot_list(self, opts):
if opts is None:
opts = []
if len(opts) == 0:
return opts
# Support equal e.g. model=visual_bert for better future hydra support
has_equal = opts[0].find("=") != -1
if has_equal:
return opts
return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])]
def pretty_print(self):
if not self.config.training.log_detailed_config:
return
logger.info("===== Training Parameters =====")
logger.info(self._convert_node_to_json(self.config.training))
logger.info("====== Dataset Attributes ======")
datasets = self.config.datasets.split(",")
for dataset in datasets:
if dataset in self.config.dataset_config:
logger.info(f"======== {dataset} =======")
dataset_config = self.config.dataset_config[dataset]
logger.info(self._convert_node_to_json(dataset_config))
else:
logger.warning(f"No dataset named '{dataset}' in config. Skipping")
logger.info("====== Optimizer Attributes ======")
logger.info(self._convert_node_to_json(self.config.optimizer))
if self.config.model not in self.config.model_config:
raise ValueError(f"{self.config.model} not present in model attributes")
logger.info(f"====== Model ({self.config.model}) Attributes ======")
logger.info(
self._convert_node_to_json(self.config.model_config[self.config.model])
)
def _convert_node_to_json(self, node):
container = OmegaConf.to_container(node, resolve=True)
return json.dumps(container, indent=4, sort_keys=True)
def _update_specific(self, config):
# tp = self.config.training
# if args["seed"] is not None or tp['seed'] is not None:
# print(
# "You have chosen to seed the training. This will turn on CUDNN "
# "deterministic setting which can slow down your training "
# "considerably! You may see unexpected behavior when restarting "
# "from checkpoints."
# )
# if args["seed"] == -1:
# self.config["training"]["seed"] = random.randint(1, 1000000)
if (
"learning_rate" in config
and "optimizer" in config
and "params" in config.optimizer
):
lr = config.learning_rate
config.optimizer.params.lr = lr
        # TODO: Correct the following issue:
        # this check runs before the command-line config override takes effect,
        # so it is triggered even after setting training.device = 'xla'.
if not torch.cuda.is_available() and "cuda" in config.training.device:
warnings.warn(
"Device specified is 'cuda' but cuda is not present. "
+ "Switching to CPU version."
)
config.training.device = "cpu"
return config
def upgrade(self, config):
mapping = {
"training.resume_file": "checkpoint.resume_file",
"training.resume": "checkpoint.resume",
"training.resume_best": "checkpoint.resume_best",
"training.load_pretrained": "checkpoint.resume_pretrained",
"training.pretrained_state_mapping": "checkpoint.pretrained_state_mapping",
"training.run_type": "run_type",
}
for old, new in mapping.items():
value = OmegaConf.select(config, old)
if value:
OmegaConf.update(config, new, value)
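# Illustrative usage sketch (not part of the original file): with no args only
# the packaged defaults.yaml is loaded; the training.batch_size key is assumed
# to exist in those defaults.
def _example_configuration():
    configuration = Configuration()
    config = configuration.get_config()
    return config.training.batch_size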
# This is still here due to legacy reasons around
# older checkpoint loading from v0.3
class ConfigNode(collections.OrderedDict):
pass
| EXA-1-master | exa/models/mmf-main/mmf/utils/configuration.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
from collections import defaultdict
import numpy as np
import torch
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.distributed import is_main, synchronize
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from torchtext import vocab
EMBEDDING_NAME_CLASS_MAPPING = {"glove": "GloVe", "fasttext": "FastText"}
logger = logging.getLogger(__name__)
class Vocab:
def __init__(self, *args, **params):
vocab_type = params.get("type", "pretrained")
# Stores final parameters extracted from vocab_params
if vocab_type == "random":
if params["vocab_file"] is None:
raise ValueError("No vocab path passed for vocab")
self.vocab = BaseVocab(*args, **params)
elif vocab_type == "custom":
if params["vocab_file"] is None or params["embedding_file"] is None:
raise ValueError("No vocab path or embedding_file passed for vocab")
self.vocab = CustomVocab(*args, **params)
elif vocab_type == "pretrained":
self.vocab = PretrainedVocab(*args, **params)
elif vocab_type == "intersected":
if params["vocab_file"] is None or params["embedding_name"] is None:
raise ValueError("No vocab path or embedding_name passed for vocab")
self.vocab = IntersectedVocab(*args, **params)
elif vocab_type == "extracted":
if params["base_path"] is None or params["embedding_dim"] is None:
raise ValueError("No base_path or embedding_dim passed for vocab")
self.vocab = ExtractedVocab(*args, **params)
elif vocab_type == "model":
if params["name"] is None or params["model_file"] is None:
raise ValueError("No name or model_file passed for vocab")
if params["name"] == "fasttext":
self.vocab = ModelVocab(*args, **params)
else:
raise ValueError("Unknown vocab type: %s" % vocab_type)
self._dir_representation = dir(self)
def __call__(self, *args, **kwargs):
return self.vocab(*args, **kwargs)
def __getattr__(self, name):
if "_dir_representation" in self.__dict__ and name in self._dir_representation:
return getattr(self, name)
elif "vocab" in self.__dict__ and hasattr(self.vocab, name):
return getattr(self.vocab, name)
else:
type_vocab = "Vocab"
if "vocab" in self.__dict__:
type_vocab = type(self.vocab)
raise AttributeError(f"{type_vocab} vocab type has no attribute {name}.")
class BaseVocab:
PAD_TOKEN = "<pad>"
SOS_TOKEN = "<s>"
EOS_TOKEN = "</s>"
UNK_TOKEN = "<unk>"
PAD_INDEX = 0
SOS_INDEX = 1
EOS_INDEX = 2
UNK_INDEX = 3
def __init__(
self, vocab_file=None, embedding_dim=300, data_dir=None, *args, **kwargs
):
"""Vocab class to be used when you want to train word embeddings from
scratch based on a custom vocab. This will initialize the random
vectors for the vocabulary you pass. Get the vectors using
`get_vectors` function. This will also create random embeddings for
some predefined words like PAD - <pad>, SOS - <s>, EOS - </s>,
UNK - <unk>.
Parameters
----------
vocab_file : str
Path of the vocabulary file containing one word per line
embedding_dim : int
Size of the embedding
"""
self.type = "base"
self.word_dict = {}
self.itos = {}
self.itos[self.PAD_INDEX] = self.PAD_TOKEN
self.itos[self.SOS_INDEX] = self.SOS_TOKEN
self.itos[self.EOS_INDEX] = self.EOS_TOKEN
self.itos[self.UNK_INDEX] = self.UNK_TOKEN
self.word_dict[self.SOS_TOKEN] = self.SOS_INDEX
self.word_dict[self.EOS_TOKEN] = self.EOS_INDEX
self.word_dict[self.PAD_TOKEN] = self.PAD_INDEX
self.word_dict[self.UNK_TOKEN] = self.UNK_INDEX
index = len(self.itos.keys())
self.total_predefined = len(self.itos.keys())
if vocab_file is not None:
if not os.path.isabs(vocab_file) and data_dir is not None:
vocab_file = os.path.join(data_dir, vocab_file)
vocab_file = get_absolute_path(vocab_file)
if not PathManager.exists(vocab_file):
raise RuntimeError("Vocab not found at " + vocab_file)
with PathManager.open(vocab_file, "r") as f:
for line in f:
self.itos[index] = line.strip()
self.word_dict[line.strip()] = index
index += 1
self.word_dict[self.SOS_TOKEN] = self.SOS_INDEX
self.word_dict[self.EOS_TOKEN] = self.EOS_INDEX
self.word_dict[self.PAD_TOKEN] = self.PAD_INDEX
self.word_dict[self.UNK_TOKEN] = self.UNK_INDEX
# Return unk index by default
self.stoi = defaultdict(self.get_unk_index)
self.stoi.update(self.word_dict)
self.vectors = torch.FloatTensor(self.get_size(), embedding_dim)
def get_itos(self):
return self.itos
def get_stoi(self):
return self.stoi
def get_size(self):
return len(self.itos)
def get_pad_index(self):
return self.PAD_INDEX
def get_pad_token(self):
return self.PAD_TOKEN
def get_start_index(self):
return self.SOS_INDEX
def get_start_token(self):
return self.SOS_TOKEN
def get_end_index(self):
return self.EOS_INDEX
def get_end_token(self):
return self.EOS_TOKEN
def get_unk_index(self):
return self.UNK_INDEX
def get_unk_token(self):
return self.UNK_TOKEN
def get_vectors(self):
return getattr(self, "vectors", None)
def get_embedding(self, cls, **embedding_kwargs):
vector_dim = len(self.vectors[0])
embedding_kwargs["vocab_size"] = self.get_size()
embedding_dim = embedding_kwargs["embedding_dim"]
embedding_kwargs["embedding_dim"] = vector_dim
embedding = None
if cls == torch.nn.Embedding:
embedding = torch.nn.Embedding(self.get_size(), vector_dim)
else:
embedding = cls(**embedding_kwargs)
if hasattr(embedding, "embedding"):
embedding.embedding = torch.nn.Embedding.from_pretrained(
self.vectors, freeze=False
)
else:
embedding = torch.nn.Embedding.from_pretrained(self.vectors, freeze=False)
if vector_dim == embedding_dim:
return embedding
else:
            return torch.nn.Sequential(
                # nn.Sequential takes modules as positional arguments, not a list
                embedding,
                torch.nn.Linear(vector_dim, embedding_dim),
            )
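# Illustrative usage sketch (not part of the original file): building a small
# vocab from a word list and reading back the randomly initialized vectors.
# The vocab file path is an assumption; the file should contain one token per line.
def _example_base_vocab():
    base_vocab = BaseVocab(vocab_file="my_vocab.txt", embedding_dim=300)
    vectors = base_vocab.get_vectors()  # FloatTensor of shape (vocab_size, 300)
    unk_index = base_vocab.get_stoi()["never-seen-word"]  # unknown words map to UNK_INDEX
    return vectors, unk_index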
class CustomVocab(BaseVocab):
def __init__(self, vocab_file, embedding_file, data_dir=None, *args, **kwargs):
"""Use this vocab class when you have a custom vocab as well as a
custom embeddings file.
This will inherit vocab class, so you will get predefined tokens with
this one.
IMPORTANT: To init your embedding, get your vectors from this class's
object by calling `get_vectors` function
Parameters
----------
vocab_file : str
Path of custom vocabulary
embedding_file : str
            Path to custom embedding initialization file
data_dir : str
Path to data directory if embedding file is not an absolute path.
Default: None
"""
super().__init__(vocab_file)
self.type = "custom"
if not os.path.isabs(embedding_file) and data_dir is not None:
embedding_file = os.path.join(data_dir, embedding_file)
embedding_file = get_absolute_path(embedding_file)
if not PathManager.exists(embedding_file):
raise RuntimeError(f"Embedding file path {embedding_file} doesn't exist")
embedding_vectors = torch.from_numpy(np.load(embedding_file))
self.vectors = torch.FloatTensor(self.get_size(), len(embedding_vectors[0]))
for i in range(0, 4):
self.vectors[i] = torch.ones_like(self.vectors[i]) * 0.1 * i
for i in range(4, self.get_size()):
self.vectors[i] = embedding_vectors[i - 4]
class IntersectedVocab(BaseVocab):
def __init__(self, vocab_file, embedding_name, *args, **kwargs):
"""Use this vocab class when you have a custom vocabulary class but you
        want to use pretrained embedding vectors for it. This will only load
the vectors which intersect with your vocabulary. Use the
embedding_name specified in torchtext's pretrained aliases:
['charngram.100d', 'fasttext.en.300d', 'fasttext.simple.300d',
'glove.42B.300d', 'glove.840B.300d', 'glove.twitter.27B.25d',
'glove.twitter.27B.50d', 'glove.twitter.27B.100d',
'glove.twitter.27B.200d', 'glove.6B.50d', 'glove.6B.100d',
'glove.6B.200d', 'glove.6B.300d']
Parameters
----------
vocab_file : str
Vocabulary file containing list of words with one word per line
which will be used to collect vectors
embedding_name : str
Embedding name picked up from the list of the pretrained aliases
mentioned above
"""
super().__init__(vocab_file, *args, **kwargs)
self.type = "intersected"
name = embedding_name.split(".")[0]
dim = embedding_name.split(".")[2][:-1]
middle = embedding_name.split(".")[1]
class_name = EMBEDDING_NAME_CLASS_MAPPING[name]
if not hasattr(vocab, class_name):
raise RuntimeError(f"Unknown embedding type: {name}")
params = [middle]
if name == "glove":
params.append(int(dim))
vector_cache = get_mmf_cache_dir()
        # Load the vectors on the main process first so that every process
        # doesn't end up downloading them when the cache doesn't exist yet
if is_main():
vocab.pretrained_aliases[embedding_name](cache=vector_cache)
synchronize()
embedding = getattr(vocab, class_name)(*params, cache=vector_cache)
self.vectors = torch.empty(
(self.get_size(), len(embedding.vectors[0])), dtype=torch.float
)
self.embedding_dim = len(embedding.vectors[0])
for i in range(0, 4):
self.vectors[i] = torch.ones_like(self.vectors[i]) * 0.1 * i
for i in range(4, self.get_size()):
word = self.itos[i]
embedding_index = embedding.stoi.get(word, None)
if embedding_index is None:
self.vectors[i] = self.vectors[self.UNK_INDEX]
else:
self.vectors[i] = embedding.vectors[embedding_index]
def get_embedding_dim(self):
return self.embedding_dim
class PretrainedVocab(BaseVocab):
def __init__(self, embedding_name, *args, **kwargs):
"""Use this if you want to use pretrained embedding. See description
of IntersectedVocab to get a list of the embedding available from
torchtext
Parameters
----------
embedding_name : str
Name of the pretrained alias for the embedding to used
"""
self.type = "pretrained"
if embedding_name not in vocab.pretrained_aliases:
raise RuntimeError(f"Unknown embedding type: {embedding_name}")
vector_cache = get_mmf_cache_dir()
        # Load the vectors on the main process first so that every process
        # doesn't end up downloading them when the cache doesn't exist yet
if is_main():
vocab.pretrained_aliases[embedding_name](cache=vector_cache)
synchronize()
embedding = vocab.pretrained_aliases[embedding_name](cache=vector_cache)
self.UNK_INDEX = 3
self.stoi = defaultdict(lambda: self.UNK_INDEX)
self.itos = {}
self.itos[self.PAD_INDEX] = self.PAD_TOKEN
self.itos[self.SOS_INDEX] = self.SOS_TOKEN
self.itos[self.EOS_INDEX] = self.EOS_TOKEN
self.itos[self.UNK_INDEX] = self.UNK_TOKEN
self.stoi[self.SOS_TOKEN] = self.SOS_INDEX
self.stoi[self.EOS_TOKEN] = self.EOS_INDEX
self.stoi[self.PAD_TOKEN] = self.PAD_INDEX
self.stoi[self.UNK_TOKEN] = self.UNK_INDEX
self.vectors = torch.FloatTensor(
len(self.itos.keys()) + len(embedding.itos), len(embedding.vectors[0])
)
for i in range(4):
self.vectors[i] = torch.ones_like(self.vectors[i]) * 0.1 * i
index = 4
for word in embedding.stoi:
self.itos[index] = word
self.stoi[word] = index
actual_index = embedding.stoi[word]
self.vectors[index] = embedding.vectors[actual_index]
index += 1
class WordToVectorDict:
def __init__(self, model):
self.model = model
def __getitem__(self, word):
# Check if mean for word split needs to be done here
return np.mean([self.model.get_word_vector(w) for w in word.split(" ")], axis=0)
class ModelVocab(BaseVocab):
def __init__(self, name, model_file, *args, **kwargs):
"""Special vocab which is not really vocabulary but instead a model
which returns embedding directly instead of vocabulary. This is just
an abstraction over a model which generates embeddings directly.
For e.g. for fasttext model we encapsulate it inside this and provide
it as a vocab so that the API of the vocab remains same.
NOTE: stoi's functionality will remain same but it is actually calling
a function to get word vectors. Currently, only fasttext is supported.
Parameters
----------
name : str
Name of the embedding model which this vocab currently is loading
model_file : str
File from which model will be loaded. This API might need to be
changed in future.
"""
super().__init__(*args, **kwargs)
self.type = "model"
if name != "fasttext":
raise ValueError("Model vocab only supports fasttext as of now")
else:
self._load_fasttext_model(model_file)
def _load_fasttext_model(self, model_file):
from fastText import load_model
model_file = os.path.join(get_mmf_cache_dir(), model_file)
logger.info(f"Loading fasttext model now from {model_file}")
self.model = load_model(model_file)
self.stoi = WordToVectorDict(self.model)
def get_embedding_dim(self):
return self.model.get_dimension()
class ExtractedVocab(BaseVocab):
def __init__(self, base_path, emb_dim, *args, **kwargs):
"""Special vocab which is not really vocabulary but instead a class
which returns embedding pre-extracted from files. Can be used load
word embeddings from popular models like ELMo and BERT
Parameters
----------
base_path: str
path containing saved files with embeddings one file per txt item
"""
super().__init__(*args, **kwargs)
self.type = "extracted"
self.emb_dim = emb_dim
self.base_path = base_path
def get_dim(self):
return self.emb_dim
| EXA-1-master | exa/models/mmf-main/mmf/utils/vocab.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import importlib
import logging
import os
import random
import sys
from datetime import datetime
import numpy as np
import torch
from omegaconf import OmegaConf, open_dict
def set_seed(seed):
if seed:
if seed == -1:
# From detectron2
seed = (
os.getpid()
+ int(datetime.now().strftime("%S%f"))
+ int.from_bytes(os.urandom(2), "big")
)
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
return seed
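# Illustrative usage sketch (not part of the original file): set_seed(-1) draws
# a fresh seed (detectron2 style) and returns it so that it can be logged; any
# other truthy value seeds numpy, torch and random directly.
def _example_set_seed():
    fixed = set_seed(1234)  # returns 1234
    drawn = set_seed(-1)    # returns the randomly drawn seed
    return fixed, drawn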
def import_user_module(user_dir: str):
"""Given a user dir, this function imports it as a module.
This user_module is expected to have an __init__.py at its root.
You can use import_files to import your python files easily in
__init__.py
Args:
user_dir (str): directory which has to be imported
"""
from mmf.common.registry import registry
from mmf.utils.general import get_absolute_path # noqa
logger = logging.getLogger(__name__)
if user_dir:
if registry.get("__mmf_user_dir_imported__", no_warning=True):
logger.info(f"User dir {user_dir} already imported. Skipping.")
return
# Allow loading of files as user source
if user_dir.endswith(".py"):
user_dir = user_dir[:-3]
dot_path = ".".join(user_dir.split(os.path.sep))
        # For an absolute path starting with "/", the first char of dot_path
        # will be ".", which would turn it into a relative module that
        # find_spec doesn't like
if os.path.isabs(user_dir):
dot_path = dot_path[1:]
try:
dot_spec = importlib.util.find_spec(dot_path)
except ModuleNotFoundError:
dot_spec = None
abs_user_dir = get_absolute_path(user_dir)
module_parent, module_name = os.path.split(abs_user_dir)
        # If the dot path is found in sys.modules, or the path can be imported
        # directly, we don't need to juggle with the actual path
if dot_path in sys.modules or dot_spec is not None:
module_name = dot_path
else:
user_dir = abs_user_dir
logger.info(f"Importing from {user_dir}")
if module_name != dot_path:
# Since dot path hasn't been found or can't be imported,
# we can try importing the module by changing sys path
# to the parent
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
sys.modules["mmf_user_dir"] = sys.modules[module_name]
# Register config for user's model and dataset config
# relative path resolution
config = registry.get("config")
if config is None:
registry.register(
"config", OmegaConf.create({"env": {"user_dir": user_dir}})
)
else:
with open_dict(config):
config.env.user_dir = user_dir
registry.register("__mmf_user_dir_imported__", True)
def import_files(file_path: str, module_name: str = None):
"""The function imports all of the files present in file_path's directory.
This is useful for end user in case they want to easily import files without
mentioning each of them in their __init__.py. module_name if specified
is the full path to module under which all modules will be imported.
my_project/
my_models/
my_model.py
__init__.py
Contents of __init__.py
```
from mmf.utils.env import import_files
import_files(__file__, "my_project.my_models")
```
This will then allow you to import `my_project.my_models.my_model` anywhere.
Args:
file_path (str): Path to file in whose directory everything will be imported
        module_name (str): Module name if this file is under some specified structure
"""
for file in os.listdir(os.path.dirname(file_path)):
if file.endswith(".py") and not file.startswith("_"):
import_name = file[: file.find(".py")]
if module_name:
importlib.import_module(f"{module_name}.{import_name}")
else:
importlib.import_module(f"{import_name}")
def setup_imports():
from mmf.common.registry import registry
# First, check if imports are already setup
has_already_setup = registry.get("imports_setup", no_warning=True)
if has_already_setup:
return
# Automatically load all of the modules, so that
# they register with registry
root_folder = registry.get("mmf_root", no_warning=True)
if root_folder is None:
root_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.join(root_folder, "..")
environment_mmf_path = os.environ.get("MMF_PATH", os.environ.get("PYTHIA_PATH"))
if environment_mmf_path is not None:
root_folder = environment_mmf_path
registry.register("pythia_path", root_folder)
registry.register("mmf_path", root_folder)
trainer_folder = os.path.join(root_folder, "trainers")
trainer_pattern = os.path.join(trainer_folder, "**", "*.py")
datasets_folder = os.path.join(root_folder, "datasets")
datasets_pattern = os.path.join(datasets_folder, "**", "*.py")
model_folder = os.path.join(root_folder, "models")
common_folder = os.path.join(root_folder, "common")
modules_folder = os.path.join(root_folder, "modules")
model_pattern = os.path.join(model_folder, "**", "*.py")
common_pattern = os.path.join(common_folder, "**", "*.py")
modules_pattern = os.path.join(modules_folder, "**", "*.py")
importlib.import_module("mmf.common.meter")
files = (
glob.glob(datasets_pattern, recursive=True)
+ glob.glob(model_pattern, recursive=True)
+ glob.glob(trainer_pattern, recursive=True)
+ glob.glob(common_pattern, recursive=True)
+ glob.glob(modules_pattern, recursive=True)
)
for f in files:
f = os.path.realpath(f)
if f.endswith(".py") and not f.endswith("__init__.py"):
splits = f.split(os.sep)
import_prefix_index = 0
for idx, split in enumerate(splits):
if split == "mmf":
import_prefix_index = idx + 1
file_name = splits[-1]
module_name = file_name[: file_name.find(".py")]
module = ".".join(["mmf"] + splits[import_prefix_index:-1] + [module_name])
importlib.import_module(module)
registry.register("imports_setup", True)
def setup_torchaudio():
# required for soundfile
try:
import libfb.py.ctypesmonkeypatch
libfb.py.ctypesmonkeypatch.install()
except ImportError:
pass
def teardown_imports():
from mmf.common.registry import registry
registry.unregister("pythia_path")
registry.unregister("mmf_path")
registry.unregister("imports_setup")
| EXA-1-master | exa/models/mmf-main/mmf/utils/env.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import importlib
import logging
import os
import sys
import warnings
from typing import Any, Dict
import torch
from mmf.common.registry import registry
from mmf.utils.checkpoint_updater import get_pretrained_state_mapping_checkpoint
from mmf.utils.configuration import get_mmf_env, load_yaml
from mmf.utils.distributed import is_main, is_xla, open_if_main, synchronize
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_current_device, updir
from mmf.utils.xla import save_xla_ckpt
from omegaconf import OmegaConf
try:
import git
except ImportError:
git = None
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
logger = logging.getLogger(__name__)
ALLOWED_CHECKPOINT_EXTS = [".ckpt", ".pth", ".pt"]
def _hack_imports():
# NOTE: This can probably be made universal to support backwards
# compatibility with name "pythia" if needed.
sys.modules["pythia"] = importlib.import_module("mmf")
sys.modules["pythia.utils.configuration"] = importlib.import_module(
"mmf.utils.configuration"
)
def get_ckpt_path_from_folder(folder) -> str:
ckpts = []
allowed_ckpt_types = [f"*{ext}" for ext in ALLOWED_CHECKPOINT_EXTS]
for ckpt_type in allowed_ckpt_types:
ckpts.extend(glob.glob(os.path.join(folder, ckpt_type)))
assert (
len(ckpts) == 1
), "None or multiple checkpoints files. MMF doesn't know what to do."
return ckpts[0]
def get_ckpt_from_path(path) -> Dict[str, Any]:
with PathManager.open(path, "rb") as f:
ckpt = torch.load(f, map_location=lambda storage, loc: storage)
return ckpt
def get_config_from_folder_or_ckpt(
folder: str, ckpt: Dict[str, Any] = None
) -> Dict[str, Any]:
r"""gets config from folder or checkpoint
Args:
folder (str): folder from which config will be searched first
ckpt (Optional[Dict[str, Any]]): optional checkpoint from which config
might be found.
Returns:
config (Dict[str, Any]): config object
"""
configs = glob.glob(os.path.join(folder, "*.yaml"))
if len(configs) > 0:
assert len(configs) <= 1, (
"Multiple yaml files with the pretrained model. "
+ "MMF doesn't know what to do."
)
config_file = configs[0]
config = load_yaml(config_file)
else:
assert "config" in ckpt, (
"No configs provided with pretrained model"
" while checkpoint also doesn't have configuration."
)
config = ckpt["config"]
return config
def _load_pretrained_checkpoint(checkpoint_path, *args, **kwargs):
assert (
os.path.splitext(checkpoint_path)[1] in ALLOWED_CHECKPOINT_EXTS
), f"Checkpoint must have extensions: {ALLOWED_CHECKPOINT_EXTS}"
_hack_imports()
with PathManager.open(checkpoint_path, "rb") as f:
ckpt = torch.load(f, map_location=lambda storage, loc: storage)
assert "config" in ckpt, (
"No configs provided with pretrained model "
" while checkpoint also doesn't have configuration."
)
config = ckpt.pop("config", None)
model_config = config.get("model_config", config)
ckpt = ckpt.get("model", ckpt)
if "model_name" in kwargs:
model_name = kwargs["model_name"]
else:
assert len(model_config.keys()) == 1, "Only one model type should be specified."
model_name = list(model_config.keys())[0]
model_config = model_config.get(model_name)
return {"config": model_config, "checkpoint": ckpt, "full_config": config}
def _load_pretrained_model(model_name_or_path, *args, **kwargs):
if PathManager.exists(model_name_or_path):
download_path = model_name_or_path
model_name = model_name_or_path
else:
download_path = download_pretrained_model(model_name_or_path, *args, **kwargs)
model_name = model_name_or_path
_hack_imports()
ckpt_path = get_ckpt_path_from_folder(download_path)
ckpt = get_ckpt_from_path(ckpt_path)
    # If configs are not present in the folder, fall back to the config
    # stored inside the checkpoint
config = get_config_from_folder_or_ckpt(download_path, ckpt)
model_config = config.get("model_config", config)
ckpt = ckpt.get("model", ckpt)
# Also handle the case of model_name is path
if PathManager.exists(model_name):
# This shouldn't happen
assert len(model_config.keys()) == 1, "Checkpoint contains more than one model?"
# Take first key
model_config = model_config[list(model_config.keys())[0]]
else:
model_config = model_config.get(model_name.split(os.path.sep)[-1].split(".")[0])
return {"config": model_config, "checkpoint": ckpt, "full_config": config}
def load_pretrained_model(model_name_or_path_or_checkpoint, *args, **kwargs):
# If this is a file, then load this directly else download and load
if PathManager.isfile(model_name_or_path_or_checkpoint):
return _load_pretrained_checkpoint(
model_name_or_path_or_checkpoint, args, kwargs
)
else:
return _load_pretrained_model(model_name_or_path_or_checkpoint, args, kwargs)
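# Illustrative usage sketch (not part of the original file): a zoo key or a
# local checkpoint path both work; the returned dict carries the model config
# and the raw state dict. The zoo key below is an assumption and may not exist
# in every MMF release.
def _example_load_pretrained_model():
    output = load_pretrained_model("visual_bert.pretrained.coco")
    model_config = output["config"]
    state_dict = output["checkpoint"]
    return model_config, state_dict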
def consolidate_optim_state_dict(optimizer):
if hasattr(optimizer, "consolidate_state_dict"):
optimizer.consolidate_state_dict(recipient_rank=0)
class Checkpoint:
def __init__(self, trainer):
"""
Generates a path for saving model which can also be used for resuming
from a checkpoint.
"""
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = get_mmf_env(key="save_dir")
self.model_name = self.config.model
self.ckpt_foldername = self.save_dir
self.device = get_current_device()
self.ckpt_prefix = ""
if hasattr(self.trainer.model, "get_ckpt_name"):
self.ckpt_prefix = self.trainer.model.get_ckpt_name() + "_"
self.pth_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + self.model_name + "_final.pth"
)
self.models_foldername = os.path.join(self.ckpt_foldername, "models")
if not PathManager.exists(self.models_foldername):
PathManager.mkdirs(self.models_foldername)
self.save_config()
self.repo_path = updir(os.path.abspath(__file__), n=3)
self.git_repo = None
if git and self.config.checkpoint.save_git_details:
try:
self.git_repo = git.Repo(self.repo_path)
except git.exc.InvalidGitRepositoryError:
# Not a git repo, don't do anything
pass
self.max_to_keep = self.config.checkpoint.max_to_keep
self.saved_iterations = []
def save_config(self):
if not is_main():
return
cfg_file = os.path.join(self.ckpt_foldername, "config.yaml")
with PathManager.open(cfg_file, "w") as f:
f.write(OmegaConf.to_yaml(self.config, resolve=True))
def load_state_dict(self):
ckpt_config = self.config.checkpoint
suffix = "best.ckpt" if ckpt_config.resume_best else "current.ckpt"
reverse_suffix = "best.ckpt" if not ckpt_config.resume_best else "current.ckpt"
ckpt_filepath = os.path.join(self.ckpt_foldername, self.ckpt_prefix + suffix)
# In case of interrupts and resume, ckpt_config.resume_file would be there
# But, if the checkpoints are already created in the save dir
# and resume is true signifying the interrupt resume, we should skip
# loading the resume file.
if (
ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None
) and (not ckpt_config.resume or not PathManager.exists(ckpt_filepath)):
if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):
self._load(
ckpt_config.resume_file,
load_pretrained=ckpt_config.resume_pretrained,
)
return
# resume_file doesn't exist, try from zoo now
elif ckpt_config.resume_zoo is not None:
self._load(
ckpt_config.resume_zoo,
load_zoo=True,
load_pretrained=ckpt_config.resume_pretrained,
)
return
else:
raise RuntimeError(f"{ckpt_config.resume_file} doesn't exist")
if ckpt_config.resume:
if PathManager.exists(ckpt_filepath):
self._load(ckpt_filepath)
else:
warnings.warn(
"Tried to resume but checkpoint filepath {} "
"is not present. Trying {}, otherwise skipping.".format(
ckpt_filepath, reverse_suffix
)
)
ckpt_filepath = ckpt_filepath.replace(suffix, reverse_suffix)
if PathManager.exists(ckpt_filepath):
self._load(ckpt_filepath)
def _is_pl_trainer_checkpoint(self, checkpoint):
return "pytorch-lightning_version" in checkpoint
def _load(self, file, force=False, load_zoo=False, load_pretrained=False):
ckpt_config = self.config.checkpoint
logger.info("Loading checkpoint")
if load_zoo:
ckpt, should_continue = self._load_from_zoo(file)
if not should_continue:
return
else:
ckpt = self._torch_load(file)
pretrained_state_mapping = ckpt_config.pretrained_state_mapping
if not load_pretrained or force is True:
pretrained_state_mapping = {}
if not self._is_pl_trainer_checkpoint(ckpt):
if "model" not in ckpt:
ckpt = {"model": ckpt}
state_dict = self.upgrade_state_dict(ckpt["model"])
else:
state_dict = self.upgrade_state_dict(ckpt["state_dict"])
if len(pretrained_state_mapping.items()) == 0:
incompatible_keys = self.trainer.model.load_state_dict(
state_dict, strict=False
)
if len(incompatible_keys.missing_keys) != 0:
logger.warning(
f"Missing keys {incompatible_keys.missing_keys} in the"
+ " checkpoint.\n"
+ "If this is not your checkpoint, please open up an "
+ "issue on MMF GitHub. \n"
+ f"Unexpected keys if any: {incompatible_keys.unexpected_keys}"
)
if len(incompatible_keys.unexpected_keys) != 0:
logger.warning(
"Unexpected keys in state dict: "
+ f"{incompatible_keys.unexpected_keys} \n"
+ "This is usually not a problem with pretrained models, but "
+ "if this is your own model, please double check. \n"
+ "If you think this is an issue, please open up a "
+ "bug at MMF GitHub."
)
reset_optimizer = ckpt_config.reset.optimizer or ckpt_config.reset.all
if not reset_optimizer:
self._load_optimizer(ckpt)
reset_counts = ckpt_config.reset.all or ckpt_config.reset.counts
if not reset_counts:
self.trainer.early_stop_callback.early_stopping.init_from_checkpoint(
ckpt
)
self._load_counts_and_lr_scheduler(ckpt)
reset_scaler = ckpt_config.reset.all or ckpt_config.reset.fp16_scaler
if not reset_scaler:
self._load_fp16_scaler(ckpt)
else:
self._load_pretrained(state_dict)
logger.info("Checkpoint loaded.")
logger.info(f"Current num updates: {self.trainer.num_updates}")
logger.info(f"Current iteration: {self.trainer.current_iteration}")
logger.info(f"Current epoch: {self.trainer.current_epoch}")
def _load_optimizer(self, ckpt):
if "optimizer" in ckpt:
try:
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
except ValueError:
logger.info(
"Optimizer failed to load. Try with "
+ "checkpoint.reset.optimizer=True"
)
raise
else:
warnings.warn(
"'optimizer' key is not present in the "
"checkpoint asked to be loaded. Skipping."
)
def _load_counts_and_lr_scheduler(self, ckpt):
ckpt_config = self.trainer.config.checkpoint
if "best_update" in ckpt:
if ckpt_config.resume_best:
self.trainer.num_updates = ckpt.get(
"best_update", self.trainer.num_updates
)
self.trainer.current_iteration = ckpt.get(
"best_iteration", self.trainer.current_iteration
)
else:
self.trainer.num_updates = ckpt.get(
"num_updates", self.trainer.num_updates
)
self.trainer.current_iteration = ckpt.get(
"current_iteration", self.trainer.current_iteration
)
self.trainer.current_epoch = ckpt.get(
"current_epoch", self.trainer.current_epoch
)
elif "best_iteration" in ckpt:
# Preserve old behavior for old checkpoints where we always
# load best iteration
if ckpt_config.resume_best and "current_iteration" in ckpt:
self.trainer.current_iteration = ckpt["current_iteration"]
else:
self.trainer.current_iteration = ckpt.get(
"best_iteration", self.trainer.current_iteration
)
self.trainer.num_updates = self.trainer.current_iteration
lr_scheduler = self.trainer.lr_scheduler_callback
if (
lr_scheduler is not None
and getattr(lr_scheduler, "_scheduler", None) is not None
):
lr_scheduler = lr_scheduler._scheduler
if "lr_scheduler" in ckpt:
lr_scheduler.load_state_dict(ckpt["lr_scheduler"])
else:
warnings.warn(
"'lr_scheduler' key is not present in the "
"checkpoint asked to be loaded. Setting lr_scheduler's "
"last_epoch to current_iteration."
)
lr_scheduler.last_epoch = self.trainer.current_iteration
registry.register("current_iteration", self.trainer.current_iteration)
registry.register("num_updates", self.trainer.num_updates)
self.trainer.current_epoch = ckpt.get("best_epoch", self.trainer.current_epoch)
registry.register("current_epoch", self.trainer.current_epoch)
def _load_fp16_scaler(self, ckpt):
scaler = getattr(self.trainer, "scaler", None)
scaler_dict = ckpt.get("fp16_scaler", None)
if scaler is not None and scaler_dict is not None:
scaler.load_state_dict(scaler_dict)
def _load_pretrained(self, ckpt):
model = self.trainer.model
own_state = model.state_dict()
ckpt_update_dict = get_pretrained_state_mapping_checkpoint(
checkpoint=ckpt, model=model, config=self.trainer.config
)
for own_attr, attr in ckpt_update_dict.items():
logger.info("Copying " + own_attr + " from " + attr)
own_state[own_attr].copy_(ckpt[attr])
logger.info("Pretrained model loaded")
def upgrade_state_dict(self, state_dict):
data_parallel = registry.get("data_parallel") or registry.get("distributed")
data_parallel = data_parallel or isinstance(
self.trainer.model,
(torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel),
)
if data_parallel:
model = self.trainer.model.module
else:
model = self.trainer.model
new_dict = {}
for attr in state_dict:
new_attr = model.format_state_key(attr)
if not data_parallel and attr.startswith("module."):
                # In case the ckpt was actually a data parallel model,
                # replace the first 'module.' from DataParallel with an empty string
new_attr = new_attr.replace("module.", "", 1)
elif data_parallel and not attr.startswith("module."):
new_attr = "module." + new_attr
# Log if key has changed but not when the difference
# is only due to data parallel's `module`
if new_attr != attr and ("module." + new_attr != attr):
logger.info(f"Will load key {new_attr} from {attr}")
new_dict[new_attr] = state_dict[attr]
return new_dict
def _load_from_zoo(self, file):
ckpt_config = self.trainer.config.checkpoint
zoo_ckpt = load_pretrained_model(file)
# If zoo_config_override, load the model directly using `from_pretrained`
if ckpt_config.zoo_config_override:
model_cls = registry.get_model_class(self.trainer.config.model)
self.trainer.model = model_cls.from_pretrained(ckpt_config.resume_zoo)
self.trainer.config.model_config = zoo_ckpt["full_config"].model_config
return None, False
else:
return self.upgrade_state_dict(zoo_ckpt["checkpoint"]), True
def _torch_load(self, file):
# Backwards compatibility to Pythia
_hack_imports()
# Force get_local_path to always redownload checkpoints
local_path = PathManager.get_local_path(file, force=True)
with PathManager.open(local_path, "rb") as f:
if "cuda" in str(self.device):
return torch.load(f, map_location=self.device)
else:
return torch.load(f, map_location=lambda storage, loc: storage)
def _get_vcs_fields(self):
"""Returns a dict with git fields of the current repository
To reproduce an experiment directly from a checkpoint
1) Export `config` key as a yaml
2) Clone repository and checkout at given commit on given branch
3) Any local change (diff) while running the experiment is stored
in the value with key `git/diff`, output the diff to a `path.diff`
file and apply the patch to the current state by simply
`patch -p0 < path.diff`
"""
return {
"git/branch": self.git_repo.active_branch.name,
"git/commit_hash": self.git_repo.head.commit.name_rev,
"git/commit_author": self.git_repo.head.commit.author.name,
"git/commit_message": self.git_repo.head.commit.message,
"git/diff": self.git_repo.git.diff("--no-prefix"),
}
def save_func(self, *args):
return save_xla_ckpt(*args) if is_xla() else torch.save(*args)
def save(self, update, iteration=None, update_best=False):
        # Only save in the main process.
        # For XLA we use the xm.save method, which ensures that the actual
        # checkpoint saving happens only on the master node.
        # The method also takes care of all the necessary synchronization.
if not is_main() and not is_xla():
return
logger.info("Checkpoint save operation started!")
if not iteration:
iteration = update
ckpt_filepath = os.path.join(self.models_foldername, "model_%d.ckpt" % update)
best_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
current_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "current.ckpt"
)
best_iteration = (
self.trainer.early_stop_callback.early_stopping.best_monitored_iteration
)
best_update = (
self.trainer.early_stop_callback.early_stopping.best_monitored_update
)
best_metric = (
self.trainer.early_stop_callback.early_stopping.best_monitored_value
)
model = self.trainer.model
data_parallel = registry.get("data_parallel") or registry.get("distributed")
fp16_scaler = getattr(self.trainer, "scaler", None)
fp16_scaler_dict = None
if fp16_scaler is not None:
fp16_scaler_dict = fp16_scaler.state_dict()
if data_parallel is True:
model = model.module
ckpt = {
"model": model.state_dict(),
"optimizer": self.trainer.optimizer.state_dict(),
"best_iteration": best_iteration,
"current_iteration": iteration,
"current_epoch": self.trainer.current_epoch,
"num_updates": update,
"best_update": best_update,
"best_metric_value": best_metric,
"fp16_scaler": fp16_scaler_dict,
# Convert to container to avoid any dependencies
"config": OmegaConf.to_container(self.config, resolve=True),
}
lr_scheduler = self.trainer.lr_scheduler_callback
if (
lr_scheduler is not None
and getattr(lr_scheduler, "_scheduler", None) is not None
):
lr_scheduler = lr_scheduler._scheduler
ckpt["lr_scheduler"] = lr_scheduler.state_dict()
if self.git_repo:
git_metadata_dict = self._get_vcs_fields()
ckpt.update(git_metadata_dict)
with open_if_main(ckpt_filepath, "wb") as f:
self.save_func(ckpt, f)
if update_best:
logger.info("Saving best checkpoint")
with open_if_main(best_ckpt_filepath, "wb") as f:
self.save_func(ckpt, f)
# Save current always
logger.info("Saving current checkpoint")
with open_if_main(current_ckpt_filepath, "wb") as f:
self.save_func(ckpt, f)
# Save the current checkpoint as W&B artifacts for model versioning.
if self.config.training.wandb.log_checkpoint:
logger.info(
"Saving current checkpoint as W&B Artifacts for model versioning"
)
self.trainer.logistics_callback.wandb_logger.log_model_checkpoint(
current_ckpt_filepath
)
# Remove old checkpoints if max_to_keep is set
# In XLA, only delete checkpoint files in main process
if self.max_to_keep > 0 and is_main():
if len(self.saved_iterations) == self.max_to_keep:
self.remove(self.saved_iterations.pop(0))
self.saved_iterations.append(update)
logger.info("Checkpoint save operation finished!")
def remove(self, update):
ckpt_filepath = os.path.join(self.models_foldername, "model_%d.ckpt" % update)
if PathManager.isfile(ckpt_filepath):
PathManager.rm(ckpt_filepath)
def restore(self):
synchronize()
logger.info("Restoring checkpoint")
best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + "best.ckpt")
if PathManager.exists(best_path):
self._load(best_path, force=True)
def finalize(self):
if is_main() or is_xla():
with open_if_main(self.pth_filepath, "wb") as f:
self.save_func(self.trainer.model.state_dict(), f)
| EXA-1-master | exa/models/mmf-main/mmf/utils/checkpoint.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import re
class EvalAIAnswerProcessor:
"""
Processes an answer similar to Eval AI
copied from
https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
"""
CONTRACTIONS = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
NUMBER_MAP = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
ARTICLES = ["a", "an", "the"]
PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
PUNCTUATIONS = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def __init__(self, *args, **kwargs):
pass
def word_tokenize(self, word):
word = word.lower()
word = word.replace(",", "").replace("?", "").replace("'s", " 's")
return word.strip()
def process_punctuation(self, in_text):
out_text = in_text
for p in self.PUNCTUATIONS:
if (p + " " in in_text or " " + p in in_text) or (
re.search(self.COMMA_STRIP, in_text) is not None
):
out_text = out_text.replace(p, "")
else:
out_text = out_text.replace(p, " ")
out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
return out_text
def process_digit_article(self, in_text):
out_text = []
temp_text = in_text.lower().split()
for word in temp_text:
word = self.NUMBER_MAP.setdefault(word, word)
if word not in self.ARTICLES:
out_text.append(word)
else:
pass
for word_id, word in enumerate(out_text):
if word in self.CONTRACTIONS:
out_text[word_id] = self.CONTRACTIONS[word]
out_text = " ".join(out_text)
return out_text
def __call__(self, item):
item = self.word_tokenize(item)
item = item.replace("\n", " ").replace("\t", " ").strip()
item = self.process_punctuation(item)
item = self.process_digit_article(item)
return item
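# Minimal usage sketch of the processor above; the input strings are made-up examples.
# Punctuation is stripped, number words are mapped to digits, articles are dropped,
# and known contractions are restored.
def _example_answer_processor():
    processor = EvalAIAnswerProcessor()
    assert processor("Two Dogs!") == "2 dogs"
    assert processor("a isnt") == "isn't"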
class TextVQAAccuracyEvaluator:
def __init__(self):
self.answer_processor = EvalAIAnswerProcessor()
def _compute_answer_scores(self, raw_answers):
"""
compute the accuracy (soft score) of human answers
"""
answers = [self.answer_processor(a) for a in raw_answers]
assert len(answers) == 10
gt_answers = list(enumerate(answers))
unique_answers = set(answers)
unique_answer_scores = {}
for unique_answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [
item for item in other_answers if item[1] == unique_answer
]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
unique_answer_scores[unique_answer] = sum(accs) / len(accs)
return unique_answer_scores
def eval_pred_list(self, pred_list):
pred_scores = []
for entry in pred_list:
pred_answer = self.answer_processor(entry["pred_answer"])
unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
score = unique_answer_scores.get(pred_answer, 0.0)
pred_scores.append(score)
accuracy = sum(pred_scores) / len(pred_scores)
return accuracy
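# Minimal usage sketch of the soft-accuracy evaluator above; the prediction and the
# ten ground-truth answers below are made-up example data (each entry must carry
# exactly 10 answers because `_compute_answer_scores` asserts that).
def _example_textvqa_accuracy():
    evaluator = TextVQAAccuracyEvaluator()
    pred_list = [
        {
            "pred_answer": "stop",
            "gt_answers": [
                "stop", "stop", "stop", "stop sign", "stop",
                "stop", "stop", "stop sign", "stop", "stop",
            ],
        }
    ]
    # 8 of 10 annotators answered "stop", so its soft score is min(1, matches / 3) = 1.0
    return evaluator.eval_pred_list(pred_list)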
class STVQAAccuracyEvaluator:
def __init__(self):
self.answer_processor = EvalAIAnswerProcessor()
def eval_pred_list(self, pred_list):
pred_scores = []
for entry in pred_list:
pred_answer = self.answer_processor(entry["pred_answer"])
gts = [self.answer_processor(a) for a in entry["gt_answers"]]
score = 1.0 if pred_answer in gts else 0.0
pred_scores.append(score)
accuracy = sum(pred_scores) / len(pred_scores)
return accuracy
class STVQAANLSEvaluator:
def __init__(self):
import editdistance # install with `pip install editdistance`
self.get_edit_distance = editdistance.eval
def get_anls(self, s1, s2):
s1 = s1.lower().strip()
s2 = s2.lower().strip()
iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
anls = iou if iou >= 0.5 else 0.0
return anls
def eval_pred_list(self, pred_list):
pred_scores = []
for entry in pred_list:
anls = max(
self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
)
pred_scores.append(anls)
accuracy = sum(pred_scores) / len(pred_scores)
return accuracy
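# Quick worked example of the ANLS scoring above (requires the `editdistance` package):
# "hello" vs "hallo" is one edit out of max length 5, so 1 - 1/5 = 0.8, which clears
# the 0.5 threshold; "abc" vs "xyz" gives 1 - 3/3 = 0.0.
def _example_anls():
    evaluator = STVQAANLSEvaluator()
    assert abs(evaluator.get_anls("hello", "hallo") - 0.8) < 1e-6
    assert evaluator.get_anls("abc", "xyz") == 0.0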
class TextCapsBleu4Evaluator:
def __init__(self):
# The following script requires Java 1.8.0 and pycocotools installed.
# The pycocoevalcap can be installed with pip as
# pip install git+https://github.com/ronghanghu/coco-caption.git@python23
# Original pycocoevalcap code is at https://github.com/tylin/coco-caption
# but has no python3 support yet.
try:
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
except ModuleNotFoundError:
print(
"Please install pycocoevalcap module using "
"pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa
)
raise
self.tokenizer = PTBTokenizer()
self.scorer = Bleu(4)
def eval_pred_list(self, pred_list):
# Create reference and hypotheses captions.
gts = {}
res = {}
for idx, entry in enumerate(pred_list):
gts[idx] = [{"caption": a} for a in entry["gt_answers"]]
res[idx] = [{"caption": entry["pred_answer"]}]
gts = self.tokenizer.tokenize(gts)
res = self.tokenizer.tokenize(res)
score, _ = self.scorer.compute_score(gts, res)
bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
return bleu4
| EXA-1-master | exa/models/mmf-main/mmf/utils/m4c_evaluators.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/util/box_ops.py
import torch
from torch import Tensor
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x: Tensor):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x: Tensor):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
def box_iou(boxes1: Tensor, boxes2: Tensor):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1: Tensor, boxes2: Tensor):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
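# Minimal usage sketch of the helpers above, using made-up [x0, y0, x1, y1] boxes:
# identical boxes give IoU 1 and GIoU 1, and the format converters round-trip.
def _example_box_ops():
    boxes = torch.tensor([[0.0, 0.0, 2.0, 2.0], [1.0, 1.0, 3.0, 3.0]])
    iou, _ = box_iou(boxes, boxes)
    giou = generalized_box_iou(boxes, boxes)
    assert torch.allclose(torch.diag(iou), torch.ones(2))
    assert torch.allclose(torch.diag(giou), torch.ones(2))
    assert torch.allclose(box_cxcywh_to_xyxy(box_xyxy_to_cxcywh(boxes)), boxes)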
| EXA-1-master | exa/models/mmf-main/mmf/utils/box_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import time
class Timer:
DEFAULT_TIME_FORMAT_DATE_TIME = "%Y/%m/%d %H:%M:%S"
DEFAULT_TIME_FORMAT = ["%03dms", "%02ds", "%02dm", "%02dh"]
def __init__(self):
self.start = time.time() * 1000
def get_current(self):
return self.get_time_hhmmss(self.start)
def reset(self):
self.start = time.time() * 1000
def get_time_since_start(self, format=None):
return self.get_time_hhmmss(self.start, format)
def unix_time_since_start(self, in_seconds=True):
gap = time.time() * 1000 - self.start
if in_seconds:
gap = gap // 1000
# Prevent 0 division errors
if gap == 0:
gap = 1
return gap
def get_time_hhmmss(self, start=None, end=None, gap=None, format=None):
"""
Calculates time since `start` and formats as a string.
"""
if start is None and gap is None:
if format is None:
format = self.DEFAULT_TIME_FORMAT_DATE_TIME
return time.strftime(format)
if end is None:
end = time.time() * 1000
if gap is None:
gap = end - start
s, ms = divmod(gap, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
if format is None:
format = self.DEFAULT_TIME_FORMAT
items = [ms, s, m, h]
assert len(items) == len(format), "Format length should be same as items"
time_str = ""
for idx, item in enumerate(items):
if item != 0:
time_str = format[idx] % item + " " + time_str
# Means no more time is left.
if len(time_str) == 0:
time_str = "0ms"
return time_str.strip()
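# Minimal usage sketch of the Timer above. Formatting a fixed gap avoids sleeping in
# the example: 3_723_000 ms is 1h 02m 03s (the zero-millisecond part is skipped).
def _example_timer():
    timer = Timer()
    assert timer.get_time_hhmmss(gap=3_723_000) == "01h 02m 03s"
    return timer.get_current()  # time elapsed since the Timer was constructed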
| EXA-1-master | exa/models/mmf-main/mmf/utils/timer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
class Flags:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.add_core_args()
def get_parser(self):
return self.parser
def add_core_args(self):
self.parser.add_argument_group("Core Arguments")
# TODO: Add Help flag here describing MMF Configuration
# and point to configuration documentation
self.parser.add_argument(
"-co",
"--config_override",
type=str,
default=None,
help="Use to override config from command line directly",
)
# This is needed to support torch.distributed.launch
self.parser.add_argument(
"--local_rank", type=int, default=None, help="Local rank of the argument"
)
self.parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
flags = Flags()
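# Minimal usage sketch of the shared parser above; the override string and trailing
# opts are made-up example values.
def _example_flags():
    parser = flags.get_parser()
    args = parser.parse_args(
        ["--config_override", "training.batch_size=32", "training.max_updates=100"]
    )
    assert args.config_override == "training.batch_size=32"
    assert args.opts == ["training.max_updates=100"]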
| EXA-1-master | exa/models/mmf-main/mmf/utils/flags.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Original taken from ParlAI https://git.io/JvjfS, this file has been
# adapted for MMF use cases.
"""
Utilities for downloading and building data.
These can be replaced if your particular file system does not support them.
"""
import collections
import datetime
import hashlib
import io
import json
import os
import shutil
import time
from pathlib import Path
import numpy as np
import requests
import tqdm
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from PIL import Image
class DownloadableFile:
"""
A class used to abstract any file that has to be downloaded online.
Originally taken from ParlAI, this file has been modified for MMF specific
use cases.
Any dataset/model that needs to download a file needs to have a list RESOURCES
that have objects of this class as elements.
The class automatically figures out if the file is from Google Drive.
This class provides the following functionality:
- Download a file from a URL / Google Drive
- Decompress the file if compressed
- Checksum for the downloaded file
- Send HEAD request to validate URL or Google Drive link
    - If the file is present and the checksum is the same, it won't be redownloaded
Raises:
AssertionError: If while downloading checksum of the files fails.
"""
GOOGLE_DRIVE_SUBSTR = "drive.google"
MMF_PREFIX = "mmf://"
MMF_PREFIX_REPLACEMENT = "https://dl.fbaipublicfiles.com/mmf/data/"
def __init__(
self,
url,
file_name,
hashcode=None,
compressed=True,
delete_original=False,
dest_folder=None,
):
"""
An object of this class needs to be created with:
Args:
url (string): URL or Google Drive id to download from
file_name (string): File name that the file should be named
hashcode (string, optional): SHA256 hashcode of the downloaded file.
Defaults to None. Won't be checked if not
passed.
compressed (bool, optional): False if the file is not compressed.
Defaults to True.
delete_original (bool, optional): If compressed whether to delete original.
Defaults to False.
dest_folder (str, optional): Folder which will be appended to destination
path provided when downloading. Defaults to None.
"""
self._url = self._parse_url(url)
self._file_name = file_name
self._hashcode = hashcode
self._compressed = compressed
self._from_google = self._url.find(self.GOOGLE_DRIVE_SUBSTR) != -1
if self._from_google:
assert "id=" in self._url, "Google Drive URL should have Google Drive ID"
self._url = self._url.split("=")[-1]
self._delete_original = delete_original
self._dest_folder = dest_folder
def _parse_url(self, url):
if url.find(self.MMF_PREFIX) == -1:
return url
else:
return self.MMF_PREFIX_REPLACEMENT + url[len(self.MMF_PREFIX) :]
def checksum(self, download_path):
"""
Checksum on a given file.
Args:
download_path (string): path to the downloaded file.
"""
if self._hashcode is None:
print(f"[ Checksum not provided, skipping for {self._file_name}]")
return
sha256_hash = hashlib.sha256()
destination = os.path.join(download_path, self._file_name)
if not PathManager.isfile(destination):
# File is not present, nothing to checksum
return
with PathManager.open(destination, "rb") as f:
print(f"[ Starting checksum for {self._file_name}]")
for byte_block in iter(lambda: f.read(65536), b""):
sha256_hash.update(byte_block)
if sha256_hash.hexdigest() != self._hashcode:
# remove_dir(download_path)
raise AssertionError(
f"[ Checksum for {self._file_name} from \n{self._url}\n"
"does not match the expected checksum. Please try again. ]"
)
else:
print(f"[ Checksum successful for {self._file_name}]")
def download_file(self, download_path):
downloaded = False
redownload = False
if self._dest_folder is not None:
download_path = str(Path(f"{download_path}/{self._dest_folder}"))
make_dir(download_path)
try:
self.checksum(download_path)
except AssertionError:
# File exists but checksum has changed. Will be redownloaded
print(f"[ Checksum changed for {download_path}. Redownloading]")
redownload = True
if self._from_google:
downloaded = download_from_google_drive(
self._url,
os.path.join(download_path, self._file_name),
redownload=redownload,
)
else:
downloaded = download(
self._url, download_path, self._file_name, redownload=redownload
)
# If download actually happened, then only checksum again and decompress
if downloaded:
self.checksum(download_path)
if self._compressed:
decompress(download_path, self._file_name, self._delete_original)
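# Minimal usage sketch of DownloadableFile; the URL, file name, and destination below
# are hypothetical placeholders. With a real resource, `download_file` downloads the
# file, verifies the checksum when one is provided, and decompresses the archive.
# `mmf://`-prefixed URLs are expanded to the public download mirror by `_parse_url`.
def _example_downloadable_file(download_path="/tmp/mmf_example"):
    resource = DownloadableFile(
        url="mmf://datasets/example/defaults/annotations.tar.gz",  # hypothetical URL
        file_name="annotations.tar.gz",
        hashcode=None,  # skip checksum verification in this sketch
        compressed=True,
    )
    resource.download_file(download_path)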
def built(path, version_string=None):
"""
Check if '.built' flag has been set for that task.
If a version_string is provided, this has to match, or the version
is regarded as not built.
    The version_string is generally the dataset version plus the date the file was
    last updated. If it doesn't match, the dataset will be marked as not built, which
    makes sure that whenever we update our features (or anything else), they are
    refreshed for the end user.
"""
if version_string:
fname = os.path.join(path, ".built.json")
if not PathManager.isfile(fname):
return False
else:
with PathManager.open(fname, "r") as read:
text = json.load(read)
return text.get("version", None) == version_string
else:
return PathManager.isfile(os.path.join(path, ".built.json"))
def mark_done(path, version_string=None):
"""
Mark this path as prebuilt.
Marks the path as done by adding a '.built' file with the current timestamp
plus a version description string if specified.
Args:
path (str): The file path to mark as built
version_string (str): The version of this dataset
"""
data = {}
data["created_at"] = str(datetime.datetime.today())
data["version"] = version_string
with PathManager.open(os.path.join(path, ".built.json"), "w") as f:
json.dump(data, f)
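# Minimal usage sketch of the built/mark_done pair above; the path and version string
# are made-up placeholders. Bumping the version string invalidates the cached build.
def _example_built_flag(path="/tmp/mmf_example_dataset", version="1.0_2020-01-01"):
    make_dir(path)
    if not built(path, version_string=version):
        # ... download / extract resources here ...
        mark_done(path, version_string=version)
    assert built(path, version_string=version)
    assert not built(path, version_string="2.0_2021-01-01")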
def download(url, path, fname, redownload=True, disable_tqdm=False):
"""
Download file using `requests`.
    If ``redownload`` is set to False, the tar file will not be downloaded again if it
    is already present (default ``True``).
Returns whether download actually happened or not
"""
outfile = os.path.join(path, fname)
download = not PathManager.isfile(outfile) or redownload
retry = 5
exp_backoff = [2**r for r in reversed(range(retry))]
pbar = None
if download:
# First test if the link is actually downloadable
check_header(url)
if not disable_tqdm:
print("[ Downloading: " + url + " to " + outfile + " ]")
pbar = tqdm.tqdm(
unit="B", unit_scale=True, desc=f"Downloading {fname}", disable=disable_tqdm
)
while download and retry >= 0:
resume_file = outfile + ".part"
resume = PathManager.isfile(resume_file)
if resume:
resume_pos = os.path.getsize(resume_file)
mode = "ab"
else:
resume_pos = 0
mode = "wb"
response = None
with requests.Session() as session:
try:
header = (
{"Range": "bytes=%d-" % resume_pos, "Accept-Encoding": "identity"}
if resume
else {}
)
response = session.get(url, stream=True, timeout=5, headers=header)
# negative reply could be 'none' or just missing
if resume and response.headers.get("Accept-Ranges", "none") == "none":
resume_pos = 0
mode = "wb"
CHUNK_SIZE = 32768
total_size = int(response.headers.get("Content-Length", -1))
# server returns remaining size if resuming, so adjust total
total_size += resume_pos
pbar.total = total_size
done = resume_pos
with PathManager.open(resume_file, mode) as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if total_size > 0:
done += len(chunk)
if total_size < done:
# don't freak out if content-length was too small
total_size = done
pbar.total = total_size
pbar.update(len(chunk))
break
except (
requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
):
retry -= 1
pbar.clear()
if retry >= 0:
print("Connection error, retrying. (%d retries left)" % retry)
time.sleep(exp_backoff[retry])
else:
print("Retried too many times, stopped retrying.")
finally:
if response:
response.close()
if retry < 0:
raise RuntimeWarning("Connection broken too many times. Stopped retrying.")
if download and retry > 0:
pbar.update(done - pbar.n)
if done < total_size:
raise RuntimeWarning(
"Received less data than specified in "
+ "Content-Length header for "
+ url
+ ". There may be a download problem."
)
move(resume_file, outfile)
if pbar:
pbar.close()
return download
def check_header(url, from_google=False):
"""
Performs a HEAD request to check if the URL / Google Drive ID is live.
"""
session = requests.Session()
if from_google:
URL = "https://docs.google.com/uc?export=download"
response = session.head(URL, params={"id": url}, stream=True)
else:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) "
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/77.0.3865.90 Safari/537.36"
}
response = session.head(url, allow_redirects=True, headers=headers)
status = response.status_code
session.close()
assert status == 200, (
"The url {} is broken. If this is not your own url,"
+ " please open up an issue on GitHub"
).format(url)
def download_pretrained_model(model_name, *args, **kwargs):
import omegaconf
from mmf.utils.configuration import get_mmf_env, load_yaml
from omegaconf import OmegaConf
model_zoo = load_yaml(get_mmf_env(key="model_zoo"))
OmegaConf.set_struct(model_zoo, True)
OmegaConf.set_readonly(model_zoo, True)
data_dir = get_absolute_path(get_mmf_env("data_dir"))
model_data_dir = os.path.join(data_dir, "models")
download_path = os.path.join(model_data_dir, model_name)
try:
model_config = OmegaConf.select(model_zoo, model_name)
except omegaconf.errors.OmegaConfBaseException as e:
print(f"No such model name {model_name} defined in mmf zoo")
raise e
if "version" not in model_config or "resources" not in model_config:
# Version and Resources are not present time to try the defaults
try:
model_config = model_config.defaults
download_path = os.path.join(model_data_dir, model_name + ".defaults")
except omegaconf.errors.OmegaConfBaseException as e:
print(
f"Model name {model_name} doesn't specify 'resources' and 'version' "
"while no defaults have been provided"
)
raise e
# Download requirements if any specified by "zoo_requirements" field
# This can either be a list or a string
if "zoo_requirements" in model_config:
requirements = model_config.zoo_requirements
if isinstance(requirements, str):
requirements = [requirements]
for item in requirements:
download_pretrained_model(item, *args, **kwargs)
version = model_config.version
resources = model_config.resources
download_resources(resources, download_path, version)
return download_path
def download_resources(resources, download_path, version):
is_built = built(download_path, version_string=version)
if not is_built:
make_dir(download_path)
# Make it list if it isn't
if not isinstance(resources, collections.abc.Sequence):
resources = [resources]
if len(resources) == 0:
return
for resource in resources:
download_resource(resource, download_path)
mark_done(download_path, version_string=version)
def download_resource(resource, download_path):
if isinstance(resource, collections.abc.Mapping):
# Try building DownloadableFile class object from resource dict
resource = DownloadableFile(**resource)
assert isinstance(resource, DownloadableFile)
resource.download_file(download_path)
def make_dir(path):
"""
Make the directory and any nonexistent parent directories (`mkdir -p`).
"""
# the current working directory is a fine path
if path != "":
PathManager.mkdirs(path)
def move(path1, path2):
"""
Rename the given file.
"""
shutil.move(path1, path2)
def copy(path1, path2):
"""
Copy the given file from path1 to path2.
"""
shutil.copy(path1, path2)
def remove_dir(path):
"""
Remove the given directory, if it exists.
"""
shutil.rmtree(path, ignore_errors=True)
def decompress(path, fname, delete_original=True):
"""
Unpack the given archive file to the same directory.
Args:
path(str): The folder containing the archive. Will contain the contents.
fname (str): The filename of the archive file.
delete_original (bool, optional): If true, the archive will be deleted
after extraction. Default to True.
"""
print("Unpacking " + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if delete_original:
os.remove(fullpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def download_from_google_drive(gd_id, destination, redownload=True):
"""
Use the requests package to download a file from Google Drive.
"""
download = not PathManager.isfile(destination) or redownload
URL = "https://docs.google.com/uc?export=download"
if not download:
return download
else:
# Check first if link is live
check_header(gd_id, from_google=True)
with requests.Session() as session:
response = session.get(URL, params={"id": gd_id}, stream=True)
token = _get_confirm_token(response)
if token:
response.close()
params = {"id": gd_id, "confirm": token}
response = session.get(URL, params=params, stream=True)
CHUNK_SIZE = 32768
with PathManager.open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
return download
def get_image_from_url(url):
response = requests.get(url)
img = np.array(Image.open(io.BytesIO(response.content)))
return img
| EXA-1-master | exa/models/mmf-main/mmf/utils/download.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any, List, Optional, Tuple
import numpy as np
import torch
import torchvision
from mmf.datasets.processors.frcnn_processor import img_tensorize
from mmf.utils.features.visualizing_image import SingleImageViz
from PIL import Image
def visualize_images(
images: List[Any], size: Optional[Tuple[int, int]] = (224, 224), *args, **kwargs
):
"""Visualize a set of images using torchvision's make grid function. Expects
PIL images which it will convert to tensor and optionally resize them. If resize is
not passed, it will only accept a list with single image
Args:
images (List[Any]): List of images to be visualized
size (Optional[Tuple[int, int]], optional): Size to which Images can be resized.
If not passed, the function will only accept list with single image.
Defaults to (224, 224).
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print(
"Visualization tools require matplotlib. "
+ "Install using pip install matplotlib."
)
raise
transform_list = []
assert (
size is not None or len(images) == 1
), "If size is not passed, only one image can be visualized"
if size is not None:
transform_list.append(torchvision.transforms.Resize(size=size))
transform_list.append(torchvision.transforms.ToTensor())
transform = torchvision.transforms.Compose(transform_list)
img_tensors = torch.stack([transform(image) for image in images])
grid = torchvision.utils.make_grid(img_tensors, *args, **kwargs)
plt.axis("off")
plt.imshow(grid.permute(1, 2, 0))
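# Minimal usage sketch of the function above; the image paths are hypothetical
# placeholders, and `nrow` is forwarded to torchvision's make_grid.
def _example_visualize_images():
    paths = ["/path/to/cat.jpg", "/path/to/dog.jpg"]  # hypothetical paths
    images = [Image.open(p) for p in paths]
    visualize_images(images, size=(224, 224), nrow=2)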
def visualize_frcnn_features(
image_path: str, features_path: str, objids: List[str], attrids: List[str]
):
img = img_tensorize(image_path)
output_dict = np.load(features_path, allow_pickle=True).item()
frcnn_visualizer = SingleImageViz(img, id2obj=objids, id2attr=attrids)
frcnn_visualizer.draw_boxes(
output_dict.get("boxes"),
output_dict.pop("obj_ids"),
output_dict.pop("obj_probs"),
output_dict.pop("attr_ids"),
output_dict.pop("attr_probs"),
)
height, width, channels = img.shape
buffer = frcnn_visualizer._get_buffer()
array = np.uint8(np.clip(buffer, 0, 255))
image = Image.fromarray(array)
visualize_images([image], (height, width))
| EXA-1-master | exa/models/mmf-main/mmf/utils/visualize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import collections.abc
import functools
import json
import logging
import os
import sys
import time
from functools import wraps
from typing import Any, Callable, Dict, Optional, Union
import torch
from mmf.common.registry import registry
from mmf.utils.configuration import get_mmf_env
from mmf.utils.distributed import get_rank, is_main, is_xla
from mmf.utils.file_io import PathManager
from mmf.utils.timer import Timer
from termcolor import colored
def setup_output_folder(folder_only: bool = False):
"""Sets up and returns the output file where the logs will be placed
    based on the configuration passed. Usually "save_dir/logs/train_<timestamp>.log".
If env.log_dir is passed, logs will be directly saved in this folder.
Args:
folder_only (bool, optional): If folder should be returned and not the file.
Defaults to False.
Returns:
str: folder or file path depending on folder_only flag
"""
save_dir = get_mmf_env(key="save_dir")
time_format = "%Y_%m_%dT%H_%M_%S"
log_filename = "train_"
log_filename += Timer().get_time_hhmmss(None, format=time_format)
log_filename += ".log"
log_folder = os.path.join(save_dir, "logs")
env_log_dir = get_mmf_env(key="log_dir")
if env_log_dir:
log_folder = env_log_dir
if not PathManager.exists(log_folder):
PathManager.mkdirs(log_folder)
if folder_only:
return log_folder
log_filename = os.path.join(log_folder, log_filename)
return log_filename
def setup_logger(
output: str = None,
color: bool = True,
name: str = "mmf",
disable: bool = False,
clear_handlers=True,
*args,
**kwargs,
):
"""
Initialize the MMF logger and set its verbosity level to "INFO".
    Outside libraries shouldn't call this in case they have set their
    own logging handlers and setup. If they do, and don't want to
    clear the handlers, pass the clear_handlers option.
The initial version of this function was taken from D2 and adapted
for MMF.
Args:
output (str): a file name or a directory to save log.
            If it ends with ".txt" or ".log", it is assumed to be a file name.
            Default: Saved to file <save_dir/logs/train_[timestamp].log>
color (bool): If false, won't log colored logs. Default: true
name (str): the root module name of this logger. Defaults to "mmf".
clear_handlers (bool): If false, won't clear existing handlers.
Returns:
logging.Logger: a logger
"""
if disable:
return None
logger = logging.getLogger(name)
logger.propagate = False
logging.captureWarnings(True)
warnings_logger = logging.getLogger("py.warnings")
plain_formatter = logging.Formatter(
"%(asctime)s | %(levelname)s | %(name)s : %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
distributed_rank = get_rank()
handlers = []
config = registry.get("config")
if config:
logging_level = config.get("training", {}).get("logger_level", "info").upper()
else:
logging_level = logging.INFO
if distributed_rank == 0:
logger.setLevel(logging_level)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging_level)
if color:
formatter = ColorfulFormatter(
colored("%(asctime)s | %(name)s: ", "green") + "%(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
else:
formatter = plain_formatter
ch.setFormatter(formatter)
logger.addHandler(ch)
warnings_logger.addHandler(ch)
handlers.append(ch)
# file logging: all workers
if output is None:
output = setup_output_folder()
if output is not None:
if output.endswith(".txt") or output.endswith(".log"):
filename = output
else:
filename = os.path.join(output, "train.log")
if distributed_rank > 0:
filename = filename + f".rank{distributed_rank}"
PathManager.mkdirs(os.path.dirname(filename))
fh = logging.StreamHandler(_cached_log_stream(filename))
fh.setLevel(logging_level)
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
warnings_logger.addHandler(fh)
handlers.append(fh)
# Slurm/FB output, only log the main process
if "train.log" not in filename and distributed_rank == 0:
save_dir = get_mmf_env(key="save_dir")
filename = os.path.join(save_dir, "train.log")
sh = logging.StreamHandler(_cached_log_stream(filename))
sh.setLevel(logging_level)
sh.setFormatter(plain_formatter)
logger.addHandler(sh)
warnings_logger.addHandler(sh)
handlers.append(sh)
logger.info(f"Logging to: {filename}")
# Remove existing handlers to add MMF specific handlers
if clear_handlers:
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Now, add our handlers.
logging.basicConfig(level=logging_level, handlers=handlers)
registry.register("writer", logger)
return logger
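# Minimal usage sketch of setup_logger; the output path is a hypothetical placeholder.
# When `output` is omitted, `setup_output_folder` places the file under
# <save_dir>/logs/train_<timestamp>.log instead.
def _example_setup_logger():
    logger = setup_logger(output="/tmp/mmf_example/train.log", color=True)
    logger.info("logger is ready")
    # child loggers under the "mmf" namespace propagate to the same handlers
    logging.getLogger("mmf.example_module").info("hello from a module logger")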
def setup_very_basic_config(color=True):
plain_formatter = logging.Formatter(
"%(asctime)s | %(levelname)s | %(name)s : %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.INFO)
if color:
formatter = ColorfulFormatter(
colored("%(asctime)s | %(name)s: ", "green") + "%(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
else:
formatter = plain_formatter
ch.setFormatter(formatter)
# Setup a minimal configuration for logging in case something tries to
# log a message even before logging is setup by MMF.
logging.basicConfig(level=logging.INFO, handlers=[ch])
# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def _cached_log_stream(filename):
return PathManager.open(filename, "a")
def _find_caller():
"""
Returns:
str: module name of the caller
tuple: a hashable key to be used to identify different callers
"""
frame = sys._getframe(2)
while frame:
code = frame.f_code
if os.path.join("utils", "logger.") not in code.co_filename:
mod_name = frame.f_globals["__name__"]
if mod_name == "__main__":
mod_name = "mmf"
return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
frame = frame.f_back
def summarize_report(
current_iteration,
num_updates,
max_updates,
meter,
should_print=True,
extra=None,
tb_writer=None,
wandb_logger=None,
):
if extra is None:
extra = {}
if not is_main() and not is_xla():
return
# Log the learning rate if available
if wandb_logger and "lr" in extra:
wandb_logger.log_metrics(
{"train/learning_rate": float(extra["lr"])}, commit=False
)
if tb_writer:
scalar_dict = meter.get_scalar_dict()
tb_writer.add_scalars(scalar_dict, current_iteration)
if wandb_logger:
metrics = meter.get_scalar_dict()
wandb_logger.log_metrics({**metrics, "trainer/global_step": current_iteration})
if not should_print:
return
log_dict = {}
if num_updates is not None and max_updates is not None:
log_dict.update({"progress": f"{num_updates}/{max_updates}"})
log_dict.update(meter.get_log_dict())
log_dict.update(extra)
log_progress(log_dict)
def calculate_time_left(
max_updates,
num_updates,
timer,
num_snapshot_iterations,
log_interval,
eval_interval,
):
if num_updates is None or max_updates is None:
return "Unknown"
time_taken_for_log = time.time() * 1000 - timer.start
iterations_left = max_updates - num_updates
num_logs_left = iterations_left / log_interval
time_left = num_logs_left * time_taken_for_log
snapshot_iteration = num_snapshot_iterations / log_interval
if eval_interval:
snapshot_iteration *= iterations_left / eval_interval
time_left += snapshot_iteration * time_taken_for_log
return timer.get_time_hhmmss(gap=time_left)
def log_progress(info: Union[Dict, Any], log_format="simple"):
"""Useful for logging progress dict.
Args:
info (dict|any): If dict, will be logged as key value pair. Otherwise,
it will be logged directly.
log_format (str, optional): json|simple. Defaults to "simple".
Will use simple mode.
"""
caller, key = _find_caller()
logger = logging.getLogger(caller)
if not isinstance(info, collections.abc.Mapping):
logger.info(info)
if log_format == "simple":
config = registry.get("config")
if config:
log_format = config.training.log_format
if log_format == "simple":
output = ", ".join([f"{key}: {value}" for key, value in info.items()])
elif log_format == "json":
output = json.dumps(info)
else:
output = str(info)
logger.info(output)
def log_class_usage(component_type, klass):
"""This function is used to log the usage of different MMF components."""
identifier = "MMF"
if klass and hasattr(klass, "__name__"):
identifier += f".{component_type}.{klass.__name__}"
torch._C._log_api_usage_once(identifier)
def skip_if_tensorboard_inactive(fn: Callable) -> Callable:
"""
Checks whether summary writer is initialized and rank is 0 (main)
Args:
fn (Callable): Function which should be called based on whether
tensorboard should log or not
"""
@wraps(fn)
def wrapped_fn(self, *args: Any, **kwargs: Any) -> Optional[Any]:
if self.summary_writer is None or not self._is_main:
return None
else:
return fn(self, *args, **kwargs)
return wrapped_fn
# ColorfulFormatter is adopted from Detectron2 and adapted for MMF
class ColorfulFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def formatMessage(self, record):
log = super().formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
else:
return log
return prefix + " " + log
class TensorboardLogger:
def __init__(self, log_folder="./logs", iteration=0):
self._summary_writer = None
self._is_main = is_main()
self.timer = Timer()
self.log_folder = log_folder
self.time_format = "%Y-%m-%dT%H:%M:%S"
current_time = self.timer.get_time_hhmmss(None, format=self.time_format)
self.tensorboard_folder = os.path.join(
self.log_folder, f"tensorboard_{current_time}"
)
@property
def summary_writer(self):
# Only on rank zero
if not self._is_main:
return None
if self._summary_writer is None:
# This would handle warning of missing tensorboard
from torch.utils.tensorboard import SummaryWriter
self._summary_writer = SummaryWriter(self.tensorboard_folder)
return self._summary_writer
@skip_if_tensorboard_inactive
def close(self):
"""
Closes the tensorboard summary writer.
"""
self.summary_writer.close()
@skip_if_tensorboard_inactive
def add_scalar(self, key, value, iteration):
self.summary_writer.add_scalar(key, value, iteration)
@skip_if_tensorboard_inactive
def add_scalars(self, scalar_dict, iteration):
for key, val in scalar_dict.items():
self.summary_writer.add_scalar(key, val, iteration)
@skip_if_tensorboard_inactive
def add_histogram_for_model(self, model, iteration):
for name, param in model.named_parameters():
np_param = param.clone().cpu().data.numpy()
self.summary_writer.add_histogram(name, np_param, iteration)
class WandbLogger:
r"""
Log using `Weights and Biases`.
Args:
entity: An entity is a username or team name where you're sending runs.
config: Configuration for the run.
project: Name of the W&B project.
Raises:
ImportError: If wandb package is not installed.
"""
def __init__(
self,
entity: Optional[str] = None,
config: Optional[Dict] = None,
project: Optional[str] = None,
):
try:
import wandb
except ImportError:
raise ImportError(
"To use the Weights and Biases Logger please install wandb."
"Run `pip install wandb` to install it."
)
self._wandb = wandb
self._wandb_init = dict(entity=entity, config=config, project=project)
wandb_kwargs = dict(config.training.wandb)
wandb_kwargs.pop("enabled")
wandb_kwargs.pop("entity")
wandb_kwargs.pop("project")
wandb_kwargs.pop("log_checkpoint")
self._wandb_init.update(**wandb_kwargs)
self.setup()
def setup(self):
"""
Setup `Weights and Biases` for logging.
"""
if is_main():
if self._wandb.run is None:
self._wandb.init(**self._wandb_init)
# define default x-axis (for latest wandb versions)
if getattr(self._wandb, "define_metric", None):
self._wandb.define_metric("trainer/global_step")
self._wandb.define_metric(
"*", step_metric="trainer/global_step", step_sync=True
)
def __del__(self):
if getattr(self, "_wandb", None) is not None:
self._wandb.finish()
def _should_log_wandb(self):
if self._wandb is None or not is_main():
return False
else:
return True
def log_metrics(self, metrics: Dict[str, float], commit=True):
"""
        Log the monitored metrics to the wandb dashboard.
Args:
metrics (Dict[str, float]): A dictionary of metrics to log.
commit (bool): Save the metrics dict to the wandb server and
increment the step. (default: True)
"""
if not self._should_log_wandb():
return
self._wandb.log(metrics, commit=commit)
def log_model_checkpoint(self, model_path):
"""
Log the model checkpoint to the wandb dashboard.
Args:
model_path (str): Path to the model file.
"""
if not self._should_log_wandb():
return
model_artifact = self._wandb.Artifact(
"run_" + self._wandb.run.id + "_model", type="model"
)
model_artifact.add_file(model_path, name="current.pt")
self._wandb.log_artifact(model_artifact, aliases=["latest"])
| EXA-1-master | exa/models/mmf-main/mmf/utils/logger.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Any, Dict, Tuple
import torch
from mmf.common.registry import registry
logger = logging.getLogger(__name__)
def is_pl_model_checkpoint(checkpoint):
return "state_dict" in checkpoint
def is_pl_trainer_checkpoint(checkpoint):
return "pytorch-lightning_version" in checkpoint
def is_model_only_checkpoint(checkpoint):
if is_pl_trainer_checkpoint(checkpoint):
return "state_dict" not in checkpoint
else:
return "model" not in checkpoint
def _format_state_key(model: torch.nn.Module, attr: str):
if hasattr(model, "format_state_key"):
formatted_attr = model.format_state_key(attr)
else:
formatted_attr = attr
return formatted_attr
def _should_skip_if_mismatch(
shape1: Tuple[str, torch.Size],
shape2: Tuple[str, torch.Size],
config: Dict[str, Any],
) -> None:
if shape1[1] != shape2[1]:
message = f"""
Modules {shape1[0]} and {shape2[0]} don't have the same shape:
own_attr has shape {shape1[1]} while
attr has shape {shape2[1]}. This can fail down the line.
"""
if config.checkpoint.get("bypass_shape_mismatch", False):
message += "bypass_shape_mismatch in config.checkpoint "
message += "is set to be True, -- so skipping copy"
logger.warning(message)
return True
else:
logger.warning(message)
# In case of either mismatch or match both, MMF will try
# to copy the attribute.
return False
def get_pretrained_state_mapping_checkpoint(
checkpoint: Dict[str, Any], model: torch.nn.Module, config: Dict[str, Any]
) -> Dict[str, Any]:
"""
This function gets the checkpoint keys that exists in pretrained state mapping
that also exist in model's state, and returns a dictionary with the value from the
`checkpoint` dict.
"""
mapping = config.checkpoint.pretrained_state_mapping
own_state = model.state_dict()
tmp_checkpoint = dict(checkpoint)
ckpt_update_dict = dict()
for key, value in mapping.items():
key += "."
value += "."
for attr in tmp_checkpoint:
formatted_attr = _format_state_key(model, attr)
for own_attr in own_state:
if (
key in own_attr
and value in formatted_attr
and own_attr.replace(key, "") == formatted_attr.replace(value, "")
):
if _should_skip_if_mismatch(
(own_attr, own_state[own_attr].shape),
(attr, checkpoint[attr].shape),
config,
):
continue
ckpt_update_dict[own_attr] = attr
return ckpt_update_dict
def remove_keys_inplace(ckpt: Dict[str, Any], keys_to_remove):
tmp_keys = dict(ckpt)
for key in tmp_keys:
if key in keys_to_remove:
ckpt.pop(key)
class MMFToPLCheckpointUpdater:
def __init__(self):
pass
def update_checkpoint(
self, checkpoint: Dict[str, Any], model: torch.nn.Module
) -> None:
r"""
This function should only be called on lightning. It handles checkpoint
update that is being called by LightningModule's `on_load_checkpoint`,
which should update the checkpoint to the format desired. The logic
contains two parts, when checkpoint is a model only checkpoint and
when checkpoint is a trainer checkpoint. This function applies the checkpoint
update in place.
If the checkpoint is a model only checkpoint:
1. If it is an mmf checkpoint, convert to lightning format
putting it inside a "state_dict" key
2. Apply the model's format state key to give the model a chance to update
3. If config.checkpoint.pretrained_state_mapping is True, apply
                the mapping specified in the config, and remove the keys that exist
in the checkpoint that do not exist in the mapping.
The updated checkpoint should be of the format: {"state_dict": ckpts}, where
ckpts should be the model state_dict.
If the checkpoint is a trainer only checkpoint:
1. do the above steps for model checkpoint update
2. do the checkpoint trainer state update from mmf to lightning
The updated checkpoint should be of the format: {
`epoch`: x,
`global_step`: x,
`pytorch-lightning_version`: x,
`state_dict`: x,
`callbacks`: x,
`optimizer_states`: [x],
`lr_schedulers`: [x],
}
"""
if is_model_only_checkpoint(checkpoint):
self._update_model_checkpoint(checkpoint=checkpoint, model=model)
return
# this assumes the checkpoint is trainer only
if not is_pl_trainer_checkpoint(checkpoint):
self._update_trainer_checkpoint_from_mmf(checkpoint=checkpoint, model=model)
def _update_trainer_checkpoint_from_mmf(
self, checkpoint: Dict[str, Any], model: Any
) -> None:
"""updates checkpoint from the mmf format to lightning format.
mmf checkpoint is with keys:
        `model`, `optimizer`, `best_iteration`, `current_iteration`, `current_epoch`,
        `num_updates`, `best_update`, `best_metric_value`, `fp16_scaler`, `config`,
`lr_scheduler`, `git/branch`, `git/commit_hash`, `git/commit_author`,
`git/commit_message`, `git/diff`
"""
remove_keys_inplace(
checkpoint,
{
"best_iteration",
"current_iteration",
"best_update",
"best_metric_value",
"fp16_scaler",
"config",
"git/branch",
"git/commit_hash",
"git/commit_author",
"git/commit_message",
"git/diff",
},
)
# update model
if "model" in checkpoint:
model_checkpoint = checkpoint.pop("model")
checkpoint["state_dict"] = model_checkpoint
self._update_model_format_state_keys(checkpoint["state_dict"], model=model)
config = registry.get("config")
if config.checkpoint.get("resume_pretrained", False):
self._update_pretrained_state_mapping(
checkpoint=checkpoint["state_dict"], model=model, config=config
)
# update trainer progress
if "optimizer" in checkpoint:
optimizer = checkpoint.pop("optimizer")
checkpoint["optimizer_states"] = [optimizer]
if "lr_scheduler" in checkpoint:
lr_scheduler = checkpoint.pop("lr_scheduler")
checkpoint["lr_schedulers"] = [lr_scheduler]
else:
            # We need to set this if it is not specified because lightning expects
            # lr_schedulers to be present to resume a checkpoint, while in mmf it is
            # not guaranteed that lr_schedulers are used and saved in the checkpoint.
checkpoint["lr_schedulers"] = []
if "num_updates" in checkpoint:
global_step = checkpoint.pop("num_updates")
checkpoint["global_step"] = global_step
if "current_epoch" in checkpoint:
epoch = checkpoint.pop("current_epoch")
checkpoint["epoch"] = epoch
def _update_model_checkpoint(
self, checkpoint: Dict[str, Any], model: torch.nn.Module
) -> None:
"""
This function assumes the checkpoint is just the model and does not include
training params.
"""
if not is_pl_model_checkpoint(checkpoint):
self._update_model_checkpoint_from_mmf(checkpoint)
# this assumes that model_checkpoint here is the lightning format
self._update_model_format_state_keys(checkpoint["state_dict"], model=model)
config = registry.get("config")
if config.checkpoint.get("resume_pretrained", False):
self._update_pretrained_state_mapping(
checkpoint=checkpoint["state_dict"], model=model, config=config
)
def _update_pretrained_state_mapping(
self, checkpoint: Dict[str, Any], model: torch.nn.Module, config: Dict[str, Any]
) -> None:
"""
This function removes all checkpoint keys that do not exist in
the `pretrained_state_mapping`
"""
ckpt_update_dict = get_pretrained_state_mapping_checkpoint(
checkpoint=checkpoint, model=model, config=config
)
accepted_keys = set()
for own_attr, attr in ckpt_update_dict.items():
assert own_attr == attr, (
"Since `_update_model_format_state_keys` was run ",
"before, this has to be held true",
)
logger.info("Copying " + own_attr + " from " + attr)
accepted_keys.add(attr)
# keep only the checkpoint keys that exist in the `pretrained_state_mapping`
tmp_checkpoint = dict(checkpoint)
for key in tmp_checkpoint:
if key not in accepted_keys:
checkpoint.pop(key)
def _update_model_format_state_keys(
self, checkpoint: Dict[str, Any], model: torch.nn.Module
) -> None:
"""
Function to rewrite the checkpoint in place to give the model a chance
to update state_dict keys. This assumes that checkpoint is the
model's state_dict.
"""
tmp_state_dict = dict(checkpoint)
for attr in tmp_state_dict:
new_attr = _format_state_key(model, attr)
if attr != new_attr:
logger.info(f"checkpoint: rewriting {attr} into {new_attr}")
value = checkpoint.pop(attr)
checkpoint[new_attr] = value
def _update_model_checkpoint_from_mmf(self, checkpoint: Dict[str, Any]) -> None:
tmp_checkpoint = dict(checkpoint)
checkpoint.clear()
checkpoint["state_dict"] = tmp_checkpoint
| EXA-1-master | exa/models/mmf-main/mmf/utils/checkpoint_updater.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import warnings
from typing import List
import torch
from mmf.common.sample import Sample
from omegaconf import DictConfig
def build_bbox_tensors(infos, max_length):
num_bbox = min(max_length, len(infos))
# After num_bbox, everything else should be zero
coord_tensor = torch.zeros((max_length, 4), dtype=torch.float)
width_tensor = torch.zeros(max_length, dtype=torch.float)
height_tensor = torch.zeros(max_length, dtype=torch.float)
bbox_types = ["xyxy"] * max_length
infos = infos[:num_bbox]
sample = Sample()
for idx, info in enumerate(infos):
bbox = info["bounding_box"]
x = bbox.get("top_left_x", bbox["topLeftX"])
y = bbox.get("top_left_y", bbox["topLeftY"])
width = bbox["width"]
height = bbox["height"]
coord_tensor[idx][0] = x
coord_tensor[idx][1] = y
coord_tensor[idx][2] = x + width
coord_tensor[idx][3] = y + height
width_tensor[idx] = width
height_tensor[idx] = height
sample.coordinates = coord_tensor
sample.width = width_tensor
sample.height = height_tensor
sample.bbox_types = bbox_types
return sample
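# Minimal usage sketch of build_bbox_tensors; the info dict is a made-up example.
# Note that the `bbox.get("top_left_x", bbox["topLeftX"])` lookups above evaluate the
# default eagerly, so the camelCase keys must be present in the annotation.
def _example_build_bbox_tensors():
    infos = [{"bounding_box": {"topLeftX": 10, "topLeftY": 20, "width": 30, "height": 40}}]
    sample = build_bbox_tensors(infos, max_length=4)
    assert sample.coordinates.shape == (4, 4)  # padded up to max_length
    assert sample.coordinates[0].tolist() == [10.0, 20.0, 40.0, 60.0]  # xyxy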
def build_dataset_from_multiple_imdbs(config, dataset_cls, dataset_type):
from mmf.datasets.concat_dataset import MMFConcatDataset
if dataset_type not in config.imdb_files:
warnings.warn(
"Dataset type {} is not present in "
"imdb_files of dataset config. Returning None. "
"This dataset won't be used.".format(dataset_type)
)
return None
imdb_files = config["imdb_files"][dataset_type]
datasets = []
for imdb_idx in range(len(imdb_files)):
dataset = dataset_cls(dataset_type, imdb_idx, config)
datasets.append(dataset)
dataset = MMFConcatDataset(datasets)
return dataset
def dataset_list_from_config(config: DictConfig) -> List[str]:
if "datasets" not in config:
warnings.warn("No datasets attribute present. Setting default to vqa2.")
datasets = "vqa2"
else:
datasets = config.datasets
if type(datasets) == str:
datasets = list(map(lambda x: x.strip(), datasets.split(",")))
return datasets
| EXA-1-master | exa/models/mmf-main/mmf/utils/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Inspired from maskrcnn_benchmark, fairseq
import contextlib
import logging
import os
import pickle
import socket
import subprocess
import warnings
from itertools import chain
import torch
from mmf.common.registry import registry
from torch import distributed as dist
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
MAX_SIZE_LIMIT = 65533
BYTE_SIZE = 256
logger = logging.getLogger(__name__)
# copied from https://github.com/facebookresearch/vissl/blob/master/vissl/utils/
# distributed_gradients.py
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
class XLAGatherLayer(torch.autograd.Function):
"""
Gather tensors from all TPU workers with support for backward propagation.
"""
@staticmethod
def forward(ctx, x, dim):
ctx.dim = dim
tensor_list = xm.all_gather(x.unsqueeze(dim), dim=dim)
return tensor_list
@staticmethod
def backward(ctx, grad_output):
dim = ctx.dim
all_grad_output = xm.all_reduce(xm.REDUCE_SUM, grad_output)
return all_grad_output.select(dim, xm.get_ordinal()), None
def synchronize(message="sync-workers"):
if is_xla():
xm.rendezvous(message)
elif not dist.is_available():
return
if not dist.is_nccl_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def is_xla():
# Cover none case as well
return not (not registry.get("is_xla", no_warning=True))
def get_rank():
if is_xla():
return xm.get_ordinal()
if not dist.is_available():
return 0
if not dist.is_nccl_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main():
return is_master()
def is_master():
return get_rank() == 0
def is_dist_initialized():
return dist.is_available() and dist.is_initialized()
def get_world_size():
if is_xla():
return xm.xrt_world_size()
if not dist.is_available():
return 1
if not dist.is_nccl_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def broadcast_tensor(tensor, src=0):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
if is_xla():
tensor = xm.all_to_all(
tensor.repeat([world_size] + [1] * tensor.dim()),
split_dimension=0,
concat_dimension=0,
split_count=world_size,
)[src]
else:
dist.broadcast(tensor, src=0)
return tensor
def broadcast_scalar(scalar, src=0, device="cpu"):
if get_world_size() < 2:
return scalar
scalar_tensor = torch.tensor(scalar).long().to(device)
scalar_tensor = broadcast_tensor(scalar_tensor, src)
return scalar_tensor.item()
def reduce_tensor(tensor):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
dist.reduce(tensor, dst=0)
if dist.get_rank() == 0:
tensor = tensor.div(world_size)
return tensor
def gather_tensor(tensor):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
tensor_list = []
if is_xla():
tensor_list = xm.all_gather(tensor)
tensor_list = tensor_list.view(world_size, *tensor.size())
else:
for _ in range(world_size):
tensor_list.append(torch.zeros_like(tensor))
dist.all_gather(tensor_list, tensor)
tensor_list = torch.stack(tensor_list, dim=0)
return tensor_list
def gather_tensor_along_batch(tensor, dim=0):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
tensor_list = []
for _ in range(world_size):
tensor_list.append(torch.zeros_like(tensor))
dist.all_gather(tensor_list, tensor)
tensor_list = torch.cat(tensor_list, dim=dim)
return tensor_list
def gather_tensor_along_batch_with_backward(tensor, dim=0):
world_size = get_world_size()
if world_size < 2:
return tensor
if is_xla():
tensor_list = XLAGatherLayer.apply(tensor, dim)
tensor_list = tensor_list.flatten(start_dim=dim, end_dim=dim + 1)
else:
tensor_list = GatherLayer.apply(tensor)
tensor_list = torch.cat(tensor_list, dim=dim)
return tensor_list
def reduce_dict(dictionary):
world_size = get_world_size()
if world_size < 2:
return dictionary
with torch.no_grad():
if len(dictionary) == 0:
return dictionary
keys, values = zip(*sorted(dictionary.items()))
values = torch.stack(values, dim=0)
if is_xla():
values = xm.all_reduce("sum", [values], scale=1.0 / world_size)[0]
else:
dist.reduce(values, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(keys, values)}
return reduced_dict
# Object byte tensor utilities have been adopted from
# https://github.com/pytorch/fairseq/blob/main/fairseq/distributed_utils.py
def object_to_byte_tensor(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
f"objects too large: object size {obj_size}, max size {max_size}"
)
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2 : 2 + obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor
def byte_tensor_to_object(byte_tensor, max_size=MAX_SIZE_LIMIT):
"""
Decode PyTorch byte tensors to Python objects
"""
assert max_size <= MAX_SIZE_LIMIT
obj_size = byte_tensor[0].item() * 256 + byte_tensor[1].item()
obj_enc = bytes(byte_tensor[2 : 2 + obj_size].tolist())
obj = pickle.loads(obj_enc)
return obj
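# Minimal round-trip sketch of the byte-tensor encoding above, which is how arbitrary
# Python metadata (e.g. sample ids) is shipped between processes; the object is a
# made-up example.
def _example_byte_tensor_roundtrip():
    obj = {"question_id": 42, "answer": "yes"}
    byte_tensor = object_to_byte_tensor(obj, max_size=128)
    assert byte_tensor.dtype == torch.uint8 and byte_tensor.numel() == 128
    assert byte_tensor_to_object(byte_tensor) == obj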
def infer_init_method(config):
if config.distributed.init_method is not None:
return
registry.register("is_xla", config.training.get("device", "cuda") == "xla")
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
config.distributed.init_method = "env://"
config.distributed.world_size = int(os.environ["WORLD_SIZE"])
config.distributed.rank = int(os.environ["RANK"])
config.distributed.no_spawn = True
# we can determine the init method automatically for Slurm
elif config.distributed.port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
config.distributed.init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=config.distributed.port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
assert config.distributed.world_size % nnodes == 0
gpus_per_node = config.distributed.world_size // nnodes
node_id = int(os.environ.get("SLURM_NODEID"))
config.distributed.rank = node_id * gpus_per_node
else:
assert ntasks_per_node == config.distributed.world_size // nnodes
config.distributed.no_spawn = True
config.distributed.rank = int(os.environ.get("SLURM_PROCID"))
config.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
def distributed_init(config):
if config.distributed.world_size == 1:
raise ValueError("Cannot initialize distributed with distributed_world_size=1")
logger.info(f"XLA Mode:{is_xla()}")
if is_xla():
config.device_id = xm.get_local_ordinal()
config.distributed.rank = xm.get_ordinal()
elif dist.is_initialized():
warnings.warn("Distributed is already initialized, cannot initialize twice!")
config.distributed.rank = dist.get_rank()
else:
logger.info(
f"Distributed Init (Rank {config.distributed.rank}): "
f"{config.distributed.init_method}"
)
nccl_config = config.distributed.get("nccl", {})
if nccl_config.get("nsocks_perthread", None):
os.environ["NCCL_NSOCKS_PERTHREAD"] = str(nccl_config["nsocks_perthread"])
logger.info(f"NCCL_NSOCKS_PERTHREAD: {os.environ['NCCL_NSOCKS_PERTHREAD']}")
if nccl_config.get("socket_nthreads", None):
os.environ["NCCL_SOCKET_NTHREADS"] = str(nccl_config["socket_nthreads"])
logger.info(f"NCCL_SOCKET_NTHREADS: {os.environ['NCCL_SOCKET_NTHREADS']}")
dist.init_process_group(
backend=config.distributed.backend,
init_method=config.distributed.init_method,
world_size=config.distributed.world_size,
rank=config.distributed.rank,
)
logger.info(
f"Initialized Host {socket.gethostname()} as Rank "
f"{config.distributed.rank}"
)
if "MASTER_ADDR" not in os.environ or "MASTER_PORT" not in os.environ:
# Set for onboxdataloader support
split = config.distributed.init_method.split("//")
assert len(split) == 2, (
"host url for distributed should be split by '//' "
+ "into exactly two elements"
)
split = split[1].split(":")
assert (
len(split) == 2
), "host url should be of the form <host_url>:<host_port>"
os.environ["MASTER_ADDR"] = split[0]
os.environ["MASTER_PORT"] = split[1]
# perform a dummy all-reduce to initialize the NCCL communicator
dist.all_reduce(torch.zeros(1).cuda())
suppress_output(is_main())
config.distributed.rank = dist.get_rank()
return config.distributed.rank
def suppress_output(is_main):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_main or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
import warnings
builtin_warn = warnings.warn
def warn(*args, **kwargs):
force = kwargs.pop("force", False)
if is_main or force:
builtin_warn(*args, **kwargs)
# Log warnings only once
warnings.warn = warn
warnings.simplefilter("once", UserWarning)
def open_if_master(path, mode):
from mmf.utils.file_io import PathManager
if is_main():
return PathManager.open(path, mode)
else:
return contextlib.nullcontext()
def open_if_main(*args):
return open_if_master(*args)
def broadcast_xla_master_model_param(model):
logger.info("Broadcasting XLA model parameters and buffers from master process ...")
parameters_and_buffers = []
for p in chain(model.parameters(), model.buffers()):
# Set all params in non-master devices to zero so that all_reduce is equivalent
# to broadcasting parameters from master to other devices.
if not is_main():
zero = torch.tensor(0, dtype=p.data.dtype, device=p.data.device)
p.data.mul_(zero)
parameters_and_buffers.append(p.data)
xm.wait_device_ops()
xm.all_reduce(xm.REDUCE_SUM, parameters_and_buffers)
xm.mark_step()
xm.rendezvous("mmf.trainers.core.device.broadcast_xla_master_model_param")
logger.info("Done!")
| EXA-1-master | exa/models/mmf-main/mmf/utils/distributed.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import Tensor
def transform_to_batch_sequence(tensor: Tensor) -> Tensor:
if len(tensor.size()) == 2:
return tensor
else:
assert len(tensor.size()) == 3
return tensor.contiguous().view(-1, tensor.size(-1))
def transform_to_batch_sequence_dim(tensor: Tensor) -> Tensor:
if len(tensor.size()) == 3:
return tensor
else:
assert len(tensor.size()) == 4
return tensor.contiguous().view(-1, tensor.size(-2), tensor.size(-1))
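# Illustrative examples (a sketch, not part of the original module; assumes torch
# itself is imported): both helpers flatten the batch and sequence dimensions.
#
#     transform_to_batch_sequence(torch.zeros(2, 4, 8)).shape         # (8, 8)
#     transform_to_batch_sequence_dim(torch.zeros(2, 4, 6, 8)).shape  # (8, 6, 8)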
| EXA-1-master | exa/models/mmf-main/mmf/utils/transform.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Text utils module contains implementations for various decoding strategies like
Greedy, Beam Search and Nucleus Sampling.
In your model's config you can specify ``inference`` attribute to use these strategies
in the following way:
.. code::
model_config:
some_model:
inference:
- type: greedy
- params: {}
"""
import os
import re
from collections import Counter
from itertools import chain
import torch
from mmf.common.registry import registry
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
SENTENCE_SPLIT_REGEX = re.compile(r"(\W+)")
def generate_ngrams(tokens, n=1):
"""Generate ngrams for particular 'n' from a list of tokens
Args:
tokens (List[str]): List of tokens for which the ngram are to be generated
n (int, optional): n for which ngrams are to be generated. Defaults to 1.
Returns:
List[str]: List of ngrams generated.
"""
shifted_tokens = (tokens[i:] for i in range(n))
tuple_ngrams = zip(*shifted_tokens)
return (" ".join(i) for i in tuple_ngrams)
def generate_ngrams_range(tokens, ngram_range=(1, 3)):
"""Generates and returns a list of ngrams for all n present in ngram_range
Args:
tokens (List[str]): List of string tokens for which ngram are to be generated
ngram_range (List[int], optional): List of 'n' for which ngrams are to be
generated. For e.g. if ngram_range = (1, 4) then it will returns
1grams, 2grams and 3grams. Defaults to (1, 3).
Returns:
List[str]: List of ngrams for each n in ngram_range
"""
    assert len(ngram_range) == 2, (
        "'ngram_range' should be a tuple of two elements "
        "specifying the range of n values"
    )
return chain(*(generate_ngrams(tokens, i) for i in range(*ngram_range)))
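# Illustrative example (not part of the original module): with the default
# ngram_range=(1, 3), unigrams and bigrams are chained together.
#
#     >>> list(generate_ngrams_range(["a", "b", "c"]))
#     ['a', 'b', 'c', 'a b', 'b c']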
def tokenize(sentence, regex=SENTENCE_SPLIT_REGEX, keep=None, remove=None):
if keep is None:
keep = ["'s"]
if remove is None:
remove = [",", "?"]
sentence = sentence.lower()
for token in keep:
sentence = sentence.replace(token, " " + token)
for token in remove:
sentence = sentence.replace(token, "")
tokens = regex.split(sentence)
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
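# Illustrative example (not part of the original module): by default commas and
# question marks are removed before splitting on non-word characters.
#
#     >>> tokenize("Where is the cat?")
#     ['where', 'is', 'the', 'cat']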
def word_tokenize(word, remove=None):
if remove is None:
remove = [",", "?"]
word = word.lower()
for item in remove:
word = word.replace(item, "")
word = word.replace("'s", " 's")
return word.strip()
def load_str_list(fname):
with PathManager.open(fname) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
return lines
class VocabDict:
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
START_TOKEN = "<s>"
END_TOKEN = "</s>"
PAD_INDEX = 0
SOS_INDEX = 1
EOS_INDEX = 2
UNK_INDEX = 3
def __init__(self, vocab_file, data_dir=None):
if not PathManager.exists(vocab_file) and data_dir is not None:
vocab_file = get_absolute_path(os.path.join(data_dir, vocab_file))
if not PathManager.exists(vocab_file):
raise RuntimeError(f"Vocab file {vocab_file} for vocab dict doesn't exist")
self.word_list = load_str_list(vocab_file)
self._build()
def _build(self):
if self.UNK_TOKEN not in self.word_list:
self.word_list = [self.UNK_TOKEN] + self.word_list
self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}
# String (word) to integer (index) dict mapping
self.stoi = self.word2idx_dict
# Integer to string (word) reverse mapping
self.itos = self.word_list
self.num_vocab = len(self.word_list)
self.UNK_INDEX = (
self.word2idx_dict[self.UNK_TOKEN]
if self.UNK_TOKEN in self.word2idx_dict
else None
)
self.PAD_INDEX = (
self.word2idx_dict[self.PAD_TOKEN]
if self.PAD_TOKEN in self.word2idx_dict
else None
)
def idx2word(self, n_w):
return self.word_list[n_w]
def __len__(self):
return len(self.word_list)
def get_size(self):
return len(self.word_list)
def get_unk_index(self):
return self.UNK_INDEX
def get_unk_token(self):
return self.UNK_TOKEN
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_INDEX is not None:
return self.UNK_INDEX
else:
raise ValueError(
"word %s not in dictionary \
(while dictionary does not contain <unk>)"
% w
)
def tokenize_and_index(self, sentence):
inds = [self.word2idx(w) for w in tokenize(sentence)]
return inds
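# Illustrative usage sketch (the vocabulary file path below is hypothetical):
#
#     vocab = VocabDict("vocabs/answers_vqa.txt", data_dir="/path/to/data")
#     token_ids = vocab.tokenize_and_index("what color is the cat")
#
# Words missing from the file map to the <unk> index when <unk> is in the vocab.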
class VocabFromText(VocabDict):
DEFAULT_TOKENS = [
VocabDict.PAD_TOKEN,
VocabDict.UNK_TOKEN,
VocabDict.START_TOKEN,
VocabDict.END_TOKEN,
]
def __init__(
self,
sentences,
min_count=1,
regex=SENTENCE_SPLIT_REGEX,
keep=None,
remove=None,
only_unk_extra=False,
):
if keep is None:
keep = []
if remove is None:
remove = []
token_counter = Counter()
for sentence in sentences:
tokens = tokenize(sentence, regex=regex, keep=keep, remove=remove)
token_counter.update(tokens)
token_list = []
for token in token_counter:
if token_counter[token] >= min_count:
token_list.append(token)
extras = self.DEFAULT_TOKENS
if only_unk_extra:
extras = [self.UNK_TOKEN]
self.word_list = extras + token_list
self._build()
class TextDecoder:
"""Base class to be inherited by all decoding strategies. Contains
implementations that are common for all strategies.
Args:
vocab (list): Collection of all words in vocabulary.
"""
def __init__(self, vocab):
self._vocab = vocab
self._vocab_size = vocab.get_size()
# Lists to store completed sequences and scores
self._complete_seqs = []
self._complete_seqs_scores = []
def init_batch(self, sample_list):
img_size = sample_list.image_feature_0.size()
self._batch_size, feature_size_1, feature_size_2 = img_size
t_batch_size = self._batch_size * self._decode_size
self.seqs = sample_list.answers.new_full(
(t_batch_size, 1), self._vocab.SOS_INDEX, dtype=torch.long
)
sample_list.image_feature_0 = (
sample_list.image_feature_0.unsqueeze(1)
.expand(-1, self._decode_size, -1, -1)
.reshape(t_batch_size, feature_size_1, feature_size_2)
)
self.sample_list = sample_list
return sample_list
def add_next_word(self, seqs, prev_word_inds, next_word_inds):
return torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)
def find_complete_inds(self, next_word_inds):
incomplete_inds = []
for ind, next_word in enumerate(next_word_inds):
if next_word != self._vocab.EOS_INDEX:
incomplete_inds.append(ind)
complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
return complete_inds, incomplete_inds
def update_data(self, data, prev_word_inds, next_word_inds, incomplete_inds):
data["texts"] = next_word_inds[incomplete_inds].unsqueeze(1)
h1 = data["state"]["td_hidden"][0][prev_word_inds[incomplete_inds]]
c1 = data["state"]["td_hidden"][1][prev_word_inds[incomplete_inds]]
h2 = data["state"]["lm_hidden"][0][prev_word_inds[incomplete_inds]]
c2 = data["state"]["lm_hidden"][1][prev_word_inds[incomplete_inds]]
data["state"] = {"td_hidden": (h1, c1), "lm_hidden": (h2, c2)}
return data
@registry.register_decoder("beam_search")
class BeamSearch(TextDecoder):
def __init__(self, vocab, config):
super().__init__(vocab)
self._decode_size = config["inference"]["params"]["beam_length"]
def init_batch(self, sample_list):
self.sample_list = super().init_batch(sample_list)
# initialize with t_batch_size = _batch_size * _decode_size
self.top_k_scores = sample_list.answers.new_zeros(
(self._batch_size * self._decode_size, 1), dtype=torch.float
)
# maintain _decode_size, _complete_seqs and _complete_seqs_scores
# for each example in a batch.
self._decode_sizes = [self._decode_size] * self._batch_size
for _ in range(self._batch_size):
self._complete_seqs.append([])
self._complete_seqs_scores.append([])
return self.sample_list
def decode(self, t, data, scores):
# Add predicted scores to top_k_scores
scores = torch.nn.functional.log_softmax(scores, dim=1)
scores = self.top_k_scores.expand_as(scores) + scores
# Find next top k scores and words. We flatten the scores tensor here
# and get the top_k_scores and their indices top_k_words
top_k_scores, top_k_words = [], []
ex_start = 0
for decode_size in self._decode_sizes:
ex_end = ex_start + decode_size
if t == 0:
top_k_score, top_k_word = scores[ex_start].topk(
decode_size, 0, True, True
)
else:
top_k_score, top_k_word = (
scores[ex_start:ex_end].view(-1).topk(decode_size, 0, True, True)
)
top_k_scores.extend(top_k_score)
top_k_words.append(top_k_word)
ex_start = ex_end
self.top_k_scores = torch.stack(top_k_scores)
# Convert to vocab indices. top_k_words contain indices from a flattened
# k x vocab_size tensor. To get prev_word_indices we divide top_k_words
# by vocab_size to determine which index in the beam among k generated
# the next top_k_word. To get next_word_indices we take top_k_words
# modulo vocab_size index. For example :
# vocab_size : 9491
# top_k_words : [610, 7, 19592, 9529, 292]
# prev_word_ind : [0, 0, 2, 1, 0]
# next_word_ind : [610, 7, 610, 38, 292]
# further, shift the prev_word_ind by ex_start to find corresponding example
# within a batch.
ex_start = 0
prev_word_inds, next_word_inds = [], []
for ex_idx, decode_size in enumerate(self._decode_sizes):
prev_word_inds.extend((top_k_words[ex_idx] // self._vocab_size) + ex_start)
next_word_inds.extend(top_k_words[ex_idx] % self._vocab_size)
ex_start += decode_size
prev_word_inds = torch.stack(prev_word_inds)
next_word_inds = torch.stack(next_word_inds)
# Add new words to sequences
self.seqs = self.add_next_word(self.seqs, prev_word_inds, next_word_inds)
# Find completed sequences
complete_inds, incomplete_inds = self.find_complete_inds(next_word_inds)
# Add to completed sequences and Reduce beam length
ex_start = 0
for ex_idx, decode_size in enumerate(self._decode_sizes):
for beam_idx in range(ex_start, ex_start + decode_size):
if beam_idx in complete_inds:
top_k_score = self.top_k_scores[beam_idx]
self._complete_seqs[ex_idx].append(self.seqs[beam_idx].tolist())
self._complete_seqs_scores[ex_idx].append(top_k_score)
self._decode_sizes[ex_idx] -= 1
ex_start += decode_size
# Proceed with incomplete sequences
if sum(self._decode_sizes) == 0:
return True, data, 0
self.seqs = self.seqs[incomplete_inds]
self.top_k_scores = self.top_k_scores[incomplete_inds].unsqueeze(1)
# TODO: Make the data update generic for any type of model
# This is specific to BUTD model only.
image_feature_0 = self.sample_list.image_feature_0
self.sample_list.image_feature_0 = image_feature_0[incomplete_inds]
data = self.update_data(data, prev_word_inds, next_word_inds, incomplete_inds)
next_beam_length = len(prev_word_inds[incomplete_inds])
return False, data, next_beam_length
def get_result(self):
captions = []
max_len = 0
for ex_idx in range(len(self._complete_seqs_scores)):
if len(self._complete_seqs_scores[ex_idx]) == 0:
captions.append([0] * 5)
max_len = max(5, max_len)
else:
max_score = max(self._complete_seqs_scores[ex_idx])
max_idx = self._complete_seqs_scores[ex_idx].index(max_score)
captions.append(self._complete_seqs[ex_idx][max_idx])
max_len = max(max_len, len(captions[-1]))
for ex_idx in range(len(captions)):
padded_tokens = [self._vocab.PAD_INDEX] * (max_len - len(captions[ex_idx]))
captions[ex_idx].extend(padded_tokens)
return torch.FloatTensor(captions)
@registry.register_decoder("nucleus_sampling")
class NucleusSampling(TextDecoder):
"""Nucleus Sampling is a new text decoding strategy that avoids likelihood
maximization. Rather, it works by sampling from the smallest set of top
tokens which have a cumulative probability greater than a specified
threshold.
    Existing text decoding strategies like beam search do not work well on open-ended
    generation tasks (even with strong language models like GPT-2). They tend to
    repeat text heavily, mainly because they try to maximize likelihood, whereas
    human-generated text mixes high- and low-probability tokens.
Nucleus Sampling is a stochastic approach and resolves this issue. Moreover,
it improves upon other stochastic methods like top-k sampling by choosing the
right amount of tokens to sample from. The overall result is better text
generation on the same language model.
Link to the paper introducing Nucleus Sampling (Section 6) -
https://arxiv.org/pdf/1904.09751.pdf
Args:
vocab (list): Collection of all words in vocabulary.
sum_threshold (float): Ceiling of sum of probabilities of tokens to
sample from.
"""
def __init__(self, vocab, config):
super().__init__(vocab)
self._decode_size = 1
# Threshold for sum of probability
self._threshold = config["inference"]["params"]["sum_threshold"]
def decode(self, t, data, scores):
# Convert scores to probabilities
scores = torch.nn.functional.softmax(scores, dim=1)
# Sort scores in descending order and then select the top m elements having
# sum more than threshold.
# We get the top_m_scores and their indices top_m_words
if t == 0:
top_m_scores, top_m_words = scores[0].sort(0, True)
else:
top_m_scores, top_m_words = scores.view(-1).sort(0, True)
last_index = 0
score_sum = 0
for score in top_m_scores:
last_index += 1
score_sum += score
if score_sum >= self._threshold:
break
top_m_scores = torch.div(top_m_scores[:last_index], score_sum)
top_m_words = top_m_words[:last_index]
# Zero value inside prev_word_inds because we are predicting a single
# stream of output.
prev_word_ind = torch.tensor([0])
# Get next word based on probabilities of top m words.
next_word_ind = top_m_words[torch.multinomial(top_m_scores, 1)]
# Add next word to sequence
self.seqs = self.add_next_word(self.seqs, prev_word_ind, next_word_ind)
# Check if sequence is complete
complete_inds, incomplete_inds = self.find_complete_inds(next_word_ind)
# If sequence is complete then return
if len(complete_inds) > 0:
self._complete_seqs.extend(self.seqs[complete_inds].tolist())
return True, data, 0
self.seqs = self.seqs[incomplete_inds]
data = self.update_data(data, prev_word_ind, next_word_ind, incomplete_inds)
return False, data, 1
def get_result(self):
if len(self._complete_seqs) == 0:
captions = torch.FloatTensor([0] * 5).unsqueeze(0)
else:
captions = torch.FloatTensor(self._complete_seqs[0]).unsqueeze(0)
return captions
| EXA-1-master | exa/models/mmf-main/mmf/utils/text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import requests
import torch
from mmf.common.report import Report
from mmf.common.sample import Sample, SampleList
from mmf.utils.build import build_encoder, build_model, build_processors
from mmf.utils.checkpoint import load_pretrained_model
from mmf.utils.general import get_current_device
from omegaconf import OmegaConf
from PIL import Image
class Inference:
def __init__(self, checkpoint_path: str = None):
self.checkpoint = checkpoint_path
assert self.checkpoint is not None
self.processor, self.feature_extractor, self.model = self._build_model()
def _build_model(self):
self.model_items = load_pretrained_model(self.checkpoint)
self.config = OmegaConf.create(self.model_items["full_config"])
dataset_name = list(self.config.dataset_config.keys())[0]
processor = build_processors(
self.config.dataset_config[dataset_name].processors
)
feature_extractor = build_encoder(
self.model_items["config"].image_feature_encodings
)
ckpt = self.model_items["checkpoint"]
model = build_model(self.model_items["config"])
model.load_state_dict(ckpt)
return processor, feature_extractor, model
def forward(self, image_path: str, text: dict, image_format: str = "path"):
text_output = self.processor["text_processor"](text)
if image_format == "path":
img = np.array(Image.open(image_path))
elif image_format == "url":
img = np.array(Image.open(requests.get(image_path, stream=True).raw))
img = torch.as_tensor(img)
if self.model_items["config"].image_feature_encodings.type == "frcnn":
max_detect = self.model_items[
"config"
].image_feature_encodings.params.max_detections
image_preprocessed, sizes, scales_yx = self.processor["image_processor"](
img
)
image_output = self.feature_extractor(
image_preprocessed,
sizes=sizes,
scales_yx=scales_yx,
padding=None,
max_detections=max_detect,
return_tensors="pt",
)
image_output = image_output[0]
else:
image_preprocessed = self.processor["image_processor"](img)
image_output = self.feature_extractor(image_preprocessed)
sample = Sample(text_output)
sample.image_feature_0 = image_output
sample_list = SampleList([sample])
sample_list = sample_list.to(get_current_device())
self.model = self.model.to(get_current_device())
output = self.model(sample_list)
sample_list.id = [sample_list.input_ids[0][0]]
report = Report(sample_list, output)
answers = self.processor["output_processor"](report)
answer = self.processor["answer_processor"].idx2word(answers[0]["answer"])
return answer
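# Illustrative usage sketch (the checkpoint path and question are hypothetical, and
# the expected keys of the text dict depend on the configured text processor):
#
#     inference = Inference(checkpoint_path="save/my_model/best.ckpt")
#     answer = inference.forward("demo.jpg", {"text": "what is on the table?"})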
| EXA-1-master | exa/models/mmf-main/mmf/utils/inference.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from iopath.common.file_io import PathManager as pm
PathManager = pm()
try:
# [FB only] register internal file IO handlers
from mmf.utils.fb.file_io_handlers import register_handlers
register_handlers(PathManager)
except ImportError:
pass
| EXA-1-master | exa/models/mmf-main/mmf/utils/file_io.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from mmf.utils.distributed import is_main, is_xla
class EarlyStopping:
"""
    Provides early stopping functionality. Keeps track of an early stopping
    criterion and, if it doesn't improve over time, restores the last best
    performing parameters.
"""
def __init__(
self,
model,
checkpoint_instance,
early_stop_criteria="total_loss",
patience=1000,
minimize=False,
should_stop=True,
):
self.minimize = minimize
self.patience = patience
self.model = model
self.checkpoint = checkpoint_instance
self.early_stop_criteria = early_stop_criteria
if "val" not in self.early_stop_criteria:
self.early_stop_criteria = f"val/{self.early_stop_criteria}"
self.best_monitored_value = -np.inf if not minimize else np.inf
self.best_monitored_iteration = 0
self.best_monitored_update = 0
self.should_stop = should_stop
self.activated = False
self.metric = self.early_stop_criteria
def __call__(self, update, iteration, meter):
"""
        Method to be called every time you need to check whether to
        early stop or not
Arguments:
update {number}: Current update number
iteration {number}: Current iteration number
Returns:
bool -- Tells whether early stopping occurred or not
"""
# There are operations involving synchronization downstream
# For XLA those calls must be executed from all cores
# Therefore we do return here in case of XLA
if not is_main() and not is_xla():
return False
value = meter.meters.get(self.early_stop_criteria, None)
if value is None:
raise ValueError(
"Criteria used for early stopping ({}) is not "
"present in meter.".format(self.early_stop_criteria)
)
value = value.global_avg
if isinstance(value, torch.Tensor):
value = value.item()
if (self.minimize and value < self.best_monitored_value) or (
not self.minimize and value > self.best_monitored_value
):
self.best_monitored_value = value
self.best_monitored_iteration = iteration
self.best_monitored_update = update
self.checkpoint.save(update, iteration, update_best=True)
elif self.best_monitored_update + self.patience < update:
self.activated = True
if self.should_stop is True:
self.checkpoint.restore()
self.checkpoint.finalize()
return True
else:
return False
else:
self.checkpoint.save(update, iteration, update_best=False)
return False
def is_activated(self):
return self.activated
def init_from_checkpoint(self, load):
if "best_iteration" in load:
self.best_monitored_iteration = load["best_iteration"]
if "best_metric_value" in load:
self.best_monitored_value = load["best_metric_value"]
def get_info(self):
return {
"best_update": self.best_monitored_update,
"best_iteration": self.best_monitored_iteration,
f"best_{self.metric}": f"{self.best_monitored_value:.6f}",
}
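# Illustrative usage sketch (the trainer wiring is hypothetical; `meter` is expected
# to contain the validation metric named by early_stop_criteria):
#
#     early_stopping = EarlyStopping(
#         model, checkpoint, early_stop_criteria="vqa_accuracy", patience=4000
#     )
#     if early_stopping(num_updates, num_iterations, meter):
#         ...  # stop training; the best checkpoint has been restored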
| EXA-1-master | exa/models/mmf-main/mmf/utils/early_stopping.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Dict, Optional
from torch import Tensor
def getattr_torchscriptable(
dictionary: Dict[str, Tensor], key: str, default: Optional[Tensor] = None
) -> Optional[Tensor]:
if key in dictionary:
return dictionary[key]
else:
return default
| EXA-1-master | exa/models/mmf-main/mmf/utils/torchscript.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import os
from mmf.datasets.processors.processors import EvalAIAnswerProcessor
def get_score(occurences):
if occurences == 0:
return 0
elif occurences == 1:
return 0.3
elif occurences == 2:
return 0.6
elif occurences == 3:
return 0.9
else:
return 1
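# Worked example (not part of the original script): the soft VQA accuracy saturates
# once three annotators agree on an answer.
#
#     >>> [get_score(n) for n in range(5)]
#     [0, 0.3, 0.6, 0.9, 1]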
def multiple_replace(text, wordDict):
for key in wordDict:
text = text.replace(key, wordDict[key])
return text
def filter_answers(answers_dset, min_occurence):
"""This will change the answer to preprocessed version"""
occurence = {}
answer_list = []
evalai_answer_processor = EvalAIAnswerProcessor()
for ans_entry in answers_dset:
gtruth = ans_entry["multiple_choice_answer"]
gtruth = evalai_answer_processor(gtruth)
if gtruth not in occurence:
occurence[gtruth] = set()
occurence[gtruth].add(ans_entry["question_id"])
for answer in occurence.keys():
if len(occurence[answer]) >= min_occurence:
answer_list.append(answer)
print(
"Num of answers that appear >= %d times: %d" % (min_occurence, len(answer_list))
)
return answer_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--annotation_file",
type=str,
required=True,
help="input train annotationjson file",
)
parser.add_argument(
"--val_annotation_file",
type=str,
required=False,
help="input val annotation json file",
)
parser.add_argument(
"--out_dir",
type=str,
default="./",
help="output directory, default is current directory",
)
parser.add_argument(
"--min_freq",
type=int,
default=0,
help="the minimum times of answer occurrence \
to be included in vocabulary, default 0",
)
args = parser.parse_args()
train_annotation_file = args.annotation_file
out_dir = args.out_dir
min_freq = args.min_freq
answer_file_name = "answers_vqa.txt"
os.makedirs(out_dir, exist_ok=True)
train_answers = json.load(open(train_annotation_file))["annotations"]
answers = train_answers
if args.val_annotation_file is not None:
val_annotation_file = args.val_annotation_file
val_answers = json.load(open(val_annotation_file))["annotations"]
answers = train_answers + val_answers
answer_list = filter_answers(answers, min_freq)
answer_list = [t.strip() for t in answer_list if len(t.strip()) > 0]
answer_list.sort()
if "<unk>" not in answer_list:
answer_list = ["<unk>"] + answer_list
answer_file = os.path.join(out_dir, answer_file_name)
with open(answer_file, "w") as f:
f.writelines([w + "\n" for w in answer_list])
| EXA-1-master | exa/models/mmf-main/mmf/utils/process_answers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from torch import nn
logger = logging.getLogger(__name__)
ACT2FN = {
"relu": nn.ReLU,
"sigmoid": nn.Sigmoid,
"tanh": nn.Tanh,
"leaky_relu": nn.LeakyReLU,
}
def get_bert_configured_parameters(module, lr=None, weight_decay=0.01):
    # the module param can either be an nn.Module or, in some cases,
    # a list of named parameters for an nn.Module
if isinstance(module, nn.Module):
param_optimizer = list(module.named_parameters())
elif isinstance(module, list):
param_optimizer = module
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": weight_decay,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
if lr is not None:
for p in optimizer_grouped_parameters:
p["lr"] = lr
return optimizer_grouped_parameters
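# Illustrative usage sketch (the module below is hypothetical): parameters are split
# into two groups so that biases and LayerNorm parameters receive no weight decay.
#
#     param_groups = get_bert_configured_parameters(my_bert_module, lr=5e-5)
#     # -> [{"params": [...], "weight_decay": 0.01, "lr": 5e-05},
#     #     {"params": [...], "weight_decay": 0.0, "lr": 5e-05}]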
def get_optimizer_parameters_for_bert(module, config):
lr = config.optimizer.params.lr
model_config = config.model_config.get(config.model, {})
finetune_lr_multiplier = model_config.get("finetune_lr_multiplier", 1)
# For pretraining or when finetune_lr_multiplier == 1, all modules will be trained
# with default lr.
if module.config.training_head_type == "pretraining" or finetune_lr_multiplier == 1:
return get_bert_configured_parameters(module)
# For non pretraining heads, where finetune_lr_multiplier != 1, all modules other
# than classifier will be trained with (lr * finetune_lr_multiplier).
parameters = []
for name, submodule in module.named_children():
if name == "classifier":
continue
parameters += get_bert_configured_parameters(
submodule, lr * finetune_lr_multiplier
)
logger.info(f"Overriding {name} module's LR to {lr * finetune_lr_multiplier}")
# Classifier will be trained with default lr.
parameters += get_bert_configured_parameters(module.classifier)
return parameters
| EXA-1-master | exa/models/mmf-main/mmf/utils/modeling.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.utils.distributed import is_main
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
def save_xla_ckpt(ckpt, file_or_path):
"""
Similar to xm.save, but only try to convert "model" and "optimizer" in an MMF
checkpoint to CPU, since they hold PyTorch tensors. Other items like lr_scheduler
often cannot be saved with xm.save due to its errors in handling mappingproxy.
Only save on the global main process (which is different from the default behavior
of xm.save that saves a checkpoint on each node).
"""
should_write_data = is_main()
is_full_ckpt = isinstance(ckpt, dict) and "model" in ckpt and "optimizer" in ckpt
if is_full_ckpt:
ckpt["model"] = xm._maybe_convert_to_cpu(
ckpt["model"], convert=should_write_data
)
ckpt["optimizer"] = xm._maybe_convert_to_cpu(
ckpt["optimizer"], convert=should_write_data
)
else:
ckpt = xm._maybe_convert_to_cpu(ckpt, convert=should_write_data)
if should_write_data:
torch.save(ckpt, file_or_path)
xm.rendezvous("mmf.utils.checkpoint.save_xla_ckpt")
| EXA-1-master | exa/models/mmf-main/mmf/utils/xla.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import gc
import logging
import math
import os
import sys
import time
import warnings
from bisect import bisect
from typing import Any, Callable, Dict
import torch
from mmf.utils.distributed import get_rank, get_world_size, is_xla
from mmf.utils.file_io import PathManager
from packaging import version
from torch import nn, Tensor
logger = logging.getLogger(__name__)
def lr_lambda_update(i_iter, cfg):
if cfg.training.use_warmup is True and i_iter <= cfg.training.warmup_iterations:
alpha = float(i_iter) / float(cfg.training.warmup_iterations)
return cfg.training.warmup_factor * (1.0 - alpha) + alpha
else:
idx = bisect(cfg.training.lr_steps, i_iter)
return pow(cfg.training.lr_ratio, idx)
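# Worked example (config values are hypothetical): with use_warmup=True,
# warmup_factor=0.2 and warmup_iterations=2000, the multiplier at i_iter=1000 is
# 0.2 * (1 - 0.5) + 0.5 = 0.6; after warmup it decays by lr_ratio at every lr_step.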
def clip_gradients(model, optimizer, i_iter, writer, config, scale=1.0):
max_grad_l2_norm = config.training.max_grad_l2_norm
clip_norm_mode = config.training.clip_norm_mode
if max_grad_l2_norm is not None:
if clip_norm_mode == "all":
if hasattr(optimizer, "clip_grad_norm"):
norm = optimizer.clip_grad_norm(max_grad_l2_norm * scale)
else:
norm = nn.utils.clip_grad_norm_(
model.parameters(), max_grad_l2_norm * scale
)
if writer is not None:
writer.add_scalars({"grad_norm": norm}, i_iter)
else:
raise NotImplementedError(
"Clip norm mode %s not implemented" % clip_norm_mode
)
def ckpt_name_from_core_args(config):
seed = config.training.seed
ckpt_name = f"{config.datasets}_{config.model}"
if seed is not None:
ckpt_name += f"_{seed:d}"
return ckpt_name
def foldername_from_config_override(args):
cfg_override = None
if hasattr(args, "config_override"):
cfg_override = args.config_override
elif "config_override" in args:
cfg_override = args["config_override"]
folder_name = ""
if cfg_override is not None and len(cfg_override) > 0:
folder_name = str(cfg_override)
folder_name = folder_name.replace(":", ".").replace("\n", " ")
folder_name = folder_name.replace("/", "_")
folder_name = " ".join(folder_name.split())
folder_name = folder_name.replace(". ", ".").replace(" ", "_")
folder_name = "_" + folder_name
return folder_name
def get_mmf_root():
from mmf.common.registry import registry
mmf_root = registry.get("mmf_root", no_warning=True)
if mmf_root is None:
mmf_root = os.path.dirname(os.path.abspath(__file__))
mmf_root = os.path.abspath(os.path.join(mmf_root, ".."))
registry.register("mmf_root", mmf_root)
return mmf_root
def get_absolute_path(paths):
# String check should be first as Sequence would pass for string too
if isinstance(paths, str):
# If path is absolute return it directly
if os.path.isabs(paths):
return paths
possible_paths = [
# Direct path
paths
]
# Now, try relative to user_dir if it exists
from mmf.utils.configuration import get_mmf_env
mmf_root = get_mmf_root()
user_dir = get_mmf_env(key="user_dir")
if user_dir:
possible_paths.append(os.path.join(user_dir, paths))
        # also check the path relative to the user dir located next to the mmf root
possible_paths.append(os.path.join(mmf_root, "..", user_dir, paths))
# Relative to root folder of mmf install
possible_paths.append(os.path.join(mmf_root, "..", paths))
# Relative to mmf root
possible_paths.append(os.path.join(mmf_root, paths))
# Test all these paths, if any exists return
for path in possible_paths:
if PathManager.exists(path):
# URIs
if path.find("://") == -1:
return os.path.abspath(path)
else:
return path
# If nothing works, return original path so that it throws an error
return paths
elif isinstance(paths, collections.abc.Iterable):
return [get_absolute_path(path) for path in paths]
else:
raise TypeError("Paths passed to dataset should either be " "string or list")
def get_optimizer_parameters(model, config):
parameters = model.parameters()
has_custom = hasattr(model, "get_optimizer_parameters")
if has_custom:
parameters = model.get_optimizer_parameters(config)
is_parallel = isinstance(model, nn.DataParallel) or isinstance(
model, nn.parallel.DistributedDataParallel
)
if is_parallel and hasattr(model.module, "get_optimizer_parameters"):
parameters = model.module.get_optimizer_parameters(config)
# If parameters are a generator, convert to a list first
parameters = list(parameters)
if len(parameters) == 0:
raise ValueError("optimizer got an empty parameter list")
# If parameters are in format of list, instead of grouped params
# convert them to grouped params form
if not isinstance(parameters[0], dict):
parameters = [{"params": parameters}]
for group in parameters:
group["params"] = list(group["params"])
check_unused_parameters(parameters, model, config)
return parameters
def check_unused_parameters(parameters, model, config):
optimizer_param_set = {p for group in parameters for p in group["params"]}
unused_param_names = []
for n, p in model.named_parameters():
if p.requires_grad and p not in optimizer_param_set:
unused_param_names.append(n)
if len(unused_param_names) > 0:
logger.info(
"Model parameters not used by optimizer: {}".format(
" ".join(unused_param_names)
)
)
if not config.optimizer.allow_unused_parameters:
raise Exception(
"Found model parameters not used by optimizer. Please check the "
"model's get_optimizer_parameters and add all parameters. If this "
"is intended, set optimizer.allow_unused_parameters to True to "
"ignore it."
)
def dict_to_string(dictionary):
logs = []
if dictionary is None:
return ""
for key, val in dictionary.items():
if hasattr(val, "item"):
val = val.item()
# if key.count('_') == 2:
# key = key[key.find('_') + 1:]
logs.append(f"{key}: {val:.4f}")
return ", ".join(logs)
def get_overlap_score(candidate, target):
"""Takes a candidate word and a target word and returns the overlap
score between the two.
Parameters
----------
candidate : str
Candidate word whose overlap has to be detected.
target : str
Target word against which the overlap will be detected
Returns
-------
float
        Overlap score between the candidate and the target.
"""
if len(candidate) < len(target):
temp = candidate
candidate = target
target = temp
overlap = 0.0
while len(target) >= 2:
if target in candidate:
overlap = len(target)
return overlap * 1.0 / len(candidate)
else:
target = target[:-1]
return 0.0
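# Worked example (not part of the original module):
#
#     >>> get_overlap_score("apple", "applesauce")
#     0.5
#
# "apple" (length 5) is found inside "applesauce" (length 10), giving 5 / 10.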
def updir(d, n):
"""Given path d, go up n dirs from d and return that path"""
ret_val = d
for _ in range(n):
ret_val = os.path.dirname(ret_val)
return ret_val
def print_cuda_usage():
print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024))
print("Max Memory Allocated:", torch.cuda.max_memory_allocated() / (1024 * 1024))
print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024))
print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024))
def check_fft_version():
# Acquires and parses the PyTorch version
if version.parse(torch.__version__) >= version.parse("1.7"):
if "torch.fft" not in sys.modules:
raise RuntimeError("torch.fft module available but not imported")
def rfft(input_tensor, signal_ndim=1, n=None, dim=-1, norm=None) -> torch.Tensor:
check_fft_version()
if "torch.fft" not in sys.modules:
return torch.rfft(input_tensor, signal_ndim=signal_ndim)
else:
return torch.fft.rfft(input_tensor, n, dim, norm)
def irfft(input_tensor, s=None, signal_ndim=1, dim=None, norm=None) -> torch.Tensor:
check_fft_version()
if "torch.fft" not in sys.modules:
return torch.irfft(input_tensor, signal_ndim=signal_ndim, signal_sizes=s)
else:
return torch.fft.irfftn(input_tensor, s, dim, norm)
def get_current_tensors():
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (
hasattr(obj, "data") and torch.is_tensor(obj.data)
):
print(type(obj), obj.size())
except Exception:
pass
def get_batch_size():
from mmf.utils.configuration import get_global_config
batch_size = get_global_config("training.batch_size")
world_size = get_world_size()
batch_size_per_device = get_global_config("training.batch_size_per_device")
if batch_size_per_device is not None:
logger.info(
f"training.batch_size_per_device has been used as {batch_size_per_device} "
+ "This will override training.batch_size and set the global batch size to "
+ f"{batch_size_per_device} x {world_size} = "
+ f"{batch_size_per_device * world_size}"
)
batch_size = batch_size_per_device * world_size
if batch_size % world_size != 0:
raise RuntimeError(
"Batch size {} must be divisible by number "
"of GPUs {} used.".format(batch_size, world_size)
)
return batch_size // world_size
def print_model_parameters(model, return_only=False):
total_params = sum(p.numel() for p in model.parameters())
trained_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
if not return_only:
logger.info(
f"Total Parameters: {total_params}. Trained Parameters: {trained_params}"
)
return total_params, trained_params
def get_sizes_list(dim, chunks):
split_size = (dim + chunks - 1) // chunks
sizes_list = [split_size] * chunks
sizes_list[-1] = sizes_list[-1] - (sum(sizes_list) - dim) # Adjust last
assert sum(sizes_list) == dim
if sizes_list[-1] < 0:
n_miss = sizes_list[-2] - sizes_list[-1]
sizes_list[-1] = sizes_list[-2]
for j in range(n_miss):
sizes_list[-j - 1] -= 1
assert sum(sizes_list) == dim
assert min(sizes_list) > 0
return sizes_list
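# Worked example (not part of the original module): a dimension of 10 split into
# 3 chunks yields near-equal sizes that still sum to 10.
#
#     >>> get_sizes_list(10, 3)
#     [4, 4, 2]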
def get_max_updates(config_max_updates, config_max_epochs, train_loader, update_freq):
if config_max_updates is None and config_max_epochs is None:
raise ValueError("Neither max_updates nor max_epochs is specified.")
if isinstance(train_loader.current_dataset, torch.utils.data.IterableDataset):
warnings.warn(
"max_epochs not supported for Iterable datasets. Falling back "
+ "to max_updates."
)
return config_max_updates, config_max_epochs
if config_max_updates is not None and config_max_epochs is not None:
warnings.warn(
"Both max_updates and max_epochs are specified. "
+ f"Favoring max_epochs: {config_max_epochs}"
)
if config_max_epochs is not None:
assert (
hasattr(train_loader, "__len__") and len(train_loader) != 0
), "max_epochs can't be used with IterableDatasets"
max_updates = math.ceil(len(train_loader) / update_freq) * config_max_epochs
max_epochs = config_max_epochs
else:
max_updates = config_max_updates
if hasattr(train_loader, "__len__") and len(train_loader) != 0:
max_epochs = max_updates / len(train_loader)
else:
max_epochs = math.inf
return max_updates, max_epochs
def extract_loss(report: Dict[str, Any], loss_divisor: int) -> torch.Tensor:
loss_dict = report.losses
assert len(loss_dict) != 0, (
"Model returned an empty loss dict. "
"Did you forget to (i) define losses in your model configuration or"
"(ii) return losses dict from your model?"
)
# Since losses are batch averaged in MMF, this makes sure the
# scaling is right.
for key, value in loss_dict.items():
value = value.mean() / loss_divisor
report.losses[key] = value
loss = sum(loss.mean() for loss in loss_dict.values())
return loss
def get_chunks(x, sizes):
out = []
begin = 0
for s in sizes:
y = x.narrow(1, begin, s)
out.append(y)
begin += s
return out
def filter_grads(parameters):
return [param for param in parameters if param.requires_grad]
def log_device_names():
if torch.cuda.is_available():
device_name = torch.cuda.get_device_name()
logger.info(f"CUDA Device {get_rank()} is: {device_name}")
def assert_iterator_finished(iter):
try:
_ = next(iter)
except StopIteration:
pass
else:
assert False
def get_current_device():
if is_xla():
import torch_xla.core.xla_model as xm
return xm.xla_device()
if torch.cuda.is_available() and torch.cuda.is_initialized():
return f"cuda:{torch.cuda.current_device()}"
else:
return torch.device("cpu")
def retry_n(n: int, fn: Callable, *args, log_tries=False, **kwargs) -> Any:
"""Retries a function n times with increasing exponentionally
increasing sleep intervals in between. First argument is number of tries
if n==1, means function will be called at least twice, first is try, second
is retry. Second argument is the function itself, rest of the arguments and
keyword arguments are passed to the function directly. Returns the output
of the function directly. if failed after n retries, the exception will be
raised.
Args:
n (int): Number of tries to be made
fn (Callable): Function to be called
log_tries (bool): If the function should log the try iteration. Default: False
Returns:
Any: Output from fn
"""
completed = False
count = 0
output = None
while not completed:
try:
output = fn(*args, **kwargs)
completed = True
except Exception:
if count < n:
if log_tries:
logger.info(
f"Try {count + 1}/{n} failed for {fn.__name__}. Will retry "
f"after {2 ** count} second(s)."
)
time.sleep(2**count)
count += 1
else:
raise
return output
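# Illustrative usage sketch (flaky_download is a hypothetical function): call it with
# up to 2 retries, sleeping 1s and then 2s between failed attempts.
#
#     data = retry_n(2, flaky_download, "http://example.com/file", log_tries=True)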
def scalarize_dict_values(dict_with_tensors: Dict[str, Tensor]):
"""
    this method returns a new dict where every tensor value of
    `dict_with_tensors` has been reduced to a scalar
Returns:
Dict: a new dict with scalarized values
"""
dict_with_scalar_tensors = {}
for key, val in dict_with_tensors.items():
if torch.is_tensor(val):
if val.dim() != 0:
val = val.mean()
dict_with_scalar_tensors[key] = val
return dict_with_scalar_tensors
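# Illustrative example (not part of the original module): a non-scalar tensor value
# such as torch.tensor([1.0, 3.0]) is reduced with .mean() to tensor(2.) in the
# returned dict, while scalar tensors pass through unchanged.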
| EXA-1-master | exa/models/mmf-main/mmf/utils/general.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/utils/features/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import colorsys
import io
import os
from typing import Union
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import numpy as np
import requests
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from mmf.datasets.processors.frcnn_processor import img_tensorize
from PIL import Image
_SMALL_OBJ = 1000
def get_data(query: str, delim: str = ","):
assert isinstance(query, str)
if os.path.isfile(query):
with open(query) as f:
data = eval(f.read())
else:
req = requests.get(query)
try:
            data = req.json()
except Exception:
data = req.content.decode()
assert data is not None, "could not connect"
try:
data = eval(data)
except Exception:
data = data.split("\n")
req.close()
return data
class SingleImageViz:
def __init__(
self,
img: Union[str, np.ndarray],
scale: float = 1.2,
edgecolor: str = "g",
alpha: float = 0.5,
linestyle: str = "-",
saveas: str = "test_out.jpg",
rgb: bool = True,
pynb: bool = False,
id2obj: list = None,
id2attr: list = None,
pad: float = 0.7,
):
"""
img: an RGB image of shape (H, W, 3).
"""
if isinstance(img, torch.Tensor):
img = img.numpy().astype("np.uint8")
if isinstance(img, str):
img = img_tensorize(img)
assert isinstance(img, np.ndarray)
width, height = img.shape[1], img.shape[0]
fig = mplfigure.Figure(frameon=False)
dpi = fig.get_dpi()
width_in = (width * scale + 1e-2) / dpi
height_in = (height * scale + 1e-2) / dpi
fig.set_size_inches(width_in, height_in)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
ax.set_xlim(0.0, width)
ax.set_ylim(height)
np.random.seed(42) # generate same random
self.saveas = saveas
self.rgb = rgb
self.pynb = pynb
self.img = img
self.edgecolor = edgecolor
self.alpha = 0.5
self.linestyle = linestyle
self.font_size = int(np.sqrt(min(height, width)) * scale // 3)
self.width = width
self.height = height
self.scale = scale
self.fig = fig
self.ax = ax
self.pad = pad
self.id2obj = id2obj
self.id2attr = id2attr
self.canvas = FigureCanvasAgg(fig)
def add_box(self, box: np.ndarray, color: np.ndarray = None):
if color is None:
color = self.edgecolor
(x0, y0, x1, y1) = box
width = x1 - x0
height = y1 - y0
self.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=color,
linewidth=self.font_size // 3,
alpha=self.alpha,
linestyle=self.linestyle,
)
)
def draw_boxes(
self,
boxes: torch.Tensor,
obj_ids: torch.Tensor = None,
obj_scores: torch.Tensor = None,
attr_ids: torch.Tensor = None,
attr_scores: torch.Tensor = None,
):
if len(boxes.shape) > 2:
boxes = boxes[0]
if len(obj_ids.shape) > 1:
obj_ids = obj_ids[0]
if len(obj_scores.shape) > 1:
obj_scores = obj_scores[0]
if len(attr_ids.shape) > 1:
attr_ids = attr_ids[0]
if len(attr_scores.shape) > 1:
attr_scores = attr_scores[0]
if isinstance(boxes, torch.Tensor):
boxes = boxes.numpy()
if isinstance(boxes, list):
boxes = np.array(boxes)
assert isinstance(boxes, np.ndarray)
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
sorted_idxs = np.argsort(-areas).tolist()
boxes = boxes[sorted_idxs] if boxes is not None else None
obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None
obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None
attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None
attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None
assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))]
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
if obj_ids is not None:
labels = self._create_text_labels_attr(
obj_ids, obj_scores, attr_ids, attr_scores
)
for i in range(len(boxes)):
color = assigned_colors[i]
self.add_box(boxes[i], color)
self.draw_labels(labels[i], boxes[i], color)
def draw_labels(self, label: str, box: np.ndarray, color: np.ndarray):
x0, y0, x1, y1 = box
text_pos = (x0, y0)
instance_area = (y1 - y0) * (x1 - x0)
small = _SMALL_OBJ * self.scale
if instance_area < small or y1 - y0 < 40 * self.scale:
if y1 >= self.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.height * self.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
font_size *= 0.75 * self.font_size
self.draw_text(text=label, position=text_pos, color=lighter_color)
def draw_text(
self, text: str, position: tuple, color: tuple = "g", ha: str = "left"
):
rotation = 0
font_size = self.font_size
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
bbox = {
"facecolor": "black",
"alpha": self.alpha,
"pad": self.pad,
"edgecolor": "none",
}
x, y = position
self.ax.text(
x,
y,
text,
size=font_size * self.scale,
family="sans-serif",
bbox=bbox,
verticalalignment="top",
horizontalalignment=ha,
color=color,
zorder=10,
rotation=rotation,
)
def save(self, saveas: str = None):
if saveas is None:
saveas = self.saveas
if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"):
            # _get_buffer() returns an RGB uint8 array, so build the PIL image directly
            im = Image.fromarray(self._get_buffer())
            im.save(saveas)
else:
self.fig.savefig(saveas)
def _create_text_labels_attr(
self,
classes: torch.Tensor,
scores: torch.Tensor,
attr_classes: torch.Tensor,
attr_scores: torch.Tensor,
):
labels = [self.id2obj[i] for i in classes]
attr_labels = [self.id2attr[i] for i in attr_classes]
labels = [
f"{label} {score:.2f} {attr} {attr_score:.2f}"
for label, score, attr, attr_score in zip(
labels, scores, attr_labels, attr_scores
)
]
return labels
def _create_text_labels(self, classes: torch.Tensor, scores: torch.Tensor):
labels = [self.id2obj[i] for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = [
"{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)
]
return labels
def _random_color(self, maximum: int = 255):
idx = np.random.randint(0, len(_COLORS))
ret = _COLORS[idx] * maximum
if not self.rgb:
ret = ret[::-1]
return ret
def _get_buffer(self):
if not self.pynb:
s, (width, height) = self.canvas.print_to_buffer()
if (width, height) != (self.width, self.height):
img = Image.fromarray(self.img)
img = img.resize((width, height), Image.NEAREST)
else:
img = self.img
else:
buf = io.BytesIO() # works for cairo backend
self.canvas.print_rgba(buf)
width, height = self.width, self.height
s = buf.getvalue()
img = self.img
buffer = np.frombuffer(s, dtype="uint8")
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
try:
import numexpr as ne # fuse them with numexpr
visualized_image = ne.evaluate(
"img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)"
)
except ImportError:
alpha = alpha.astype("float32") / 255.0
visualized_image = img * (1 - alpha) + rgb * alpha
return visualized_image.astype("uint8")
def _change_color_brightness(self, color: np.ndarray, brightness_factor: float):
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(
polygon_color[0], modified_lightness, polygon_color[2]
)
return modified_color
# Color map
_COLORS = (
np.array(
[
0.000,
0.447,
0.741,
0.850,
0.325,
0.098,
0.929,
0.694,
0.125,
0.494,
0.184,
0.556,
0.466,
0.674,
0.188,
0.301,
0.745,
0.933,
0.635,
0.078,
0.184,
0.300,
0.300,
0.300,
0.600,
0.600,
0.600,
1.000,
0.000,
0.000,
1.000,
0.500,
0.000,
0.749,
0.749,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
1.000,
0.667,
0.000,
1.000,
0.333,
0.333,
0.000,
0.333,
0.667,
0.000,
0.333,
1.000,
0.000,
0.667,
0.333,
0.000,
0.667,
0.667,
0.000,
0.667,
1.000,
0.000,
1.000,
0.333,
0.000,
1.000,
0.667,
0.000,
1.000,
1.000,
0.000,
0.000,
0.333,
0.500,
0.000,
0.667,
0.500,
0.000,
1.000,
0.500,
0.333,
0.000,
0.500,
0.333,
0.333,
0.500,
0.333,
0.667,
0.500,
0.333,
1.000,
0.500,
0.667,
0.000,
0.500,
0.667,
0.333,
0.500,
0.667,
0.667,
0.500,
0.667,
1.000,
0.500,
1.000,
0.000,
0.500,
1.000,
0.333,
0.500,
1.000,
0.667,
0.500,
1.000,
1.000,
0.500,
0.000,
0.333,
1.000,
0.000,
0.667,
1.000,
0.000,
1.000,
1.000,
0.333,
0.000,
1.000,
0.333,
0.333,
1.000,
0.333,
0.667,
1.000,
0.333,
1.000,
1.000,
0.667,
0.000,
1.000,
0.667,
0.333,
1.000,
0.667,
0.667,
1.000,
0.667,
1.000,
1.000,
1.000,
0.000,
1.000,
1.000,
0.333,
1.000,
1.000,
0.667,
1.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.143,
0.143,
0.143,
0.857,
0.857,
0.857,
1.000,
1.000,
1.000,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
| EXA-1-master | exa/models/mmf-main/mmf/utils/features/visualizing_image.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .build_phoc import build_phoc # NoQA
| EXA-1-master | exa/models/mmf-main/mmf/utils/phoc/__init__.py |
import numpy as np
from .cphoc import build_phoc as _build_phoc_raw
_alphabet = {
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
} # NoQA
def build_phoc(token):
token = token.lower().strip()
token = "".join([c for c in token if c in _alphabet])
phoc = _build_phoc_raw(token)
phoc = np.array(phoc, dtype=np.float32)
return phoc
| EXA-1-master | exa/models/mmf-main/mmf/utils/phoc/build_phoc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2 && Huggingface Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List
import omegaconf
import torch
from torch import nn
from torch.nn import functional as F
try:
from detectron2.layers.batch_norm import get_norm
from detectron2.layers.wrappers import Conv2d
from detectron2.modeling import ShapeSpec
from detectron2.modeling.backbone.resnet import BottleneckBlock, ResNet
from detectron2.modeling.proposal_generator.rpn import RPN, StandardRPNHead
from detectron2.modeling.roi_heads.roi_heads import Res5ROIHeads
from detectron2.structures.image_list import ImageList
except ImportError:
pass
def build_backbone(config: omegaconf.DictConfig):
"""
    Differences between this and the build_backbone provided
    by detectron2 are as follows:
    - Different stem, including caffe_maxpool
- Number of blocks is different, unconfigurable in detectron
- Freeze-at operates differently in detectron
"""
input_shape = ShapeSpec(channels=len(config.MODEL.PIXEL_MEAN))
norm = config.MODEL.RESNETS.NORM
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=config.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
caffe_maxpool=config.MODEL.MAX_POOL,
)
freeze_at = config.MODEL.BACKBONE.FREEZE_AT
if freeze_at >= 1:
for p in stem.parameters():
p.requires_grad = False
out_features = config.MODEL.RESNETS.OUT_FEATURES
depth = config.MODEL.RESNETS.DEPTH
num_groups = config.MODEL.RESNETS.NUM_GROUPS
width_per_group = config.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = config.MODEL.RESNETS.STEM_OUT_CHANNELS
out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = config.MODEL.RESNETS.STRIDE_IN_1X1
res5_dilation = config.MODEL.RESNETS.RES5_DILATION
assert res5_dilation in {1, 2}, f"res5_dilation cannot be {res5_dilation}."
num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[
depth
]
stages = []
out_stage_idx = [
{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features
]
max_stage_idx = max(out_stage_idx)
for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
"in_channels": in_channels,
"bottleneck_channels": bottleneck_channels,
"out_channels": out_channels,
"num_groups": num_groups,
"norm": norm,
"stride_in_1x1": stride_in_1x1,
"dilation": dilation,
}
stage_kargs["block_class"] = BottleneckBlock
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features, freeze_at=-1)
class BasicStem(nn.Module):
"""
The differences between this and detectron:
    - The forward method uses caffe_maxpool,
      which is not configurable in detectron
"""
def __init__(
self,
in_channels: int = 3,
out_channels: int = 64,
norm: str = "BN",
caffe_maxpool: bool = False,
):
super().__init__()
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
self.caffe_maxpool = caffe_maxpool
# use pad 1 instead of pad zero
def forward(self, x: torch.Tensor):
x = self.conv1(x)
x = F.relu_(x)
if self.caffe_maxpool:
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True)
else:
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
@property
def out_channels(self):
return self.conv1.out_channels
@property
def stride(self):
return 4 # = stride 2 conv -> stride 2 max pool
class GeneralizedRCNN(nn.Module):
def __init__(self, config: omegaconf.DictConfig):
super().__init__()
self.device = torch.device(config.MODEL.DEVICE)
self.backbone = build_backbone(config)
self.proposal_generator = RPN(config, self.backbone.output_shape())
self._fix_proposal_generator(config)
self.roi_heads = Res5ROIHeads(config, self.backbone.output_shape())
self._fix_res5_block(config)
self.to(self.device)
def _fix_proposal_generator(self, config: omegaconf.DictConfig):
in_channels = [
val.channels for key, val in self.backbone.output_shape().items()
]
assert len(set(in_channels)) == 1, "Each level must have the same channel!"
in_channels = in_channels[0]
if config.MODEL.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1:
hid_channels = in_channels
else:
hid_channels = config.MODEL.PROPOSAL_GENERATOR.HIDDEN_CHANNELS
self.proposal_generator.rpn_head.conv = nn.Conv2d(
in_channels, hid_channels, kernel_size=3, stride=1, padding=1
)
shape = self.backbone.output_shape()
features = config.MODEL.RPN.IN_FEATURES
example_head = StandardRPNHead.from_config(config, [shape[f] for f in features])
num_cell_anchors = example_head["num_anchors"]
box_dim = example_head["box_dim"]
self.proposal_generator.rpn_head.objectness_logits = nn.Conv2d(
hid_channels, num_cell_anchors, kernel_size=1, stride=1
)
self.proposal_generator.rpn_head.anchor_deltas = nn.Conv2d(
hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1
)
def _fix_res5_block(self, config: omegaconf.DictConfig):
res5_halve = config.MODEL.ROI_BOX_HEAD.RES5HALVE
if not res5_halve:
"""
Modifications for VG in RoI heads:
1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1
            2. Modify all conv2 layers with (padding: 1 --> 2) and (dilation: 1 --> 2)
"""
self.roi_heads.res5[0].conv1.stride = (1, 1)
self.roi_heads.res5[0].shortcut.stride = (1, 1)
for i in range(3):
self.roi_heads.res5[i].conv2.padding = (2, 2)
self.roi_heads.res5[i].conv2.dilation = (2, 2)
def forward_for_roi_head(self, features: List, proposal_boxes: List):
box_features = self.roi_heads._shared_roi_transform(features, proposal_boxes)
feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
return feature_pooled
def forward(
self,
images: torch.Tensor,
image_shapes: torch.Tensor,
gt_boxes: torch.Tensor = None,
proposals: torch.Tensor = None,
scales_yx: torch.Tensor = None,
**kwargs,
):
"""
kwargs:
max_detections (int), return_tensors {"np", "pt", None}, padding {None,
"max_detections"}, pad_value (int), location = {"cuda", "cpu"}
"""
if self.training:
raise NotImplementedError()
return self.inference(
images=images,
image_shapes=image_shapes,
gt_boxes=gt_boxes,
proposals=proposals,
scales_yx=scales_yx,
**kwargs,
)
@torch.no_grad()
def inference(
self,
images: torch.Tensor,
image_shapes: torch.Tensor,
gt_boxes: torch.Tensor = None,
proposals: torch.Tensor = None,
scales_yx: torch.Tensor = None,
**kwargs,
):
# run images through backbone
features = self.backbone(images)
image_list = ImageList(images, image_shapes)
# generate proposals if none are available
if proposals is None:
proposal_boxes, _ = self.proposal_generator(image_list, features, gt_boxes)
else:
assert proposals is not None
proposal_boxes = [proposal_boxes[0].get_fields()["proposal_boxes"]]
feature_pooled = self.forward_for_roi_head([features["res4"]], proposal_boxes)
preds_per_image = [p.size(0) for p in [proposal_boxes[0].tensor]]
roi_features = feature_pooled.split(preds_per_image, dim=0)
return roi_features
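# --- Editor's note: illustrative usage sketch, not part of the original file ---
# Assuming a config-built `frcnn = GeneralizedRCNN(config)` and a preprocessed
# batch (the variable names here are hypothetical), inference roughly looks like:
#
#     frcnn.eval()
#     roi_features = frcnn(images=images, image_shapes=image_shapes)
#
# where `images` is a (B, C, H, W) float tensor, `image_shapes` holds the
# per-image (height, width), and the result is the per-image pooled res5
# features produced by `forward_for_roi_head` above.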
| EXA-1-master | exa/models/mmf-main/mmf/models/frcnn.py |
# Copyright 2019 project LXMERT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.modeling import get_optimizer_parameters_for_bert
from omegaconf import OmegaConf
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
try:
from transformers3.modeling_bert import (
ACT2FN,
BertAttention,
BertConfig,
BertEmbeddings,
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainedModel,
BertSelfAttention,
BertSelfOutput,
)
except ImportError:
from transformers.modeling_bert import (
ACT2FN,
BertAttention,
BertConfig,
BertEmbeddings,
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainedModel,
BertSelfAttention,
BertSelfOutput,
)
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return ACT2FN["gelu"](x)
class BertCrossattLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.att = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
output = self.att(
input_tensor,
encoder_hidden_states=ctx_tensor,
encoder_attention_mask=ctx_att_mask,
)[0]
attention_output = self.output(output, input_tensor)
return attention_output
class BertClassificationHead(nn.Module):
def __init__(self, num_labels, hid_dim, training_head_type):
super().__init__()
if training_head_type == "nlvr2":
in_dim = hid_dim * 2
out_dim = 2
else:
in_dim = hid_dim
out_dim = num_labels
self.logit_fc = nn.Sequential(
nn.Linear(in_dim, hid_dim * 2),
GeLU(),
nn.LayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, out_dim),
)
def forward(self, x):
logit = self.logit_fc(x)
return logit
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(
bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False,
)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
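# --- Editor's note: illustrative sketch, not part of the original LXMERT code ---
# A minimal, self-contained demonstration of the weight tying used by
# BertLMPredictionHead above: the decoder reuses the embedding matrix, and a
# separate output-only bias is added per vocabulary token. The function name
# and sizes are hypothetical.
def _example_tied_lm_head():
    vocab_size, hidden = 12, 4
    embeddings = nn.Embedding(vocab_size, hidden)
    decoder = nn.Linear(hidden, vocab_size, bias=False)
    decoder.weight = embeddings.weight  # shared Parameter, as in the head above
    bias = nn.Parameter(torch.zeros(vocab_size))  # output-only bias
    hidden_states = torch.randn(2, 3, hidden)
    logits = decoder(hidden_states) + bias  # (2, 3, vocab_size)
    return logits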
class BertVisualAnswerHead(nn.Module):
def __init__(self, config, num_labels):
super().__init__()
hid_dim = config.hidden_size
if config.training_head_type == "nlvr2":
in_dim = hid_dim * 2
out_dim = 2
else:
in_dim = hid_dim
out_dim = config.num_labels
add_gqa = isinstance(num_labels, list)
if add_gqa:
self.logit_gqa = nn.Sequential(
nn.Linear(in_dim, hid_dim * 2),
GeLU(),
nn.LayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, num_labels[1]),
)
out_dim = num_labels[0]
self.logit_fc = nn.Sequential(
nn.Linear(in_dim, hid_dim * 2),
GeLU(),
nn.LayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, out_dim),
)
def forward(self, hidden_states, name=None):
if name is None or "gqa" not in name:
return self.logit_fc(hidden_states)
else:
return self.logit_gqa(hidden_states)
class BertVisualObjHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.visual_losses = config.visual_losses
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_dict = nn.ModuleDict(
{
key: nn.Linear(config.hidden_size, config.visual_loss_config[key][0])
for key in self.visual_losses
}
)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super().__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class VisualFeatEncoder(nn.Module):
def __init__(self, config):
super().__init__()
feat_dim = config.visual_feat_dim
pos_dim = config.visual_pos_dim
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
# Box position encoding
self.box_fc = nn.Linear(pos_dim, config.hidden_size)
self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visn_input):
feats, boxes = visn_input
x = self.visn_fc(feats)
x = self.visn_layer_norm(x)
if boxes is not None:
y = self.box_fc(boxes)
y = self.box_layer_norm(y)
output = (x + y) / 2
else:
output = x
output = self.dropout(output)
return output
class LXMERTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
# The cross-attention Layer
self.visual_attention = BertCrossattLayer(config)
# Self-attention Layers
self.lang_self_att = BertAttention(config)
self.visn_self_att = BertAttention(config)
# Intermediate and Output Layers (FFNs)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
def cross_att(
self, lang_input, lang_attention_mask, visn_input, visn_attention_mask
):
# Cross Attention
lang_att_output = self.visual_attention(
lang_input, visn_input, ctx_att_mask=visn_attention_mask
)
visn_att_output = self.visual_attention(
visn_input, lang_input, ctx_att_mask=lang_attention_mask
)
return lang_att_output, visn_att_output
def self_att(
self, lang_input, lang_attention_mask, visn_input, visn_attention_mask
):
# Self Attention
lang_att_output = self.lang_self_att(lang_input, lang_attention_mask)[0]
visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)[0]
return lang_att_output, visn_att_output
def output_fc(self, lang_input, visn_input):
# FC layers
lang_inter_output = self.lang_inter(lang_input)
visn_inter_output = self.visn_inter(visn_input)
# Layer output
lang_output = self.lang_output(lang_inter_output, lang_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return lang_output, visn_output
def forward(self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask):
lang_att_output = lang_feats
visn_att_output = visn_feats
lang_att_output, visn_att_output = self.cross_att(
lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask
)
lang_att_output, visn_att_output = self.self_att(
lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask
)
lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output)
return lang_output, visn_output
class LXMERTEncoder(nn.Module):
def __init__(self, config):
super().__init__()
# Obj-level image embedding layer
self.visn_fc = VisualFeatEncoder(config)
# Number of layers
self.num_l_layers = config.l_layers
self.num_x_layers = config.x_layers
self.num_r_layers = config.r_layers
self.layer = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_l_layers)]
)
self.x_layers = nn.ModuleList(
[LXMERTXLayer(config) for _ in range(self.num_x_layers)]
)
self.r_layers = nn.ModuleList(
[BertLayer(config) for _ in range(self.num_r_layers)]
)
def forward(
self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask=None
):
# Run visual embedding layer
# Note: Word embedding layer was executed outside this module.
# Keep this design to allow loading BERT weights.
visn_feats = self.visn_fc(visn_feats)
# Run language layers
for layer_module in self.layer:
lang_feats = layer_module(lang_feats, lang_attention_mask)[0]
# Run relational layers
for layer_module in self.r_layers:
visn_feats = layer_module(visn_feats, visn_attention_mask)[0]
# Run cross-modality layers
for layer_module in self.x_layers:
lang_feats, visn_feats = layer_module(
lang_feats, lang_attention_mask, visn_feats, visn_attention_mask
)
return lang_feats, visn_feats
class LXMERTBase(BertPreTrainedModel):
"""LXMERT Model."""
def __init__(self, config):
super().__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = LXMERTEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
visual_feats=None,
visual_loc=None,
visual_attention_mask=None,
output_all_attention_masks=False,
output_all_encoded_layers=False,
):
if output_all_encoded_layers:
raise NotImplementedError
if output_all_attention_masks:
raise NotImplementedError
visual_feats = (visual_feats, visual_loc)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# This attention mask is simpler than the triangular masking of
# causal attention used in OpenAI GPT; we just need to prepare the
# broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Process the visual attention mask
if visual_attention_mask is not None:
extended_visual_attention_mask = visual_attention_mask.unsqueeze(
1
).unsqueeze(2)
extended_visual_attention_mask = extended_visual_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_visual_attention_mask = (
1.0 - extended_visual_attention_mask
) * -10000.0
else:
extended_visual_attention_mask = None
# Positional Word Embeddings
embedding_output = self.embeddings(input_ids, token_type_ids)
# Run LXMERT backbone
lang_feats, visn_feats = self.encoder(
embedding_output,
extended_attention_mask,
visn_feats=visual_feats,
visn_attention_mask=extended_visual_attention_mask,
)
pooled_output = self.pooler(lang_feats)
return (lang_feats, visn_feats), pooled_output
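# --- Editor's note: illustrative sketch, not part of the original LXMERT code ---
# Self-contained demonstration of the additive attention-mask trick used in
# LXMERTBase.forward above: a 2D padding mask is broadcast to
# (batch, 1, 1, seq_len) and turned into 0.0 / -10000.0 biases that are added
# to the raw attention scores before the softmax. The function name and values
# are hypothetical.
def _example_extended_attention_mask():
    mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # two padded sequences
    extended = mask.unsqueeze(1).unsqueeze(2).float()  # (B, 1, 1, L)
    extended = (1.0 - extended) * -10000.0  # 0.0 where attended, -10000.0 where padded
    return extended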
class LXMERTForPretraining(nn.Module):
def __init__(self, config):
super().__init__()
# Configuration
self.config = config
# LXMERT backbone
self.bert = LXMERTBase.from_pretrained(
self.config.bert_model_name,
config=BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
),
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.num_labels = config.num_labels
self.gqa_labels = config.gqa_labels
self.task_mask_lm = config.task_mask_lm
self.task_obj_predict = config.task_obj_predict
self.task_matched = config.task_matched
self.task_qa = config.task_qa
self.visual_losses = config.visual_losses
self.visual_loss_config = config.visual_loss_config
# Pre-training heads
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight
)
if self.task_obj_predict:
self.obj_predict_head = BertVisualObjHead(config)
if self.task_qa:
self.answer_head = BertVisualAnswerHead(
config, [self.num_labels, self.gqa_labels]
)
# loss functions
self.loss_fcts = {
"l2": SmoothL1Loss(reduction="none"),
"ce": CrossEntropyLoss(ignore_index=-1, reduction="none"),
"ce_lang": CrossEntropyLoss(ignore_index=-1),
}
def init_weights(self):
if self.config.random_initialize is False:
if self.config.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
self.cls.apply(self.bert._init_weights)
self.tie_weights()
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning
them instead.
"""
self._tie_or_clone_weights(
self.cls.predictions.decoder, self.bert.embeddings.word_embeddings
)
def forward(
self,
input_ids, # tokens
token_type_ids=None,
attention_mask=None,
visual_feats=None,
visual_pos=None,
visual_attention_mask=None,
masked_lm_labels=None,
masked_image_labels=None,
obj_labels=None,
matched_label=None,
ans=None, # qa answer
num_features=None, # max num of objects
name=None,
output_all_attention_masks=False,
output_all_encoded_layers=False,
):
(lang_output, visn_output), pooled_output = self.bert(
input_ids,
token_type_ids,
attention_mask,
visual_feats,
visual_pos,
visual_attention_mask,
output_all_attention_masks,
output_all_encoded_layers,
)
lang_prediction_scores, cross_relationship_score = self.cls(
lang_output, pooled_output
)
## KEEP TRACK OF OUTPUTS HERE
output = {}
if output_all_attention_masks:
raise NotImplementedError
if ans is not None and self.task_qa:
answer_score = self.answer_head(pooled_output, name)
if name is None or "gqa" not in name:
num_labels = self.config.num_labels
else:
num_labels = self.config.gqa_labels
answer_loss = self.loss_fcts["ce_lang"](
answer_score.view(-1, num_labels), ans.argmax(-1)
)
output["answer_loss"] = answer_loss
if masked_lm_labels is not None and self.task_mask_lm:
masked_lm_loss = self.loss_fcts["ce_lang"](
lang_prediction_scores.view(-1, lang_prediction_scores.size(-1)),
masked_lm_labels.view(-1),
)
output["masked_lm_loss"] = masked_lm_loss
if matched_label is not None and self.task_matched:
matched_label = matched_label.to(cross_relationship_score).long()
matched_loss = self.loss_fcts["ce_lang"](
cross_relationship_score.view(-1, 2), matched_label
)
output["matched_loss"] = matched_loss
if obj_labels is not None and self.task_obj_predict:
total_visn_loss = 0.0
visn_prediction_scores_dict = self.obj_predict_head(visn_output)
for key in self.visual_losses:
visn_prediction_scores = visn_prediction_scores_dict[key]
(
output_dim,
loss_fct_name,
label_shape,
weight,
) = self.visual_loss_config[key]
if key == "attr":
continue
elif key == "obj":
temp_obj_labels_dict = obj_labels.max(-1)
mask_conf = temp_obj_labels_dict.values
visn_loss = self.loss_fcts[loss_fct_name](
visn_prediction_scores.view(-1, output_dim),
temp_obj_labels_dict.indices.view(-1),
)
elif key == "feat":
if masked_image_labels is None:
continue
mask_conf = (masked_image_labels == 1).float()
visn_loss = self.loss_fcts[loss_fct_name](
visn_prediction_scores.view(-1, output_dim),
visual_feats.view(-1, output_dim),
)
if visn_loss.dim() > 1: # Regression Losses
visn_loss = visn_loss.mean(1)
visn_loss = (visn_loss * mask_conf.view(-1)).mean() * weight
total_visn_loss += visn_loss
output["visn_loss"] = total_visn_loss
return output
class LXMERTForClassification(nn.Module):
def __init__(self, config, mode="lxr"):
super().__init__()
self.config = config
self.num_labels = config.num_labels
self.gqa_labels = config.gqa_labels
self.mode = config.mode
self.bert = LXMERTBase.from_pretrained(
self.config.bert_model_name,
config=BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
),
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.classifier = BertVisualAnswerHead(
config, [self.num_labels, self.gqa_labels]
)
self.init_weights()
def init_weights(self):
if self.config.random_initialize is False:
if self.config.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
# Classifier needs to be initialized always as it is task specific
self.classifier.apply(self.bert._init_weights)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
visual_feats=None,
visual_pos=None,
visual_attention_mask=None,
masked_lm_labels=None,
obj_labels=None, # is img_labels in vilbert
matched_label=None, # next_sent_label in VilBERT
ans=None,
max_features=None,
output_all_attention_masks=False,
output_all_encoded_layers=False,
):
(lang_output, visn_output), pooled_output = self.bert(
input_ids,
token_type_ids,
attention_mask,
visual_feats,
visual_pos,
visual_attention_mask,
output_all_attention_masks,
output_all_encoded_layers,
)
output = {}
if output_all_attention_masks:
raise NotImplementedError
if self.config.training_head_type == "nlvr2":
pooled_output = pooled_output.view(-1, pooled_output.size(1) * 2)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.config.num_labels)
output["scores"] = reshaped_logits
return output
@registry.register_model("lxmert")
class LXMERT(BaseModel):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/lxmert/pretrain.yaml"
def build(self):
if self.config.training_head_type == "pretraining":
self.model = LXMERTForPretraining(self.config)
else:
self.model = LXMERTForClassification(self.config)
if getattr(self.config, "freeze_base", False):
for p in self.model.bert.parameters():
p.requires_grad = False
def get_image_and_text_features(self, sample_list, device):
# bert input
bert_input_ids = sample_list.input_ids
bert_input_mask = sample_list.input_mask
bert_input_type_ids = sample_list.segment_ids
masked_lm_labels = sample_list.lm_label_ids
# image input
image_info = getattr(sample_list, "image_info_0", {})
image_dim_variable = getattr(image_info, "max_features", None)
image_feature_variable = getattr(sample_list, "image_feature_0", None)
max_features = torch.tensor(
image_feature_variable.shape[1], dtype=torch.int
).to(device)
image_location_variable = getattr(image_info, "bbox", None)
image_location_variable = image_location_variable[:, : max_features.item(), :4]
# aux data
image_label_variable = getattr(sample_list, "image_labels", None)
if image_label_variable is not None:
image_label_variable = image_label_variable[:, : max_features.item(), None]
image_label_variable = image_label_variable.unsqueeze(-1).to(device)
cls_prob = getattr(image_info, "cls_prob", None)
if cls_prob is not None:
cls_prob = torch.tensor(cls_prob)[:, : max_features.item(), None].to(device)
answers = getattr(sample_list, "targets", None)
if answers is None:
answers = getattr(sample_list, "answers", None)
if answers is not None:
if not isinstance(answers, torch.Tensor):
answers = torch.tensor(answers)
answers = answers.to(device)
is_correct = getattr(sample_list, "is_correct", None)
if is_correct is not None:
if isinstance(is_correct, torch.Tensor):
is_correct = is_correct.to(device)
else:
is_correct = torch.tensor(is_correct).to(device)
return {
"input_ids": bert_input_ids,
"token_type_ids": bert_input_mask,
"attention_mask": bert_input_type_ids,
"masked_lm_labels": masked_lm_labels,
"visual_feats": image_feature_variable,
"pos": image_location_variable,
"masked_image_labels": image_label_variable,
"obj_labels": cls_prob,
"matched_label": is_correct,
"ans": answers,
"image_dim": image_dim_variable,
"max_features": max_features,
"dataset_name": str(sample_list.dataset_name),
}
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
def forward(self, sample_list):
device = registry.get("config").training.device
params = self.get_image_and_text_features(sample_list, device)
if params["visual_feats"] is not None and params["image_dim"] is not None:
device = params["visual_feats"].device
image_mask = (
torch.arange(params["visual_feats"].size(-2))
.expand(*params["visual_feats"].size()[:-1])
.to(device)
)
if len(params["image_dim"].size()) < len(image_mask.size()):
params["image_dim"] = params["image_dim"].unsqueeze(-1)
assert len(params["image_dim"].size()) == len(image_mask.size())
image_mask = image_mask < params["image_dim"]
params["image_attention_mask"] = image_mask.long()
else:
params["image_attention_mask"] = None
if self.config.training_head_type == "pretraining":
output_dict = self.model(
input_ids=params["input_ids"],
token_type_ids=params["token_type_ids"],
attention_mask=params["attention_mask"],
visual_feats=params["visual_feats"],
visual_pos=params["pos"],
visual_attention_mask=params["image_attention_mask"],
masked_lm_labels=params["masked_lm_labels"],
masked_image_labels=params["masked_image_labels"],
obj_labels=params["obj_labels"],
matched_label=params["matched_label"],
ans=params["ans"],
num_features=params["max_features"],
name=params["dataset_name"],
)
loss_key = "{}/{}".format(
sample_list.dataset_name, sample_list.dataset_type
)
output_dict["losses"] = {}
if "masked_lm_loss" in output_dict.keys():
output_dict["losses"][loss_key + "/masked_lm_loss"] = output_dict.pop(
"masked_lm_loss"
)
if "matched_loss" in output_dict.keys():
output_dict["losses"][loss_key + "/matched_loss"] = output_dict.pop(
"matched_loss"
)
if "visn_loss" in output_dict.keys():
output_dict["losses"][loss_key + "/visn_loss"] = output_dict.pop(
"visn_loss"
)
if "answer_loss" in output_dict.keys():
output_dict["losses"][loss_key + "/answer_loss"] = output_dict.pop(
"answer_loss"
)
else:
output_dict = self.model(
input_ids=params["input_ids"],
token_type_ids=params["token_type_ids"],
attention_mask=params["attention_mask"],
visual_feats=params["visual_feats"],
visual_pos=params["pos"],
visual_attention_mask=params["image_attention_mask"],
)
return output_dict
| EXA-1-master | exa/models/mmf-main/mmf/models/lxmert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from mmf.models.pythia import Pythia
from mmf.modules.embeddings import ProjectionEmbedding
from mmf.utils.transform import transform_to_batch_sequence
from torch import nn
try:
from transformers3.modeling_bert import (
BertConfig,
BertEmbeddings,
BertForPreTraining,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainingHeads,
)
except ImportError:
from transformers.modeling_bert import (
BertConfig,
BertEmbeddings,
BertForPreTraining,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainingHeads,
)
@registry.register_model("mmf_bert")
class MMFBert(Pythia):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/mmf_bert/defaults.yaml"
def build(self):
super().build()
self.tie_weights()
if self.config.get("freeze_base", False):
for n, p in self.named_parameters():
if "classifier" not in n:
p.requires_grad = False
def _build_word_embedding(self):
self.bert_config = BertConfig.from_pretrained(self.config.bert_model_name)
if self.config.pretrained_bert:
bert_model = BertForPreTraining.from_pretrained(self.config.bert_model_name)
self.word_embedding = bert_model.bert.embeddings
self.pooler = bert_model.bert.pooler
self.pooler.apply(self.init_weights)
else:
self.pooler = BertPooler(self.bert_config)
self.word_embedding = BertEmbeddings(self.bert_config)
def _init_classifier(self, hidden_size):
if "pretraining" in self.config.training_head_type:
self.classifier = BertPreTrainingHeads(self.bert_config)
if "vqa" in self.config.training_head_type:
self.dropout = nn.Dropout(self.bert_config.hidden_dropout_prob)
self.answer_space_size = 3129
self.classifier = nn.Sequential(
BertPredictionHeadTransform(self.bert_config),
nn.Linear(self.bert_config.hidden_size, self.answer_space_size),
)
# self.classifier = nn.Linear(self.bert_config.hidden_size,
# self.answer_space_size)
elif "vizwiz" in self.config.training_head_type:
self.dropout = nn.Dropout(self.bert_config.hidden_dropout_prob)
self.answer_space_size = 7371
self.classifier = nn.Sequential(
BertPredictionHeadTransform(self.bert_config),
nn.Linear(self.bert_config.hidden_size, self.answer_space_size),
)
# self.classifier = nn.Linear(self.bert_config.hidden_size,
# self.answer_space_size)
elif self.config.training_head_type == "visual_entailment":
self.dropout = nn.Dropout(self.bert_config.hidden_dropout_prob)
self.classifier = nn.Sequential(
BertPredictionHeadTransform(self.bert_config),
nn.Linear(self.bert_config.hidden_size, 3),
)
# self.classifier = nn.Linear(self.bert_config.hidden_size, 3)
def _init_text_embeddings(self, attr="text"):
self.text_embeddings_out_dim = self.bert_config.hidden_size
self.text_embedding = nn.MultiheadAttention(**self.config.text_embeddings[0])
def _tie_or_clone_weights(self, first_module, second_module):
"""Tie or clone module weights depending of weither we are using
TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning
them instead.
"""
if hasattr(self, "cls"):
self._tie_or_clone_weights(
self.cls.predictions.decoder, self.word_embedding.word_embeddings
)
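# --- Editor's note: illustrative sketch, not part of the original file ---
# The tie-or-clone logic above boils down to (hypothetical names, torch only):
#
#     emb = nn.Embedding(10, 4)
#     dec = nn.Linear(4, 10, bias=False)
#     dec.weight = nn.Parameter(emb.weight.clone())  # TorchScript: independent copy
#     dec.weight = emb.weight                        # otherwise: shared Parameter
#
# Sharing keeps a single Parameter object so gradients update one tensor;
# cloning is needed because TorchScript export cannot handle parameter sharing.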
def _init_feature_embeddings(self, attr):
feature_embeddings_list = []
num_feature_feat = len(self.config.get(f"{attr}_feature_encodings"))
self.image_feature_projection = ProjectionEmbedding(
**self.config.image_feature_projection
)
self.feature_embeddings_out_dim = 0
if self.config.image_intra_attention:
self.image_feature_intra_attention = nn.MultiheadAttention(
**self.config.image_feature_attentions[0]
)
for _ in range(num_feature_feat):
feature_embeddings = []
feature_attn_model_list = self.config[attr + "_feature_embeddings"]
for feature_attn_model_params in feature_attn_model_list:
feature_embedding = nn.MultiheadAttention(**feature_attn_model_params)
feature_embeddings.append(feature_embedding)
self.feature_embeddings_out_dim += feature_attn_model_params[
"embed_dim"
]
feature_embeddings = nn.ModuleList(feature_embeddings)
feature_embeddings_list.append(feature_embeddings)
setattr(
self, attr + "_feature_embeddings_out_dim", self.feature_embeddings_out_dim
)
del self.feature_embeddings_out_dim
setattr(
self,
attr + "_feature_embeddings_list",
nn.ModuleList(feature_embeddings_list),
)
def get_optimizer_parameters(self, config):
param_optimizer = list(self.named_parameters())
image_feature_encoders_params = [
n for n in param_optimizer if "image_feature_encoders" in n[0]
]
param_optimizer = [
n for n in param_optimizer if "image_feature_encoders" not in n[0]
]
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
{
"params": [p for _, p in image_feature_encoders_params],
"lr": (config.optimizer.params.lr * 0.1),
"weight_decay": 0.01,
},
]
return optimizer_grouped_parameters
# WARNING(ASG): This doesn't have finetune_lr_multiplier option enabled yet
def process_text_embedding(self, text_embedding, key_padding_mask=None):
text_embedding = text_embedding.transpose(0, 1)
embedding, _ = self.text_embedding(
text_embedding,
text_embedding,
text_embedding,
key_padding_mask=key_padding_mask,
)
return embedding.transpose(0, 1)
def process_feature_embedding(
self,
attr,
sample_list,
text_embedding_total,
key_padding_mask=None,
attn_mask=None,
extra=None,
batch_size_t=None,
):
if extra is None:
extra = []
feature_embeddings = []
feature_attentions = []
features = []
batch_size_t = (
sample_list.get_batch_size() if batch_size_t is None else batch_size_t
)
# Convert list of keys to the actual values
extra = sample_list.get_fields(extra)
feature_idx = 0
# Get all of the features, which are in the form, "image_feature_0"
# "image_feature_1" ...
while True:
feature = getattr(sample_list, f"{attr}_feature_{feature_idx:d}", None)
if feature is None:
break
feature_idx += 1
feature = feature[:batch_size_t]
features.append(feature)
feature_encoders = getattr(self, attr + "_feature_encoders")
# Each feature should have a separate image feature encoder
assert len(features) == len(feature_encoders), (
"Number of feature encoders, {} are not equal "
"to number of features, {}.".format(len(feature_encoders), len(features))
)
# Now, iterate to get final attended image features
for i, feature in enumerate(features):
# Get info related to the current feature. info is generally
# in key of format "image_info_0" for 0th feature
feature_info = getattr(sample_list, f"{attr}_info_{i:d}", {})
# For Pythia, we need max_features to mask attention
feature_dim = getattr(feature_info, "max_features", None)
if feature_dim is not None:
feature_dim = feature_dim[:batch_size_t]
# Attribute in which encoders are saved, for "image" it
# will be "image_feature_encoders", other example is
# "context_feature_encoders"
encoders_attr = attr + "_feature_encoders"
feature_encoder = getattr(self, encoders_attr)[i]
# Encode the features
encoded_feature = feature_encoder(feature)
# Get all of the feature embeddings
list_attr = attr + "_feature_embeddings_list"
feature_embedding_models = getattr(self, list_attr)[i]
encoded_feature = self.image_feature_projection(encoded_feature)
encoded_feature = encoded_feature.transpose(0, 1)
text_embedding_total = text_embedding_total.transpose(0, 1)
if self.config.image_intra_attention:
encoded_feature, _ = self.image_feature_intra_attention(
encoded_feature,
encoded_feature,
encoded_feature,
key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
)
# Forward through these embeddings one by one
for feature_embedding_model in feature_embedding_models:
inp = (text_embedding_total, encoded_feature, encoded_feature)
embedding, attention = feature_embedding_model(
*inp, key_padding_mask=key_padding_mask, attn_mask=attn_mask
)
feature_embeddings.append(embedding.transpose(0, 1))
feature_attentions.append(attention.squeeze(-1))
# Concatenate all features embeddings and return along with attention
feature_embedding_total = torch.cat(feature_embeddings, dim=1)
return feature_embedding_total, feature_attentions
def init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal
# for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, sample_list):
# bert text input
input_ids = sample_list.input_ids
input_mask = sample_list.input_mask
input_type_ids = sample_list.segment_ids
input_ids = transform_to_batch_sequence(input_ids)
input_type_ids = transform_to_batch_sequence(input_type_ids)
input_mask = transform_to_batch_sequence(input_mask)
if input_mask is None:
input_mask = torch.ones_like(input_ids)
if input_type_ids is None:
input_type_ids = torch.zeros_like(input_ids)
attention_mask = input_mask.unsqueeze(1).unsqueeze(2)
# pretraining labels
masked_lm_labels = getattr(sample_list, "lm_label_ids", None)
masked_lm_labels = transform_to_batch_sequence(masked_lm_labels)
# pretraining labels
# is_random_next = getattr(sample_list, "is_correct", None)
# TODO(aps): Fix later on dataset side
is_random_next = None
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# fp16 compatibility
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)
attention_mask = (1.0 - attention_mask) * -10000.0
text_embedding = self.word_embedding(input_ids, input_type_ids)
text_embedding_total = self.process_text_embedding(
text_embedding, input_mask == 0
)
image_embedding_total, _ = self.process_feature_embedding(
"image", sample_list, text_embedding_total
)
if self.inter_model is not None:
image_embedding_total = self.inter_model(image_embedding_total)
# image_embedding_total = image_embedding_total *
# input_mask.unsqueeze(-1).float()
# text_embedding_total = text_embedding_total *
# input_mask.unsqueeze(-1).float()
if self.config.combine_embeddings:
joint_embedding = self.combine_embeddings(
["image", "text"], [image_embedding_total, text_embedding_total]
)
else:
joint_embedding = image_embedding_total
output_dict = {}
pooled_output = self.pooler(joint_embedding)
if "pretraining" in self.config.training_head_type:
prediction_scores, seq_relationship_score = self.classifier(
joint_embedding, pooled_output
)
output_dict["logits"] = prediction_scores
if masked_lm_labels is not None:
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.contiguous().view(
-1, self.bert_config.vocab_size
),
masked_lm_labels.contiguous().view(-1),
)
# print(seq_relationship_score.argmax(dim=1), is_random_next)
loss_key = "{}/{}".format(
sample_list.dataset_name, sample_list.dataset_type
)
output_dict["losses"] = {}
output_dict["losses"][loss_key + "/masked_lm_loss"] = masked_lm_loss
if is_random_next is not None:
output_dict["seq_relationship_score"] = seq_relationship_score
next_sentence_loss = loss_fct(
seq_relationship_score.contiguous().view(-1, 2),
is_random_next.contiguous().view(-1),
)
output_dict["losses"][
loss_key + "/next_sentence_loss"
] = next_sentence_loss
return output_dict
elif (
"vqa" in self.config.training_head_type
or self.config.training_head_type == "vizwiz"
):
index_to_gather = input_mask.sum(1) - 2
pooled_output = torch.gather(
joint_embedding,
1,
index_to_gather.unsqueeze(-1)
.unsqueeze(-1)
.expand(index_to_gather.size(0), 1, joint_embedding.size(-1)),
)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.answer_space_size)
output_dict["scores"] = reshaped_logits
return output_dict
elif (
self.config.training_head_type == "nlvr2"
or self.config.training_head_type == "visual_entailment"
):
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
output_dict["scores"] = logits
return output_dict
return output_dict
| EXA-1-master | exa/models/mmf-main/mmf/models/mmf_bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import (
BaseTransformer,
BaseTransformerBackendConfig,
BaseTransformerHead,
BaseTransformerModalityConfig,
)
from mmf.models.transformers.heads.mlp import MLP
from mmf.modules.encoders import ResNet152ImageEncoder
from mmf.utils.build import build_encoder
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
logger = logging.getLogger(__name__)
@dataclass
class MMFTransformerModalityConfig(BaseTransformerModalityConfig):
pass
@dataclass
class MMFTransformerBackendConfig(BaseTransformerBackendConfig):
pass
# Can be used with mmft or mmf_transformer
@registry.register_model("mmft")
@registry.register_model("mmf_transformer")
class MMFTransformer(BaseTransformer):
@dataclass
class Config(BaseTransformer.Config):
model: str = "mmft"
transformer_base: str = "bert-base-uncased"
heads: List[BaseTransformerHead.Config] = field(
default_factory=lambda: [MLP.Config()]
)
num_labels: int = MISSING
initializer_range: float = 0.02
initializer_mean: float = 0.0
token_noise_std: float = 0.01
token_noise_mean: float = 0.0
layer_norm_weight_fill: float = 1.0
random_initialize: bool = False
freeze_transformer: bool = False
freeze_image_encoder: bool = False
tie_weight_to_encoder: Optional[str] = None
finetune_lr_multiplier: float = 1
backend: BaseTransformerBackendConfig = MMFTransformerBackendConfig(
type="huggingface"
)
modalities: List[BaseTransformerModalityConfig] = field(
default_factory=lambda: [
MMFTransformerModalityConfig(
type="text",
key="text",
position_dim=512,
embedding_dim=768,
segment_id=0,
),
MMFTransformerModalityConfig(
type="image",
key="image",
embedding_dim=2048,
position_dim=1,
segment_id=1,
# NOTE: One can also specify encoder in factory mode as
# encoder=ImageEncoderFactory.Config(
# type="resnet152",
# params=ResNet152ImageEncoder.Config()
# )
encoder=ResNet152ImageEncoder.Config(),
),
]
)
def __init__(self, config: BaseTransformer.Config, *args, **kwargs):
super().__init__(config)
self.modality_keys: List = []
self.modality_type: List = []
self.modality_segments: List = []
for modality in self.config.modalities:
self.modality_keys.append(modality.key)
self.modality_type.append(modality.type)
if "segment_id" in modality:
self.modality_segments.append(modality.segment_id)
else:
self.modality_segments.append(-1)
# Backward compatibility for code for old mmft checkpoints
@classmethod
def format_state_key(cls, key):
if key.startswith("pooler.") or key.startswith("classifier."):
return key.replace("pooler.", "heads.0.pooler.").replace(
"classifier.", "heads.0.classifier."
)
return key
@classmethod
def config_path(cls) -> str:
return "configs/models/mmf_transformer/defaults.yaml"
def build_encoders(self):
self.encoders = nn.ModuleDict()
for modality in self.config.modalities:
if "encoder" not in modality:
# Support "image_encoder" attribute in config if directly provided
if modality.type == "image" and "image_encoder" in self.config:
encoder_config = self.config.image_encoder
else:
# 100 is a random number added to satisfy identity encoder
# Set encoder to identity
encoder_config = OmegaConf.create(
{"type": "identity", "params": {"in_dim": 100}}
)
else:
encoder_config = modality.encoder
encoder = build_encoder(encoder_config)
self.encoders[modality.key] = encoder
if modality.type == "image" and self.config.get(
"freeze_image_encoder", False
):
logger.info("Freezing image encoder...")
for param in encoder.parameters():
param.requires_grad = False
if modality.type == "text" and self.config.get(
"freeze_text_encoder", False
):
logger.info("Freezing text encoder...")
for param in encoder.parameters():
param.requires_grad = False
def tie_weights(self):
"""Tie some head weights with backend embeddings"""
text_embedding_idx = self.modality_type.index("text")
if text_embedding_idx >= 0:
for head in self.heads:
if self.config.get("tie_weight_to_encoder", None):
encoder_key = self._find_unique_encoder_key(
self.config.tie_weight_to_encoder
)
logger.info(
f"Tie head weights to {encoder_key} encoder token embeddings"
)
if hasattr(self.encoders[encoder_key], "transformer"):
head.tie_weights(
self.encoders[
encoder_key
].transformer.transformer.token_embedding
)
elif hasattr(self.encoders[encoder_key], "embeddings"):
head.tie_weights(
self.encoders[encoder_key].embeddings.word_embeddings
)
else:
raise NotImplementedError(
"Current encoder module arch not supported."
)
else:
head.tie_weights(
self.backend.embeddings.token_embeddings[text_embedding_idx]
)
def preprocess_sample(
self, sample_list: Dict[str, Tensor]
) -> Dict[str, Dict[str, Tensor]]:
"""Preprocess the sample list elements and return a Dict[str, Dict[str, Tensor]]
object. This object standardizes how we represent multiple modalities. Check
the definition of this in BaseTransformer.
Returns:
Dict[str, Dict[str, Tensor]]: containing input_ids, position_ids,
segment_ids, masks and mlm_labels
input_ids: dict of input ids for all modalities
position_ids: dict of position ids for all modalities
segment_ids: dict of segment/token type ids for all modalities
masks: dict of masks for all modalities
mlm_labels: dict of mlm labels for all modalities, also contains
key `combined_labels` which is a concatenation of all labels
in order of modalities
"""
input_ids = self._infer_input_ids(sample_list)
position_ids = self._infer_position_ids(input_ids)
masks = self._infer_masks(sample_list, input_ids)
segment_ids = self._infer_segment_ids(sample_list, input_ids)
mlm_labels = self._infer_mlm_labels(sample_list, input_ids)
itm_labels = self._infer_itm_labels(sample_list, input_ids)
return {
"input_ids": input_ids,
"position_ids": position_ids,
"segment_ids": segment_ids,
"masks": masks,
"mlm_labels": mlm_labels,
"itm_labels": itm_labels,
}
def _infer_input_ids(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
# Input IDs (or text tokens/image features)
input_ids: Dict[str, Tensor] = {}
current_text_idx = 0
for idx, encoder in enumerate(self.encoders.values()):
modality = self.modality_keys[idx]
if self.modality_type[idx] == "text":
# First, check if standard input_ids corresponds to text
# if not, check for modality key inside the sample list
text_ids = self._check_keys_for_modality(
sample_list, ("input_ids", modality)
)
# This handles the case of more than one text modalities
# with type text. The input_ids must be stacked in this case.
# For example, if there are two text modalities, input ids should
# have shape B X 2 X L, where the second dim points to the stacked
# text ids. Furthermore, make sure that the sequence of modalities
# in config is same as the sequence in the stacked input ids.
if text_ids.dim() > 2:
input_ids[modality] = text_ids[:, current_text_idx]
current_text_idx += 1
else:
input_ids[modality] = text_ids
elif self.modality_type[idx] == "image":
# input_modal is originally used by MMBT, added for
# cross-compatibility of interops and datasets.
input_ids[modality] = self._check_keys_for_modality(
sample_list, (modality, "image", "input_modal", "image_feature_0")
)
else:
# TODO: Later deliberate if missing modalities should
# be supported in MMFT.
input_ids[modality] = self._check_keys_for_modality(
sample_list, (modality,)
)
# In the other case feature will be skipped, as it is not present in
# the sample list
if encoder is not None:
input_ids[modality] = encoder(input_ids[modality])
# For a feature which is of shape B X D and
# is not text (which is B X L converted later by embeddings to B X L X D)
# We convert it to B X 1 X D to signify single position dim.
if self.modality_type[idx] != "text" and input_ids[modality].dim() == 2:
input_ids[modality] = input_ids[modality].unsqueeze(1)
return input_ids
def _check_keys_for_modality(
self, sample_list: Dict[str, Tensor], keys: List[str]
) -> Tensor:
assert len(keys) != 0
for key in keys:
if key in sample_list:
return sample_list[key]
# Reaching here means nothing was found.
# Easier to write code this way to keep torchscript happy
if len(keys) == 1:
expected_list = keys[0]
else:
expected_list: str = ", ".join(keys[:-1])
expected_list = f"{expected_list} or {keys[-1]}"
raise TypeError(
f"Missing modality in SampleList. Expected to find {expected_list}"
)
def _infer_position_ids(self, input_ids: Dict[str, Tensor]) -> Dict[str, Tensor]:
# Position IDs
position_ids: Dict[str, Tensor] = {}
for modality in self.modality_keys:
end_idx = input_ids[modality].size(1)
position_ids[modality] = (
torch.arange(
0, end_idx, dtype=torch.long, device=input_ids[modality].device
)
.unsqueeze(0)
.expand((input_ids[modality].size(0), end_idx))
)
return position_ids
def _infer_masks(
self, sample_list: Dict[str, Tensor], input_ids: Dict[str, Tensor]
) -> Dict[str, Tensor]:
masks: Dict[str, Tensor] = {}
current_text_idx = 0
for idx, modality in enumerate(self.modality_keys):
if self.modality_type[idx] == "text" and "input_mask" in sample_list:
if sample_list["input_mask"].dim() > 2:
masks[modality] = sample_list["input_mask"][:, current_text_idx]
current_text_idx += 1
else:
masks[modality] = sample_list["input_mask"]
else:
mask_attribute = f"{modality}_mask"
if mask_attribute in sample_list:
masks[modality] = sample_list[mask_attribute]
else:
masks[modality] = torch.ones(
input_ids[modality].size()[:2],
dtype=torch.long,
device=input_ids[modality].device,
)
return masks
def _infer_segment_ids(
self, sample_list: Dict[str, Tensor], input_ids: Dict[str, Tensor]
) -> Dict[str, Tensor]:
# Segment IDs
segment_ids: Dict[str, Tensor] = {}
current_text_idx = 0
for idx, modality in enumerate(self.modality_keys):
if self.modality_segments[idx] == -1:
continue
if self.modality_type[idx] == "text" and "segment_ids" in sample_list:
if sample_list["segment_ids"].dim() > 2:
segment_ids[modality] = sample_list["segment_ids"][
:, current_text_idx
]
current_text_idx += 1
else:
segment_ids[modality] = sample_list["segment_ids"]
else:
segment_ids[modality] = torch.full(
input_ids[modality].size()[:2],
fill_value=self.modality_segments[idx],
dtype=torch.long,
device=input_ids[modality].device,
)
return segment_ids
def _infer_itm_labels(
self, sample_list: Dict[str, Tensor], input_ids: Dict[str, Tensor]
) -> Dict[str, Tensor]:
# ITM Labels
# Currently supports only global match/mismatch between all modalities but
# not pairwise between modalities.
itm_labels: Dict[str, Tensor] = {}
if "is_correct" in sample_list:
itm_labels["is_correct"] = sample_list["is_correct"]
else:
itm_labels["is_correct"] = torch.tensor(
True, dtype=torch.long, device=input_ids[self.modality_keys[0]].device
)
return itm_labels
def _infer_mlm_labels(
self, sample_list: Dict[str, Tensor], input_ids: Dict[str, Tensor]
) -> Dict[str, Tensor]:
# MLM Labels
mlm_labels: Dict[str, Tensor] = {}
current_text_idx = 0
for idx, modality in enumerate(self.modality_keys):
if self.modality_type[idx] == "text" and "lm_label_ids" in sample_list:
if sample_list["lm_label_ids"].dim() > 2:
mlm_labels[modality] = sample_list["lm_label_ids"][
:, current_text_idx
]
current_text_idx += 1
else:
mlm_labels[modality] = sample_list["lm_label_ids"]
else:
mlm_labels[modality] = torch.full(
input_ids[modality].size()[:2],
fill_value=-1,
dtype=torch.long,
device=input_ids[modality].device,
)
mlm_labels_list = []
for modality in self.modality_keys:
mlm_labels_list.append(mlm_labels[modality])
if mlm_labels_list:
mlm_labels["combined_labels"] = torch.cat(mlm_labels_list, dim=-1)
return mlm_labels
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
# Sample preprocess
orig_and_processed_sample_list = self.preprocess_sample(sample_list)
orig_and_processed_sample_list["target_key"] = sample_list
# Arrange masks in a list
masks = []
for modality in self.modality_keys:
masks.append(orig_and_processed_sample_list["masks"][modality])
# Call transformer backend
sequence_output, encoded_layers = self.backend(
orig_and_processed_sample_list["input_ids"],
orig_and_processed_sample_list["position_ids"],
orig_and_processed_sample_list["segment_ids"],
masks,
)
# Transformer Heads
return self.postprocess_output(
sequence_output, encoded_layers, orig_and_processed_sample_list
)
def postprocess_output(
self,
sequence_output: Tensor,
encoded_layers: List[Tensor],
processed_sample_list: Dict[str, Dict[str, Tensor]],
) -> Dict[str, Tensor]:
"""Postprocess the output from the transformer encoder and forward
through the heads.
"""
output_dict = {}
for head in self.heads:
output_dict.update(
head(sequence_output, encoded_layers, processed_sample_list)
)
return output_dict
def _find_unique_encoder_key(self, key):
assert key in self.encoders, f"MMFT doesn't have {key} encoder."
for modality in self.config.modalities:
if modality.key == key:
assert (
len([m for m in self.config.modalities if m.key == modality.key])
== 1
), f"MMFT has multiple modalities with the same key {key}."
assert (
len([m for m in self.config.modalities if m.type == modality.type])
== 1
), f"Encoder {key} should be the only encoder for {modality.type}."
return key
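# --- Editor's note: illustrative sketch, not part of the original file ---
# Self-contained illustration of the per-modality position ids built by
# MMFTransformer._infer_position_ids above: a 0..L-1 range is expanded across
# the batch dimension for each modality. The function name is hypothetical.
def _example_position_ids():
    batch_size, seq_len = 2, 5
    input_ids = torch.zeros(batch_size, seq_len, dtype=torch.long)
    position_ids = (
        torch.arange(0, input_ids.size(1), dtype=torch.long, device=input_ids.device)
        .unsqueeze(0)
        .expand((input_ids.size(0), input_ids.size(1)))
    )
    assert position_ids.shape == (batch_size, seq_len)
    return position_ids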
| EXA-1-master | exa/models/mmf-main/mmf/models/mmf_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Tuple
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.encoders import IdentityEncoder
from mmf.modules.layers import AttnPool1d
from mmf.utils.build import (
build_classifier_layer,
build_image_encoder,
build_text_encoder,
)
from mmf.utils.general import filter_grads
from mmf.utils.modeling import get_bert_configured_parameters
from mmf.utils.transform import transform_to_batch_sequence
from omegaconf import MISSING
class PositionEmbeddingSine(torch.nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used in the "Attention Is All You Need" paper, generalized to work on images.
"""
def __init__(
self,
num_pos_feats: int = 64,
temperature: float = 10000,
eps: float = 1e-06,
normalize: bool = True,
scale: float = None,
):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.eps = eps
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor):
# input shape is B x 2048 x 7 x 7 or B x 2048 x 1 x 1
x = tensor
not_mask = torch.ones((x.shape[0], *x.shape[2:]), device=x.device)
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
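# --- Editor's note: illustrative sketch, not part of the original file ---
# A minimal, self-contained shape check for PositionEmbeddingSine above: for a
# B x C x 7 x 7 feature map and num_pos_feats=256, the returned encoding has
# shape B x 512 x 7 x 7 (sine/cosine pairs for the y and x coordinates).
# The function name and sizes are hypothetical.
def _example_position_embedding_sine():
    pos_emb = PositionEmbeddingSine(num_pos_feats=256)
    feats = torch.zeros(2, 2048, 7, 7)
    pos = pos_emb(feats)
    assert pos.shape == (2, 512, 7, 7)
    return pos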
class BaseAlign(BaseModel):
@dataclass
class Config(BaseModel.Config):
# final layer mlp hidden size
final_hidden_size: int = 512
# whether to normalize the embedding
norm_img_embeddings: bool = False
norm_text_embeddings: bool = True
direct_features_input: bool = False
image_encoder: Any = MISSING
text_encoder: Any = MISSING
image_projection: Any = IdentityEncoder.Config()
text_projection: Any = IdentityEncoder.Config()
def __init__(self, config: Config):
"""Initialize the config which is the model configuration."""
super().__init__(config)
self.config = config
def preprocess_text(self, sample_list) -> Tuple:
raise NotImplementedError("Text processing not implemented")
def preprocess_image(self, sample_list) -> Tuple:
raise NotImplementedError("Image processing not implemented")
def get_image_embeddings(self, sample_list) -> torch.Tensor:
raise NotImplementedError("Image Encoder not implemented")
def get_text_embedding(self, sample_list) -> torch.Tensor:
raise NotImplementedError("Text Encoder not implemented")
@registry.register_model("cm_shared_transformer")
class CMSharedTransformer(BaseAlign):
def __init__(self, config: BaseAlign.Config):
"""Initialize the config which is the model configuration."""
super().__init__(config)
self.config = config
self.build()
@classmethod
def config_path(cls):
return "configs/models/alignment/defaults.yaml"
def build(self):
self._is_direct_features_input = self.config.direct_features_input
# Encoders
self.text_encoder = build_text_encoder(self.config.text_encoder)
self.image_encoder = build_image_encoder(
self.config.image_encoder, self._is_direct_features_input
)
# Projectors
image_proj_config = deepcopy(self.config.image_projection)
self.image_proj = build_classifier_layer(image_proj_config)
text_proj_config = deepcopy(self.config.text_projection)
self.text_proj = build_classifier_layer(text_proj_config)
# Aggregators
self.image_pool = AttnPool1d(self.config.final_hidden_size, 1)
self.text_pool = AttnPool1d(self.config.final_hidden_size, 1)
# Shared transformer
transformer_layer = torch.nn.TransformerEncoderLayer(
self.config.final_hidden_size, 4, 2048, dropout=0.1, activation="relu"
)
self.shared_transformer = torch.nn.TransformerEncoder(
transformer_layer, num_layers=2
)
# Position embeddings - Image
self.image_pos_emb = PositionEmbeddingSine(self.config.final_hidden_size // 2)
def get_optimizer_parameters(self, config):
base_lr = config.optimizer.params.lr
bert_params = get_bert_configured_parameters(self.text_encoder, base_lr * 0.1)
backbone_params = [
{
"params": filter_grads(self.image_encoder.parameters()),
"lr": base_lr * 0.1,
}
]
rest_params = [
{"params": filter_grads(self.image_proj.parameters()), "lr": base_lr},
{"params": filter_grads(self.text_proj.parameters()), "lr": base_lr},
{"params": filter_grads(self.image_pool.parameters()), "lr": base_lr},
{"params": filter_grads(self.text_pool.parameters()), "lr": base_lr},
{
"params": filter_grads(self.shared_transformer.parameters()),
"lr": base_lr,
},
]
training_parameters = bert_params + backbone_params + rest_params
return training_parameters
def preprocess_text(self, sample_list) -> Tuple:
text = transform_to_batch_sequence(sample_list.input_ids)
mask = transform_to_batch_sequence(sample_list.input_mask)
segment = transform_to_batch_sequence(sample_list.segment_ids)
return (text, mask, segment)
def preprocess_image(self, sample_list) -> Tuple:
if self._is_direct_features_input:
return sample_list.image_feature_0.permute(0, 2, 1).unsqueeze(3)
# return shape is B x 2048 x 1 x 1
else:
return sample_list.image
def get_image_embeddings(self, sample_list) -> Tuple[torch.Tensor, torch.Tensor]:
image_data = self.preprocess_image(sample_list)
# image_data shape B x 3 x 224 x 224, B x 1 x 2048
src = self.image_encoder(image_data)
# src shape B x 49 x 2048, B x 1 x 2048
if isinstance(src, dict):
src = src[0]
# Image embedding
pos_src = src.permute(0, 2, 1) # B x 2048 x 49,
image_pos_embd = self.image_pos_emb(
pos_src.reshape(
(
pos_src.shape[0],
pos_src.shape[1],
int(math.sqrt(pos_src.shape[2])),
int(math.sqrt(pos_src.shape[2])),
)
)
)
image_pos_embd = image_pos_embd.flatten(2).permute(2, 0, 1)
src_reshaped = src.flatten(2).permute(1, 0, 2)
image_proj = self.image_proj(src_reshaped)
# image_proj shape 49 x B x 512, 1 x B x 512
image_emb = image_proj + image_pos_embd
# Shared transformer
image_proj_sec = self.shared_transformer(image_emb)
# Project to shared space
image_proj_sec = image_proj_sec.permute(1, 0, 2)
image_pool = self.image_pool(image_proj_sec, image_proj_sec).squeeze(1)
if self.config.norm_img_embeddings:
image_pool = torch.nn.functional.normalize(image_pool, 2, dim=1)
return image_pool
def get_text_embeddings(self, sample_list) -> Tuple[torch.Tensor, torch.Tensor]:
text, mask, segment = self.preprocess_text(sample_list)
text_enc = self.text_encoder(text, mask, segment)
# Text embedding
text_proj = self.text_proj(text_enc[0]).permute(1, 0, 2)
text_ebd = text_proj
# Shared transformer
text_proj_sec = self.shared_transformer(
text_ebd, src_key_padding_mask=mask.eq(0)
)
# Project to shared space
text_proj_sec = text_proj_sec.permute(1, 0, 2)
text_pool = self.text_pool(text_proj_sec, text_proj_sec, mask.eq(0)).squeeze(1)
if self.config.norm_text_embeddings:
text_pool = torch.nn.functional.normalize(text_pool, 2, dim=1)
return text_pool
def forward(self, sample_list):
image_proj = self.get_image_embeddings(sample_list)
text_proj = self.get_text_embeddings(sample_list)
output = {
"scores": image_proj,
"targets": text_proj,
"text_len": sample_list.input_mask.sum(-1).flatten(),
}
return output
| EXA-1-master | exa/models/mmf-main/mmf/models/alignment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.models.pythia import Pythia
@registry.register_model("lorra")
class LoRRA(Pythia):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/lorra/defaults.yaml"
def build(self):
self._init_text_embeddings("text")
        # For LoRRA, the context feature and text embeddings would be identity,
        # but we initialize them anyway to keep a unified API. They also need to
        # be built before Pythia's other modules, since some of those modules
        # require the context attributes to already be set.
self._init_text_embeddings("context")
self._init_feature_encoders("context")
self._init_feature_embeddings("context")
super().build()
def get_optimizer_parameters(self, config):
params = super().get_optimizer_parameters(config)
params += [
{"params": self.context_feature_embeddings_list.parameters()},
{"params": self.context_embeddings.parameters()},
{"params": self.context_feature_encoders.parameters()},
]
return params
def _get_classifier_input_dim(self):
# Now, the classifier's input will be cat of image and context based
# features
return 2 * super()._get_classifier_input_dim()
def forward(self, sample_list):
sample_list.text = self.word_embedding(sample_list.text)
text_embedding_total = self.process_text_embedding(sample_list)
image_embedding_total, _ = self.process_feature_embedding(
"image", sample_list, text_embedding_total
)
context_embedding_total, _ = self.process_feature_embedding(
"context", sample_list, text_embedding_total, ["order_vectors"]
)
if self.inter_model is not None:
image_embedding_total = self.inter_model(image_embedding_total)
joint_embedding = self.combine_embeddings(
["image", "text"],
[image_embedding_total, text_embedding_total, context_embedding_total],
)
scores = self.calculate_logits(joint_embedding)
return {"scores": scores}
| EXA-1-master | exa/models/mmf-main/mmf/models/lorra.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.layers import ClassifierLayer, ConvNet, Flatten
from torch import nn
_TEMPLATES = {
"question_vocab_size": "{}_text_vocab_size",
"number_of_answers": "{}_num_final_outputs",
}
_CONSTANTS = {"hidden_state_warning": "hidden state (final) should have 1st dim as 2"}
@registry.register_model("cnn_lstm")
class CNNLSTM(BaseModel):
"""CNNLSTM is a simple model for vision and language tasks. CNNLSTM is supposed
    to act as a baseline for trying out ideas without any complex functionality.
    It passes the image through a CNN and the text through an LSTM, and fuses them
    using concatenation. Finally, it passes the fused representation through an MLP
    to generate scores for each of the possible answers.
Args:
config (DictConfig): Configuration node containing all of the necessary
config required to initialize CNNLSTM.
Inputs: sample_list (SampleList)
- **sample_list** should contain image attribute for image, text for
question split into word indices, targets for answer scores
"""
def __init__(self, config):
super().__init__(config)
self._global_config = registry.get("config")
self._datasets = self._global_config.datasets.split(",")
@classmethod
def config_path(cls):
return "configs/models/cnn_lstm/defaults.yaml"
def build(self):
assert len(self._datasets) > 0
num_question_choices = registry.get(
_TEMPLATES["question_vocab_size"].format(self._datasets[0])
)
num_answer_choices = registry.get(
_TEMPLATES["number_of_answers"].format(self._datasets[0])
)
self.text_embedding = nn.Embedding(
num_question_choices, self.config.text_embedding.embedding_dim
)
self.lstm = nn.LSTM(**self.config.lstm)
layers_config = self.config.cnn.layers
conv_layers = []
for i in range(len(layers_config.input_dims)):
conv_layers.append(
ConvNet(
layers_config.input_dims[i],
layers_config.output_dims[i],
kernel_size=layers_config.kernel_sizes[i],
)
)
conv_layers.append(Flatten())
self.cnn = nn.Sequential(*conv_layers)
# As we generate output dim dynamically, we need to copy the config
# to update it
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.out_dim = num_answer_choices
self.classifier = ClassifierLayer(
classifier_config.type, **classifier_config.params
)
def forward(self, sample_list):
self.lstm.flatten_parameters()
question = sample_list.text
image = sample_list.image
# Get (h_n, c_n), last hidden and cell state
_, hidden = self.lstm(self.text_embedding(question))
# X x B x H => B x X x H where X = num_layers * num_directions
hidden = hidden[0].transpose(0, 1)
# X should be 2 so we can merge in that dimension
assert hidden.size(1) == 2, _CONSTANTS["hidden_state_warning"]
hidden = torch.cat([hidden[:, 0, :], hidden[:, 1, :]], dim=-1)
image = self.cnn(image)
# Fuse into single dimension
fused = torch.cat([hidden, image], dim=-1)
scores = self.classifier(fused)
return {"scores": scores}
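# Illustrative sketch (not part of the original file): how the two directions of
# the final LSTM hidden state are merged before being fused with the CNN
# features, mirroring the transpose/cat logic in CNNLSTM.forward above. The
# batch size and hidden dimension are assumptions chosen only for the example.
def _example_merge_bilstm_hidden() -> torch.Size:
    batch_size, hidden_dim = 4, 8
    # h_n from nn.LSTM has shape (num_layers * num_directions) x B x H
    h_n = torch.zeros(2, batch_size, hidden_dim)
    hidden = h_n.transpose(0, 1)  # B x 2 x H
    merged = torch.cat([hidden[:, 0, :], hidden[:, 1, :]], dim=-1)  # B x 2H
    return merged.shape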
| EXA-1-master | exa/models/mmf-main/mmf/models/cnn_lstm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
from copy import deepcopy
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.hf_layers import replace_with_jit
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.modeling import get_optimizer_parameters_for_bert
from omegaconf import OmegaConf
from torch import nn, Tensor
from torch.nn import CrossEntropyLoss
try:
from transformers3.modeling_bert import (
ACT2FN,
BertConfig,
BertEmbeddings,
BertIntermediate,
BertLMPredictionHead,
BertOutput,
BertPredictionHeadTransform,
BertPreTrainedModel,
BertSelfOutput,
)
except ImportError:
from transformers.modeling_bert import (
ACT2FN,
BertConfig,
BertEmbeddings,
BertIntermediate,
BertLMPredictionHead,
BertOutput,
BertPredictionHeadTransform,
BertPreTrainedModel,
BertSelfOutput,
)
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.visualization = config.visualization
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self, hidden_states: Tensor, attention_mask: Tensor
) -> Tuple[Tensor, Dict[str, Tensor]]:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the
        # BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
if self.visualization:
attn_data = {
"attn": attention_probs,
"queries": query_layer,
"keys": key_layer,
}
else:
attn_data = {}
return context_layer, attn_data
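# Minimal sketch (not part of the original file) of the per-head scaled
# dot-product attention computed above: scores = QK^T / sqrt(d), an additive
# mask (0.0 for visible positions, -10000.0 for padded ones) is added before
# the softmax, and the probabilities weight the values. All tensor sizes are
# assumptions chosen only for the example.
def _example_scaled_dot_product_attention() -> Tensor:
    batch, heads, seq, d = 2, 4, 5, 8
    q = torch.randn(batch, heads, seq, d)
    k = torch.randn(batch, heads, seq, d)
    v = torch.randn(batch, heads, seq, d)
    additive_mask = torch.zeros(batch, 1, 1, seq)  # 0.0 = attend, -10000.0 = ignore
    scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(d) + additive_mask
    probs = nn.functional.softmax(scores, dim=-1)
    return torch.matmul(probs, v)  # batch x heads x seq x d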
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(
self, input_tensor: Tensor, attention_mask: Tensor
) -> Tuple[Tensor, Dict[str, Tensor]]:
self_output, attention_probs = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, attention_probs
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self, hidden_states: Tensor, attention_mask: Tensor
) -> Tuple[Tensor, Dict[str, Tensor]]:
attention_output, attention_probs = self.attention(
hidden_states, attention_mask
)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_probs
@torch.no_grad()
def forward_no_grad(self, hidden_states, attention_mask):
return self.forward(hidden_states, attention_mask)
class BertImageSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.v_hidden_size % config.v_num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.v_hidden_size, config.v_num_attention_heads)
)
self.dynamic_attention = config.dynamic_attention
self.num_attention_heads = config.v_num_attention_heads
self.attention_head_size = int(
config.v_hidden_size / config.v_num_attention_heads
)
self.visualization = config.visualization
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.v_hidden_size, self.all_head_size)
self.key = nn.Linear(config.v_hidden_size, self.all_head_size)
self.value = nn.Linear(config.v_hidden_size, self.all_head_size)
if self.dynamic_attention:
self.dyLinear_q = nn.Linear(config.hidden_size, self.all_head_size)
self.dyLinear_k = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.v_attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
if (
self.dynamic_attention
and hasattr(self, "dyLinear_q")
and hasattr(self, "dyLinear_k")
):
pool_embedding = (txt_embedding * txt_attention_mask).sum(1)
pool_embedding = pool_embedding / txt_attention_mask.sum(1)
            # Given the pooled text embedding, compute per-channel gates with a
            # linear layer followed by a sigmoid, shifted into the range (1, 2).
gate_q = 1 + torch.sigmoid(self.dyLinear_q(pool_embedding))
gate_k = 1 + torch.sigmoid(self.dyLinear_k(pool_embedding))
mixed_query_layer = mixed_query_layer * gate_q.unsqueeze(1)
mixed_key_layer = mixed_key_layer * gate_k.unsqueeze(1)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the
# raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel
        # forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
if self.visualization:
attn_data = {
"attn": attention_probs,
"queries": query_layer,
"keys": key_layer,
}
else:
attn_data = {}
return context_layer, attn_data
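# Minimal sketch (not part of the original file) of the dynamic gating used
# above when `dynamic_attention` is enabled: the text embedding is mean-pooled
# with its mask and mapped through a linear layer plus sigmoid to a per-channel
# gate in (1, 2) that rescales the visual queries and keys. The dimensions are
# assumptions chosen only for the example.
def _example_dynamic_gate() -> Tensor:
    batch, num_words, hidden, all_head_size = 2, 6, 16, 16
    txt_embedding = torch.randn(batch, num_words, hidden)
    txt_attention_mask = torch.ones(batch, num_words, 1)
    dy_linear = nn.Linear(hidden, all_head_size)
    pooled = (txt_embedding * txt_attention_mask).sum(1) / txt_attention_mask.sum(1)
    gate = 1 + torch.sigmoid(dy_linear(pooled))  # batch x all_head_size
    return gate.unsqueeze(1)  # broadcast over the region dimension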
class BertImageSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.v_hidden_size)
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.v_hidden_dropout_prob)
def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertImageAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertImageSelfAttention(config)
self.output = BertImageSelfOutput(config)
def forward(
self,
input_tensor: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
self_output, attention_probs = self.self(
input_tensor, attention_mask, txt_embedding, txt_attention_mask
)
attention_output = self.output(self_output, input_tensor)
return attention_output, attention_probs
class BertImageIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.v_intermediate_size)
if isinstance(config.v_hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.v_hidden_act]
else:
self.intermediate_act_fn = config.v_hidden_act
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertImageOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_intermediate_size, config.v_hidden_size)
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.v_hidden_dropout_prob)
def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertImageLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = BertImageAttention(config)
self.intermediate = BertImageIntermediate(config)
self.output = BertImageOutput(config)
def forward(
self,
hidden_states: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
attention_output, attention_probs = self.attention(
hidden_states, attention_mask, txt_embedding, txt_attention_mask
)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output, attention_probs
@torch.no_grad()
def forward_no_grad(
self,
hidden_states: Tensor,
attention_mask: Tensor,
txt_embedding: Tensor,
txt_attention_mask: Tensor,
) -> Tuple[Tensor, Dict[str, Tensor]]:
return self.forward(
hidden_states, attention_mask, txt_embedding, txt_attention_mask
)
class BertBiAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.bi_hidden_size % config.bi_num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.bi_hidden_size, config.bi_num_attention_heads)
)
self.visualization = config.visualization
self.num_attention_heads = config.bi_num_attention_heads
self.attention_head_size = int(
config.bi_hidden_size / config.bi_num_attention_heads
)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# self.scale = nn.Linear(1, self.num_attention_heads, bias=False)
# self.scale_act_fn = ACT2FN['relu']
self.query1 = nn.Linear(config.v_hidden_size, self.all_head_size)
self.key1 = nn.Linear(config.v_hidden_size, self.all_head_size)
self.value1 = nn.Linear(config.v_hidden_size, self.all_head_size)
# self.logit1 = nn.Linear(config.hidden_size, self.num_attention_heads)
self.dropout1 = nn.Dropout(config.v_attention_probs_dropout_prob)
self.query2 = nn.Linear(config.hidden_size, self.all_head_size)
self.key2 = nn.Linear(config.hidden_size, self.all_head_size)
self.value2 = nn.Linear(config.hidden_size, self.all_head_size)
# self.logit2 = nn.Linear(config.hidden_size, self.num_attention_heads)
self.dropout2 = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
input_tensor1: Tensor,
attention_mask1: Tensor,
input_tensor2: Tensor,
attention_mask2: Tensor,
co_attention_mask: Optional[Tensor] = None,
use_co_attention_mask: bool = False,
) -> Tuple[Tensor, Tensor, Dict[str, Tensor]]:
# for vision input.
mixed_query_layer1 = self.query1(input_tensor1)
mixed_key_layer1 = self.key1(input_tensor1)
mixed_value_layer1 = self.value1(input_tensor1)
# mixed_logit_layer1 = self.logit1(input_tensor1)
query_layer1 = self.transpose_for_scores(mixed_query_layer1)
key_layer1 = self.transpose_for_scores(mixed_key_layer1)
value_layer1 = self.transpose_for_scores(mixed_value_layer1)
# logit_layer1 = self.transpose_for_logits(mixed_logit_layer1)
# for text input:
mixed_query_layer2 = self.query2(input_tensor2)
mixed_key_layer2 = self.key2(input_tensor2)
mixed_value_layer2 = self.value2(input_tensor2)
# mixed_logit_layer2 = self.logit2(input_tensor2)
query_layer2 = self.transpose_for_scores(mixed_query_layer2)
key_layer2 = self.transpose_for_scores(mixed_key_layer2)
value_layer2 = self.transpose_for_scores(mixed_value_layer2)
# logit_layer2 = self.transpose_for_logits(mixed_logit_layer2)
# Take the dot product between "query2" and "key1" to get the raw
# attention scores for value 1.
attention_scores1 = torch.matmul(query_layer2, key_layer1.transpose(-1, -2))
attention_scores1 = attention_scores1 / math.sqrt(self.attention_head_size)
attention_scores1 = attention_scores1 + attention_mask1
# if use_co_attention_mask:
# attention_scores1 = attention_scores1 + co_attention_mask.permute(0,1,3,2)
# Normalize the attention scores to probabilities.
attention_probs1 = nn.functional.softmax(attention_scores1, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs1 = self.dropout1(attention_probs1)
context_layer1 = torch.matmul(attention_probs1, value_layer1)
context_layer1 = context_layer1.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape1 = context_layer1.size()[:-2] + (self.all_head_size,)
context_layer1 = context_layer1.view(new_context_layer_shape1)
# Take the dot product between "query1" and "key2" to get the
# raw attention scores for value 2.
attention_scores2 = torch.matmul(query_layer1, key_layer2.transpose(-1, -2))
attention_scores2 = attention_scores2 / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel
        # forward() function).
        # This line can be commented out for a single-flow variant.
attention_scores2 = attention_scores2 + attention_mask2
# if use_co_attention_mask:
# attention_scores2 = attention_scores2 + co_attention_mask
# Normalize the attention scores to probabilities.
attention_probs2 = nn.functional.softmax(attention_scores2, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs2 = self.dropout2(attention_probs2)
context_layer2 = torch.matmul(attention_probs2, value_layer2)
context_layer2 = context_layer2.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape2 = context_layer2.size()[:-2] + (self.all_head_size,)
context_layer2 = context_layer2.view(new_context_layer_shape2)
attn_data = {}
        if self.visualization:
            attn_data = {
                "attn1": attention_probs1,
                "queries1": query_layer2,
                "keys1": key_layer1,
                "attn2": attention_probs2,
                "queries2": query_layer1,
                "keys2": key_layer2,
            }
return context_layer1, context_layer2, attn_data
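# Minimal sketch (not part of the original file) of the two co-attention
# directions above: text queries read from image keys/values (stream 1), while
# image queries read from text keys/values (stream 2). The head, region, word
# and feature sizes are assumptions chosen only for the example.
def _example_co_attention_directions() -> Tuple[Tensor, Tensor]:
    batch, heads, regions, words, d = 2, 2, 7, 5, 8
    q_img = torch.randn(batch, heads, regions, d)
    k_img = torch.randn(batch, heads, regions, d)
    v_img = torch.randn(batch, heads, regions, d)
    q_txt = torch.randn(batch, heads, words, d)
    k_txt = torch.randn(batch, heads, words, d)
    v_txt = torch.randn(batch, heads, words, d)
    # text -> image: each word gathers visual context (length num_words)
    ctx_for_text = torch.matmul(
        nn.functional.softmax(
            torch.matmul(q_txt, k_img.transpose(-1, -2)) / math.sqrt(d), dim=-1
        ),
        v_img,
    )
    # image -> text: each region gathers textual context (length num_regions)
    ctx_for_image = torch.matmul(
        nn.functional.softmax(
            torch.matmul(q_img, k_txt.transpose(-1, -2)) / math.sqrt(d), dim=-1
        ),
        v_txt,
    )
    return ctx_for_text, ctx_for_image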
class BertBiOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense1 = nn.Linear(config.bi_hidden_size, config.v_hidden_size)
self.LayerNorm1 = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout1 = nn.Dropout(config.v_hidden_dropout_prob)
self.q_dense1 = nn.Linear(config.bi_hidden_size, config.v_hidden_size)
self.q_dropout1 = nn.Dropout(config.v_hidden_dropout_prob)
self.dense2 = nn.Linear(config.bi_hidden_size, config.hidden_size)
self.LayerNorm2 = nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout2 = nn.Dropout(config.hidden_dropout_prob)
self.q_dense2 = nn.Linear(config.bi_hidden_size, config.hidden_size)
self.q_dropout2 = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
hidden_states1: Tensor,
input_tensor1: Tensor,
hidden_states2: Tensor,
input_tensor2: Tensor,
) -> Tuple[Tensor, Tensor]:
context_state1 = self.dense1(hidden_states1)
context_state1 = self.dropout1(context_state1)
context_state2 = self.dense2(hidden_states2)
context_state2 = self.dropout2(context_state2)
hidden_states1 = self.LayerNorm1(context_state1 + input_tensor1)
hidden_states2 = self.LayerNorm2(context_state2 + input_tensor2)
return hidden_states1, hidden_states2
class BertConnectionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.biattention = BertBiAttention(config)
self.biOutput = BertBiOutput(config)
self.v_intermediate = BertImageIntermediate(config)
self.v_output = BertImageOutput(config)
self.t_intermediate = BertIntermediate(config)
self.t_output = BertOutput(config)
def forward(
self,
input_tensor1: Tensor,
attention_mask1: Tensor,
input_tensor2: Tensor,
attention_mask2: Tensor,
co_attention_mask: Optional[Tensor] = None,
use_co_attention_mask: bool = False,
) -> Tuple[Tensor, Tensor, Dict[str, Tensor]]:
bi_output1, bi_output2, co_attention_probs = self.biattention(
input_tensor1,
attention_mask1,
input_tensor2,
attention_mask2,
co_attention_mask,
use_co_attention_mask,
)
attention_output1, attention_output2 = self.biOutput(
bi_output2, input_tensor1, bi_output1, input_tensor2
)
intermediate_output1 = self.v_intermediate(attention_output1)
layer_output1 = self.v_output(intermediate_output1, attention_output1)
intermediate_output2 = self.t_intermediate(attention_output2)
layer_output2 = self.t_output(intermediate_output2, attention_output2)
return layer_output1, layer_output2, co_attention_probs
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
        # The BERT encoder combines three things:
        # text BERT layer: BertLayer
        # vision BERT layer: BertImageLayer
        # bi-attention: given the outputs of the two streams, perform
        # bi-directional attention and add connection layers on top.
self.FAST_MODE = config.fast_mode
self.with_coattention = config.with_coattention
self.v_biattention_id = config.v_biattention_id
self.t_biattention_id = config.t_biattention_id
self.in_batch_pairs = config.in_batch_pairs
self.fixed_t_layer = config.fixed_t_layer
self.fixed_v_layer = config.fixed_v_layer
layer = BertLayer(config)
v_layer = BertImageLayer(config)
connect_layer = BertConnectionLayer(config)
self.layer = nn.ModuleList(
[deepcopy(layer) for _ in range(config.num_hidden_layers)]
)
self.v_layer = nn.ModuleList(
[deepcopy(v_layer) for _ in range(config.v_num_hidden_layers)]
)
self.c_layer = nn.ModuleList(
[deepcopy(connect_layer) for _ in range(len(config.v_biattention_id))]
)
def forward(
self,
txt_embedding: Tensor,
image_embedding: Tensor,
txt_attention_mask: Tensor,
txt_attention_mask2: Tensor,
image_attention_mask: Tensor,
co_attention_mask: Tensor,
output_all_encoded_layers: bool = True,
output_all_attention_masks: bool = False,
) -> Tuple[
List[Tensor],
List[Tensor],
Tuple[List[Tensor], List[Tensor], List[Tuple[Tensor, Tensor]]],
]:
v_start = 0
t_start = 0
count = 0
all_encoder_layers_t: List[Tensor] = []
all_encoder_layers_v: List[Tensor] = []
all_attention_mask_t: List[Tensor] = []
        all_attention_mask_v: List[Tensor] = []
all_attention_mask_c: List[Tuple[Tensor, Tensor]] = []
batch_size, num_words, t_hidden_size = txt_embedding.size()
_, num_regions, v_hidden_size = image_embedding.size()
use_co_attention_mask = False
        for v_layer_id, t_layer_id in zip(
            self.v_biattention_id, self.t_biattention_id
        ):
v_end = v_layer_id
t_end = t_layer_id
assert self.fixed_t_layer <= t_end
assert self.fixed_v_layer <= v_end
cur_idx = 0
for cur_layer in self.layer:
if t_start <= cur_idx < self.fixed_t_layer:
txt_embedding, txt_attention_probs = cur_layer.forward_no_grad(
txt_embedding, txt_attention_mask
)
t_start = self.fixed_t_layer
if output_all_attention_masks and "attn" in txt_attention_probs:
all_attention_mask_t.append(txt_attention_probs["attn"])
cur_idx += 1
cur_idx = 0
for cur_layer in self.layer:
if t_start <= cur_idx < t_end:
txt_embedding, txt_attention_probs = cur_layer(
txt_embedding, txt_attention_mask
)
if output_all_attention_masks and "attn" in txt_attention_probs:
all_attention_mask_t.append(txt_attention_probs["attn"])
cur_idx += 1
cur_v_idx = 0
for cur_v_layer in self.v_layer:
if v_start <= cur_v_idx < self.fixed_v_layer:
(
image_embedding,
image_attention_probs,
) = cur_v_layer.forward_no_grad(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask2,
)
v_start = self.fixed_v_layer
if output_all_attention_masks and "attn" in image_attention_probs:
                        all_attention_mask_v.append(image_attention_probs["attn"])
cur_v_idx += 1
cur_v_idx = 0
for cur_v_layer in self.v_layer:
if v_start <= cur_v_idx < v_end:
image_embedding, image_attention_probs = cur_v_layer(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask2,
)
if output_all_attention_masks and "attn" in image_attention_probs:
                        all_attention_mask_v.append(image_attention_probs["attn"])
cur_v_idx += 1
if count == 0 and self.in_batch_pairs:
                # the new batch size is batch_size ** 2 (each image paired with each text)
image_embedding = (
image_embedding.unsqueeze(0)
.expand(batch_size, batch_size, num_regions, v_hidden_size)
.contiguous()
.view(batch_size * batch_size, num_regions, v_hidden_size)
)
image_attention_mask = (
image_attention_mask.unsqueeze(0)
.expand(batch_size, batch_size, 1, 1, num_regions)
.contiguous()
.view(batch_size * batch_size, 1, 1, num_regions)
)
txt_embedding = (
txt_embedding.unsqueeze(1)
.expand(batch_size, batch_size, num_words, t_hidden_size)
.contiguous()
.view(batch_size * batch_size, num_words, t_hidden_size)
)
txt_attention_mask = (
txt_attention_mask.unsqueeze(1)
.expand(batch_size, batch_size, 1, 1, num_words)
.contiguous()
.view(batch_size * batch_size, 1, 1, num_words)
)
co_attention_mask = (
co_attention_mask.unsqueeze(1)
.expand(batch_size, batch_size, 1, num_regions, num_words)
.contiguous()
.view(batch_size * batch_size, 1, num_regions, num_words)
)
if count == 0 and self.FAST_MODE:
txt_embedding = txt_embedding.expand(
image_embedding.size(0),
txt_embedding.size(1),
txt_embedding.size(2),
)
txt_attention_mask = txt_attention_mask.expand(
image_embedding.size(0),
txt_attention_mask.size(1),
txt_attention_mask.size(2),
txt_attention_mask.size(3),
)
if self.with_coattention:
cur_c_idx = 0
for cur_c_layer in self.c_layer:
if cur_c_idx == count:
# do the bi attention.
(
image_embedding,
txt_embedding,
co_attention_probs,
) = cur_c_layer(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask,
co_attention_mask,
use_co_attention_mask,
)
if (
output_all_attention_masks
and "attn1" in co_attention_probs
and "attn2" in co_attention_probs
):
all_attention_mask_c.append(
(
co_attention_probs["attn1"],
co_attention_probs["attn2"],
)
)
cur_c_idx += 1
v_start = v_end
t_start = t_end
count += 1
if output_all_encoded_layers:
all_encoder_layers_t.append(txt_embedding)
all_encoder_layers_v.append(image_embedding)
cur_v_idx = 0
for cur_v_layer in self.v_layer:
if cur_v_idx >= v_start:
image_embedding, image_attention_probs = cur_v_layer(
image_embedding,
image_attention_mask,
txt_embedding,
txt_attention_mask2,
)
if output_all_attention_masks and "attn" in image_attention_probs:
                    all_attention_mask_v.append(image_attention_probs["attn"])
cur_v_idx += 1
cur_idx = 0
for cur_layer in self.layer:
if cur_idx >= t_start:
txt_embedding, txt_attention_probs = cur_layer(
txt_embedding, txt_attention_mask
)
if output_all_attention_masks and "attn" in txt_attention_probs:
all_attention_mask_t.append(txt_attention_probs["attn"])
cur_idx += 1
        # Append the final outputs if they were not collected layer by layer above.
if not output_all_encoded_layers:
all_encoder_layers_t.append(txt_embedding)
all_encoder_layers_v.append(image_embedding)
return (
all_encoder_layers_t,
all_encoder_layers_v,
            (all_attention_mask_t, all_attention_mask_v, all_attention_mask_c),
)
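# Minimal sketch (not part of the original file) of the `in_batch_pairs`
# expansion used in BertEncoder.forward above: every image in the batch is
# paired with every caption, turning a batch of B aligned pairs into B * B
# candidate pairs. The sizes are assumptions chosen only for the example.
def _example_in_batch_pairs() -> Tuple[torch.Size, torch.Size]:
    batch, regions, words, v_dim, t_dim = 3, 4, 5, 6, 7
    image = torch.randn(batch, regions, v_dim)
    text = torch.randn(batch, words, t_dim)
    image_pairs = (
        image.unsqueeze(0)
        .expand(batch, batch, regions, v_dim)
        .contiguous()
        .view(batch * batch, regions, v_dim)
    )
    text_pairs = (
        text.unsqueeze(1)
        .expand(batch, batch, words, t_dim)
        .contiguous()
        .view(batch * batch, words, t_dim)
    )
    return image_pairs.shape, text_pairs.shape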
class BertTextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, hidden_states: Tensor) -> Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertImagePooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, hidden_states: Tensor) -> Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertImgPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.v_hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.v_hidden_act
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertImagePredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertImgPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.v_hidden_size, config.v_target_size)
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.bi_seq_relationship = nn.Linear(config.bi_hidden_size, 2)
self.imagePredictions = BertImagePredictionHead(config)
self.fusion_method = config.fusion_method
self.dropout = nn.Dropout(0.1)
def forward(
self,
sequence_output_t: Tensor,
sequence_output_v: Tensor,
pooled_output_t: Tensor,
pooled_output_v: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:
if self.fusion_method == "sum":
pooled_output = self.dropout(pooled_output_t + pooled_output_v)
elif self.fusion_method == "mul":
pooled_output = self.dropout(pooled_output_t * pooled_output_v)
else:
raise AssertionError
prediction_scores_t = self.predictions(sequence_output_t)
seq_relationship_score = self.bi_seq_relationship(pooled_output)
prediction_scores_v = self.imagePredictions(sequence_output_v)
return prediction_scores_t, prediction_scores_v, seq_relationship_score
class BertImageFeatureEmbeddings(nn.Module):
    """Construct the embeddings from the image features and spatial locations
    (token_type embeddings are omitted for now).
    """
def __init__(self, config):
super().__init__()
self.image_embeddings = nn.Linear(config.v_feature_size, config.v_hidden_size)
self.image_location_embeddings = nn.Linear(5, config.v_hidden_size)
self.LayerNorm = nn.LayerNorm(config.v_hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, image_feature: Tensor, image_location: Tensor) -> Tensor:
img_embeddings = self.image_embeddings(image_feature)
loc_embeddings = self.image_location_embeddings(image_location)
        # TODO: we want to make the padding_idx == 0; however, with custom
        # initialization it seems to have a bias. Let's do masking for now.
embeddings = self.LayerNorm(img_embeddings + loc_embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class ViLBERTBase(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
# Replace transformer layers with scriptable JIT layers
replace_with_jit()
        # initialize the word embeddings
self.embeddings = BertEmbeddings(config)
self.task_specific_tokens = config.task_specific_tokens
        # initialize the vision embeddings
self.v_embeddings = BertImageFeatureEmbeddings(config)
self.encoder = BertEncoder(config)
self.t_pooler = BertTextPooler(config)
self.v_pooler = BertImagePooler(config)
self.init_weights()
def forward(
self,
input_txt: Tensor,
image_feature: Tensor,
image_location: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
image_attention_mask: Optional[Tensor] = None,
co_attention_mask: Optional[Tensor] = None,
task_ids: Optional[Tensor] = None,
output_all_encoded_layers: bool = False,
output_all_attention_masks: bool = False,
) -> Tuple[
Tensor,
Tensor,
Tensor,
Tensor,
Optional[Tuple[List[Tensor], List[Tensor], List[Tuple[Tensor, Tensor]]]],
Optional[List[Tensor]],
Optional[List[Tensor]],
]:
if attention_mask is None:
attention_mask = torch.ones_like(input_txt)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_txt)
if image_attention_mask is None:
image_attention_mask = torch.ones(
image_feature.size(0), image_feature.size(1)
).type_as(input_txt)
all_attention_mask_output: Optional[
Tuple[List[Tensor], List[Tensor], List[Tuple[Tensor, Tensor]]]
] = None
encoded_layers_t_output: Optional[List[Tensor]] = None
encoded_layers_v_output: Optional[List[Tensor]] = None
if self.task_specific_tokens:
# extend the mask
mask_tokens = torch.ones(input_txt.size(0), 1, device=input_txt.device)
attention_mask = torch.cat([mask_tokens, attention_mask], dim=1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast
        # dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_image_attention_mask = image_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask2 = attention_mask.unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
if not torch.jit.is_scripting():
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if not torch.jit.is_scripting():
extended_attention_mask2 = extended_attention_mask2.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_image_attention_mask = extended_image_attention_mask.to(
dtype=next(self.parameters()).dtype
)
extended_image_attention_mask = (1.0 - extended_image_attention_mask) * -10000.0
if co_attention_mask is None:
co_attention_mask = torch.zeros(
input_txt.size(0), image_feature.size(1), input_txt.size(1)
).type_as(extended_image_attention_mask)
extended_co_attention_mask = co_attention_mask.unsqueeze(1)
# extended_co_attention_mask = co_attention_mask.unsqueeze(-1)
extended_co_attention_mask = extended_co_attention_mask * 5.0
if not torch.jit.is_scripting():
extended_co_attention_mask = extended_co_attention_mask.to(
dtype=next(self.parameters()).dtype
)
embedding_output = self.embeddings(input_txt, token_type_ids, task_ids)
v_embedding_output = self.v_embeddings(image_feature, image_location)
encoded_layers_t, encoded_layers_v, all_attention_mask = self.encoder(
embedding_output,
v_embedding_output,
extended_attention_mask,
extended_attention_mask2,
extended_image_attention_mask,
extended_co_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
output_all_attention_masks=output_all_attention_masks,
)
sequence_output_t = encoded_layers_t[-1]
sequence_output_v = encoded_layers_v[-1]
pooled_output_t = self.t_pooler(sequence_output_t)
pooled_output_v = self.v_pooler(sequence_output_v)
if output_all_attention_masks:
all_attention_mask_output = all_attention_mask
if output_all_encoded_layers:
encoded_layers_t_output = encoded_layers_t
encoded_layers_v_output = encoded_layers_v
return (
sequence_output_t,
sequence_output_v,
pooled_output_t,
pooled_output_v,
all_attention_mask_output,
encoded_layers_t_output,
encoded_layers_v_output,
)
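# Minimal sketch (not part of the original file) of the additive attention-mask
# construction in ViLBERTBase.forward above: a 0/1 padding mask is broadcast to
# [batch, 1, 1, seq_len] and turned into 0.0 (attend) / -10000.0 (ignore) so it
# can simply be added to the raw attention scores before the softmax. The mask
# values below are assumptions chosen only for the example.
def _example_extended_attention_mask() -> Tensor:
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # batch=1, seq_len=5
    extended = attention_mask.unsqueeze(1).unsqueeze(2).float()
    return (1.0 - extended) * -10000.0  # 0.0 where visible, -10000.0 where padded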
class ViLBERTForPretraining(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = ViLBERTBase.from_pretrained(
self.config.bert_model_name,
config=BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
),
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.cls = BertPreTrainingHeads(config)
self.vocab_size = self.config.vocab_size
self.visual_target = config.visual_target
self.num_negative = config.num_negative
self.loss_fct = CrossEntropyLoss(ignore_index=-1)
if self.visual_target == 0:
self.vis_criterion = nn.KLDivLoss(reduction="none")
elif self.visual_target == 1:
self.vis_criterion = nn.MSELoss(reduction="none")
elif self.visual_target == 2:
self.vis_criterion = CrossEntropyLoss()
def init_weights(self):
if self.config.random_initialize is False:
if self.config.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
self.cls.apply(self.bert._init_weights)
self.tie_weights()
    def tie_weights(self):
        """Make sure we are sharing the input and output embeddings.
        TorchScript export can't handle parameter sharing, so we clone
        them instead.
        """
self._tie_or_clone_weights(
self.cls.predictions.decoder, self.bert.embeddings.word_embeddings
)
def forward(
self,
input_ids: Tensor,
image_feature: Tensor,
image_location: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
image_attention_mask: Tensor,
masked_lm_labels: Optional[Tensor] = None,
image_label: Optional[Tensor] = None,
image_target: Optional[Tensor] = None,
output_all_attention_masks: bool = False,
) -> Dict[str, Tensor]:
masked_img_loss: Optional[Tensor] = None
(
sequence_output_t,
sequence_output_v,
pooled_output_t,
pooled_output_v,
attention_weights,
_encoded_layers_t_output,
_encoded_layers_v_output,
) = self.bert(
input_ids,
image_feature,
image_location,
token_type_ids,
attention_mask,
image_attention_mask,
output_all_encoded_layers=False,
output_all_attention_masks=output_all_attention_masks,
)
prediction_scores_t, prediction_scores_v, seq_relationship_score = self.cls(
sequence_output_t, sequence_output_v, pooled_output_t, pooled_output_v
)
output = {}
if not torch.jit.is_scripting() and output_all_attention_masks:
output["attention_weights"] = attention_weights
if image_label is not None and image_target is not None:
if self.visual_target == 1:
img_loss = self.vis_criterion(prediction_scores_v, image_target)
masked_img_loss = torch.sum(
img_loss * torch.eq(image_label, 1).unsqueeze(2).float()
) / max(
torch.sum(
torch.eq(image_label, 1).unsqueeze(2).expand_as(img_loss)
),
1,
)
elif self.visual_target == 0:
img_loss = self.vis_criterion(
F.log_softmax(prediction_scores_v, dim=2), image_target
)
                # clamp the denominator to avoid division by zero when no
                # regions are masked
                masked_img_loss = torch.sum(
                    img_loss * torch.eq(image_label, 1).unsqueeze(2).float()
                ) / max(torch.sum(torch.eq(image_label, 1)), 1)
elif self.visual_target == 2:
# generate negative sampled index.
num_across_batch = int(self.num_negative * 0.7)
num_inside_batch = int(self.num_negative * 0.3)
batch_size, num_regions, _ = prediction_scores_v.size()
assert batch_size != 0
# random negative across batches.
row_across_index = torch.ones(
batch_size,
num_regions,
num_across_batch,
dtype=input_ids.dtype,
device=input_ids.device,
).random_(0, batch_size - 1)
col_across_index = torch.ones(
batch_size,
num_regions,
num_across_batch,
dtype=input_ids.dtype,
device=input_ids.device,
).random_(0, num_regions)
for i in range(batch_size - 1):
row_across_index[i][row_across_index[i] == i] = batch_size - 1
final_across_index = row_across_index * num_regions + col_across_index
# random negative inside batches.
row_inside_index = torch.zeros(
batch_size,
num_regions,
num_inside_batch,
dtype=input_ids.dtype,
device=input_ids.device,
)
col_inside_index = torch.ones(
batch_size,
num_regions,
num_inside_batch,
dtype=input_ids.dtype,
device=input_ids.device,
).random_(0, num_regions - 1)
for i in range(batch_size):
row_inside_index[i] = i
for i in range(num_regions - 1):
col_inside_index[:, i, :][col_inside_index[:, i, :] == i] = (
num_regions - 1
)
final_inside_index = row_inside_index * num_regions + col_inside_index
final_index = torch.cat((final_across_index, final_inside_index), dim=2)
# Let's first sample where we need to compute.
predict_v = prediction_scores_v[image_label == 1]
neg_index_v = final_index[image_label == 1]
flat_image_target = image_target.view(batch_size * num_regions, -1)
# we also need to append the target feature at the beginning.
negative_v = flat_image_target[neg_index_v]
positive_v = image_target[image_label == 1]
sample_v = torch.cat((positive_v.unsqueeze(1), negative_v), dim=1)
# calculate the loss.
score = torch.bmm(sample_v, predict_v.unsqueeze(2)).squeeze(2)
masked_img_loss = self.vis_criterion(
score,
torch.zeros(
score.size(0), dtype=input_ids.dtype, device=input_ids.device
),
)
if masked_img_loss is not None:
output["masked_img_loss"] = masked_img_loss.unsqueeze(0)
if masked_lm_labels is not None:
masked_lm_loss = self.loss_fct(
prediction_scores_t.view(-1, self.vocab_size), masked_lm_labels.view(-1)
)
output["masked_lm_loss"] = masked_lm_loss.unsqueeze(0)
# next_sentence_loss = self.loss_fct(
# seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)
# )
# output["next_sentence_loss"] = next_sentence_loss.unsqueeze(0)
return output
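# Minimal sketch (not part of the original file) of the contrastive scoring used
# for `visual_target == 2` above: each predicted region feature is scored
# against its positive target (placed at index 0) plus sampled negatives, and a
# cross-entropy loss over these scores pushes the positive to the top. The
# number of masked regions, negatives and the feature size are assumptions.
def _example_region_contrastive_loss() -> Tensor:
    num_masked, num_negative, dim = 4, 3, 6
    predict_v = torch.randn(num_masked, dim)
    positive_v = torch.randn(num_masked, dim)
    negative_v = torch.randn(num_masked, num_negative, dim)
    sample_v = torch.cat((positive_v.unsqueeze(1), negative_v), dim=1)
    score = torch.bmm(sample_v, predict_v.unsqueeze(2)).squeeze(2)
    target = torch.zeros(num_masked, dtype=torch.long)  # positive sits at index 0
    return nn.CrossEntropyLoss()(score, target)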
class ViLBERTForClassification(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = ViLBERTBase.from_pretrained(
self.config.bert_model_name,
config=BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
),
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.training_head_type = self.config.training_head_type
self.num_labels = self.config.num_labels
self.fusion_method = config.fusion_method
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
# Create a copy of config since struct mode won't allow direct overrides
# classifier_config is only needed for initializing the classifier
classifier_config = deepcopy(config)
classifier_config.hidden_size = config.bi_hidden_size
if self.config.training_head_type == "nlvr2":
classifier_config.hidden_size *= 2
self.classifier = nn.Sequential(
BertPredictionHeadTransform(classifier_config),
nn.Linear(classifier_config.hidden_size, self.num_labels),
)
self.init_weights()
def init_weights(self):
if self.config.random_initialize is False:
if self.config.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
# Classifier needs to be initialized always as it is task specific
self.classifier.apply(self.bert._init_weights)
def forward(
self,
input_ids: Tensor,
image_feature: Tensor,
image_location: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
image_attention_mask: Optional[Tensor] = None,
masked_lm_labels: Optional[Tensor] = None,
image_label: Optional[Tensor] = None,
image_target: Optional[Tensor] = None,
next_sentence_label: Optional[Tensor] = None,
output_all_attention_masks: bool = False,
) -> Dict[str, Tensor]:
(
sequence_output_t,
sequence_output_v,
pooled_output_t,
pooled_output_v,
attention_weights,
_encoded_layers_t_output,
_encoded_layers_v_output,
) = self.bert(
input_ids,
image_feature,
image_location,
token_type_ids,
attention_mask,
image_attention_mask,
output_all_encoded_layers=False,
output_all_attention_masks=output_all_attention_masks,
)
output = {}
if not torch.jit.is_scripting() and output_all_attention_masks:
output["attention_weights"] = attention_weights
if self.fusion_method == "sum":
pooled_output = self.dropout(pooled_output_t + pooled_output_v)
elif self.fusion_method == "mul":
pooled_output = self.dropout(pooled_output_t * pooled_output_v)
else:
raise AssertionError
if self.training_head_type == "nlvr2":
pooled_output = pooled_output.view(-1, pooled_output.size(1) * 2)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_labels)
output["scores"] = reshaped_logits
return output
@registry.register_model("vilbert")
class ViLBERT(BaseModel):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/vilbert/pretrain.yaml"
# Backward compatibility
@classmethod
def format_state_key(cls, key):
return (
key.replace("bert.bert", "model.bert")
.replace("bert.cls", "model.cls")
.replace("bert.classifier", "model.classifier")
)
def build(self):
if self.config.training_head_type == "pretraining":
self.model = ViLBERTForPretraining(self.config)
else:
self.model = ViLBERTForClassification(self.config)
if self.config.get("freeze_base", False):
for p in self.model.bert.parameters():
p.requires_grad = False
def get_image_and_text_features(self, sample_list):
bert_input_ids = sample_list.input_ids
bert_input_mask = sample_list.input_mask
bert_input_type_ids = sample_list.segment_ids
if sample_list.dataset_name == "nlvr2":
bert_input_ids = torch.cat([bert_input_ids, bert_input_ids])
bert_input_mask = torch.cat([bert_input_mask, bert_input_mask])
bert_input_type_ids = torch.cat([bert_input_type_ids, bert_input_type_ids])
# image input
img0 = getattr(sample_list, "img0", {})
image_info = getattr(img0, "image_info_0", {})
image_dim_variable_0 = getattr(image_info, "max_features", None)
image_feature_variable_0 = getattr(img0, "image_feature_0", None)
image_location_variable_0 = getattr(image_info, "bbox", None)
img1 = getattr(sample_list, "img1", {})
image_info = getattr(img1, "image_info_0", {})
image_dim_variable_1 = getattr(image_info, "max_features", None)
image_feature_variable_1 = getattr(img1, "image_feature_0", None)
image_location_variable_1 = getattr(image_info, "bbox", None)
image_feature_variable = torch.cat(
[image_feature_variable_0, image_feature_variable_1]
)
image_location_variable = torch.cat(
[image_location_variable_0, image_location_variable_1]
)
image_dim_variable = torch.cat([image_dim_variable_0, image_dim_variable_1])
image_label_variable = None
image_target_variable = None
else:
image_info = getattr(sample_list, "image_info_0", {})
image_dim_variable = getattr(image_info, "max_features", None)
image_feature_variable = getattr(sample_list, "image_feature_0", None)
image_label_variable = getattr(sample_list, "image_labels", None)
image_location_variable = getattr(image_info, "bbox", None)
cls_prob = getattr(image_info, "cls_prob", None)
image_target = np.array(cls_prob, dtype=np.float32)
image_target_variable = torch.tensor(
image_target, dtype=torch.float, device=bert_input_ids.device
)
return {
"input_ids": bert_input_ids,
"attention_mask": bert_input_mask,
"token_type_ids": bert_input_type_ids,
"image_dim": image_dim_variable,
"image_feature": image_feature_variable,
"image_location": image_location_variable,
"image_target": image_target_variable,
"image_label": image_label_variable,
}
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
def forward(self, sample_list):
params = self.get_image_and_text_features(sample_list)
# pretraining labels
params["masked_lm_labels"] = getattr(sample_list, "lm_label_ids", None)
# is_random_next = getattr(sample_list, "is_correct", None)
# TODO(aps): Fix on dataset side
# params["is_random_next"] = None
# Prepare Mask
if params["image_feature"] is not None and params["image_dim"] is not None:
image_mask = torch.arange(
params["image_feature"].size(-2), device=params["image_feature"].device
).expand(*params["image_feature"].size()[:-1])
if len(params["image_dim"].size()) < len(image_mask.size()):
params["image_dim"] = params["image_dim"].unsqueeze(-1)
assert len(params["image_dim"].size()) == len(image_mask.size())
image_mask = image_mask < params["image_dim"]
params["image_attention_mask"] = image_mask.long()
else:
params["image_attention_mask"] = None
params.pop("image_dim")
output_dict = self.model(
params["input_ids"],
params["image_feature"],
params["image_location"],
params["token_type_ids"],
params["attention_mask"],
params["image_attention_mask"],
params["masked_lm_labels"],
params["image_label"],
params["image_target"],
)
if self.config.training_head_type == "pretraining":
loss_key = "{}/{}".format(
sample_list.dataset_name, sample_list.dataset_type
)
output_dict["losses"] = {}
output_dict["losses"][loss_key + "/masked_lm_loss"] = output_dict.pop(
"masked_lm_loss"
)
output_dict["losses"][loss_key + "/masked_img_loss"] = output_dict.pop(
"masked_img_loss"
)
# if params["is_random_next"] is not None:
# output_dict["losses"][loss_key + "/next_sentence_loss"]
# = output_dict.pop("next_sentence_loss")
return output_dict
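# Minimal sketch (not part of the original file) of the image attention mask
# built in ViLBERT.forward above: given the number of valid regions per example,
# an arange over the padded region axis is compared against that count to give a
# 0/1 mask. The batch size and region counts are assumptions for illustration.
def _example_region_mask() -> Tensor:
    image_feature = torch.randn(2, 5, 7)  # batch x max_regions x feat_dim
    image_dim = torch.tensor([[3], [5]])  # number of valid regions per example
    mask = torch.arange(image_feature.size(-2)).expand(*image_feature.size()[:-1])
    return (mask < image_dim).long()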
| EXA-1-master | exa/models/mmf-main/mmf/models/vilbert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.layers import ReLUWithWeightNormFC
# Note: Doesn't work currently. Needs to be migrated to new API
@registry.register_model("top_down_bottom_up")
class TopDownBottomUp(BaseModel):
def __init__(self, image_attention_model, text_embedding_models, classifier):
super().__init__()
self.image_attention_model = image_attention_model
self.text_embedding_models = text_embedding_models
self.classifier = classifier
text_lstm_dim = sum([q.text_out_dim for q in text_embedding_models])
joint_embedding_out_dim = classifier.input_dim
image_feat_dim = image_attention_model.image_feat_dim
self.non_linear_text = ReLUWithWeightNormFC(
text_lstm_dim, joint_embedding_out_dim
)
self.non_linear_image = ReLUWithWeightNormFC(
image_feat_dim, joint_embedding_out_dim
)
@classmethod
def config_path(self):
return None
def build(self):
return
def forward(
self, image_feat_variable, input_text_variable, input_answers=None, **kwargs
):
text_embeddings = []
for q_model in self.text_embedding_models:
q_embedding = q_model(input_text_variable)
text_embeddings.append(q_embedding)
text_embedding = torch.cat(text_embeddings, dim=1)
if isinstance(image_feat_variable, list):
image_embeddings = []
for idx, image_feat in enumerate(image_feat_variable):
ques_embedding_each = torch.unsqueeze(text_embedding[idx, :], 0)
image_feat_each = torch.unsqueeze(image_feat, dim=0)
attention_each = self.image_attention_model(
image_feat_each, ques_embedding_each
)
image_embedding_each = torch.sum(attention_each * image_feat, dim=1)
image_embeddings.append(image_embedding_each)
image_embedding = torch.cat(image_embeddings, dim=0)
else:
attention = self.image_attention_model(image_feat_variable, text_embedding)
image_embedding = torch.sum(attention * image_feat_variable, dim=1)
joint_embedding = self.non_linear_text(text_embedding) * self.non_linear_image(
image_embedding
)
logit_res = self.classifier(joint_embedding)
return logit_res
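# Minimal sketch (not part of the original file) of the attention-weighted
# pooling used in TopDownBottomUp.forward above: per-region attention weights
# scale the region features and the result is summed over regions. The sizes
# are assumptions chosen only for the example.
def _example_attention_pooling() -> torch.Size:
    batch, regions, feat_dim = 2, 4, 6
    image_feat = torch.randn(batch, regions, feat_dim)
    attention = torch.softmax(torch.randn(batch, regions, 1), dim=1)
    pooled = torch.sum(attention * image_feat, dim=1)  # batch x feat_dim
    return pooled.shape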
| EXA-1-master | exa/models/mmf-main/mmf/models/top_down_bottom_up.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# isort:skip_file
from .albef.vit import AlbefVitEncoder
from .ban import BAN
from .base_model import BaseModel
from .butd import BUTD
from .cnn_lstm import CNNLSTM
from .fusions import ConcatBERT, ConcatBoW, FusionBase, LateFusion
from .lorra import LoRRA
from .m4c import M4C
from .m4c_captioner import M4CCaptioner
from .mmbt import MMBT, MMBTForClassification, MMBTForPreTraining
from .mmf_transformer import MMFTransformer
from .pythia import Pythia
from .top_down_bottom_up import TopDownBottomUp
from .unimodal import UnimodalBase, UnimodalModal, UnimodalText
from .uniter import UNITER
from .vilbert import ViLBERT
from .vilt import ViLT
from .vinvl import VinVL
from .visual_bert import VisualBERT
__all__ = [
"TopDownBottomUp",
"Pythia",
"LoRRA",
"BAN",
"BaseModel",
"BUTD",
"MMBTForClassification",
"MMBTForPreTraining",
"FusionBase",
"ConcatBoW",
"ConcatBERT",
"LateFusion",
"CNNLSTM",
"M4C",
"M4CCaptioner",
"MMBT",
"MMFTransformer",
"VisualBERT",
"ViLBERT",
"UnimodalBase",
"UnimodalModal",
"UnimodalText",
"AlbefVitEncoder",
"ViLT",
"UNITER",
"VinVL",
]
| EXA-1-master | exa/models/mmf-main/mmf/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.models.m4c import M4C
@registry.register_model("m4c_captioner")
class M4CCaptioner(M4C):
def __init__(self, config):
super().__init__(config)
self.remove_unk_in_pred = self.config.remove_unk_in_pred
@classmethod
def config_path(cls):
return "configs/models/m4c_captioner/defaults.yaml"
def _forward_output(self, sample_list, fwd_results):
super()._forward_output(sample_list, fwd_results)
if self.remove_unk_in_pred:
# avoid outputting <unk> in the generated captions
fwd_results["scores"][..., self.answer_processor.UNK_IDX] = -1e10
return fwd_results
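# Minimal sketch (not part of the original file) of the <unk> suppression above:
# setting a vocabulary position to a very large negative value before argmax or
# softmax effectively removes it from decoding. The vocabulary size and the
# index used below are assumptions chosen only for illustration.
def _example_suppress_token():
    import torch
    scores = torch.randn(2, 7, 10)  # batch x decoding steps x vocab
    unk_idx = 3
    scores[..., unk_idx] = -1e10
    return scores.argmax(dim=-1)  # never equals unk_idx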
| EXA-1-master | exa/models/mmf-main/mmf/models/m4c_captioner.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Code based off https://github.com/microsoft/Oscar
# modified for MMF
# Licensed under the MIT license.
import logging
from collections import namedtuple
from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional, Tuple
import torch
from mmf.common.registry import registry
from mmf.common.sample import SampleList
from mmf.models.base_model import BaseModel
from mmf.models.transformers.heads.contrastive import ThreeWayContrastive
from mmf.models.transformers.heads.mlm import MLM
from mmf.models.transformers.heads.mlp import MLP
from mmf.utils.general import retry_n
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
try:
from transformers3.modeling_bert import (
BertConfig,
BertEmbeddings,
BertEncoder,
BertPreTrainedModel,
)
except ImportError:
from transformers.modeling_bert import (
BertConfig,
BertEmbeddings,
BertEncoder,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
NUM_RETRIES = 6
class VinVLBase(BertPreTrainedModel):
"""VinVL Bert Encoder for image features
From https://github.com/microsoft/Oscar/blob/master/oscar/modeling/modeling_bert.py
Is a thin wrapper around BertEncoder that handles image features
"""
def __init__(self, config: BertConfig):
super().__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.img_dim = config.img_feature_dim
self.use_img_layernorm = getattr(config, "use_img_layernorm", False)
img_projection = nn.Linear(self.img_dim, self.config.hidden_size, bias=True)
img_embedding_list = [img_projection]
if self.use_img_layernorm:
img_embedding_list += [
nn.LayerNorm(config.hidden_size, eps=config.img_layer_norm_eps)
]
dropout = nn.Dropout(config.hidden_dropout_prob)
img_embedding_list += [dropout]
# is an image encoding used as input to the transformer trunk
self.img_embedding = nn.Sequential(*img_embedding_list)
def forward(
self,
input_ids: Tensor,
img_feats: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
) -> Tuple[Tensor]:
if attention_mask is None:
attention_mask = torch.ones(
(input_ids.size(0), input_ids.size(1) + img_feats.size(1))
).to(input_ids.device)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
# attention_mask with dim 3 is to specify a unique mask for each feature,
# it is broadcast over heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# Make the mask broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_ids.shape})"
+ " or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Do embeddings
text_embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids
)
img_embedding_output = self.img_embedding(img_feats)
embedding_output = torch.cat((text_embedding_output, img_embedding_output), 1)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
output_hidden_states=True,
)
layers = namedtuple("TransformerOutput", ["last_hidden_state", "hidden_layers"])
return layers(encoder_outputs[0], encoder_outputs[1])
def build_vinvl_base(
bert_model_name: str = "bert-base-uncased",
img_feature_dim: int = 2054,
use_img_layernorm: bool = True,
img_layer_norm_eps: float = 1e-12,
random_init: bool = True,
) -> VinVLBase:
bert_config = retry_n(
NUM_RETRIES,
BertConfig.from_pretrained,
bert_model_name,
)
# augment hf BertConfig for vinvl BertImgModel config
bert_config.img_feature_dim = img_feature_dim
bert_config.use_img_layernorm = use_img_layernorm
bert_config.img_layer_norm_eps = img_layer_norm_eps
if random_init:
bert = VinVLBase(bert_config)
else:
bert = retry_n(
NUM_RETRIES,
VinVLBase.from_pretrained,
bert_model_name,
config=bert_config,
)
return bert
class VinVLForClassification(nn.Module):
"""VINVL wrapper for classification"""
def __init__(
self,
mlp_config: Optional[Dict] = None,
loss_config: Optional[Dict] = None,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_feature_dim: int = 2054,
use_img_layernorm: bool = True,
img_layer_norm_eps: float = 1e-12,
*args,
**kwargs,
):
"""VinVL model constructor for classification.
MLP head is configurable through Dict type.
Consult the MLP head class for the config options.
Args:
mlp_config (Optional[Dict], optional):
Classifier MLP head config.
Defaults to {"num_layers": 0}.
loss_config (Optional[Dict], optional):
nn.CrossEntropyLoss params dict.
Defaults to {}.
random_init (bool, optional):
                Flag to randomly initialize VinVL bert weights instead of
                loading pretrained weights.
Defaults to False.
bert_model_name (str, optional):
Name for base bert model.
Used for VinVL base configs and weights.
Defaults to "bert-base-uncased".
img_feature_dim (int, optional):
The size of the VinVL image feature inputs.
Defaults to 2054.
use_img_layernorm (bool, optional):
Flag to use layernorm on image encoding.
Defaults to True.
img_layer_norm_eps (float, optional):
Image layernorm epsilon. Defaults to 1e-12.
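        Example (a minimal sketch; the input shapes and sequence lengths are
        illustrative assumptions)::
            model = VinVLForClassification(mlp_config={"num_layers": 0})
            output = model(
                input_ids=torch.ones(2, 10, dtype=torch.long),
                token_type_ids=torch.zeros(2, 10, dtype=torch.long),
                attention_mask=torch.ones(2, 15),  # 10 text + 5 image positions
                img_feats=torch.rand(2, 5, 2054),
            )
            # output["scores"] holds the classification logits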
"""
super().__init__()
if mlp_config is None:
mlp_config = {"num_layers": 0}
if loss_config is None:
loss_config = {}
self.bert = build_vinvl_base(
bert_model_name=bert_model_name,
img_feature_dim=img_feature_dim,
use_img_layernorm=use_img_layernorm,
img_layer_norm_eps=img_layer_norm_eps,
random_init=random_init,
)
self.classifier = MLP(config=mlp_config)
self.ce_loss = nn.CrossEntropyLoss(**loss_config)
def forward(
self,
input_ids: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
img_feats: Tensor,
position_ids: Optional[Tensor] = None,
labels: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
sequence_output = self.bert(
input_ids,
img_feats=img_feats,
position_ids=position_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
).last_hidden_state
logits = self.classifier(sequence_output)["scores"]
result = {"scores": logits}
if labels is not None:
ce_loss = self.ce_loss(logits.view(-1, logits.size(1)), labels.view(-1))
result["losses"] = {"ce": ce_loss}
return result
class VinVLForPretraining(nn.Module):
"""VINVL wrapper for pretraining
MLM loss is described in https://arxiv.org/pdf/2004.06165.pdf
    The contrastive loss is an ITM-style loss over three classes:
    0 for a match,
    1 for a corrupt caption,
    2 for corrupt image labels.
VinVL trains with object detection labels concatenated with the input text.
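    For example, with a batch of three caption/image-label pairs where the first
    is left intact, the second has its caption corrupted, and the third has its
    image labels corrupted, the expected ``contrastive_labels`` would be
    ``torch.tensor([0, 1, 2])`` (an illustrative sketch of the convention above,
    not taken from the dataset code).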
"""
def __init__(
self,
mlm_config: Optional[MLM.Config] = None,
contrast_config: Optional[ThreeWayContrastive.Config] = None,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_feature_dim: int = 2054,
use_img_layernorm: bool = True,
img_layer_norm_eps: float = 1e-12,
*args,
**kwargs,
):
"""VinVL model constructor for pretraining.
MLM and Contrastive Loss heads are configurable through Dict types.
Consult MLM and MLP head classes for their config options.
Args:
mlm_config (Optional[MLM.Config], optional):
Config object for MLM head.
Defaults to MLM.Config which uses the default MLM configs.
contrast_config (Optional[ThreeWayContrastive.Config], optional):
Config object for the 3-way contrastive head.
Defaults to ThreeWayContrastive.Config which uses a MLP with 3 classes
random_init (bool, optional):
                Flag to randomly initialize VinVL bert weights instead of
                loading pretrained weights.
Defaults to False.
bert_model_name (str, optional):
Name for base bert model.
Used for VinVL base configs and weights.
Defaults to "bert-base-uncased".
img_feature_dim (int, optional):
The size of the VinVL image feature inputs.
Defaults to 2054.
use_img_layernorm (bool, optional):
Flag to use layernorm on image encoding.
Defaults to True.
img_layer_norm_eps (float, optional):
Image layernorm epsilon. Defaults to 1e-12.
"""
super().__init__()
if mlm_config is None:
mlm_config = asdict(MLM.Config())
if contrast_config is None:
contrast_config = asdict(ThreeWayContrastive.Config())
self.bert = build_vinvl_base(
bert_model_name=bert_model_name,
img_feature_dim=img_feature_dim,
use_img_layernorm=use_img_layernorm,
img_layer_norm_eps=img_layer_norm_eps,
random_init=random_init,
)
self.mlm_head = MLM(config=mlm_config)
self.ce_loss = nn.CrossEntropyLoss()
self.contrast_head = ThreeWayContrastive(contrast_config)
def mlm_forward(
self,
input_ids_masked: Tensor,
lm_label_ids: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
img_feats: Tensor,
position_ids: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
hidden_layers = self.bert(
input_ids_masked,
img_feats=img_feats,
position_ids=position_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
).last_hidden_state
mlm_labels = {}
mlm_labels["text"] = lm_label_ids
mlm_labels["image"] = torch.full(
img_feats.shape[:2],
fill_value=-1,
dtype=torch.long,
device=lm_label_ids.device,
)
mlm_labels["combined_labels"] = torch.cat(
[mlm_labels["text"], mlm_labels["image"]], dim=-1
)
processed_sample_list = SampleList({"mlm_labels": mlm_labels})
return self.mlm_head(
hidden_layers, processed_sample_list=processed_sample_list
)["losses"]
def contrastive_forward(
self,
input_ids: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
img_feats: Tensor,
contrastive_labels: Tensor,
position_ids: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
last_hidden_state = self.bert(
input_ids,
img_feats=img_feats,
position_ids=position_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
).last_hidden_state
processed_sample_list = SampleList({"contrastive_labels": contrastive_labels})
# contrastive 3-way loss has 3 classes,
# 0 for a match, 1, 2 for a corrupt caption/image
# labels respectively
return self.contrast_head(last_hidden_state, processed_sample_list)["losses"]
def forward(
self,
input_ids_masked: Tensor,
input_ids_corrupt: Tensor,
lm_label_ids: Tensor,
contrastive_labels: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
token_type_ids_corrupt: Tensor,
attention_mask_corrupt: Tensor,
img_feats: Tensor,
position_ids: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
mlm_result = self.mlm_forward(
input_ids_masked,
lm_label_ids,
token_type_ids,
attention_mask,
img_feats,
position_ids,
)
contrastive_loss_result = self.contrastive_forward(
input_ids_corrupt,
token_type_ids_corrupt,
attention_mask_corrupt,
img_feats,
contrastive_labels,
position_ids,
)
losses = {**mlm_result, **contrastive_loss_result}
return {"losses": losses}
@registry.register_model("vinvl")
class VinVL(BaseModel):
"""VinVL base model called by MMF.
VinVL paper, 3-way contrastive loss:
https://arxiv.org/pdf/2101.00529.pdf
Implementation based on https://github.com/microsoft/Oscar
Expects VinVL features extracted by
https://github.com/microsoft/scene_graph_benchmark
using Visual Genome object detection labels.
The label map used for training is available at
https://github.com/microsoft/scene_graph_benchmark/blob/main/README.md
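    A minimal construction sketch (the inline config is a hand-written
    assumption, not the contents of the default yaml)::
        from omegaconf import OmegaConf
        config = OmegaConf.create(
            {"do_pretraining": False, "heads": {"mlp": {"num_layers": 0}}}
        )
        model = registry.get_model_class("vinvl")(config)
        model.build()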
"""
@dataclass
class Config:
random_init: bool = False
bert_model_name: str = "bert-base-uncased"
hidden_size: int = 768
heads: Any = MISSING
do_pretraining: bool = False
img_feature_dim: int = 2054
img_feature_type: str = "frcnn"
use_img_layernorm: bool = True
img_layer_norm_eps: float = 1e-12
max_img_seq_len: int = 70
def __init__(self, config):
super().__init__(config)
self.config = OmegaConf.create({**asdict(self.Config()), **config})
self.do_pretraining = self.config.do_pretraining
@classmethod
def config_path(cls):
return "configs/models/vinvl/defaults.yaml"
def build(self):
if self.do_pretraining:
mlm_config = self.config.heads.get("mlm")
contrast_config = self.config.heads.get("contrast")
self.vinvl = VinVLForPretraining(
mlm_config=mlm_config, contrast_config=contrast_config, **self.config
)
else:
# do classification finetuning
mlp_config = self.config.heads.get("mlp")
loss_config = self.config.get("ce_loss")
self.vinvl = VinVLForClassification(
mlp_config=mlp_config, loss_config=loss_config, **self.config
)
def init_losses(self):
"""
Defer loss management to submodels,
do nothing when called by build_model.
"""
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
attention_mask = self._get_attention_mask(
sample_list["image_feature_0"],
sample_list["image_info_0"],
sample_list["input_mask"],
)
if self.do_pretraining:
corrupt_attention_mask = self._get_attention_mask(
sample_list["image_feature_0"],
sample_list["image_info_0"],
sample_list["input_mask_corrupt"],
)
return self.vinvl(
sample_list["input_ids_masked"],
sample_list["input_ids_corrupt"],
sample_list["lm_label_ids"],
sample_list["contrastive_labels"],
sample_list["segment_ids"],
attention_mask,
sample_list["segment_ids_corrupt"],
corrupt_attention_mask,
sample_list["image_feature_0"],
)
else:
return self.vinvl(
sample_list["input_ids"],
sample_list["segment_ids"],
attention_mask,
sample_list["image_feature_0"],
labels=sample_list.get("labels"),
)
def _get_attention_mask(
self, image_feat: Tensor, image_info: Dict[str, Tensor], input_mask: Tensor
) -> Tensor:
# image_dim = (bs,)
# with the number of features per image in the batch as an int
image_dim = image_info.get("max_features")
if image_dim is None:
image_mask = torch.ones(
(image_feat.size(0), image_feat.size(1)), device=image_feat.device
).long()
else:
image_mask = torch.arange(
image_feat.size(-2), device=image_feat.device
).expand(image_feat.size()[:-1])
if len(image_dim.size()) < len(image_mask.size()):
image_dim = image_dim.unsqueeze(-1)
assert len(image_dim.size()) == len(image_mask.size())
image_mask = image_mask < image_dim
image_mask = image_mask.long()
attention_mask = torch.cat((input_mask, image_mask), dim=-1)
return attention_mask
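# A worked shape example for ``_get_attention_mask`` (all values below are
# illustrative assumptions): with ``input_mask`` of shape (bs, 20),
# ``image_feat`` of shape (bs, 50, 2054) and ``image_info["max_features"]``
# holding per-sample region counts, the image mask keeps the first
# ``max_features`` positions of each sample and the returned attention mask
# has shape (bs, 70).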
| EXA-1-master | exa/models/mmf-main/mmf/models/vinvl.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Models built in MMF need to inherit ``BaseModel`` class and adhere to
a fixed format. To create a model for MMF, follow this quick cheatsheet.
1. Inherit ``BaseModel`` class, make sure to call ``super().__init__()`` in your
class's ``__init__`` function.
2. Implement `build` function for your model. If you build everything in ``__init__``,
you can just return in this function.
3. Write a `forward` function which takes in a ``SampleList`` as an argument and
returns a dict.
4. Register using ``@registry.register_model("key")`` decorator on top of the
class.
If you are doing logits based predictions, the dict you return from your model
should contain a `scores` field. Losses are automatically calculated by the
``BaseModel`` class and added to this dict if not present.
Example::
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
@registry.register("pythia")
class Pythia(BaseModel):
# config is model_config from global config
def __init__(self, config):
super().__init__(config)
def build(self):
....
def forward(self, sample_list):
scores = torch.rand(sample_list.get_batch_size(), 3127)
return {"scores": scores}
"""
import collections
import logging
import warnings
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import pytorch_lightning as pl
from mmf.common.registry import registry
from mmf.common.report import Report
from mmf.common.sample import SampleList, to_device
from mmf.modules.losses import LossConfig, Losses
from mmf.utils.checkpoint import load_pretrained_model
from mmf.utils.checkpoint_updater import MMFToPLCheckpointUpdater
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_current_device
from mmf.utils.logger import log_class_usage
from omegaconf import DictConfig, MISSING, OmegaConf
logger = logging.getLogger(__name__)
class BaseModel(pl.LightningModule):
"""For integration with MMF's trainer, datasets and other features,
    models need to inherit this class, call `super`, write a build function,
write a forward function taking a ``SampleList`` as input and returning a
dict as output and finally, register it using ``@registry.register_model``
Args:
config (DictConfig): ``model_config`` configuration from global config.
"""
@dataclass
class Config:
# Name of the model that is used in registry
model: str = MISSING
losses: Optional[List[LossConfig]] = MISSING
def __init__(self, config: Union[DictConfig, Config]):
super().__init__()
if not isinstance(config, DictConfig) and isinstance(config, self.Config):
config = OmegaConf.structured(config)
self.config = config
self._logged_warning = {"losses_present": False}
self._is_pretrained = False
self._is_pl_enabled = False
self.checkpoint_updater = None
log_class_usage("Model", self.__class__)
@classmethod
def from_params(cls, **kwargs):
return cls(OmegaConf.structured(cls.Config(**kwargs)))
@property
def is_pretrained(self):
return self._is_pretrained
@property
def is_pl_enabled(self):
return self._is_pl_enabled
@is_pretrained.setter
def is_pretrained(self, x: bool):
self._is_pretrained = x
@is_pl_enabled.setter
def is_pl_enabled(self, x: bool):
self._is_pl_enabled = x
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""
This is called by the pl.LightningModule before the model's checkpoint
is loaded.
"""
self.build()
if self.checkpoint_updater is None:
self.checkpoint_updater = MMFToPLCheckpointUpdater()
self.checkpoint_updater.update_checkpoint(checkpoint, self)
def _run_format_state_key(self, state_dict: Dict[str, Any]) -> None:
"""Function to rewrtie the checkpoint in place"""
tmp_state_dict = dict(state_dict)
for attr in tmp_state_dict:
new_attr = self.format_state_key(attr)
if attr != new_attr:
value = state_dict.pop(attr)
state_dict[new_attr] = value
def build(self):
"""Function to be implemented by the child class, in case they need to
        build their model separately from ``__init__``. All model related
downloads should also happen here.
"""
raise NotImplementedError(
"Build method not implemented in the child model class."
)
    def build_meters(self, run_type):
        """Function only used in lightning setting"""
        from mmf.utils.build import build_meters
        self.train_meter, self.val_meter, self.test_meter = build_meters(run_type)
def init_losses(self):
"""Initializes loss for the model based ``losses`` key. Automatically called by
MMF internally after building the model.
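        A hypothetical config sketch (the loss key here is an assumption; see
        ``mmf/modules/losses.py`` for the registered losses)::
            model_config:
              my_model:
                losses:
                - type: cross_entropy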
"""
losses = self.config.get("losses", [])
if len(losses) == 0 and not self.is_pretrained:
warnings.warn(
"No losses are defined in model configuration. You are expected "
"to return loss in your return dict from forward."
)
self.losses = Losses(losses)
@classmethod
def config_path(cls):
return None
@classmethod
def format_state_key(cls, key):
"""Can be implemented if something special needs to be done to the
key when pretrained model is being loaded. This will adapt and return
keys according to that. Useful for backwards compatibility. See
updated load_state_dict below. For an example, see VisualBERT model's
code.
Args:
key (string): key to be formatted
Returns:
string: formatted key
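        Example (a sketch of an override, based on the VisualBERT model that
        appears later in this repository)::
            @classmethod
            def format_state_key(cls, key):
                return key.replace("bert.bert", "model.bert")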
"""
return key
def load_state_dict(self, state_dict, *args, **kwargs):
copied_state_dict = deepcopy(state_dict)
for key in list(copied_state_dict.keys()):
formatted_key = self.format_state_key(key)
copied_state_dict[formatted_key] = copied_state_dict.pop(key)
return super().load_state_dict(copied_state_dict, *args, **kwargs)
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
super().on_save_checkpoint(checkpoint)
config = registry.get("config")
config_dict = OmegaConf.to_container(config, resolve=True)
checkpoint["config"] = config_dict
# TODO: add git features, for example:
# 'git/branch', 'git/commit_hash', 'git/commit_author',
# 'git/commit_message', 'git/diff'
def forward(self, sample_list, *args, **kwargs):
"""To be implemented by child class. Takes in a ``SampleList`` and
returns back a dict.
Args:
sample_list (SampleList): SampleList returned by the DataLoader for
current iteration
Returns:
Dict: Dict containing scores object.
"""
raise NotImplementedError(
"Forward of the child model class needs to be implemented."
)
def training_step(self, batch: SampleList, batch_idx: int, *args, **kwargs):
"""Member function of PL modules. Used only when PL enabled.
To be implemented by child class. Takes in a ``SampleList``,
batch_idx and returns back a dict.
Args:
sample_list (SampleList): SampleList returned by the DataLoader for
current iteration
Returns:
Dict: Dict containing loss.
"""
output = self._forward_lightning_step(batch, batch_idx)
if hasattr(self, "train_meter"):
report = Report(batch, output).detach()
self.train_meter.update_from_report(report)
return output
def validation_step(self, batch: SampleList, batch_idx: int, *args, **kwargs):
"""Member function of PL modules. Used only when PL enabled.
To be implemented by child class. Takes in a ``SampleList``,
batch_idx and returns back a dict.
Args:
sample_list (SampleList): SampleList returned by the DataLoader for
current iteration
Returns:
Dict
"""
output = self._forward_lightning_step(batch, batch_idx)
if hasattr(self, "val_meter"):
report = Report(batch, output).detach()
self.val_meter.update_from_report(report, should_update_loss=False)
report.metrics = self.metrics(report, report)
self.log_dict(report.metrics)
return output
def test_step(self, batch: SampleList, batch_idx: int, *args, **kwargs):
"""Member function of PL modules. Used only when PL enabled.
To be implemented by child class. Takes in a ``SampleList``,
batch_idx and returns back a dict.
Args:
sample_list (SampleList): SampleList returned by the DataLoader for
current iteration
Returns:
Dict
"""
return self._forward_lightning_step(batch, batch_idx)
def _forward_lightning_step(self, batch, batch_idx):
batch = self._ensure_sample_list(batch)
output = self(batch)
loss_dict = output["losses"]
output["loss"] = sum(loss.mean() for loss in loss_dict.values())
self._detach_forward_output(output)
return output
def _detach_forward_output(self, output):
keys_to_detach = [key for key in output.keys() if key != "loss"]
for key in keys_to_detach:
if hasattr(output[key], "detach"):
output[key] = output[key].detach()
def configure_optimizers(self):
"""Member function of PL modules. Used only when PL enabled."""
assert self._is_pl_enabled, (
"configure_optimizers should be only used as a member "
"function of LightningModule when pytorch lightning is enabled."
)
from mmf.utils.build import build_lightning_optimizers
config = registry.get("config")
return build_lightning_optimizers(self, config)
def _ensure_sample_list(self, batch):
if not isinstance(batch, SampleList):
# Try converting to SampleList
batch = SampleList(batch)
return batch
def __call__(self, sample_list, *args, **kwargs):
if not self._is_pl_enabled:
# Move to proper device i.e. same as the model before passing
sample_list = to_device(sample_list, get_current_device())
model_output = super().__call__(sample_list, *args, **kwargs)
# Don't do anything fancy to output if it is pretrained
if self.is_pretrained:
return model_output
# Make sure that the output from the model is a Mapping
assert isinstance(
model_output, collections.abc.Mapping
), "A dict must be returned from the forward of the model."
if "losses" in model_output:
if not self._logged_warning["losses_present"]:
warnings.warn(
"'losses' already present in model output. "
"No calculation will be done in base model."
)
self._logged_warning["losses_present"] = True
assert isinstance(
model_output["losses"], collections.abc.Mapping
), "'losses' must be a dict."
elif hasattr(self, "losses"):
model_output["losses"] = self.losses(sample_list, model_output)
else:
model_output["losses"] = {}
return model_output
def load_requirements(self, config, *args, **kwargs):
requirements = config.get("zoo_requirements", [])
if isinstance(requirements, str):
requirements = [requirements]
for item in requirements:
download_pretrained_model(item, *args, **kwargs)
def format_for_prediction(self, results, report):
"""Implement this method in models if it requires to modify prediction
results using report fields. Note that the required fields in report
should already be gathered in report.
"""
return results
@classmethod
def from_pretrained(cls, model_name_or_path, *args, **kwargs):
# Check if the path exists, if not it is pretrained, otherwise,
# we will try to load the checkpoint from the path
if not PathManager.exists(model_name_or_path):
model_key = model_name_or_path.split(".")[0]
model_cls = registry.get_model_class(model_key)
assert (
model_cls == cls
), f"Incorrect pretrained model key {model_name_or_path} "
"for class {cls.__name__}"
output = load_pretrained_model(model_name_or_path, *args, **kwargs)
config, checkpoint, full_config = (
output["config"],
output["checkpoint"],
output["full_config"],
)
# Save original config for state reset later
config_temp_holder = registry.get("config")
# Register full config from checkpoint when loading the model
registry.register("config", full_config)
# Some models need registry updates to be load pretrained model
# If they have this method, call it so they can update accordingly
if hasattr(cls, "update_registry_for_pretrained"):
cls.update_registry_for_pretrained(config, checkpoint, output)
instance = cls(config)
instance.is_pretrained = True
instance.build()
incompatible_keys = instance.load_state_dict(checkpoint, strict=False)
# The model has loaded, reset the state
registry.register("config", config_temp_holder)
if len(incompatible_keys.missing_keys) != 0:
logger.warning(
f"Missing keys {incompatible_keys.missing_keys} in the"
+ " checkpoint.\n"
+ "If this is not your checkpoint, please open up an "
+ "issue on MMF GitHub. \n"
+ f"Unexpected keys if any: {incompatible_keys.unexpected_keys}"
)
if len(incompatible_keys.unexpected_keys) != 0:
logger.warning(
"Unexpected keys in state dict: "
+ f"{incompatible_keys.unexpected_keys} \n"
+ "This is usually not a problem with pretrained models, but "
+ "if this is your own model, please double check. \n"
+ "If you think this is an issue, please open up a "
+ "bug at MMF GitHub."
)
instance.eval()
return instance
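# A minimal usage sketch for ``BaseModel.from_pretrained`` (the zoo key below is
# a hypothetical placeholder, not a real entry):
#     model_cls = registry.get_model_class("visual_bert")
#     model = model_cls.from_pretrained("visual_bert.finetuned.some_dataset")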
| EXA-1-master | exa/models/mmf-main/mmf/models/base_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/uclanlp/visualbert
# which was cleaned up and adapted for MMF.
import os
from copy import deepcopy
from typing import Dict, List, Optional, Tuple
import torch
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.embeddings import BertVisioLinguisticEmbeddings
from mmf.modules.hf_layers import BertEncoderJit, BertLayerJit
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.modeling import get_optimizer_parameters_for_bert
from mmf.utils.torchscript import getattr_torchscriptable
from mmf.utils.transform import (
transform_to_batch_sequence,
transform_to_batch_sequence_dim,
)
from omegaconf import OmegaConf
from torch import nn, Tensor
try:
from transformers3.modeling_bert import (
BertConfig,
BertForPreTraining,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainedModel,
)
except ImportError:
from transformers.modeling_bert import (
BertConfig,
BertForPreTraining,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainedModel,
)
class VisualBERTBase(BertPreTrainedModel):
def __init__(
self,
config,
visual_embedding_dim=512,
embedding_strategy="plain",
bypass_transformer=False,
output_attentions=False,
output_hidden_states=False,
):
super().__init__(config)
self.config = config
config.visual_embedding_dim = visual_embedding_dim
config.embedding_strategy = embedding_strategy
config.bypass_transformer = bypass_transformer
config.output_attentions = output_attentions
config.output_hidden_states = output_hidden_states
self.embeddings = BertVisioLinguisticEmbeddings(config)
self.encoder = BertEncoderJit(config)
self.pooler = BertPooler(config)
self.bypass_transformer = config.bypass_transformer
if self.bypass_transformer:
self.additional_layer = BertLayerJit(config)
self.output_attentions = self.config.output_attentions
self.output_hidden_states = self.config.output_hidden_states
self.init_weights()
def forward(
self,
input_ids: Tensor,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
visual_embeddings: Optional[Tensor] = None,
visual_embeddings_type: Optional[Tensor] = None,
image_text_alignment: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, List[Tensor]]:
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of
# causal attention used in OpenAI GPT, we just need to prepare the
# broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# Python builtin next is currently not supported in Torchscript
if not torch.jit.is_scripting():
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(
input_ids,
token_type_ids,
visual_embeddings=visual_embeddings,
visual_embeddings_type=visual_embeddings_type,
image_text_alignment=image_text_alignment,
)
if (
self.bypass_transformer
and visual_embeddings is not None
and hasattr(self, "additional_layer")
):
assert (
not self.output_hidden_states
) # Don't support this for the bypass model
text_length = input_ids.size(1)
text_embedding_output = embedding_output[:, :text_length, :]
visual_part = embedding_output[:, text_length:, :]
text_extended_attention_mask = extended_attention_mask[
:, :, :text_length, :text_length
]
encoded_layers = self.encoder(
text_embedding_output, text_extended_attention_mask
)
sequence_output = encoded_layers[0]
new_input = torch.cat((sequence_output, visual_part), dim=1)
final_sequence_output = self.additional_layer(
new_input, extended_attention_mask
)
pooled_output = self.pooler(final_sequence_output[0])
return final_sequence_output[0], pooled_output, []
else:
encoded_layers = self.encoder(embedding_output, extended_attention_mask)
sequence_output = encoded_layers[0]
pooled_output = self.pooler(sequence_output)
attn_data_list: List[Tensor] = []
if not torch.jit.is_scripting():
if self.output_attentions:
attn_data_list = encoded_layers[1:]
else:
assert (
not self.output_attentions
), "output_attentions not supported in script mode"
return sequence_output, pooled_output, attn_data_list
class VisualBERTForPretraining(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.output_attentions = self.config.output_attentions
self.output_hidden_states = self.config.output_hidden_states
# If bert_model_name is not specified, you will need to specify
# all of the required parameters for BERTConfig and a pretrained
# model won't be loaded
self.bert_model_name = self.config.get("bert_model_name", None)
self.bert_config = BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
)
if self.bert_model_name is None:
self.bert = VisualBERTBase(
self.bert_config,
visual_embedding_dim=self.config.visual_embedding_dim,
embedding_strategy=self.config.embedding_strategy,
bypass_transformer=self.config.bypass_transformer,
output_attentions=self.config.output_attentions,
output_hidden_states=self.config.output_hidden_states,
)
else:
self.bert = VisualBERTBase.from_pretrained(
self.config.bert_model_name,
config=self.bert_config,
cache_dir=os.path.join(
get_mmf_cache_dir(), "distributed_{}".format(-1)
),
visual_embedding_dim=self.config.visual_embedding_dim,
embedding_strategy=self.config.embedding_strategy,
bypass_transformer=self.config.bypass_transformer,
output_attentions=self.config.output_attentions,
output_hidden_states=self.config.output_hidden_states,
)
self.vocab_size = self.bert.config.vocab_size
# TODO: Once omegaconf fixes int keys issue, bring this back
# See https://github.com/omry/omegaconf/issues/149
# with omegaconf.open_dict(self.config):
# # Add bert config such as hidden_state to our main config
# self.config.update(self.bert.config.to_dict())
if self.bert_model_name is None:
bert_masked_lm = BertForPreTraining(self.bert.config)
else:
bert_masked_lm = BertForPreTraining.from_pretrained(
self.config.bert_model_name,
config=self.bert.config,
cache_dir=os.path.join(
get_mmf_cache_dir(), "distributed_{}".format(-1)
),
)
self.cls = deepcopy(bert_masked_lm.cls)
self.loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
self.init_weights()
def init_weights(self):
if self.config.random_initialize is False:
if self.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
self.cls.apply(self.bert._init_weights)
self.tie_weights()
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them
instead.
"""
self.bert._tie_or_clone_weights(
self.cls.predictions.decoder, self.bert.embeddings.word_embeddings
)
def forward(
self,
input_ids: Tensor,
input_mask: Tensor,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
visual_embeddings: Optional[Tensor] = None,
visual_embeddings_type: Optional[Tensor] = None,
image_text_alignment: Optional[Tensor] = None,
masked_lm_labels: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
sequence_output, pooled_output, attention_weights = self.bert(
input_ids,
attention_mask,
token_type_ids,
visual_embeddings,
visual_embeddings_type,
image_text_alignment,
)
output_dict: Dict[str, Tensor] = {}
if not torch.jit.is_scripting():
if self.output_attentions:
output_dict["attention_weights"] = attention_weights
if self.output_hidden_states:
output_dict["sequence_output"] = sequence_output
output_dict["pooled_output"] = pooled_output
else:
assert not (
self.output_attentions or self.output_hidden_states
), "output_attentions or output_hidden_states not supported in script mode"
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output
)
if masked_lm_labels is not None:
output_dict["logits"] = prediction_scores
masked_lm_loss = self.loss_fct(
prediction_scores.contiguous().view(-1, self.vocab_size),
masked_lm_labels.contiguous().view(-1),
)
output_dict["masked_lm_loss"] = masked_lm_loss
output_dict["loss"] = masked_lm_loss
return output_dict
class VisualBERTForClassification(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.output_attentions = self.config.output_attentions
self.output_hidden_states = self.config.output_hidden_states
self.pooler_strategy = self.config.get("pooler_strategy", "default")
# If bert_model_name is not specified, you will need to specify
# all of the required parameters for BERTConfig and a pretrained
# model won't be loaded
self.bert_model_name = getattr(self.config, "bert_model_name", None)
self.bert_config = BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
)
if self.bert_model_name is None:
self.bert = VisualBERTBase(
self.bert_config,
visual_embedding_dim=self.config.visual_embedding_dim,
embedding_strategy=self.config.embedding_strategy,
bypass_transformer=self.config.bypass_transformer,
output_attentions=self.config.output_attentions,
output_hidden_states=self.config.output_hidden_states,
)
else:
self.bert = VisualBERTBase.from_pretrained(
self.config.bert_model_name,
config=self.bert_config,
cache_dir=os.path.join(
get_mmf_cache_dir(), "distributed_{}".format(-1)
),
visual_embedding_dim=self.config.visual_embedding_dim,
embedding_strategy=self.config.embedding_strategy,
bypass_transformer=self.config.bypass_transformer,
output_attentions=self.config.output_attentions,
output_hidden_states=self.config.output_hidden_states,
)
self.training_head_type = self.config.training_head_type
self.num_labels = self.config.num_labels
self.dropout = nn.Dropout(self.bert.config.hidden_dropout_prob)
if self.config.training_head_type == "nlvr2":
self.bert.config.hidden_size *= 2
self.classifier = nn.Sequential(
BertPredictionHeadTransform(self.bert.config),
nn.Linear(self.bert.config.hidden_size, self.config.num_labels),
)
self.init_weights()
def init_weights(self):
if self.config.random_initialize is False:
if self.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
# Classifier needs to be initialized always as it is task specific
self.classifier.apply(self.bert._init_weights)
# Set last hidden layer
if "losses" in self.config and self.config.zerobias:
for loss in self.config.losses:
if "bce" in loss["type"]:
self.classifier[1].bias.data.fill_(self.config.biasfill)
def forward(
self,
input_ids: Tensor,
input_mask: Tensor,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
visual_embeddings: Optional[Tensor] = None,
visual_embeddings_type: Optional[Tensor] = None,
image_text_alignment: Optional[Tensor] = None,
masked_lm_labels: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
sequence_output, pooled_output, attention_weights = self.bert(
input_ids,
attention_mask,
token_type_ids,
visual_embeddings,
visual_embeddings_type,
image_text_alignment,
)
if self.training_head_type == "nlvr2":
# 2B * H => B * 2H
b, h = pooled_output.size()
pooled_output = torch.cat(
[pooled_output[: b // 2], pooled_output[b // 2 :]], dim=1
)
output_dict: Dict[str, Tensor] = {}
if not torch.jit.is_scripting():
if self.output_attentions:
output_dict["attention_weights"] = attention_weights
if self.output_hidden_states:
output_dict["sequence_output"] = sequence_output
output_dict["pooled_output"] = pooled_output
else:
assert not (
self.output_attentions or self.output_hidden_states
), "output_attentions or output_hidden_states not supported in script mode"
if self.pooler_strategy == "vqa":
# In VQA2 pooling strategy, we use representation from second last token
index_to_gather = input_mask.sum(1) - 2
pooled_output = torch.gather(
sequence_output,
1,
index_to_gather.unsqueeze(-1)
.unsqueeze(-1)
.expand(index_to_gather.size(0), 1, sequence_output.size(-1)),
)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_labels)
output_dict["scores"] = reshaped_logits
return output_dict
@registry.register_model("visual_bert")
class VisualBERT(BaseModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.training_head_type: str = self.config.training_head_type
@classmethod
def config_path(cls):
return "configs/models/visual_bert/pretrain.yaml"
def build(self):
if self.training_head_type == "pretraining":
self.model = VisualBERTForPretraining(self.config)
else:
self.model = VisualBERTForClassification(self.config)
if self.config.special_visual_initialize:
self.model.bert.embeddings.initialize_visual_from_pretrained()
if getattr(self.config, "freeze_base", False):
for p in self.model.bert.parameters():
p.requires_grad = False
def flatten(
self,
sample_list: Dict[str, Tensor],
to_be_flattened: List[str],
to_be_flattened_dim: List[str],
) -> Dict[str, Tensor]:
for key in to_be_flattened:
# Make sure these keys are present or otherwise set these keys to None
sample_list[key] = transform_to_batch_sequence(sample_list[key])
for key in to_be_flattened_dim:
sample_list[key] = transform_to_batch_sequence_dim(sample_list[key])
return sample_list
def add_post_flatten_params(
self, sample_list: Dict[str, Tensor]
) -> Dict[str, Tensor]:
sample_list["visual_embeddings_type"] = torch.zeros_like(
sample_list["image_mask"]
)
attention_mask = torch.cat(
(sample_list["input_mask"], sample_list["image_mask"]), dim=-1
)
sample_list["attention_mask"] = attention_mask
if self.training_head_type == "pretraining":
assert sample_list["masked_lm_labels"].size(-1) == sample_list[
"input_mask"
].size(-1)
new_lm_labels = torch.ones_like(attention_mask) * -1
size_masked_lm_labels = sample_list["masked_lm_labels"].size()
assert len(size_masked_lm_labels) == 2
new_lm_labels[
: size_masked_lm_labels[0], : size_masked_lm_labels[1]
] = sample_list["masked_lm_labels"]
sample_list["masked_lm_labels"] = new_lm_labels
return sample_list
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
def flatten_for_bert(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
to_be_flattened = ["input_ids", "token_type_ids", "input_mask", "image_mask"]
to_be_flattened_dim = ["visual_embeddings"]
if self.training_head_type == "pretraining":
to_be_flattened.append("masked_lm_labels")
# We want to convert everything into: batch x sequence_length x (dim).
flattened = self.flatten(sample_list, to_be_flattened, to_be_flattened_dim)
return flattened
def update_sample_list_based_on_head(
self, sample_list: Dict[str, Tensor]
) -> Dict[str, Tensor]:
bert_input_ids = sample_list["input_ids"]
bert_input_mask = sample_list["input_mask"]
bert_input_type_ids = sample_list["segment_ids"]
if self.training_head_type == "nlvr2":
if not torch.jit.is_scripting():
bert_input_ids = torch.cat([bert_input_ids, bert_input_ids])
bert_input_mask = torch.cat([bert_input_mask, bert_input_mask])
bert_input_type_ids = torch.cat(
[bert_input_type_ids, bert_input_type_ids]
)
# image input
img0 = getattr(sample_list, "img0", {})
image_feat_variable_0 = getattr(img0, "image_feature_0", None)
img1 = getattr(sample_list, "img1", {})
image_feat_variable_1 = getattr(img1, "image_feature_0", None)
image_feat_variable = torch.cat(
[image_feat_variable_0, image_feat_variable_1]
)
image_info = getattr(img0, "image_info_0", {})
image_dim_variable_0 = getattr(image_info, "max_features", None)
image_info = getattr(img1, "image_info_0", {})
image_dim_variable_1 = getattr(image_info, "max_features", None)
image_dim_variable = torch.cat(
[image_dim_variable_0, image_dim_variable_1]
)
else:
raise RuntimeError("nlvr2 head doesn't support scripting as of now")
else:
if not torch.jit.is_scripting():
image_info = getattr(sample_list, "image_info_0", {})
image_dim_variable = getattr(image_info, "max_features", None)
image_feat_variable = getattr(sample_list, "image_feature_0", None)
else:
image_feat_variable = sample_list["image_feature_0"]
image_dim_variable = None
if image_dim_variable is None:
image_dim_variable = sample_list["image_feature_0"].new_full(
size=(image_feat_variable.size(0), 1),
fill_value=image_feat_variable.size(1),
)
sample_list["visual_embeddings"] = image_feat_variable
sample_list["image_dim"] = image_dim_variable
sample_list["input_ids"] = bert_input_ids
sample_list["input_mask"] = bert_input_mask
sample_list["token_type_ids"] = bert_input_type_ids
return sample_list
def add_custom_params(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
visual_embeddings = sample_list["visual_embeddings"]
image_dim = sample_list["image_dim"]
if self.training_head_type == "pretraining":
# pretraining labels
sample_list["masked_lm_labels"] = sample_list["lm_label_ids"]
# image_feat_variable = batch x ( num_choice x ) image_feature_length x dim
# Prepare Mask
image_mask = torch.arange(
visual_embeddings.size(-2), device=visual_embeddings.device
).expand(visual_embeddings.size()[:-1])
if len(image_dim.size()) < len(image_mask.size()):
image_dim = image_dim.unsqueeze(-1)
assert len(image_dim.size()) == len(image_mask.size())
image_mask = image_mask < image_dim
sample_list["image_mask"] = image_mask.long()
return sample_list
# Backward compatibility for code from original VisualBERT
@classmethod
def format_state_key(cls, key):
return (
key.replace("bert.bert", "model.bert")
.replace("bert.cls", "model.cls")
.replace("bert.classifier", "model.classifier")
)
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
if torch.jit.is_scripting():
assert (
"image_feature_0" in sample_list
), "Key 'image_feature_0' is required in TorchScript model"
sample_list = self.update_sample_list_based_on_head(sample_list)
sample_list = self.add_custom_params(sample_list)
sample_list = self.flatten_for_bert(sample_list)
sample_list = self.add_post_flatten_params(sample_list)
output_dict = self.model(
sample_list["input_ids"],
sample_list["input_mask"],
sample_list["attention_mask"],
sample_list["token_type_ids"],
sample_list["visual_embeddings"],
sample_list["visual_embeddings_type"],
getattr_torchscriptable(sample_list, "image_text_alignment", None),
getattr_torchscriptable(sample_list, "masked_lm_labels", None),
)
if self.training_head_type == "pretraining":
if not torch.jit.is_scripting():
loss_key = "{}/{}".format(
sample_list["dataset_name"], sample_list["dataset_type"]
)
output_dict["losses"] = {}
output_dict["losses"][loss_key + "/masked_lm_loss"] = output_dict.pop(
"masked_lm_loss"
)
else:
raise RuntimeError("Pretraining head can't be used in script mode.")
return output_dict
| EXA-1-master | exa/models/mmf-main/mmf/models/visual_bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.encoders import MultiModalEncoderBase
from mmf.utils.build import build_classifier_layer
class UnimodalBase(MultiModalEncoderBase):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
def build(self):
encoders = self._build_encoders(self.config)
# Text Encoder mode
if "modal_encoder" not in self.config:
self.encoder = encoders[0]
# Modal encoder mode
elif "text_encoder" not in self.config:
self.encoder = encoders[1]
else:
raise RuntimeError(
"Unimodal Encoder can't have both text and modal encoder"
)
def forward(self, x, *args, **kwargs):
x = self.encoder(x, *args, **kwargs)
# Case of bert encoder, we only need pooled output
if isinstance(x, collections.abc.Sequence) and len(x) >= 2:
x = x[1]
x = torch.flatten(x, start_dim=1)
return x
@registry.register_model("unimodal_text")
class UnimodalText(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/unimodal/text.yaml"
def build(self):
self.base = UnimodalBase(self.config)
# As the in_dim is dynamically calculated we need to copy classifier_config
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.in_dim = self.config.text_hidden_size
self.classifier = build_classifier_layer(classifier_config)
def forward(self, sample_list):
# BERT Based Encoders
args = []
if "input_ids" in sample_list:
text = sample_list.input_ids
args.append(sample_list.input_mask)
args.append(sample_list.segment_ids)
else:
text = sample_list.text
embedding = self.base(text, *args)
output = {}
output["scores"] = self.classifier(embedding)
return output
@registry.register_model("unimodal_image")
class UnimodalModal(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/unimodal/image.yaml"
def build(self):
self.base = UnimodalBase(self.config)
self._is_direct_features_input = self.config.direct_features_input
if self.config.get("freeze_base", False):
for param in self.base.parameters():
param.requires_grad = False
num_features = self.config.modal_encoder.params.num_output_features
# As the in_dim is dynamically calculated we need to copy classifier_config
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.in_dim = num_features * self.config.modal_hidden_size
self.classifier = build_classifier_layer(classifier_config)
def forward(self, sample_list):
# BERT Based Encoders
args = []
if self._is_direct_features_input:
modal = sample_list.image_feature_0
modal = torch.mean(modal, dim=1)
else:
modal = sample_list.image
embedding = self.base(modal, *args)
output = {}
output["scores"] = self.classifier(embedding)
return output
| EXA-1-master | exa/models/mmf-main/mmf/models/unimodal.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.encoders import MultiModalEncoderBase
from mmf.utils.build import build_classifier_layer
from mmf.utils.modeling import get_bert_configured_parameters
class FusionBase(MultiModalEncoderBase):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
def build(self):
encoders = self._build_encoders(self.config)
text_encoder, modal_encoder = encoders[0], encoders[1]
self._modal_encoder_config = self.config.modal_encoder
self._is_direct_features_input = self.config.direct_features_input
self._encoder_config = getattr(text_encoder, "config", None)
self.text = text_encoder
self.modal = modal_encoder
def forward(
self,
text,
modal,
text_args=None,
modal_args=None,
text_kwargs=None,
modal_kwargs=None,
):
if text_args is None:
text_args = []
if modal_args is None:
modal_args = []
if text_kwargs is None:
text_kwargs = {}
if modal_kwargs is None:
modal_kwargs = {}
text = self.text(text, *text_args, **text_kwargs)
# Case of bert encoder, we only need pooled output. For BertModelJIT encoder
# pooled output is the 2nd in the tuple(sequence, pooled, encoded_layers)
if isinstance(text, collections.abc.Sequence) and len(text) >= 2:
text = text[1]
modal = self.modal(modal, *modal_args, **modal_kwargs)
modal = torch.flatten(modal, start_dim=1)
text = torch.flatten(text, start_dim=1)
return text, modal
@registry.register_model("concat_bert")
class ConcatBERT(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
self._is_direct_features_input = config.direct_features_input
@classmethod
def config_path(cls):
return "configs/models/fusions/concat_bert.yaml"
def build(self):
self.base = FusionBase(self.config)
num_features = self.config.num_features
if not self._is_direct_features_input:
num_features = self.config.modal_encoder.params.num_output_features
# As the in_dim is dynamically calculated we need to copy classifier_config
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.in_dim = num_features * self.config.modal_hidden_size
classifier_config.params.in_dim += self.config.text_hidden_size
self.classifier = build_classifier_layer(classifier_config)
if self.config.freeze_text or self.config.freeze_complete_base:
for p in self.base.text.parameters():
p.requires_grad = False
if self.config.freeze_modal or self.config.freeze_complete_base:
for p in self.base.modal.parameters():
p.requires_grad = False
def get_optimizer_parameters(self, config):
# For finetuning setup, we have classifier
lr = config.optimizer.params.lr
model_config = config.model_config.get(config.model, {})
finetune_lr_multiplier = model_config.get("finetune_lr_multiplier", 1)
# Finetune the bert pretrained part with finetune_lr_multiplier if it is set
parameters = get_bert_configured_parameters(
self.base, lr * finetune_lr_multiplier
)
parameters += get_bert_configured_parameters(self.classifier, lr)
return parameters
def forward(self, sample_list):
text = sample_list.input_ids
mask = sample_list.input_mask
segment = sample_list.segment_ids
if self._is_direct_features_input:
modal = sample_list.image_feature_0
else:
modal = sample_list.image
text_embedding, modal_embedding = self.base(text, modal, [mask, segment])
embedding = torch.cat([text_embedding, modal_embedding], dim=-1)
output = {}
output["scores"] = self.classifier(embedding)
return output
@registry.register_model("concat_bow")
class ConcatBoW(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
self._is_direct_features_input = config.direct_features_input
@classmethod
def config_path(cls):
return "configs/models/fusions/concat_bow.yaml"
def build(self):
self.base = FusionBase(self.config)
num_features = self.config.num_features
if not self._is_direct_features_input:
num_features = self.config.modal_encoder.params.num_output_features
# As the in_dim is dynamically calculated we need to copy classifier_config
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.in_dim = num_features * self.config.modal_hidden_size
classifier_config.params.in_dim += self.config.text_hidden_size
self.classifier = build_classifier_layer(classifier_config)
def forward(self, sample_list):
text = sample_list.text
if self._is_direct_features_input:
modal = sample_list.image_feature_0
else:
modal = sample_list.image
text_embedding, modal_embedding = self.base(text, modal)
embedding = torch.cat([text_embedding, modal_embedding], dim=-1)
output = {}
output["scores"] = self.classifier(embedding)
return output
@registry.register_model("late_fusion")
class LateFusion(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
self._is_direct_features_input = config.direct_features_input
@classmethod
def config_path(cls):
return "configs/models/fusions/late_fusion.yaml"
def build(self):
self.base = FusionBase(self.config)
num_features = self.config.num_features
if not self._is_direct_features_input:
num_features = self.config.modal_encoder.params.num_output_features
# As the in_dim is dynamically calculated we need to copy classifier_config
modal_classifier_config = deepcopy(self.config.modal_classifier)
modal_classifier_config.params.in_dim = (
num_features * self.config.modal_hidden_size
)
self.modal_classifier = build_classifier_layer(modal_classifier_config)
text_classifier_config = deepcopy(self.config.text_classifier)
text_classifier_config.params.in_dim = self.config.text_hidden_size
self.text_classifier = build_classifier_layer(text_classifier_config)
def forward(self, sample_list):
text = sample_list.input_ids
mask = sample_list.input_mask
segment = sample_list.segment_ids
if self._is_direct_features_input:
modal = sample_list.image_feature_0
else:
modal = sample_list.image
text_embedding, modal_embedding = self.base(text, modal, [mask, segment])
text = self.text_classifier(text_embedding)
modal = self.modal_classifier(modal_embedding)
output = {}
output["scores"] = (text + modal) / 2
return output
| EXA-1-master | exa/models/mmf-main/mmf/models/fusions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.embeddings import BiLSTMTextEmbedding
from mmf.modules.layers import BCNet, BiAttention, FCNet, WeightNormClassifier
from torch import nn
@registry.register_model("ban")
class BAN(BaseModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self._global_config = registry.get("config")
self._datasets = self._global_config.datasets.split(",")
@classmethod
def config_path(cls):
return "configs/models/ban/defaults.yaml"
def build(self):
self._build_word_embedding()
self._init_text_embedding()
self._init_classifier()
self._init_bilinear_attention()
def _build_word_embedding(self):
text_processor = registry.get(self._datasets[0] + "_text_processor")
vocab = text_processor.vocab
self.word_embedding = vocab.get_embedding(torch.nn.Embedding, embedding_dim=300)
def _init_text_embedding(self):
module_config = self.config.text_embedding
q_mod = BiLSTMTextEmbedding(
module_config.num_hidden,
module_config.emb_size,
module_config.num_layers,
module_config.dropout,
module_config.bidirectional,
module_config.rnn_type,
)
self.q_emb = q_mod
def _init_bilinear_attention(self):
module_config = self.config.bilinear_attention
num_hidden = self.config.text_embedding.num_hidden
v_dim = module_config.visual_feat_dim
v_att = BiAttention(v_dim, num_hidden, num_hidden, module_config.gamma)
b_net = []
q_prj = []
for _ in range(module_config.gamma):
b_net.append(
BCNet(v_dim, num_hidden, num_hidden, None, k=module_config.bc_net.k)
)
q_prj.append(
FCNet(
dims=[num_hidden, num_hidden],
act=module_config.fc_net.activation,
dropout=module_config.fc_net.dropout,
)
)
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.v_att = v_att
def _init_classifier(self):
num_hidden = self.config.text_embedding.num_hidden
num_choices = registry.get(self._datasets[0] + "_num_final_outputs")
dropout = self.config.classifier.dropout
self.classifier = WeightNormClassifier(
num_hidden, num_choices, num_hidden * 2, dropout
)
def forward(self, sample_list):
v = sample_list.image_feature_0
q = self.word_embedding(sample_list.text)
q_emb = self.q_emb.forward_all(q)
b_emb = [0] * self.config.bilinear_attention.gamma
att, logits = self.v_att.forward_all(v, q_emb)
for g in range(self.config.bilinear_attention.gamma):
g_att = att[:, g, :, :]
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, g_att)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
logits = self.classifier(q_emb.sum(1))
return {"scores": logits}
| EXA-1-master | exa/models/mmf-main/mmf/models/ban.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.models.pythia import Pythia
from mmf.modules.decoders import VisDialDiscriminator
class VisDialMultiModalModel(Pythia):
def __init__(self, config):
super().__init__(config)
def build(self):
self._init_text_embedding()
self._init_image_encoders()
self._init_image_embeddings()
self._init_combine_layer()
self._init_decoder()
self._init_extras()
def _init_text_embedding(self):
parent = super()
parent._init_text_embedding("text_embeddings", False)
parent._init_text_embedding("history_embeddings", True)
def get_optimizer_parameters(self, config):
# TODO: Update after implementing decoder
params = [
{"params": self.img_embeddings_list.parameters()},
{"params": self.text_embeddings.parameters()},
{"params": self.multi_modal_combine_layer.parameters()},
{"params": self.decoder.projection_layer.parameters()},
{
"params": self.img_feat_encoders.parameters(),
"lr": (config.optimizer.params.lr * 0.1),
},
]
return params
def _update_text_embedding_args(self, args):
parent = super()
parent._update_text_embedding_args(args)
# Add embedding vectors to args
args.embedding_vectors = self.config.embedding_vectors
def _init_decoder(self):
embedding = self.text_embeddings[0].module
embedding_dim = self.text_embeddings[0].embedding_dim
hidden_dim = self.multi_modal_combine_layer.out_dim
self.decoder = VisDialDiscriminator(
{"embedding_dim": embedding_dim, "hidden_dim": hidden_dim}, embedding
)
def combine_embeddings(self, *args):
return self.multi_modal_combine_layer(*args)
def calculate_logits(self, joint_embedding, **kwargs):
return self.decoder(joint_embedding, kwargs)
def forward(
self, texts, answer_options, histories, image_features, image_dims, **kwargs
):
texts = texts.view(-1, texts.size(2))
histories = histories.view(-1, histories.size(2))
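        # texts and histories come in as (batch, num_rounds, seq_len); flattening
        # the first two dimensions treats every dialog round as an independent sample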
text_embedding_total = self.process_text_embedding(texts)
histories_total = self.process_text_embedding(histories, "history_embeddings")
for idx, image_feature in enumerate(image_features):
feature_size = image_feature.size()[2:]
image_features[idx] = image_feature.view(-1, *feature_size)
size = image_dims.size()[2:]
image_dims = image_dims.view(-1, *size)
assert len(image_features) == len(
self.img_feat_encoders
), "number of image feature model doesnot equal \
to number of image features"
image_embedding_total = self.process_image_embedding(
image_features, image_dims, text_embedding_total
)
if self.inter_model is not None:
image_embedding_total = self.inter_model(image_embedding_total)
joint_embedding = self.combine_embeddings(
image_embedding_total, text_embedding_total, histories_total
)
decoder_info = {
"answer_options": answer_options,
"answer_options_len": kwargs["answer_options_len"],
}
return self.calculate_logits(joint_embedding, **decoder_info)
| EXA-1-master | exa/models/mmf-main/mmf/models/visdial_multi_modal.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections.abc
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.models.transformers.heads.utils import build_heads_dict
from mmf.modules.encoders import TransformerEncoder, ViTEncoder
from mmf.modules.losses import MMFLoss
from mmf.utils.build import build_encoder
from mmf.utils.modeling import get_bert_configured_parameters
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
logger = logging.getLogger()
class ViLTImageEmbedding(nn.Module):
"""
Patch embedding used for ViLT.
https://arxiv.org/pdf/2102.03334.pdf
Implementation based off
https://github.com/dandelin/ViLT/blob/master/vilt/modules/vilt_module.py
Using huggingface ViT modules.
    Can be built with random init or with the embedding weights of an existing
    ViT model from huggingface. A model list is available at
    https://huggingface.co/models?other=vit&sort=downloads
"""
def __init__(
self,
random_init: bool = True,
pretrained_model_name: str = "google/vit-base-patch16-224",
image_size: Optional[List] = None,
hidden_dropout_prob: Optional[float] = None,
hidden_size: Optional[int] = None,
patch_size: Optional[int] = None,
num_channels: Optional[int] = None,
*args,
**kwargs,
):
super().__init__()
config = OmegaConf.create(
{"random_init": random_init, "pretrained_model_name": pretrained_model_name}
)
if image_size is not None:
config.image_size = image_size
if hidden_dropout_prob is not None:
config.hidden_dropout_prob = hidden_dropout_prob
if hidden_size is not None:
config.hidden_size = hidden_size
if patch_size is not None:
config.patch_size = patch_size
if num_channels is not None:
config.num_channels = num_channels
encoder = ViTEncoder(config)
self.embedding = encoder.embeddings
hidden_size = encoder.hf_config.hidden_size
self.token_type_embeddings = nn.Embedding(2, hidden_size)
def forward(self, image: Tensor) -> Tensor:
if image.dim() == 5:
image = image.permute(1, 0, 2, 3, 4).flatten(start_dim=0, end_dim=1)
img_embeddings = self.embedding(image)
img_segment_ids = torch.ones(
img_embeddings.size()[:-1],
dtype=img_embeddings.dtype,
device=img_embeddings.device,
).long()
img_type_embed = self.token_type_embeddings(img_segment_ids)
img_embeddings = img_embeddings + img_type_embed
return img_embeddings
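# Hedged usage sketch (not part of the original file): ViLTImageEmbedding maps a
# batch of images to patch embeddings and adds an image token-type embedding.
# The helper below shows the intended call pattern; it assumes the default
# "google/vit-base-patch16-224" config can be fetched or is cached locally.
def _vilt_image_embedding_example():
    embed = ViLTImageEmbedding(random_init=True)
    images = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)
    patch_embeddings = embed(images)      # (batch, num_patches [+ cls], hidden_size)
    return patch_embeddings.shape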
class ViLTTextEmbedding(nn.Module):
def __init__(
self,
random_init: bool = True,
bert_model_name: str = "bert-base-uncased",
hidden_size: Optional[int] = None,
max_position_embeddings: Optional[int] = None,
*args,
**kwargs,
):
super().__init__()
config = OmegaConf.create(
{"bert_model_name": bert_model_name, "random_init": random_init}
)
if hidden_size is not None:
config.hidden_size = hidden_size
if max_position_embeddings is not None:
config.max_position_embeddings = max_position_embeddings
text_encoder = TransformerEncoder(config)
self.text_embeddings = text_encoder.embeddings
# the hidden_size param enables hidden_size overrides
# if hidden_size is None, hidden_size is loaded
# from the default hf config for the model
# the actual size of the embeddings will always be in the encoder configs
hidden_size = text_encoder.config.hidden_size
self.token_type_embeddings = nn.Embedding(2, hidden_size)
def forward(self, input_ids: Tensor, segment_ids: Tensor) -> Tensor:
text_embedding = self.text_embeddings(input_ids, token_type_ids=segment_ids)
        # the official ViLT repo adds type embeddings twice: once inside the BERT
        # embeddings and once more directly here
text_type_embed = self.token_type_embeddings(segment_ids)
return text_embedding + text_type_embed
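# Hedged usage sketch (not part of the original file): ViLTTextEmbedding wraps the
# BERT embedding layer and, following the official ViLT repo, adds a second
# token-type embedding on top. The vocabulary size and sequence length below are
# illustrative assumptions.
def _vilt_text_embedding_example():
    embed = ViLTTextEmbedding(random_init=True)
    input_ids = torch.randint(0, 30522, (2, 16))        # (batch, seq_len)
    segment_ids = torch.zeros(2, 16, dtype=torch.long)  # single-segment input
    return embed(input_ids, segment_ids).shape          # (batch, seq_len, hidden_size)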
@registry.register_model("vilt")
class ViLT(BaseModel):
@dataclass
class Config(BaseModel.Config):
name: str = "ViLT"
text_embeddings: Any = MISSING
image_encoder: Any = MISSING
@classmethod
def config_path(cls):
return "configs/models/vilt/defaults.yaml"
def build(self):
self.text_embeddings = ViLTTextEmbedding(**self.config.text_embeddings)
self.image_embeddings = ViLTImageEmbedding(**self.config.image_encoder.params)
self.encoder = build_encoder(self.config.image_encoder)
head_configs = self.config.get("heads", {})
self.tasks = self.config.get("tasks", head_configs.keys())
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
self.losses = nn.ModuleDict()
self.heads_dict = build_heads_dict(head_configs, self.tasks, self.losses)
self.modality_keys = self.modality_type = ["text", "image"]
def init_losses(self):
loss_configs = self.config.get("losses", {})
for loss_name, loss_config in loss_configs.items():
self.losses[loss_name] = MMFLoss(loss_config)
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
text_embedding = self.text_embeddings(
sample_list["input_ids"], sample_list["segment_ids"]
)
image_embedding = self.image_embeddings(sample_list["image"])
self.preprocess_sample(sample_list, image_embedding)
# Feed through encoder
embeddings = torch.cat([text_embedding, image_embedding], dim=1)
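        # the concatenation order (text first, then image) matches
        # get_attention_mask below, which concatenates text_mask before image_mask,
        # so each mask position lines up with its embedding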
attention_mask = self.get_attention_mask(
sample_list, text_embedding, image_embedding
)
sequence, _ = self.encoder(embeddings, attention_mask=attention_mask)
if sequence.dim() != 3:
sequence = sequence.unsqueeze(1)
outputs = self.heads_dict(sample_list["dataset_name"], sequence, sample_list)
return outputs
def preprocess_sample(
self, sample_list: Dict[str, Tensor], image_embedding: Tensor
):
head_names = self.heads_dict.head_names
if isinstance(head_names, collections.abc.Mapping):
head_names = head_names[sample_list["dataset_name"]]
head_string = " ".join(head_names)
prepare_itm = "itm" in head_string
prepare_mlm = "mlm" in head_string
if prepare_itm:
sample_list["itm_labels"] = self._infer_itm_labels(sample_list)
if prepare_mlm:
sample_list["mlm_labels"] = self._infer_mlm_labels(
sample_list, image_embedding.size()[:-1]
)
self._encode_mlm(sample_list, image_embedding)
def get_optimizer_parameters(self, config):
if hasattr(self.encoder, "get_optimizer_parameters"):
params = self.encoder.get_optimizer_parameters(config)
else:
params = [{"params": self.encoder.parameters()}]
params += get_bert_configured_parameters(self.text_embeddings)
params += get_bert_configured_parameters(self.heads_dict)
params += [{"params": self.image_embeddings.parameters()}]
return params
def get_attention_mask(
self,
sample_list: Dict[str, Tensor],
text_embedding: Tensor,
image_embedding: Tensor,
) -> Tensor:
text_mask = getattr(sample_list, "input_mask", None)
image_mask = getattr(sample_list, "image_mask", None)
if text_mask is None and image_mask is None:
return None
if text_mask is None:
text_mask = torch.ones(
text_embedding.size()[:-1],
dtype=text_embedding.dtype,
device=text_embedding.device,
)
if image_mask is None:
image_mask = torch.ones(
image_embedding.size()[:-1],
dtype=image_embedding.dtype,
device=image_embedding.device,
)
attention_mask = torch.cat((text_mask, image_mask), dim=-1)
return attention_mask
def _infer_itm_labels(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
input_ids = sample_list["input_ids"]
itm_labels = {}
if "is_correct" in sample_list:
itm_labels["is_correct"] = sample_list["is_correct"]
else:
itm_labels["is_correct"] = torch.tensor(
True, dtype=torch.long, device=input_ids.device
)
return itm_labels
def _infer_mlm_labels(
self, sample_list: Dict[str, Tensor], image_embeddings_size: Tuple[int, int]
):
input_ids = sample_list["input_ids"]
mlm_labels = {}
current_text_idx = 0
if "lm_label_ids" in sample_list:
if sample_list["lm_label_ids"].dim() > 2:
mlm_labels["text"] = sample_list["lm_label_ids"][:, current_text_idx]
current_text_idx += 1
else:
mlm_labels["text"] = sample_list["lm_label_ids"]
else:
mlm_labels["text"] = torch.full(
input_ids.size(),
fill_value=-1,
dtype=torch.long,
device=input_ids.device,
)
mlm_labels["image"] = torch.full(
image_embeddings_size,
fill_value=-1,
dtype=torch.long,
device=input_ids.device,
)
mlm_labels["combined_labels"] = torch.cat(
[mlm_labels["text"], mlm_labels["image"]], dim=-1
)
return mlm_labels
def _encode_mlm(self, sample_list: Dict[str, Tensor], image_embedding: Tensor):
assert "lm_label_ids" in sample_list
input_ids = sample_list.get("input_ids_masked", sample_list["input_ids"])
segment_ids = sample_list["segment_ids"]
text_embedding = self.text_embeddings(input_ids, segment_ids)
embeddings = torch.cat([image_embedding, text_embedding], dim=1)
attention_mask = self.get_attention_mask(
sample_list, text_embedding, image_embedding
)
sequence, _ = self.encoder(embeddings, attention_mask=attention_mask)
if sequence.dim() != 3:
sequence = sequence.unsqueeze(1)
sample_list["hs_masked_for_mlm"] = sequence
| EXA-1-master | exa/models/mmf-main/mmf/models/vilt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/ChenRocks/UNITER/
# and adapted for MMF.
import copy
import logging
import random
from collections import namedtuple
from collections.abc import MutableMapping
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.losses import MMFLoss
from mmf.utils.general import retry_n
from omegaconf import DictConfig, MISSING, OmegaConf
from torch import nn, Tensor
try:
from transformers3.modeling_bert import BertConfig, BertEmbeddings, BertModel
except ImportError:
from transformers.modeling_bert import BertConfig, BertEmbeddings, BertModel
NUM_RETRIES = 6
EMPTY_CONFIG = OmegaConf.create({})
DEFAULT_PRETRAINING_HEAD_CONFIGS = {
"mlm": {"type": "mlm"},
"itm": {"type": "itm"},
"mrc": {"type": "mrc"},
"mrfr": {"type": "mrfr"},
"wra": {"type": "wra"},
}
DEFAULT_PRETRAINING_TASKS = "mlm,itm,mrc,mrfr,wra"
logger = logging.getLogger()
class UNITERImageEmbeddings(nn.Module):
"""
Image Embeddings used by UNITER.
Code modified from https://github.com/ChenRocks/UNITER/blob/master/model/model.py
Performs a linear projection then normalization over image and position features.
"""
def __init__(
self,
img_dim: int = 2048,
hidden_size: int = 768,
eps: float = 1e-12,
hidden_dropout_prob: float = 0,
pos_dim: int = 7,
):
super().__init__()
self.img_linear = nn.Linear(img_dim, hidden_size)
self.img_layer_norm = nn.LayerNorm(hidden_size, eps=eps)
self.pos_layer_norm = nn.LayerNorm(hidden_size, eps=eps)
self.pos_linear = nn.Linear(pos_dim, hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
self.final_layer_norm = nn.LayerNorm(hidden_size, eps=eps)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(
self,
img_feat: Tensor,
img_pos_feat: Tensor,
type_embeddings: Tensor,
img_masks: Optional[Tensor] = None,
) -> Tensor:
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.final_layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
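# Hedged usage sketch (not part of the original file): UNITERImageEmbeddings
# projects region features (img_dim) and 7-d box geometry (pos_dim) into the
# transformer hidden size and sums them with a token-type embedding. All shapes
# below are illustrative assumptions.
def _uniter_image_embeddings_example():
    embed = UNITERImageEmbeddings(img_dim=2048, hidden_size=768, pos_dim=7)
    img_feat = torch.randn(2, 36, 2048)        # (batch, num_regions, img_dim)
    img_pos_feat = torch.rand(2, 36, 7)        # normalized (x1, y1, x2, y2, w, h, area)
    type_embeddings = torch.zeros(2, 36, 768)  # normally from BERT token_type_embeddings
    return embed(img_feat, img_pos_feat, type_embeddings).shape  # (2, 36, 768)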
class UNITERModelBase(nn.Module):
"""UNITER embedding and transformer trunk for V-L modeling.
Modified from https://github.com/ChenRocks/UNITER/ for MMF.
https://arxiv.org/pdf/1909.11740.pdf
By default, this model uses the pretrained bert-base-uncased
transformer trunk with from huggingface.
To train on this model through MMF, look at the UNITER model,
which supports pretraining and finetuning of UNITERModelBase
with configurable heads.
For an example of using this model standalone,
take a look at its unit test in `test_uniter.py`.
"""
def __init__(
self,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_dim: int = 2048,
hidden_size: int = 768,
hidden_dropout_prob: float = 0,
text_embeddings: DictConfig = EMPTY_CONFIG,
encoder: DictConfig = EMPTY_CONFIG,
):
super().__init__()
bert_config = retry_n(
NUM_RETRIES,
BertConfig.from_pretrained,
bert_model_name,
**OmegaConf.to_container(text_embeddings),
)
self.text_embeddings = BertEmbeddings(bert_config)
self.img_embeddings = UNITERImageEmbeddings(
img_dim=img_dim,
hidden_size=hidden_size,
hidden_dropout_prob=hidden_dropout_prob,
)
bert_model_name = bert_model_name
hf_config = retry_n(
NUM_RETRIES,
BertConfig.from_pretrained,
bert_model_name,
**OmegaConf.to_container(encoder),
)
if random_init:
bert_model = BertModel(hf_config)
else:
bert_model = retry_n(
NUM_RETRIES,
BertModel.from_pretrained,
bert_model_name,
config=hf_config,
)
self.encoder = bert_model.encoder
self.pooler = bert_model.pooler
def _compute_txt_embeddings(
self,
input_ids: Tensor,
position_ids: Tensor,
token_type_ids: Optional[Tensor] = None,
) -> Tensor:
output = self.text_embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
)
return output
def _compute_img_embeddings(
self,
img_feat: Tensor,
img_pos_feat: Tensor,
img_masks: Optional[Tensor] = None,
img_type_ids: Optional[Tensor] = None,
) -> Tensor:
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.text_embeddings.token_type_embeddings(img_type_ids)
output = self.img_embeddings(
img_feat, img_pos_feat, img_type_embeddings, img_masks
)
return output
def _compute_img_txt_embeddings(
self,
input_ids: Tensor,
position_ids: Tensor,
img_feat: Tensor,
img_pos_feat: Tensor,
img_masks: Optional[Tensor] = None,
txt_type_ids: Optional[Tensor] = None,
img_type_ids: Optional[Tensor] = None,
) -> Tensor:
txt_emb = self._compute_txt_embeddings(input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids
)
embedding_output = torch.cat([txt_emb, img_emb], dim=1)
return embedding_output
def forward(
self,
input_ids: Tensor,
position_ids: Tensor,
img_feat: Tensor,
img_pos_feat: Tensor,
attention_mask: Tensor,
img_masks: Optional[Tensor] = None,
txt_type_ids: Optional[Tensor] = None,
img_type_ids: Optional[Tensor] = None,
input_modality: str = "image-text",
) -> Tuple[Tensor, Tensor]:
# compute self-attention mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
# https://github.com/huggingface/transformers/issues/542 for details
# on why we add very negative values to attention scores
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding layer
if input_modality == "image":
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids
)
elif input_modality == "text":
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids
)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids,
position_ids,
img_feat,
img_pos_feat,
img_masks,
txt_type_ids,
img_type_ids,
)
encoded_layers = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
output_hidden_states=True,
)
layers = namedtuple("TransformerOutput", ["final_layer", "hidden_layers"])
return layers(encoded_layers[0], encoded_layers[1])
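# Hedged usage sketch (not part of the original file): UNITERModelBase.forward
# embeds text and image regions, concatenates them, and runs a BERT encoder over
# the joint sequence. The call below assumes the "bert-base-uncased" config (and
# weights when random_init=False) can be fetched or is cached; shapes are
# illustrative.
def _uniter_model_base_example():
    model = UNITERModelBase(random_init=True)
    input_ids = torch.randint(0, 30522, (2, 16))
    position_ids = torch.arange(16).unsqueeze(0).expand(2, -1)
    img_feat = torch.randn(2, 36, 2048)
    img_pos_feat = torch.rand(2, 36, 7)
    attention_mask = torch.ones(2, 16 + 36)
    output = model(input_ids, position_ids, img_feat, img_pos_feat, attention_mask)
    return output.final_layer.shape  # (batch, 16 + 36, hidden_size)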
def _infer_with_heads(
processed_sample_list: Dict[str, Tensor],
uniter_model: Any,
heads: Dict[str, Any],
losses: Dict[str, Any],
) -> Dict[str, Tensor]:
sequence_output = uniter_model(
processed_sample_list["input_ids"],
processed_sample_list["position_ids"],
processed_sample_list["image_feat"],
processed_sample_list["img_pos_feat"],
processed_sample_list["attention_mask"],
img_masks=processed_sample_list["image_mask"],
).final_layer
dataset_name = processed_sample_list["dataset_name"]
task = processed_sample_list.get("task", dataset_name)
outputs = heads[task](sequence_output, processed_sample_list=processed_sample_list)
if isinstance(outputs, MutableMapping) and "losses" in outputs:
return outputs
logits = outputs
if isinstance(outputs, MutableMapping) and "scores" in outputs:
logits = outputs["scores"]
logits = logits.contiguous().view(-1, logits.size(-1))
output = losses[dataset_name](processed_sample_list, {"scores": logits})
return {"losses": output, "scores": logits}
class UNITERForClassification(nn.Module):
"""UNITER wrapper for classification
Example params:
head_configs = {"vqa2": {"type": "mlp", "num_labels": 3129}}
losses_configs = {"vqa2": "logit_bce"}
tasks = "vqa2"
"""
def __init__(
self,
head_configs: Dict,
loss_configs: Dict,
tasks: Union[str, List],
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_dim: int = 2048,
hidden_size: int = 768,
hidden_dropout_prob: float = 0,
text_embeddings: Any = EMPTY_CONFIG,
encoder: Any = EMPTY_CONFIG,
):
super().__init__()
self.loss_configs = loss_configs
self.uniter = UNITERModelBase(
random_init=random_init,
bert_model_name=bert_model_name,
img_dim=img_dim,
hidden_size=hidden_size,
hidden_dropout_prob=hidden_dropout_prob,
text_embeddings=text_embeddings,
encoder=encoder,
)
self.heads = nn.ModuleDict()
self.tasks = tasks
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
for task in self.tasks:
assert task in head_configs, (
f"Task {task} is specified in your model configs"
+ " but there is no head configured for the task. "
+ "Head configs can be added under model_config.heads "
+ "in your yaml configs. Either remove this task if UNITER"
+ " is not meant to run on a dataset named {task}"
+ " or add a head config."
)
head_config = head_configs[task]
head_type = head_config.get("type", "mlp")
head_class = registry.get_transformer_head_class(head_type)
self.heads[task] = head_class(head_config)
self.init_losses()
def init_losses(self):
self.losses = nn.ModuleDict()
for task in self.tasks:
if task not in self.loss_configs:
logger.warning(
f"No loss defined for {task}. Head is expected "
+ "to return dict with 'losses'"
)
continue
loss_config = self.loss_configs[task]
self.losses[task] = MMFLoss(loss_config)
def forward(self, processed_sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
return _infer_with_heads(
processed_sample_list, self.uniter, self.heads, self.losses
)
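# Hedged construction sketch (not part of the original file), expanding the example
# params from the UNITERForClassification docstring. The head type "mlp" and the
# loss key "logit_bce" must correspond to a registered transformer head and an MMF
# loss; treat the exact values as assumptions taken from the docstring above.
def _uniter_for_classification_example():
    model = UNITERForClassification(
        head_configs={"vqa2": {"type": "mlp", "num_labels": 3129}},
        loss_configs={"vqa2": "logit_bce"},
        tasks="vqa2",
        random_init=True,
    )
    return model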
class UNITERForPretraining(nn.Module):
"""UNITER wrapper for pretraining"""
def __init__(
self,
head_configs: Optional[Dict] = None,
loss_configs: Optional[Dict] = None,
tasks: Union[List, str] = DEFAULT_PRETRAINING_TASKS,
mask_probability: float = 0,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_dim: int = 2048,
hidden_size: int = 768,
hidden_dropout_prob: float = 0,
text_embeddings: Any = EMPTY_CONFIG,
encoder: Any = EMPTY_CONFIG,
):
super().__init__()
if head_configs is None:
head_configs = copy.deepcopy(DEFAULT_PRETRAINING_HEAD_CONFIGS)
if loss_configs is None:
loss_configs = {}
self.loss_configs = loss_configs
self.mask_probability = mask_probability
self.uniter = UNITERModelBase(
random_init=random_init,
bert_model_name=bert_model_name,
img_dim=img_dim,
hidden_size=hidden_size,
hidden_dropout_prob=hidden_dropout_prob,
text_embeddings=text_embeddings,
encoder=encoder,
)
self.heads = nn.ModuleDict()
self.tasks = tasks
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
for task in self.tasks:
head_config = head_configs[task]
head_type = head_config.get("type", "mlp")
head_class = registry.get_transformer_head_class(head_type)
if head_type == "mrfr":
self.heads[task] = head_class(
self.uniter.img_embeddings.img_linear.weight, **head_config
)
elif head_type in ("itm", "mlm", "mlp"):
self.heads[task] = head_class(head_config)
else:
self.heads[task] = head_class(**head_config)
self.init_losses()
def init_losses(self):
self.losses = nn.ModuleDict()
for task in self.tasks:
if task not in self.loss_configs:
logger.warning(
f"No loss defined for {task}. Head is expected "
+ "to return dict with 'losses'"
)
continue
loss_config = self.loss_configs[task]
self.losses[task] = MMFLoss(loss_config)
def forward(self, processed_sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
assert "is_correct" in processed_sample_list, (
"UNITER pretraining requires mismatched captions."
+ " Please add 'false_caption': true under dataset_config in your "
+ "yaml configs."
)
self._process_sample_list_for_pretraining(processed_sample_list)
task = processed_sample_list["task"]
if task == "mlm":
self._preprocess_mlm(processed_sample_list)
elif task == "itm":
self._preprocess_itm(processed_sample_list)
elif task == "mrc":
self._preprocess_mrc(processed_sample_list)
elif task == "mrfr":
self._preprocess_mrfr(processed_sample_list)
elif task == "wra":
self._preprocess_wra(processed_sample_list)
else:
raise ValueError(f"Task {task} is not supported for pretraining!")
return _infer_with_heads(
processed_sample_list, self.uniter, self.heads, self.losses
)
def _process_sample_list_for_pretraining(
self, processed_sample_list: Dict[str, Tensor]
):
task = processed_sample_list["task"]
if task in ("mrfr", "mrc"):
self._add_image_feat_masked(processed_sample_list)
            # mrc assumes cls_prob is a key in the sample list; keeping it there
            # makes it easier to mask out negative pairs caused by
            # mismatched captions
processed_sample_list["cls_prob"] = torch.tensor(
processed_sample_list["image_info_0"]["cls_prob"]
)
if task not in ("wra", "itm"):
self._remove_mismatched_captions(processed_sample_list)
def _add_image_feat_masked(self, processed_sample_list: Dict[str, Tensor]):
img_feat_masked = torch.clone(processed_sample_list["image_feat"])
num_feat = img_feat_masked.size(1)
img_masks = [
self._get_img_mask(self.mask_probability, num_feat)
for _ in range(img_feat_masked.size(0))
]
img_masks = torch.tensor(img_masks).bool().to(img_feat_masked.device)
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat_masked)
processed_sample_list["image_feat_masked"] = img_feat_masked.data.masked_fill(
img_masks_ext, 0
)
processed_sample_list["image_mask"] = img_masks
def _get_img_mask(self, mask_prob: float, num_bb: int) -> Tensor:
img_mask = list(map(bool, np.random.binomial(1, mask_prob, num_bb)))
if not any(img_mask):
            # make sure at least one region is masked
img_mask[random.choice(range(num_bb))] = True
return img_mask
def _preprocess_mlm(self, processed_sample_list: Dict[str, Tensor]):
assert "lm_label_ids" in processed_sample_list
assert "input_ids_masked" in processed_sample_list
ignore_index = self.heads["mlm"].config.ignore_index
mlm_labels = {}
mlm_labels["text"] = processed_sample_list["lm_label_ids"]
mlm_labels["image"] = torch.full(
processed_sample_list["image_feat"].shape[:2],
fill_value=ignore_index,
dtype=torch.long,
device=mlm_labels["text"].device,
)
mlm_labels["combined_labels"] = torch.cat(
[mlm_labels["text"], mlm_labels["image"]], dim=-1
)
processed_sample_list["mlm_labels"] = mlm_labels
processed_sample_list["input_ids"] = processed_sample_list["input_ids_masked"]
def _preprocess_itm(self, processed_sample_list: Dict[str, Tensor]):
assert "is_correct" in processed_sample_list
processed_sample_list["itm_labels"] = {
"is_correct": processed_sample_list["is_correct"]
}
def _get_feature_mask(self, image_mask, sentence_len):
bs = image_mask.size(0)
padding_for_txt = torch.zeros((bs, sentence_len)).to(image_mask)
concat_mask = torch.cat([padding_for_txt, image_mask], dim=-1)
return concat_mask
def _mask_inputs_in_sample_list(self, processed_sample_list, mask_key):
assert "image_feat_masked" in processed_sample_list
sentence_len = processed_sample_list["input_ids"].size(1)
processed_sample_list[mask_key] = self._get_feature_mask(
processed_sample_list["image_mask"], sentence_len
)
processed_sample_list["image_feat"] = processed_sample_list["image_feat_masked"]
def _preprocess_mrc(self, processed_sample_list: Dict[str, Tensor]):
assert "cls_prob" in processed_sample_list
assert "image_mask" in processed_sample_list
assert "image_feat_masked" in processed_sample_list
mrc_label_key = self.heads["mrc"].mrc_label_key
mrc_mask_key = self.heads["mrc"].mrc_mask_key
image_mask = processed_sample_list["image_mask"]
cls_prob = processed_sample_list["cls_prob"].to(image_mask.device)
img_masks_ext = image_mask.unsqueeze(-1).expand_as(cls_prob) # (n, m, d)
cls_dim = cls_prob.size(2)
cls_prob = cls_prob[img_masks_ext].contiguous().view(-1, cls_dim)
processed_sample_list[mrc_label_key] = cls_prob
self._mask_inputs_in_sample_list(processed_sample_list, mrc_mask_key)
def _preprocess_mrfr(self, processed_sample_list: Dict[str, Tensor]):
assert "image_mask" in processed_sample_list
assert "image_feat_masked" in processed_sample_list
mrfr_target_key = self.heads["mrfr"].mrfr_target_key
mrfr_mask_key = self.heads["mrfr"].mrfr_mask_key
image_mask = processed_sample_list["image_mask"]
image_feat = processed_sample_list["image_feat"]
img_masks_ext = image_mask.unsqueeze(-1).expand_as(image_feat) # (n, m, d)
feat_dim = image_feat.size(2)
feat_targets = image_feat[img_masks_ext].contiguous().view(-1, feat_dim)
processed_sample_list[mrfr_target_key] = feat_targets
self._mask_inputs_in_sample_list(processed_sample_list, mrfr_mask_key)
def _preprocess_wra(self, processed_sample_list: Dict[str, Tensor]):
assert "is_correct" in processed_sample_list
ot_inputs_key = self.heads["wra"].ot_inputs_key
wra_label_key = self.heads["wra"].wra_label_key
txt_lens = [i.size(0) for i in processed_sample_list["input_ids"]]
num_bbs = [f.size(0) for f in processed_sample_list["image_feat"]]
def _compute_pad(lens: List[int]):
max_len = max(lens)
pad = torch.zeros(len(lens), max_len)
for i, l in enumerate(lens):
pad.data[i, l:].fill_(1)
return pad
device = processed_sample_list["input_ids"].device
txt_pad = _compute_pad(txt_lens).to(device).bool()
img_pad = _compute_pad(num_bbs).to(device).bool()
ot_inputs = {"txt_pad": txt_pad, "img_pad": img_pad}
processed_sample_list[ot_inputs_key] = ot_inputs
processed_sample_list[wra_label_key] = processed_sample_list["is_correct"]
def _remove_mismatched_captions(self, processed_sample_list: Dict[str, Tensor]):
assert "is_correct" in processed_sample_list
pos_pairs = processed_sample_list["is_correct"].ne(0)
pos_pairs_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
tensor_names = [
"input_ids",
"input_mask",
"image_feat",
"img_pos_feat",
"attention_mask",
"image_mask",
"image_feat_masked",
"lm_label_ids",
"cls_prob",
]
for name in tensor_names:
x = processed_sample_list.get(name)
if x is None:
continue
if x.dim() == 1:
assert x.size(0) == pos_pairs_mask.size(0), (
f"tensor {name} has shape {x.shape} but expected "
+ f"{pos_pairs_mask.size(0)} at dim 0."
)
                x = x[pos_pairs_mask]
            else:
                x = x[pos_pairs_mask, ::]
            processed_sample_list[name] = x
@registry.register_model("uniter")
class UNITER(BaseModel):
"""Modification for Joint Vision-Language Encoding"""
@dataclass
class Config:
random_init: bool = False
bert_model_name: str = "bert-base-uncased"
img_dim: int = 2048
hidden_size: int = 768
hidden_dropout_prob: float = 0
text_embeddings: Any = field(default_factory=lambda: {})
encoder: Any = field(default_factory=lambda: {})
heads: Any = MISSING
losses: Any = field(default_factory=lambda: {})
tasks: Any = MISSING
do_pretraining: bool = False
def __init__(self, config):
super().__init__(config)
self.config = OmegaConf.create({**asdict(self.Config()), **config})
self.do_pretraining = self.config.do_pretraining
@classmethod
def config_path(cls):
return "configs/models/uniter/defaults.yaml"
def build(self):
configs = dict(**self.config)
configs["head_configs"] = configs.pop("heads")
configs["loss_configs"] = configs.pop("losses")
params_keys = [
"head_configs",
"loss_configs",
"tasks",
"random_init",
"bert_model_name",
"img_dim",
"hidden_size",
"hidden_dropout_prob",
"text_embeddings",
"encoder",
]
if self.do_pretraining:
# take value from config when the key exists,
# otherwise use constructor defaults
params_keys += ["mask_probability"]
params = {key: configs[key] for key in params_keys if key in configs}
self.uniter = UNITERForPretraining(**params)
else:
params = {key: configs[key] for key in params_keys if key in configs}
self.uniter = UNITERForClassification(**params)
self.tasks = self.config.tasks
if isinstance(self.tasks, str):
self.tasks = self.tasks.split(",")
def init_losses(self):
"""
Defer loss management to submodels,
do nothing when called by build_model.
"""
pass
def add_pos_feat(self, sample_list: Dict[str, Tensor]):
assert "image_info_0" in sample_list
assert "bbox" in sample_list["image_info_0"]
# (x1, y1, x2, y2), dim = (bs, num_feats, 4)
bboxs = torch.tensor(sample_list["image_info_0"]["bbox"])[:, :, :4]
norm_xy = torch.clone(bboxs)
# if bboxs are not normalized, just do it here
if norm_xy[0, 0, 0] < 1:
img_h = (
torch.tensor(sample_list["image_info_0"]["image_height"])
.unsqueeze(1)
.unsqueeze(1)
) # (bs,)
img_w = (
torch.tensor(sample_list["image_info_0"]["image_width"])
.unsqueeze(1)
.unsqueeze(1)
) # (bs,)
max_image_size = torch.cat([img_w, img_h, img_w, img_h], dim=-1)
max_image_size = max_image_size.to(norm_xy.device)
norm_xy /= max_image_size
bbox_w = (norm_xy[:, :, 2] - norm_xy[:, :, 0]).unsqueeze(-1)
bbox_h = (norm_xy[:, :, 3] - norm_xy[:, :, 1]).unsqueeze(-1)
area = bbox_w * bbox_h
# normalized (x1, y1, x2, y2, w, h, area)
pos_feat = torch.cat([norm_xy, bbox_w, bbox_h, area], dim=-1).to(
sample_list["image_feature_0"]
)
sample_list["img_pos_feat"] = pos_feat
def add_custom_params(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
image_feat = sample_list["image_feat"] = sample_list["image_feature_0"]
image_info = getattr(sample_list, "image_info_0", {})
image_dim = getattr(image_info, "max_features", None)
sample_list["image_dim"] = image_dim
image_mask = torch.arange(image_feat.size(-2), device=image_feat.device).expand(
image_feat.size()[:-1]
)
if len(image_dim.size()) < len(image_mask.size()):
image_dim = image_dim.unsqueeze(-1)
assert len(image_dim.size()) == len(image_mask.size())
image_mask = image_mask < image_dim
sample_list["image_mask"] = image_mask.long()
sample_list["attention_mask"] = torch.cat(
(sample_list["input_mask"], sample_list["image_mask"]), dim=-1
)
task_index = torch.randint(len(self.tasks), (1,)).item()
sample_list["task"] = self.tasks[task_index]
sample_list["position_ids"] = torch.arange(
0,
sample_list["input_ids"].size(1),
dtype=torch.long,
device=image_feat.device,
).unsqueeze(0)
self.add_pos_feat(sample_list)
return sample_list
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
sample_list = self.add_custom_params(sample_list)
return self.uniter(sample_list)
def get_attention_mask(
self,
sample_list: Dict[str, Tensor],
text_embedding: Tensor,
image_embedding: Tensor,
) -> Tensor:
image_mask = getattr(sample_list, "image_mask", None)
if image_mask is not None and sample_list.input_mask is not None:
attention_mask = torch.cat((sample_list.input_mask, image_mask), dim=-1)
elif image_mask is not None:
text_mask = torch.ones(
text_embedding.size()[:-1],
dtype=text_embedding.dtype,
device=text_embedding.device,
)
attention_mask = torch.cat((image_mask, text_mask), dim=-1)
elif sample_list.input_mask is not None:
image_mask = torch.ones(
image_embedding.size()[:-1],
dtype=image_embedding.dtype,
device=image_embedding.device,
)
attention_mask = torch.cat((image_mask, sample_list.input_mask), dim=-1)
else:
attention_mask = None
return attention_mask
| EXA-1-master | exa/models/mmf-main/mmf/models/uniter.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
from typing import Any, Dict, List, Optional, Tuple
import omegaconf
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.embeddings import (
PreExtractedEmbedding,
TextEmbedding,
TwoBranchEmbedding,
)
from mmf.modules.layers import BranchCombineLayer, ClassifierLayer
from mmf.utils.build import build_image_encoder
from mmf.utils.general import filter_grads
from omegaconf import DictConfig
@registry.register_model("movie_mcan")
class MoVieMcan(BaseModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self._global_config = registry.get("config")
self._datasets = self._global_config.datasets
if isinstance(self._datasets, str):
self._datasets = self._datasets.split(",")
@classmethod
def config_path(cls):
return "configs/models/movie_mcan/defaults.yaml"
def build(self):
self.image_feature_dim = 2048
self._build_word_embedding()
self._init_text_embeddings("text")
self._init_feature_encoders("image")
self._init_feature_embeddings("image")
self._init_combine_layer("image", "text")
self._init_classifier(self._get_classifier_input_dim())
self._init_extras()
def _build_word_embedding(self):
assert len(self._datasets) > 0
text_processor = registry.get(self._datasets[0] + "_text_processor")
vocab = text_processor.vocab
self.word_embedding = vocab.get_embedding(torch.nn.Embedding, embedding_dim=300)
def _init_text_embeddings(self, attr: str = "text"):
if "embeddings" not in attr:
attr += "_embeddings"
module_config = self.config[attr]
embedding_type = module_config.type
embedding_kwargs = copy.deepcopy(module_config.params)
self._update_text_embedding_args(embedding_kwargs)
embedding = TextEmbedding(embedding_type, **embedding_kwargs)
embeddings_out_dim = embedding.text_out_dim
setattr(self, attr + "_out_dim", embeddings_out_dim)
setattr(self, attr, embedding)
def _update_text_embedding_args(self, args):
# Add model_data_dir to kwargs
args.model_data_dir = self.config.model_data_dir
def _init_feature_encoders(self, attr: str):
feat_encoder = self.config[attr + "_feature_encodings"]
feature_dim = self.config[attr + "_feature_dim"]
setattr(self, attr + "_feature_dim", feature_dim)
feat_encoder_config = copy.deepcopy(feat_encoder)
with omegaconf.open_dict(feat_encoder_config):
feat_encoder_config.params.model_data_dir = self.config.model_data_dir
feat_encoder_config.params.in_dim = feature_dim
feat_model = build_image_encoder(feat_encoder_config, direct_features=False)
setattr(self, attr + "_feature_dim", feat_model.out_dim)
setattr(self, attr + "_feature_encoders", feat_model)
def _init_feature_embeddings(self, attr: str):
embedding_kwargs = self.config[attr + "_feature_embeddings"]["params"]
setattr(
self, attr + "_feature_embeddings_out_dim", embedding_kwargs["hidden_dim"]
)
assert (
getattr(self, attr + "_feature_embeddings_out_dim")
== self.text_embeddings_out_dim
), "dim1: {}, dim2: {}".format(
getattr(self, attr + "_feature_embeddings_out_dim"),
self.text_embeddings_out_dim,
)
feature_embedding = TwoBranchEmbedding(
getattr(self, attr + "_feature_dim"), **embedding_kwargs
)
setattr(self, attr + "_feature_embeddings_list", feature_embedding)
def _get_embeddings_attr(self, attr: str):
embedding_attr1 = attr
if hasattr(self, attr + "_embeddings_out_dim"):
embedding_attr1 = attr + "_embeddings_out_dim"
else:
embedding_attr1 = attr + "_feature_embeddings_out_dim"
return embedding_attr1
def _init_combine_layer(self, attr1: str, attr2: str):
multi_modal_combine_layer = BranchCombineLayer(
getattr(self, self._get_embeddings_attr(attr1)),
getattr(self, self._get_embeddings_attr(attr2)),
)
setattr(
self,
attr1 + "_" + attr2 + "_multi_modal_combine_layer",
multi_modal_combine_layer,
)
def _init_classifier(self, combined_embedding_dim: int):
# TODO: Later support multihead
num_choices = registry.get(self._datasets[0] + "_num_final_outputs")
params = self.config["classifier"].get("params")
if params is None:
params = {}
self.classifier = ClassifierLayer(
self.config.classifier.type,
in_dim=combined_embedding_dim,
out_dim=num_choices,
**params,
)
def _init_extras(self):
self.inter_model = None
def get_optimizer_parameters(self, config: DictConfig) -> List[Dict[str, Any]]:
combine_layer = self.image_text_multi_modal_combine_layer
params = [
{"params": filter_grads(self.word_embedding.parameters())},
{
"params": filter_grads(
self.image_feature_embeddings_list.sga.parameters()
)
},
{
"params": filter_grads(
self.image_feature_embeddings_list.sga_pool.parameters()
)
},
{
"params": filter_grads(
self.image_feature_embeddings_list.cbn.parameters()
),
"lr": (
config.optimizer.params.lr * config.training.encoder_lr_multiply
),
},
{"params": filter_grads(self.text_embeddings.parameters())},
{"params": filter_grads(combine_layer.parameters())},
{"params": filter_grads(self.classifier.parameters())},
{"params": filter_grads(self.image_feature_encoders.parameters())},
]
return params
def get_mapping(self):
mapping = [
"word_embedding",
"image_feature_embeddings_list_sga",
"image_feature_embeddings_list_sga_pool",
"image_feature_embeddings_list_cbn",
"text_embeddings",
"combine_layer",
"classifier",
"image_feature_encoders",
]
return mapping
def _get_classifier_input_dim(self):
return self.image_text_multi_modal_combine_layer.out_dim
def process_text_embedding(
self, sample_list: Dict[str, Any], embedding_attr: str = "text_embeddings"
) -> Tuple[torch.Tensor, torch.Tensor]:
# Get "text" attribute in case of "text_embeddings" case
# and "context" attribute in case of "context_embeddings"
texts = getattr(sample_list, embedding_attr.split("_")[0])
# Get embedding models
text_embedding_model = getattr(self, embedding_attr)
# TODO: Move this logic inside
if isinstance(text_embedding_model, PreExtractedEmbedding):
text_embedding_total = text_embedding_model(sample_list.question_id)
else:
text_embedding_total, text_embedding_vec = text_embedding_model(
texts, sample_list.text_mask
)
return text_embedding_total, text_embedding_vec
def process_feature_embedding(
self,
attr: str,
sample_list: Dict[str, Any],
text_embedding_total: torch.Tensor,
text_embedding_vec: torch.Tensor,
extra: list = [],
batch_size_t: Optional[int] = None,
):
batch_size_t = (
sample_list.get_batch_size() if batch_size_t is None else batch_size_t
)
# Convert list of keys to the actual values
if hasattr(sample_list, "image"):
feature = sample_list.image
feature_encoder = getattr(self, attr + "_feature_encoders")
encoded_feature = feature_encoder(feature)
b, c, h, w = encoded_feature.shape
padded_feat = torch.zeros(
(b, c, 32, 32), dtype=torch.float, device=encoded_feature.device
)
padded_feat[:, :, :h, :w] = encoded_feature
encoded_feature = padded_feat
else:
feature = sample_list.image_feature_0
feature_encoder = getattr(self, attr + "_feature_encoders")
encoded_feature = feature_encoder(feature)
feature_embedding = getattr(self, attr + "_feature_embeddings_list")
feature_sga, feature_cbn = feature_embedding(
encoded_feature,
text_embedding_total,
text_embedding_vec,
None,
sample_list.text_mask,
)
return feature_sga, feature_cbn
def combine_embeddings(self, *args):
feature_names = args[0]
v1, v2, q = args[1]
layer = "_".join(feature_names) + "_multi_modal_combine_layer"
return getattr(self, layer)(v1, v2, q)
def calculate_logits(self, joint_embedding: torch.Tensor, **kwargs):
return self.classifier(joint_embedding)
def forward(self, sample_list: Dict[str, Any]) -> Dict[str, torch.Tensor]:
sample_list.text_mask = sample_list.text.eq(0)
sample_list.text = self.word_embedding(sample_list.text)
text_embedding_total, text_embedding_vec = self.process_text_embedding(
sample_list
)
feature_sga, feature_cbn = self.process_feature_embedding(
"image", sample_list, text_embedding_total, text_embedding_vec[:, 0]
)
joint_embedding = self.combine_embeddings(
["image", "text"], [feature_sga, feature_cbn, text_embedding_vec[:, 1]]
)
model_output = {"scores": self.calculate_logits(joint_embedding)}
return model_output
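# Hedged sketch (not part of the original file): process_feature_embedding above
# zero-pads the CNN feature map to a fixed 32x32 spatial grid before the
# two-branch embedding. The helper below isolates that padding step; the function
# name and default grid size are illustrative.
def _pad_feature_map_to_grid(encoded_feature: torch.Tensor, grid: int = 32) -> torch.Tensor:
    b, c, h, w = encoded_feature.shape
    padded = torch.zeros(
        (b, c, grid, grid), dtype=encoded_feature.dtype, device=encoded_feature.device
    )
    padded[:, :, :h, :w] = encoded_feature
    return padded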
| EXA-1-master | exa/models/mmf-main/mmf/models/movie_mcan.py |