Dataset schema: python_code (string, 0-992k chars), repo_name (string, 8-46 chars), file_path (string, 5-162 chars).
# Copyright (c) Facebook, Inc. and its affiliates.

import gc
import unittest

import torch
from mmf.models.mmbt import MMBT
from mmf.modules.hf_layers import undo_replace_with_jit
from mmf.utils.build import build_optimizer
from omegaconf import OmegaConf
from tests.test_utils import SimpleModel, skip_if_no_network


class TestOptimizers(unittest.TestCase):
    def setUp(self):
        self.config = OmegaConf.create(
            {"optimizer": {"type": "adam_w", "params": {"lr": 5e-5}}}
        )

    def tearDown(self):
        del self.config
        undo_replace_with_jit()
        gc.collect()

    def test_build_optimizer_simple_model(self):
        model = SimpleModel({"in_dim": 1})
        model.build()
        optimizer = build_optimizer(model, self.config)
        self.assertTrue(isinstance(optimizer, torch.optim.Optimizer))
        self.assertEqual(len(optimizer.param_groups), 1)

    @skip_if_no_network
    def test_build_optimizer_custom_model(self):
        model = MMBT.from_params()
        model.build()
        self.config.model = model.config.model
        self.config.model_config = model.config
        optimizer = build_optimizer(model, self.config)
        self.assertTrue(isinstance(optimizer, torch.optim.Optimizer))
        self.assertEqual(len(optimizer.param_groups), 2)
EXA-1-master
exa/models/mmf-main/tests/modules/test_optimizers.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/tests/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

import unittest

import mmf.modules.fusions as fusions
import torch


class TestModuleFusions(unittest.TestCase):
    def setUp(self):
        bsize = 2
        self.x = [torch.randn(bsize, 10), torch.randn(bsize, 20)]
        self.input_dims = [self.x[0].shape[-1], self.x[1].shape[-1]]
        self.output_dims = 2

    def _test_fusion(self, fusion):
        # Run the fusion on CPU and, if a GPU is available, on GPU as well.
        # The output should always have shape [batch_size, output_dims] == [2, 2].
        out = fusion(self.x)
        if torch.cuda.is_available():
            fusion.cuda()
            out = fusion([self.x[0].cuda(), self.x[1].cuda()])
        assert torch.Size([2, 2]) == out.shape

    def test_BlockFusion(self):
        self._test_fusion(fusions.Block(self.input_dims, self.output_dims, mm_dim=20))

    def test_BlockTucker(self):
        self._test_fusion(
            fusions.BlockTucker(self.input_dims, self.output_dims, mm_dim=20)
        )

    def test_Mutan(self):
        self._test_fusion(fusions.Mutan(self.input_dims, self.output_dims, mm_dim=20))

    def test_Tucker(self):
        self._test_fusion(fusions.Tucker(self.input_dims, self.output_dims, mm_dim=20))

    def test_MLB(self):
        self._test_fusion(fusions.MLB(self.input_dims, self.output_dims, mm_dim=20))

    def test_MFB(self):
        self._test_fusion(fusions.MFB(self.input_dims, self.output_dims, mm_dim=20))

    def test_MFH(self):
        self._test_fusion(fusions.MFH(self.input_dims, self.output_dims, mm_dim=20))

    def test_MCB(self):
        # MCB is exercised with a larger multimodal dimension in this test.
        self._test_fusion(fusions.MCB(self.input_dims, self.output_dims, mm_dim=100))

    def test_LinearSum(self):
        self._test_fusion(
            fusions.LinearSum(self.input_dims, self.output_dims, mm_dim=20)
        )

    def test_ConcatMLP(self):
        self._test_fusion(
            fusions.ConcatMLP(self.input_dims, self.output_dims, dimensions=[5, 5])
        )
EXA-1-master
exa/models/mmf-main/tests/modules/test_fusions.py
# Copyright (c) Facebook, Inc. and its affiliates.

import unittest

import torch
from mmf.modules.vit import ViTModel
from omegaconf import OmegaConf
from tests.test_utils import setup_proxy, skip_if_old_transformers
from torch import nn


@skip_if_old_transformers(min_version="4.5.0")
class TestViT(unittest.TestCase):
    def setUp(self):
        try:
            import transformers3.models.vit.modeling_vit as vit
        except ImportError:
            import transformers.models.vit.modeling_vit as vit

        setup_proxy()
        config = {
            "layer_norm_eps": 0.0001,
            "hidden_size": 768,
            "num_hidden_layers": 2,
            "do_patch_embeddings": False,
            "add_pooling_layer": False,
            "return_dict": True,
        }
        hf_config = vit.ViTConfig(**config)
        self.model = ViTModel(hf_config)

    def test_model_static_constructor_from_config(self):
        config = OmegaConf.create(
            {
                "pretrained_model_name": "google/vit-base-patch16-224",
                "do_patch_embeddings": False,
                "add_pooling_layer": False,
                "return_dict": True,
            }
        )
        pretrained_model, _ = ViTModel.from_config(config)
        embeddings = torch.rand(32, 197, 768)
        output = pretrained_model(embeddings, output_hidden_states=False)
        self.assertTrue(hasattr(output, "last_hidden_state"))
        self.assertEqual(output["last_hidden_state"].shape, torch.Size([32, 197, 768]))

    def test_model_init(self):
        self.assertTrue(isinstance(self.model, nn.Module))

    def test_model_forward(self):
        embeddings = torch.rand(32, 197, 768)
        output = self.model(embeddings, output_hidden_states=True)
        self.assertTrue(hasattr(output, "last_hidden_state"))
        self.assertEqual(output["last_hidden_state"].shape, torch.Size([32, 197, 768]))
        self.assertTrue(hasattr(output, "hidden_states"))
        self.assertEqual(len(output["hidden_states"]), 3)
EXA-1-master
exa/models/mmf-main/tests/modules/test_vit.py
# Copyright (c) Facebook, Inc. and its affiliates.

import unittest

from mmf.modules.hf_layers import replace_with_jit, undo_replace_with_jit

try:
    from transformers3.modeling_bert import BertSelfAttention
except ImportError:
    from transformers.modeling_bert import BertSelfAttention


class TestHFLayers(unittest.TestCase):
    def test_undo_replace_with_jit(self):
        original_function = BertSelfAttention.forward
        replace_with_jit()
        undo_replace_with_jit()
        self.assertTrue(BertSelfAttention.forward is original_function)
EXA-1-master
exa/models/mmf-main/tests/modules/test_hf_layers.py
# Copyright (c) Facebook, Inc. and its affiliates.
# flake8: noqa: F401

from . import datasets, models
EXA-1-master
exa/models/mmf-main/tests/data/user_dir/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# flake8: noqa: F401

from . import always_one
EXA-1-master
exa/models/mmf-main/tests/data/user_dir/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from tests.test_utils import NumbersDataset

DATASET_LEN = 20


@registry.register_builder("always_one")
class AlwaysOneBuilder(BaseDatasetBuilder):
    def __init__(self):
        super().__init__("always_one")

    def build(self, *args, **kwargs):
        pass

    @classmethod
    def config_path(cls):
        return "configs/always_one.yaml"

    def load(self, config, dataset_type="train", *args, **kwargs):
        dataset = NumbersDataset(DATASET_LEN, data_item_key="input", always_one=True)
        dataset.dataset_name = self.dataset_name
        dataset.dataset_type = dataset_type
        return dataset
EXA-1-master
exa/models/mmf-main/tests/data/user_dir/datasets/always_one.py
# Copyright (c) Facebook, Inc. and its affiliates.
# flake8: noqa: F401

from . import simple
EXA-1-master
exa/models/mmf-main/tests/data/user_dir/models/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from tests.test_utils import SimpleModel


@registry.register_model("simple")
class CustomSimpleModel(SimpleModel):
    @classmethod
    def config_path(cls):
        return "configs/simple.yaml"

    def forward(self, sample_list):
        return {"scores": self.classifier(sample_list.input)}
EXA-1-master
exa/models/mmf-main/tests/data/user_dir/models/simple.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# mmf documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 23 10:42:55 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import datetime

import pytorch_sphinx_theme
from mmf import version
from packaging.version import parse
from recommonmark.transform import AutoStructify

parsed_version = parse(version.__version__)

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    "sphinx.ext.intersphinx",
    "sphinxcontrib.programoutput",
    "recommonmark",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'

# The main toctree document.
main_doc = "index"

# General information about the project.
project = "MMF"
copyright = str(datetime.datetime.now().year) + ", Facebook AI Research"
author = "Facebook AI Research"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = parsed_version.base_version
# The full version, including alpha/beta/rc tags.
release = str(parsed_version)

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
templates_path = ["_templates"]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "includehidden": False,
    "canonical_url": "https://mmf.sh/api/",
    "pytorch_project": "docs",
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    "**": [
        "relations.html",  # needs 'show_related': True theme option to display
        "searchbox.html",
    ]
}

html_baseurl = "/"

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "mmfdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (main_doc, "mmf.tex", "MMF Documentation", "Facebook AI Research", "manual")
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(main_doc, "mmf", "MMF Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        main_doc,
        "mmf",
        "MMF Documentation",
        author,
        "mmf",
        "One line description of project.",
        "Miscellaneous",
    )
]

github_doc_root = "https://github.com/facebookresearch/mmf/tree/main"


# At the bottom of conf.py
def setup(app):
    app.add_config_value(
        "recommonmark_config",
        {
            "url_resolver": lambda url: github_doc_root + url,
            "auto_toc_tree_section": "Contents",
        },
        True,
    )
    app.add_transform(AutoStructify)
    app.add_css_file("css/customize.css")
EXA-1-master
exa/models/mmf-main/docs/source/conf.py
# Copyright (c) Facebook, Inc. and its affiliates.

import sys


__version__ = "1.0.0rc12"

msg = "MMF is only compatible with Python 3.6 and newer."
if sys.version_info < (3, 6):
    raise ImportError(msg)
EXA-1-master
exa/models/mmf-main/mmf/version.py
# Copyright (c) Facebook, Inc. and its affiliates.
# isort:skip_file
# flake8: noqa: F401

from mmf.utils.patch import patch_transformers

patch_transformers()

from mmf import common, datasets, models, modules, utils
from mmf.modules import losses, metrics, optimizers, poolers, schedulers
from mmf.version import __version__

__all__ = [
    "utils",
    "common",
    "modules",
    "datasets",
    "models",
    "losses",
    "poolers",
    "schedulers",
    "optimizers",
    "metrics",
]
EXA-1-master
exa/models/mmf-main/mmf/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
"""
In MMF, to add a new dataset, a dataset builder for it needs to be added.
A new dataset builder must inherit the ``BaseDatasetBuilder`` class and
implement the ``load`` and ``build`` functions.

``build`` is used to build a dataset when it is not available, e.g. by
downloading the ImDBs for a dataset. In the future, we plan to add a
``build`` command to ease the setup of MMF.

``load`` is used to load a dataset from a specific path. ``load`` needs to
return an instance of a subclass of ``mmf.datasets.base_dataset.BaseDataset``.

See the complete example for ``VQA2DatasetBuilder`` here_.

Example::

    from torch.utils.data import Dataset

    from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
    from mmf.common.registry import registry

    @registry.register_builder("my")
    class MyBuilder(BaseDatasetBuilder):
        def __init__(self):
            super().__init__("my")

        def load(self, config, dataset_type, *args, **kwargs):
            ...
            return Dataset()

        def build(self, config, dataset_type, *args, **kwargs):
            ...

.. _here: https://github.com/facebookresearch/mmf/blob/main/mmf/datasets/vqa/vqa2/builder.py
"""
import uuid
from typing import Optional

import pytorch_lightning as pl
from mmf.utils.build import build_dataloader_and_sampler
from mmf.utils.logger import log_class_usage
from omegaconf import DictConfig
from torch.utils.data import Dataset


# TODO(asg): Deprecate BaseDatasetBuilder after version release
class BaseDatasetBuilder(pl.LightningDataModule):
    """Base class for implementing dataset builders. See more information
    on top. Child class needs to implement ``build`` and ``load``.

    Args:
        dataset_name (str): Name of the dataset passed from child.
    """

    def __init__(self, dataset_name: Optional[str] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if dataset_name is None:
            # In case user doesn't pass it
            dataset_name = f"dataset_{uuid.uuid4().hex[:6]}"
        self.dataset_name = dataset_name
        self._train_dataset = None
        self._val_dataset = None
        self._test_dataset = None
        log_class_usage("DatasetBuilder", self.__class__)

    @property
    def dataset_name(self):
        return self._dataset_name

    @dataset_name.setter
    def dataset_name(self, dataset_name):
        self._dataset_name = dataset_name

    def prepare_data(self, config, *args, **kwargs):
        """
        NOTE: The caller should only call this on the main process in a
        distributed setting so that downloads and builds only happen on the
        main process and other processes can just load the result. Make sure
        to call synchronize afterwards to bring all processes in sync.

        Lightning automatically wraps the datamodule so that this is only
        called on the main node, but as an extra precaution (lightning can
        introduce bugs) we should always call this under the main process
        with extra checks on our side as well.
        """
        self.config = config
        self.build_dataset(config)

    def setup(self, stage: Optional[str] = None, config: Optional[DictConfig] = None):
        if config is None:
            config = self.config

        self.config = config
        self.train_dataset = self.load_dataset(config, "train")
        self.val_dataset = self.load_dataset(config, "val")
        self.test_dataset = self.load_dataset(config, "test")

    @property
    def train_dataset(self) -> Optional[Dataset]:
        return self._train_dataset

    @train_dataset.setter
    def train_dataset(self, dataset: Optional[Dataset]):
        self._train_dataset = dataset

    @property
    def val_dataset(self) -> Optional[Dataset]:
        return self._val_dataset

    @val_dataset.setter
    def val_dataset(self, dataset: Optional[Dataset]):
        self._val_dataset = dataset

    @property
    def test_dataset(self) -> Optional[Dataset]:
        return self._test_dataset

    @test_dataset.setter
    def test_dataset(self, dataset: Optional[Dataset]):
        self._test_dataset = dataset

    def build_dataset(self, config, dataset_type="train", *args, **kwargs):
        """
        Similar to the load function, used by MMF to build a dataset the first
        time when it is not available. This internally calls the ``build``
        function. Override that function in your child class.

        NOTE: The caller should only call this on the main process in a
        distributed setting so that downloads and builds only happen on the
        main process and other processes can just load the result. Make sure
        to call synchronize afterwards to bring all processes in sync.

        Args:
            config (DictConfig): Configuration of this dataset loaded from config.
            dataset_type (str): Type of dataset, train|val|test

        .. warning:: DO NOT OVERRIDE in child class. Instead override ``build``.
        """
        self.build(config, dataset_type, *args, **kwargs)

    def load_dataset(self, config, dataset_type="train", *args, **kwargs):
        """Main load function used by MMF. This will internally call the
        ``load`` function. Calls ``init_processors`` on the dataset returned
        from ``load``.

        Args:
            config (DictConfig): Configuration of this dataset loaded from config.
            dataset_type (str): Type of dataset, train|val|test

        Returns:
            dataset (BaseDataset): Dataset containing data to be trained on

        .. warning:: DO NOT OVERRIDE in child class. Instead override ``load``.
        """
        dataset = self.load(config, dataset_type, *args, **kwargs)
        if dataset is not None and hasattr(dataset, "init_processors"):
            # Checking for init_processors allows us to load some datasets
            # which don't have processors and don't inherit from BaseDataset
            dataset.init_processors()
        return dataset

    def load(self, config, dataset_type="train", *args, **kwargs):
        """
        This is used to prepare the dataset and load it from a path.
        Override this method in your child dataset builder class.

        Args:
            config (DictConfig): Configuration of this dataset loaded from config.
            dataset_type (str): Type of dataset, train|val|test

        Returns:
            dataset (BaseDataset): Dataset containing data to be trained on
        """
        raise NotImplementedError(
            "This dataset builder doesn't implement a load method"
        )

    @classmethod
    def config_path(cls):
        return None

    def build(self, config, dataset_type="train", *args, **kwargs):
        """
        This is used to build a dataset the first time.
        Implement this method in your child dataset builder class.

        Args:
            config (DictConfig): Configuration of this dataset loaded from config.
            dataset_type (str): Type of dataset, train|val|test
        """
        raise NotImplementedError(
            "This dataset builder doesn't implement a build method"
        )

    def build_dataloader(
        self, dataset_instance: Optional[Dataset], dataset_type: str, *args, **kwargs
    ):
        if dataset_instance is None:
            raise TypeError(
                f"dataset instance for {dataset_type} hasn't been set and is None"
            )
        dataset_instance.dataset_type = dataset_type
        dataloader, _ = build_dataloader_and_sampler(dataset_instance, self.config)
        return dataloader

    def train_dataloader(self, *args, **kwargs):
        return self.build_dataloader(self.train_dataset, "train")

    def val_dataloader(self, *args, **kwargs):
        return self.build_dataloader(self.val_dataset, "val")

    def test_dataloader(self, *args, **kwargs):
        return self.build_dataloader(self.test_dataset, "test")

    def teardown(self, *args, **kwargs) -> None:
        pass
EXA-1-master
exa/models/mmf-main/mmf/datasets/base_dataset_builder.py
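Usage note (not part of the repo): the builder above doubles as a PyTorch Lightning datamodule, so the intended call order mirrors the LightningDataModule lifecycle. A minimal sketch, assuming the hypothetical `MyBuilder` from the module docstring and a valid MMF `config`:

# Hypothetical sketch of the datamodule lifecycle implemented above.
builder = MyBuilder()
builder.prepare_data(config)    # main process only: build_dataset() -> build()
builder.setup(config=config)    # load_dataset() -> load() for train/val/test
train_loader = builder.train_dataloader()  # wraps train_dataset via build_dataloader_and_sampler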
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.common.sample import SampleList
from mmf.utils.general import get_current_device
from torch.utils.data.dataset import Dataset


class BaseDataset(Dataset):
    """Base class for implementing a dataset. Inherits from PyTorch's Dataset
    class but adds some custom functionality on top. Processors mentioned in
    the configuration are automatically initialized for the end user.

    Args:
        dataset_name (str): Name of your dataset, used as a representative
            in text strings
        dataset_type (str): Type of your dataset. Normally, train|val|test
        config (DictConfig): Configuration for the current dataset
    """

    def __init__(self, dataset_name, config, dataset_type="train", *args, **kwargs):
        super().__init__()
        if config is None:
            config = {}
        self.config = config
        self._dataset_name = dataset_name
        self._dataset_type = dataset_type
        self._global_config = registry.get("config")
        self._device = get_current_device()
        self.use_cuda = "cuda" in str(self._device)

    def load_item(self, idx):
        """
        Implement if you need to separately load the item and cache it.

        Args:
            idx (int): Index of the sample to be loaded.
        """
        return

    def __getitem__(self, idx):
        """
        Basically, __getitem__ of a torch dataset.

        Args:
            idx (int): Index of the sample to be loaded.
        """
        raise NotImplementedError

    def init_processors(self):
        if "processors" not in self.config:
            return
        from mmf.utils.build import build_processors

        extra_params = {"data_dir": self.config.data_dir}
        reg_key = f"{self._dataset_name}_{{}}"
        processor_dict = build_processors(
            self.config.processors, reg_key, **extra_params
        )
        for processor_key, processor_instance in processor_dict.items():
            setattr(self, processor_key, processor_instance)
            full_key = reg_key.format(processor_key)
            registry.register(full_key, processor_instance)

    def prepare_batch(self, batch):
        """
        Can be overridden in your child class. Not supported with the
        Lightning trainer.

        Prepare batch for passing to the model. Whatever is returned from
        here will be directly passed to the model's forward function.
        Currently moves the batch to the proper device.

        Args:
            batch (SampleList): sample list containing the currently loaded batch

        Returns:
            sample_list (SampleList): Returns a sample representing the
            currently loaded batch
        """
        # Should be a SampleList
        if not isinstance(batch, SampleList):
            # Try converting to SampleList
            batch = SampleList(batch)
        batch = batch.to(self._device)
        return batch

    @property
    def dataset_type(self):
        return self._dataset_type

    @property
    def name(self):
        return self._dataset_name

    @property
    def dataset_name(self):
        return self._dataset_name

    @dataset_name.setter
    def dataset_name(self, name):
        self._dataset_name = name

    @dataset_type.setter
    def dataset_type(self, dataset_type):
        self._dataset_type = dataset_type

    def format_for_prediction(self, report):
        return []

    def verbose_dump(self, *args, **kwargs):
        return

    def visualize(self, num_samples=1, *args, **kwargs):
        raise NotImplementedError(
            f"{self.dataset_name} doesn't implement visualize function"
        )
EXA-1-master
exa/models/mmf-main/mmf/datasets/base_dataset.py
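Illustration (not from the repo): a minimal BaseDataset subclass, showing where __getitem__ fits and how processors declared under `config.processors` become attributes after init_processors(). All names below are hypothetical:

class MyDataset(BaseDataset):
    def __init__(self, config, dataset_type="train"):
        super().__init__("my_dataset", config, dataset_type)
        self.data = list(range(10))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # If the config declared a "text_processor" under processors,
        # init_processors() would have attached it as self.text_processor.
        return {"input": self.data[idx]}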
# Copyright (c) Facebook, Inc. and its affiliates.

import functools
import types

from torch.utils.data import ConcatDataset


class MMFConcatDataset(ConcatDataset):
    # These functions should only be called once even if they return nothing
    _SINGLE_CALL_FUNCS = []

    def __init__(self, datasets):
        super().__init__(datasets)
        self._dir_representation = dir(self)

    def __getattr__(self, name):
        if "_dir_representation" in self.__dict__ and name in self._dir_representation:
            return getattr(self, name)
        elif "datasets" in self.__dict__ and hasattr(self.datasets[0], name):
            attr = getattr(self.datasets[0], name)
            # Check if the current attribute is a bound method
            if isinstance(attr, types.MethodType):
                # if it is, we want to call this function for
                # each of the child datasets
                attr = functools.partial(self._call_all_datasets_func, name)
            return attr
        else:
            raise AttributeError(name)

    def _get_single_call_funcs(self):
        return MMFConcatDataset._SINGLE_CALL_FUNCS

    def _call_all_datasets_func(self, name, *args, **kwargs):
        for dataset in self.datasets:
            value = getattr(dataset, name)(*args, **kwargs)
            if value is not None:
                # TODO: Log a warning here
                return value
                # raise RuntimeError("Functions returning values can't be "
                #                    "called through MMFConcatDataset")
            if (
                hasattr(dataset, "get_single_call_funcs")
                and name in dataset.get_single_call_funcs()
            ):
                return
EXA-1-master
exa/models/mmf-main/mmf/datasets/concat_dataset.py
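To make the attribute forwarding above concrete, a minimal sketch (hypothetical datasets, not from the repo):

concat = MMFConcatDataset([ds_a, ds_b])  # ds_a, ds_b: BaseDataset-like objects, assumed
name = concat.dataset_name     # plain attribute: resolved on ds_a, the first child
concat.init_processors()       # bound method: fanned out to ds_a and ds_b in turn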
# Copyright (c) Facebook, Inc. and its affiliates.

import math
from typing import Dict

from mmf.datasets import iteration_strategies
from mmf.datasets.multi_dataset_loader import MultiDataLoader
from torch.utils.data import DataLoader


class LightningMultiDataLoader(MultiDataLoader):
    """
    LightningMultiDataLoader is used by the DatasetLoader class to load
    multiple datasets at a more granular level. This class overrides some
    functions from MultiDataLoader to make them lightning trainer compatible.
    """

    def __init__(
        self,
        loaders: Dict[str, DataLoader],
        iteration_strategy: iteration_strategies.IterationStrategy = None,
    ):
        super().__init__(loaders, iteration_strategy)

    def has_len(self):
        for loader in self.loaders.values():
            if not hasattr(loader, "dataset"):
                continue

            dataset_instance = loader.dataset
            if not hasattr(dataset_instance, "__len__"):
                return False
        return True

    def set_lengths(self):
        self._total_length = 0
        if not self.has_len():
            self._total_length = math.inf
            return

        for loader in self.loaders.values():
            # Some loaders might not have the dataset attribute
            # set; in this case we won't consider them in
            # dataset lengths.
            if not hasattr(loader, "dataset"):
                continue

            dataset_instance = loader.dataset

            if hasattr(dataset_instance, "__len__"):
                dataset_instance_length = len(dataset_instance)
                assert dataset_instance_length, f"dataset: {self.dataset_type} is empty"
                self._total_length += dataset_instance_length
EXA-1-master
exa/models/mmf-main/mmf/datasets/lightning_multi_dataset_loader.py
# Copyright (c) Facebook, Inc. and its affiliates.

import logging

from mmf.datasets.lightning_multi_dataset_loader import LightningMultiDataLoader
from mmf.datasets.multi_datamodule import MultiDataModule
from mmf.datasets.multi_dataset_loader import MultiDataLoader
from omegaconf import DictConfig

logger = logging.getLogger(__name__)


class LightningMultiDataModule(MultiDataModule):
    def __init__(self, config: DictConfig):
        super().__init__(config)

    def _build_multi_dataloader(self, dataset_type: str = "train") -> MultiDataLoader:
        loader_args = {}

        for key, datamodule in self.datamodules.items():
            loader_args[key] = getattr(datamodule, f"{dataset_type}_dataloader")()

            if not hasattr(loader_args[key], "dataset"):
                loader_args[key].dataset = getattr(
                    datamodule, f"{dataset_type}_dataset"
                )
        iteration_strategy = self._build_iteration_strategy(self.config, loader_args)
        loader = LightningMultiDataLoader(loader_args, iteration_strategy)
        return loader
EXA-1-master
exa/models/mmf-main/mmf/datasets/lightning_multi_datamodule.py
# Copyright (c) Facebook, Inc. and its affiliates.
"""
MultiDatasetLoader class is used by the DatasetLoader class to load
multiple datasets at a more granular level.
"""
import logging
import warnings
from typing import Dict, Iterator

import torch
from mmf.common.sample import convert_batch_to_sample_list, SampleList
from mmf.datasets import iteration_strategies
from mmf.utils.build import build_dataloader_and_sampler, build_dataset
from mmf.utils.dataset import dataset_list_from_config
from mmf.utils.distributed import (
    broadcast_scalar,
    get_world_size,
    is_dist_initialized,
    is_main,
    is_xla,
)
from mmf.utils.general import get_batch_size, get_current_device
from omegaconf import OmegaConf
from torch.utils.data.dataloader import DataLoader, Sampler

logger = logging.getLogger(__name__)


class MultiDataLoader:
    def __init__(
        self,
        loaders: Dict[str, DataLoader],
        iteration_strategy: iteration_strategies.IterationStrategy = None,
    ):
        if loaders is None or len(loaders) == 0:
            warnings.warn(
                "Empty loaders passed into MultiDataLoader. This can have "
                "unintended consequences."
            )

        if iteration_strategy is None:
            iteration_strategy = iteration_strategies.RoundRobinIterationStrategy(
                OmegaConf.create(), loaders
            )

        self._iteration_strategy = iteration_strategy
        self._loaders = loaders
        self._is_main = is_main()
        self._num_datasets = len(self.loaders)
        self.dataset_list = list(loaders.keys())
        self._iterators = {}
        self._finished_iterators = {}

        self.current_index = 0

        self.set_lengths()
        self.set_samplers()

    def set_lengths(self):
        self._total_length = 0
        for loader in self.loaders.values():
            # Some loaders might not have the dataset attribute
            # set; in this case we won't consider them in
            # dataset lengths.
            if not hasattr(loader, "dataset"):
                continue

            dataset_instance = loader.dataset

            if hasattr(dataset_instance, "__len__"):
                dataset_instance_length = len(dataset_instance)
                assert dataset_instance_length, f"dataset: {self.dataset_type} is empty"
                self._total_length += dataset_instance_length

    def set_samplers(self):
        self.samplers: Dict[str, Sampler] = {}
        for key, loader in self.loaders.items():
            if hasattr(loader, "sampler"):
                self.samplers[key] = loader.sampler

    def get_datasets(self):
        return [loader.dataset for loader in self.loaders.values()]

    @property
    def loaders(self) -> Dict[str, DataLoader]:
        return self._loaders

    @property
    def samplers(self) -> Dict[str, Sampler]:
        return self._samplers

    @samplers.setter
    def samplers(self, samplers: Dict[str, Sampler]):
        self._samplers = samplers

    @property
    def num_datasets(self) -> int:
        return self._num_datasets

    @property
    def iterators(self) -> Dict[str, Iterator[SampleList]]:
        return self._iterators

    @iterators.setter
    def iterators(self, iterators: Dict[str, Iterator[SampleList]]):
        self._iterators = iterators

    @property
    def current_loader(self) -> DataLoader:
        return self.loaders[self.current_dataset_name]

    @property
    def iteration_strategy(self) -> iteration_strategies.IterationStrategy:
        return self._iteration_strategy

    @property
    def current_iterator(self) -> DataLoader:
        return self.iterators[self.current_dataset_name]

    @property
    def current_dataset_name(self) -> str:
        return self.dataset_list[self.current_index]

    @property
    def current_dataset(self) -> torch.utils.data.Dataset:
        if hasattr(self.current_loader, "dataset"):
            return self.current_loader.dataset
        else:
            return None

    @property
    def first_loader(self) -> DataLoader:
        return list(self.loaders.values())[0]

    def __len__(self) -> int:
        # Since this is an iterator, we need to return the total length
        # == number of batches, and as get_batch_size returns the per-GPU
        # batch size, it needs to be multiplied by the world size
        batch_size = get_batch_size() * get_world_size()
        # Changed the length to accommodate drop_last == True
        # drop_last is required if the batch is split across multiple cores;
        # some of the cores may not have enough examples.
        if is_xla():
            logging.info(
                "drop_last is set to True to avoid uneven dimension shapes "
                "across cores."
            )
            return (self._total_length) // batch_size
        else:
            # This assumes drop_last=False for all loaders. See also
            # build_dataloader_and_sampler().
            return (self._total_length + batch_size - 1) // batch_size

    def __iter__(self):
        # Clear off old iterators
        self._finished_iterators = {}
        self.iterators = {}

        for key, loader in self.loaders.items():
            self.iterators[key] = iter(loader)

        self.change_dataloader()

        return self

    def __next__(self) -> SampleList:
        """The next batch is calculated using the following logic.

        The currently chosen iterator is set in the change_dataloader function,
        based on the chosen iteration strategy, which is called every time
        prepare_batch is called.

        If we get the next batch from the iterator without any StopIteration
        exception, we return it as is. Otherwise, there are two cases:

        1. In some iteration strategies (e.g. size proportional), each dataset
        needs to see the same number of epochs at any given time, so we need
        to raise the StopIteration exception only when all iterators are
        finished. This in turn causes __iter__ to reignite all of the
        iterators. The code will not reach __iter__ unless all iterators are
        exhausted. An iteration strategy should specify this behavior through
        the `should_exhaust_all_iterators` property.

        2. In other iteration strategies, epochs don't make sense. Think of a
        case of random (equal) proportional sampling for datasets x and y
        where x is half the size of y. When x completes its 2nd epoch, y will
        have completed only 1 epoch. **So please don't use max_epochs or
        epoch-based training in this case as it won't be honored**. If an
        iterator is finished, we just reignite it in this case, and the
        finished iterators variable isn't used. This means this case will
        never reach the __iter__ function again.

        Returns:
            SampleList: sample list instance from the currently selected dataset
        """
        try:
            next_batch = next(self.current_iterator)
        except StopIteration:
            if self.iteration_strategy.should_exhaust_all_iterators:
                self._finished_iterators[self.current_dataset_name] = 1

                if len(self._finished_iterators) == self.num_datasets:
                    raise
                else:
                    self.change_dataloader()
                next_batch = next(self.current_iterator)
            else:
                iterator = iter(self.current_loader)
                self.iterators[self.current_dataset_name] = iterator
                next_batch = next(self.current_iterator)

        # Save dataset name and dataset type beforehand as
        # prepare_batch will change the current index
        current_dataset_name = self.current_dataset_name
        current_dataset_type = self.current_dataset.dataset_type

        next_batch = self.prepare_batch(next_batch)
        next_batch = convert_batch_to_sample_list(next_batch)
        next_batch.dataset_name = current_dataset_name
        next_batch.dataset_type = current_dataset_type
        return next_batch

    def change_dataloader(self):
        choice = 0

        if self.num_datasets <= 1:
            self.current_index = choice
            return

        if self._is_main:
            choice = self.iteration_strategy()

            # self._finished_iterators will always be empty in case of
            # non-proportional (equal) sampling
            while self.dataset_list[choice] in self._finished_iterators:
                choice = self.iteration_strategy()

        choice = broadcast_scalar(choice, 0, device=get_current_device())
        self.current_index = choice

    def prepare_batch(self, batch: SampleList) -> SampleList:
        if self.current_dataset and hasattr(self.current_dataset, "prepare_batch"):
            batch = self.current_dataset.prepare_batch(batch)

        self.change_dataloader()
        return batch

    def seed_sampler(self, epoch: int):
        if is_dist_initialized():
            for sampler in self.samplers.values():
                if sampler is not None and hasattr(sampler, "set_epoch"):
                    sampler.set_epoch(epoch)


# TODO: Deprecate in favor of MultiDataModule
class MultiDatasetLoader(MultiDataLoader):
    """
    MultiDatasetLoader class that is used for training on multiple datasets
    together.
    """

    def __init__(self, dataset_type: str = "train"):
        self._dataset_type = dataset_type
        self._datasets = []
        super().__init__({})

    @property
    def dataset_type(self):
        return self._dataset_type

    @property
    def datasets(self):
        return self._datasets

    def load(self, config):
        self.build_datasets(config)
        self.build_dataloaders()
        self.set_lengths()

    def build_datasets(self, config):
        self._datasets = []
        self.config = config
        self._given_datasets = dataset_list_from_config(self.config)

        for dataset in self._given_datasets:
            if dataset in self.config.dataset_config:
                dataset_config = self.config.dataset_config[dataset]
            else:
                warnings.warn(
                    f"Dataset {dataset} is missing from dataset_config"
                    + " in config. Proceeding with empty config."
                )
                dataset_config = OmegaConf.create()

            dataset_instance = build_dataset(dataset, dataset_config, self.dataset_type)

            if dataset_instance is None:
                continue

            self.datasets.append(dataset_instance)
            self.dataset_list.append(dataset)

        self._num_datasets = len(self.datasets)
        self.current_index = 0

        self._infer_dataset_probabilities()

    def build_dataloaders(self):
        assert len(self._datasets) > 0, "Call build_datasets first"

        for dataset_instance in self.datasets:
            loader_instance, _ = build_dataloader_and_sampler(
                dataset_instance, self.config.training
            )
            sampler_instance = loader_instance.sampler
            self.loaders[dataset_instance.name] = loader_instance
            self.samplers[dataset_instance.name] = sampler_instance

        # current_loader is a read-only property derived from current_index
        # (reset in build_datasets), so there is nothing else to set here.

    def verbose_dump(self, *args, **kwargs):
        self.current_dataset.verbose_dump(*args, **kwargs)

    # Kept for backwards compatibility for now
    # TODO: Remove in future.
    def _infer_dataset_probabilities(self):
        from mmf.utils.configuration import get_global_config

        training = get_global_config("training")
        proportional_sampling = training.get(
            "dataset_size_proportional_sampling", True
        )

        if proportional_sampling is True:
            strategy = iteration_strategies.SizeProportionalIterationStrategy
            self._iteration_strategy = strategy(OmegaConf.create(), self.loaders)
        else:
            self._iteration_strategy = iteration_strategies.RandomIterationStrategy(
                OmegaConf.create(), self.loaders
            )

        multitasking = get_global_config("multitasking")
        multitasking_enabled = multitasking.get("enabled", False)

        assert (
            proportional_sampling is True or training.get("max_epochs", None) is None
        ), "Epoch based training can only be used with size proportional sampling"

        assert not (proportional_sampling and multitasking_enabled), (
            "Multitasking (manually-specified) per-dataset ratios cannot be used "
            "with size proportional sampling"
        )

        if multitasking_enabled and "sampling_ratios" in multitasking:
            self._iteration_strategy = iteration_strategies.RatiosIterationStrategy(
                OmegaConf.create(
                    {
                        "sampling_ratios": multitasking.sampling_ratios,
                        "datasets": self._given_datasets,
                    }
                ),
                self._loaders,
            )
        elif proportional_sampling is True:
            strategy = iteration_strategies.SizeProportionalIterationStrategy
            self._iteration_strategy = strategy(OmegaConf.create(), self.loaders)
        else:
            self._iteration_strategy = iteration_strategies.RandomIterationStrategy(
                OmegaConf.create(), self.loaders
            )
EXA-1-master
exa/models/mmf-main/mmf/datasets/multi_dataset_loader.py
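A minimal sketch (assumed DataLoaders, not from the repo) of how MultiDataLoader combines per-dataset loaders inside an MMF setup; with no strategy passed, __init__ above falls back to round-robin:

loader = MultiDataLoader({"vqa2": vqa2_loader, "coco": coco_loader})
for batch in loader:
    # __next__ tags each SampleList with the dataset it came from
    print(batch.dataset_name, batch.dataset_type)
    break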
# Copyright (c) Facebook, Inc. and its affiliates.

import collections
import os

from mmf.datasets.base_dataset import BaseDataset
from mmf.datasets.databases.annotation_database import AnnotationDatabase
from mmf.datasets.databases.features_database import FeaturesDatabase
from mmf.datasets.databases.image_database import ImageDatabase


class MMFDataset(BaseDataset):
    """This dataset is useful for external open source datasets, which
    usually have annotation files, images and features (which we generate).
    The dataset takes care of creating the annotation db, features db and
    image db if the configuration follows a set format. Images and features
    can each be optionally enabled. The class has a resources method which
    can be overridden to download data. More details to come.
    """

    def __init__(
        self,
        dataset_name,
        config,
        dataset_type="train",
        index=0,
        annotation_database=AnnotationDatabase,
        *args,
        **kwargs,
    ):
        super().__init__(dataset_name, config, dataset_type, *args, **kwargs)
        self._index = index
        self.annotation_database = annotation_database
        self.annotation_db = self.build_annotation_db()

        self._use_images = self.config.get("use_images", False)
        if self._use_images:
            self.image_db = self.build_image_db()

        self._use_features = self.config.get("use_features", False)
        if self._use_features:
            self.features_db = self.build_features_db()

    def build_annotation_db(self):
        annotation_path = self._get_path_based_on_index(
            self.config, "annotations", self._index
        )
        return self.annotation_database(
            self.config, annotation_path, self.dataset_type
        )

    def build_features_db(self):
        features_path = self._get_path_based_on_index(
            self.config, "features", self._index
        )
        return FeaturesDatabase(
            self.config, features_path, annotation_db=self.annotation_db
        )

    def build_image_db(self):
        image_path = self._get_path_based_on_index(self.config, "images", self._index)
        return ImageDatabase(self.config, image_path, annotation_db=self.annotation_db)

    def _get_path_based_on_index(self, config, attribute, index):
        if attribute not in config:
            raise ValueError(f"{attribute} not present in config")

        config = config.get(attribute, None)

        if (
            self.dataset_type not in config
            or len(config.get(self.dataset_type, [])) == 0
        ):
            raise ValueError(f"No {attribute} present for type {self.dataset_type}")

        paths = config[self.dataset_type]

        if isinstance(paths, str):
            selected_path = paths
        else:
            assert isinstance(paths, collections.abc.MutableSequence)
            selected_path = paths[self._index]

        selected_path = self._add_root_dir(selected_path)

        return selected_path

    def _add_root_dir(self, path):
        path = path.split(",")
        for idx, p in enumerate(path):
            path[idx] = os.path.join(self.config.data_dir, p)

        return ",".join(path)

    def __len__(self):
        return len(self.annotation_db)
EXA-1-master
exa/models/mmf-main/mmf/datasets/mmf_dataset.py
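For reference, a sketch of the dataset_config shape that `_get_path_based_on_index` above expects: per-split path lists under annotations/features/images, selected by the `index` argument and joined onto `data_dir`. All paths below are hypothetical:

from omegaconf import OmegaConf

config = OmegaConf.create(
    {
        "data_dir": "/path/to/data",
        "use_features": True,
        "annotations": {"train": ["my_dataset/defaults/annotations/train.npy"]},
        "features": {"train": ["my_dataset/defaults/features/train.lmdb"]},
    }
)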
# Copyright (c) Facebook, Inc. and its affiliates.

import logging
import warnings
from dataclasses import dataclass
from typing import Dict

import numpy as np
from mmf.common.registry import registry
from mmf.utils.configuration import get_global_config
from mmf.utils.dataset import dataset_list_from_config
from omegaconf import MISSING, OmegaConf
from torch.utils.data import DataLoader

logger = logging.getLogger(__name__)


class IterationStrategy:
    """
    Base class for defining iteration strategies that will be used for
    iterating over multiple datasets during multitasking.

    An IterationStrategy implementation should implement a `__call__` method
    which returns the index of the dataset from which the next batch must be
    pulled.

    A class can also define a `should_exhaust_all_iterators` property, which
    defines whether all iterators should be exhausted before reigniting the
    next batch of iterators. For example, in the size proportional iteration
    strategy, all iterators must be finished before starting a new round so
    that all of them get an equal opportunity to present themselves according
    to their size.

    Args:
        config (Config): Object of type Config which should be defined for
            each iteration strategy for configurable parameters.
        dataloaders (Dict[str, DataLoader]): A dictionary containing a mapping
            from dataset key to its dataloader.

    Usage::

        from dataclasses import dataclass
        from mmf.common.registry import registry
        from mmf.datasets.iterators import IterationStrategy


        @registry.register_iteration_strategy("my_iteration_strategy")
        class MyStrategy(IterationStrategy):
            @dataclass
            class Config:
                name: str = "my_strategy"

            def __init__(self, config, dataloader):
                ...
    """

    @dataclass
    class Config:
        name: str = MISSING

    def __init__(
        self, config: Config, dataloaders: Dict[str, DataLoader], *args, **kwargs
    ):
        config = OmegaConf.merge(OmegaConf.structured(self.Config), config)
        self.config = config
        self.dataloaders = dataloaders

    @classmethod
    def from_params(cls, dataloaders: Dict[str, DataLoader], **kwargs):
        config = OmegaConf.structured(cls.Config(**kwargs))
        return cls(config, dataloaders)

    @property
    def should_exhaust_all_iterators(self) -> bool:
        return False

    def _check_not_epoch_training(self):
        """
        Having this allows easy override of the strategy in non-MMF
        use cases
        """
        training = get_global_config("training")
        assert (
            training.get("max_epochs", None) is None
        ), f"{self.__class__.__name__} doesn't make sense with epoch based training"

    def __call__(self, *args, **kwargs):
        raise NotImplementedError("__call__ hasn't been implemented")


@registry.register_iteration_strategy("constant")
class ConstantIterationStrategy(IterationStrategy):
    """
    Always returns a constant index. Useful for mimicking single-task
    training in a multitask setup, for verification or default purposes.

    The index to be returned can be specified in the config parameter as
    `idx`.
    """

    @dataclass
    class Config(IterationStrategy.Config):
        name: str = "constant"
        idx: int = 0

    def __init__(
        self, config: Config, dataloaders: Dict[str, DataLoader], *args, **kwargs
    ):
        super().__init__(config, dataloaders, *args, **kwargs)
        self._idx = self.config.idx

    @property
    def should_exhaust_all_iterators(self) -> bool:
        return True

    def __call__(self, *args, **kwargs):
        return self._idx


@registry.register_iteration_strategy("round_robin")
class RoundRobinIterationStrategy(IterationStrategy):
    """
    Samples datasets one by one in round robin fashion.

    The start index can be specified in the config as `start_idx`.

    Note that MMF defaults to size proportional sampling for the validation
    and testing splits, as round robin doesn't make sense there: those splits
    need to finish one complete epoch.
    """

    @dataclass
    class Config(IterationStrategy.Config):
        name: str = "round_robin"
        start_idx: int = 0

    def __init__(
        self, config: Config, dataloaders: Dict[str, DataLoader], *args, **kwargs
    ):
        super().__init__(config, dataloaders, *args, **kwargs)
        self._check_not_epoch_training()

        if "start_idx" in self.config:
            self._current_idx = self.config.start_idx

    def __call__(self, *args, **kwargs):
        nxt = self._current_idx
        self._current_idx = (self._current_idx + 1) % len(self.dataloaders)
        return nxt


@registry.register_iteration_strategy("random")
class RandomIterationStrategy(IterationStrategy):
    """
    Samples a random dataset uniformly each time it is called.

    Follows the same validation/testing strategy as RoundRobin.
    """

    @dataclass
    class Config(IterationStrategy.Config):
        name: str = "random"

    def __init__(
        self, config: Config, dataloaders: Dict[str, DataLoader], *args, **kwargs
    ):
        super().__init__(config, dataloaders, *args, **kwargs)
        self._check_not_epoch_training()

    def __call__(self, *args, **kwargs):
        choice = np.random.choice(len(self.dataloaders), 1)[0]
        return choice


@registry.register_iteration_strategy("size_proportional")
class SizeProportionalIterationStrategy(IterationStrategy):
    """
    Samples an index based on the size of each dataset. Bigger datasets are
    sampled more often, and this strategy requires completing all iterators
    before starting new ones. This is the default in MMF.
    """

    @dataclass
    class Config(IterationStrategy.Config):
        name: str = "size_proportional"

    def __init__(
        self, config: Config, dataloaders: Dict[str, DataLoader], *args, **kwargs
    ):
        super().__init__(config, dataloaders, *args, **kwargs)
        self._per_dataset_lengths = []
        self._total_length = 0

        for loader in self.dataloaders.values():
            # Some loaders might not have the dataset attribute
            # set; in this case we need to fail gracefully as we can't
            # calculate lengths.
            assert hasattr(loader, "dataset"), (
                "loaders need dataset objects to work with "
                + "'size_proportional' sampling"
            )

            dataset_instance = loader.dataset

            assert hasattr(dataset_instance, "__len__"), (
                "all datasets should have __len__ defined "
                + "to work with proportional sampling iterator"
            )
            dataset_instance_length = len(dataset_instance)
            assert (
                dataset_instance_length
            ), f"dataset: {dataset_instance.dataset_type} is empty"
            self._per_dataset_lengths.append(dataset_instance_length)
            self._total_length += dataset_instance_length

        self._dataset_probabilities = self._per_dataset_lengths[:]
        self._dataset_probabilities = [
            prob / self._total_length for prob in self._dataset_probabilities
        ]

    def __call__(self, *args, **kwargs):
        choice = np.random.choice(
            len(self.dataloaders), 1, p=self._dataset_probabilities
        )[0]
        return choice

    @property
    def should_exhaust_all_iterators(self):
        return True


@registry.register_iteration_strategy("ratios")
class RatiosIterationStrategy(IterationStrategy):
    """
    Samples based on ratios specified via the `sampling_ratios` parameter
    in the config.

    Follows the same validation/testing strategy as RoundRobin.

    `sampling_ratios` defines a dictionary mapping a dataset key to a float
    ratio specifying how often the dataset should be sampled. The floats
    together should sum to one.

    `datasets` is a list of datasets that will be sampled. This should be a
    subset of, or the same as, `sampling_ratios.keys()`.
    """

    @dataclass
    class Config(IterationStrategy.Config):
        name: str = "ratios"
        sampling_ratios: Dict[str, float] = MISSING

    def __init__(
        self, config: Config, dataloaders: Dict[str, DataLoader], *args, **kwargs
    ):
        super().__init__(config, dataloaders, *args, **kwargs)
        self._check_not_epoch_training()
        given_datasets = self._get_given_datasets()
        sampling_ratios = self.config.get("sampling_ratios", {})
        probabilities = []
        for dataset in given_datasets:
            assert (
                dataset in sampling_ratios
            ), f"{dataset} must be specified in sampling_ratios param for multitasking"
            probabilities.append(sampling_ratios[dataset])

        # normalize the sampling ratios to sum up to 1
        prob_sum = sum(probabilities)
        assert all(prob >= 0 for prob in probabilities) and prob_sum > 0, (
            "sampling_ratios param for multitasking must be all non-negative "
            "and at least one of them needs to be positive."
        )

        self._dataset_probabilities = [prob / prob_sum for prob in probabilities]
        logger.info("Using per-dataset sampling probabilities:")
        for dataset, prob in zip(given_datasets, self._dataset_probabilities):
            logger.info(f"\t{dataset}: {prob}")

    def __call__(self, *args, **kwargs):
        choice = np.random.choice(
            len(self.dataloaders), 1, p=self._dataset_probabilities
        )[0]
        return choice

    def _get_given_datasets(self):
        config = registry.get("config")
        datasets = None
        if config is not None and "datasets" not in config:
            datasets = dataset_list_from_config(config)

        if datasets is None or len(datasets) == 0:
            warnings.warn(
                "Either the 'datasets' key is not in the global config or it "
                + "is an empty list. Moving forward with a dataset list that "
                + "is the same as sampling_ratios.keys()"
            )
            return list(self.config.get("sampling_ratios", {}).keys())
        else:
            return datasets
EXA-1-master
exa/models/mmf-main/mmf/datasets/iteration_strategies.py
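A sketch (not from the repo) of building a strategy directly with from_params, assuming two existing DataLoaders keyed by dataset name and an MMF run where the global config is registered (RatiosIterationStrategy checks it via _check_not_epoch_training):

strategy = RatiosIterationStrategy.from_params(
    {"vqa2": vqa2_loader, "coco": coco_loader},
    sampling_ratios={"vqa2": 0.7, "coco": 0.3},
)
idx = strategy()  # index into the dataloader dict, drawn with p = [0.7, 0.3]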
# Copyright (c) Facebook, Inc. and its affiliates.

from . import processors
from .base_dataset import BaseDataset
from .base_dataset_builder import BaseDatasetBuilder
from .concat_dataset import ConcatDataset
from .lightning_multi_datamodule import LightningMultiDataModule
from .lightning_multi_dataset_loader import LightningMultiDataLoader
from .mmf_dataset import MMFDataset
from .mmf_dataset_builder import MMFDatasetBuilder
from .multi_dataset_loader import MultiDatasetLoader

__all__ = [
    "processors",
    "BaseDataset",
    "BaseDatasetBuilder",
    "ConcatDataset",
    "MultiDatasetLoader",
    "MMFDataset",
    "MMFDatasetBuilder",
    "LightningMultiDataModule",
    "LightningMultiDataLoader",
]
EXA-1-master
exa/models/mmf-main/mmf/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from torch.utils.data.dataset import Subset


class MMFSubset(Subset):
    def __init__(self, dataset, indices):
        super().__init__(dataset, indices)
        self._dir_representation = dir(self)

    def __getattr__(self, name):
        if "_dir_representation" in self.__dict__ and name in self._dir_representation:
            return getattr(self, name)
        elif "dataset" in self.__dict__ and hasattr(self.dataset, name):
            return getattr(self.dataset, name)
        else:
            raise AttributeError(name)
EXA-1-master
exa/models/mmf-main/mmf/datasets/subset_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

import logging
import warnings
from typing import Dict, List, Optional

import pytorch_lightning as pl
from mmf.common.sample import SampleList
from mmf.common.test_reporter import TestReporter
from mmf.datasets.iteration_strategies import IterationStrategy
from mmf.datasets.multi_dataset_loader import MultiDataLoader
from mmf.utils.build import (
    build_iteration_strategy,
    build_multiple_datamodules,
    build_test_reporter,
)
from mmf.utils.dataset import dataset_list_from_config
from mmf.utils.general import get_batch_size
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import DataLoader

logger = logging.getLogger(__name__)


class MultiDataModule(pl.LightningDataModule):
    def __init__(self, config: DictConfig):
        super().__init__()
        self.config = config
        self.batch_size = get_batch_size()
        self.dataset_list: List[str] = dataset_list_from_config(self.config)
        self.datamodules: Dict[
            str, pl.LightningDataModule
        ] = build_multiple_datamodules(self.dataset_list, self.config.dataset_config)
        self.train_loader: Optional[MultiDataLoader] = None
        self.val_loader: Optional[MultiDataLoader] = None
        self.test_loader: Optional[MultiDataLoader] = None

    def train_dataloader(self) -> MultiDataLoader:
        self.train_loader = self._build_multi_dataloader("train")
        return self.train_loader

    def val_dataloader(self) -> MultiDataLoader:
        self.val_loader = self._build_multi_dataloader("val")
        return self.val_loader

    def test_dataloader(self) -> MultiDataLoader:
        self.test_loader = self._build_multi_dataloader("test")
        return self.test_loader

    def _build_iteration_strategy(
        self, config: DictConfig, dataloaders: Dict[str, DataLoader]
    ) -> IterationStrategy:
        disabled = OmegaConf.create({"enabled": False})
        if len(self.dataset_list) == 1:
            logger.info("Multitasking disabled by default for single dataset training")
            multitasking_config = disabled
        elif "multitasking" in self.config:
            multitasking_config = self.config.multitasking
        else:
            warnings.warn(
                "'multitasking' config not defined. Disabling any form of multitasking"
            )
            multitasking_config = disabled

        return build_iteration_strategy(multitasking_config, dataloaders)

    def _build_multi_dataloader(self, dataset_type: str = "train") -> MultiDataLoader:
        loader_args = {}

        for key, datamodule in self.datamodules.items():
            loader_args[key] = getattr(datamodule, f"{dataset_type}_dataloader")()

            if not hasattr(loader_args[key], "dataset"):
                loader_args[key].dataset = getattr(
                    datamodule, f"{dataset_type}_dataset"
                )
        iteration_strategy = self._build_iteration_strategy(self.config, loader_args)
        loader = MultiDataLoader(loader_args, iteration_strategy)
        return loader

    def teardown(self, *args, **kwargs):
        for _, datamodule in self.datamodules.items():
            if hasattr(datamodule, "teardown"):
                datamodule.teardown()

    ############################################################
    ######## Functions below are required for MMFTrainer #######
    ######## and are not used by the PL Trainer          #######
    ############################################################

    def get_test_reporter(self, dataset_type: str) -> TestReporter:
        test_reporter_config = self._get_test_reporter_config()
        return build_test_reporter(
            self.datamodules, test_reporter_config, dataset_type
        )

    def _get_test_reporter_config(self):
        from mmf.utils.configuration import get_global_config

        return get_global_config("evaluation.reporter")

    def prepare_batch(self, batch, *args, **kwargs):
        batch = SampleList(batch)
        loader = self.get_loader(batch.dataset_type)
        return loader.prepare_batch(batch)

    def get_loader(self, dataset_type: str) -> MultiDataLoader:
        return getattr(self, f"{dataset_type}_loader")

    def seed_sampler(self, dataset_type: str, seed: int):
        loader = self.get_loader(dataset_type)
        loader.seed_sampler(seed)
EXA-1-master
exa/models/mmf-main/mmf/datasets/multi_datamodule.py
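Sketch (hypothetical, not from the repo): because MultiDataModule is a LightningDataModule, it can be handed straight to a Lightning Trainer, assuming `config` carries `datasets` and `dataset_config` entries and `model` is some LightningModule:

import pytorch_lightning as pl

datamodule = MultiDataModule(config)
trainer = pl.Trainer(max_steps=10)
trainer.fit(model, datamodule=datamodule)  # model: any LightningModule, assumed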
import collections
import os
import typing
import warnings
from copy import deepcopy

import mmf.utils.download as download
import torch
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from mmf.datasets.concat_dataset import MMFConcatDataset
from mmf.datasets.subset_dataset import MMFSubset
from mmf.utils.configuration import get_global_config, get_mmf_env, get_zoo_config
from mmf.utils.general import get_absolute_path
from omegaconf import open_dict


class MMFDatasetBuilder(BaseDatasetBuilder):
    ZOO_CONFIG_PATH = None
    ZOO_VARIATION = None

    def __init__(
        self,
        dataset_name,
        dataset_class=None,
        zoo_variation="defaults",
        *args,
        **kwargs,
    ):
        super().__init__(dataset_name)
        self.dataset_class = dataset_class
        self.zoo_type = "datasets"
        self.zoo_variation = zoo_variation

    @property
    def dataset_class(self):
        return self._dataset_class

    @dataset_class.setter
    def dataset_class(self, dataset_class):
        self._dataset_class = dataset_class

    @property
    def zoo_variation(self):
        return self._zoo_variation

    @zoo_variation.setter
    def zoo_variation(self, zoo_variation):
        self._zoo_variation = zoo_variation

    @property
    def zoo_config_path(self):
        if self.ZOO_CONFIG_PATH is None:
            self.ZOO_CONFIG_PATH = get_global_config("env.dataset_zoo")
        return self.ZOO_CONFIG_PATH

    @zoo_config_path.setter
    def zoo_config_path(self, zoo_config_path):
        self.ZOO_CONFIG_PATH = zoo_config_path

    def set_dataset_class(self, dataset_cls):
        self.dataset_class = dataset_cls

    def build(self, config, dataset_type="train", *args, **kwargs):
        self.config = config
        requirements = config.get("zoo_requirements", [])

        if len(requirements) == 0:
            # If nothing is specified, build the default requirement
            self._download_requirement(config, self.dataset_name, self.zoo_variation)
        else:
            # Else build all of the requirements one by one.
            # Default must also be specified in these requirements if needed
            for requirement in requirements:
                self._download_requirement(config, requirement)

    def _download_requirement(
        self, config, requirement_key, requirement_variation="defaults"
    ):
        version, resources = get_zoo_config(
            requirement_key, requirement_variation, self.zoo_config_path, self.zoo_type
        )

        if resources is None:
            return

        requirement_split = requirement_key.split(".")
        dataset_name = requirement_split[0]

        # The dataset variation has been directly passed in the key so use it instead
        if len(requirement_split) >= 2:
            dataset_variation = requirement_split[1]
        else:
            dataset_variation = requirement_variation

        # We want to use root env data_dir so that we don't mix up our download
        # root dir with the dataset ones
        download_path = os.path.join(
            get_mmf_env("data_dir"), "datasets", dataset_name, dataset_variation
        )
        download_path = get_absolute_path(download_path)

        if not isinstance(resources, collections.abc.Mapping):
            self._download_resources(resources, download_path, version)
        else:
            use_features = config.get("use_features", False)
            use_images = config.get("use_images", False)

            if use_features:
                self._download_based_on_attribute(
                    resources, download_path, version, "features"
                )

            if use_images:
                self._download_based_on_attribute(
                    resources, download_path, version, "images"
                )

            self._download_based_on_attribute(
                resources, download_path, version, "annotations"
            )
            self._download_resources(
                resources.get("extras", []), download_path, version
            )

    def load(self, config, dataset_type, *args, **kwargs):
        self.config = config
        split_dataset_from_train = self.config.get("split_train", False)
        if split_dataset_from_train:
            config = self._modify_dataset_config_for_split(config)

        annotations = self._read_annotations(config, dataset_type)
        if annotations is None:
            return None

        datasets = []
        for imdb_idx in range(len(annotations)):
            dataset_class = self.dataset_class
            dataset = dataset_class(config, dataset_type, imdb_idx)
            datasets.append(dataset)

        dataset = MMFConcatDataset(datasets)
        if split_dataset_from_train:
            dataset = self._split_dataset_from_train(dataset, dataset_type)
        self.dataset = dataset
        return self.dataset

    def _split_dataset_from_train(self, dataset, dataset_type):
        if dataset_type in self.config.split_train.keys() or dataset_type == "train":
            start, end = self._calculate_split_for_dataset_type(dataset_type)
            dataset_length = len(dataset)
            start, end = round(start * dataset_length), round(end * dataset_length)
            if start > end:
                raise ValueError(
                    f"Train split ratio for {dataset_type} must be positive."
                )
            indices = self._generate_permuted_indexes(dataset_length)[start:end]
            dataset = MMFSubset(dataset, indices)
            print(
                f"Dataset type: {dataset_type} length: {len(dataset)} "
                f"total: {dataset_length}"
            )
        return dataset

    def _generate_permuted_indexes(self, dataset_length):
        generator = torch.Generator()
        generator.manual_seed(self.config.get("split_train.seed", 123456))
        return torch.randperm(dataset_length, generator=generator)

    def _modify_dataset_config_for_split(self, config):
        with open_dict(config):
            for data_type in config.split_train:
                if data_type == "seed":
                    continue
                if config.use_images:
                    config.images[data_type] = deepcopy(config.images.train)
                if config.use_features:
                    config.features[data_type] = deepcopy(config.features.train)
                config.annotations[data_type] = deepcopy(config.annotations.train)
        return config

    def _read_annotations(self, config, dataset_type):
        annotations = config.get("annotations", {}).get(dataset_type, [])

        # User can pass a single string as well
        if isinstance(annotations, str):
            annotations = [annotations]

        if len(annotations) == 0:
            warnings.warn(
                f"Dataset type {dataset_type} is not present or empty in "
                "annotations of dataset config or the annotations key is "
                "not present. Returning None. This dataset won't be used."
            )
            return None

        return annotations

    def _calculate_split_for_dataset_type(self, dataset_type):
        start = 0.0
        for data_type in self.config.split_train:
            if data_type == "seed":
                continue
            if dataset_type == data_type:
                return (start, start + self.config.split_train[data_type])
            start += self.config.split_train[data_type]

        if start > 1.0:
            raise ValueError(
                "Ratios of val plus test should not exceed 100%. "
                "Need to leave some percentage for training."
            )
        elif start == 1.0:
            warnings.warn("All data in training set is used for val and/or test.")

        if dataset_type == "train":
            return (start, 1.0)

    def _download_based_on_attribute(
        self, resources, download_path, version, attribute
    ):
        path = os.path.join(download_path, attribute)
        self._download_resources(resources.get(attribute, []), path, version)

    def _download_resources(self, resources, path, version):
        download.download_resources(resources, path, version)
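

if __name__ == "__main__":
    # A minimal, self-contained sketch (added for illustration; not part of
    # MMF) of the split_train arithmetic implemented above: cumulative ratios
    # slice a single seeded permutation, so the val/test/train index sets are
    # disjoint by construction. The ratios and length here are hypothetical.
    split_train = {"val": 0.1, "test": 0.1}  # hypothetical config values
    dataset_length = 100
    generator = torch.Generator()
    generator.manual_seed(123456)
    perm = torch.randperm(dataset_length, generator=generator)

    start = 0.0
    ranges = {}
    for name, ratio in split_train.items():
        ranges[name] = (
            round(start * dataset_length),
            round((start + ratio) * dataset_length),
        )
        start += ratio
    ranges["train"] = (round(start * dataset_length), dataset_length)

    splits = {name: perm[s:e] for name, (s, e) in ranges.items()}
    print({name: len(idx) for name, idx in splits.items()})  # val/test: 10, train: 80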
EXA-1-master
exa/models/mmf-main/mmf/datasets/mmf_dataset_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import os

import torch
import torchvision
import torchvision.datasets.folder as tv_helpers
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from PIL import Image


def get_possible_image_paths(path):
    image_path = path.split(".")
    # Image path might contain a file extension (e.g. .jpg);
    # in this case, we want the path without the extension
    image_path = image_path if len(image_path) == 1 else image_path[:-1]
    for ext in tv_helpers.IMG_EXTENSIONS:
        image_ext = ".".join(image_path) + ext
        if PathManager.isfile(image_ext):
            path = image_ext
            break
    return path


def default_loader(path):
    with PathManager.open(path, "rb") as f:
        img = Image.open(f)
        return img.convert("RGB")


class ImageDatabase(torch.utils.data.Dataset):
    """ImageDatabase can be used to load images in MMF.
    This goes either in conjunction with AnnotationDatabase or can be
    used separately with functions such as `from_path`. MMFDataset
    initializes its own copy of ImageDatabase if `use_images` is True.
    Everything else works the same as a normal torch Dataset if you pass
    the annotation_db as a parameter. For example, for item 1 from the
    annotation db, you can pass the same id to ImageDatabase to load its
    image. If you don't pass it, you have two options: either use .get,
    which takes in an annotation db item, or .from_path, which directly
    takes in an image path. You are free to use your own dataset instead
    of ImageDatabase, and free to update or ignore MMFDataset's
    ImageDatabase initialization. You can either reinitialize with
    transform and other params or use any of torchvision's datasets.
    """

    def __init__(
        self,
        config,
        path,
        annotation_db=None,
        transform=None,
        loader=default_loader,
        is_valid_file=None,
        image_key=None,
        *args,
        **kwargs,
    ):
        """Initialize an instance of ImageDatabase

        Args:
            config (DictConfig): Config object from dataset_config
            path (str): Path to images folder
            annotation_db (AnnotationDB, optional): Annotation DB to be used
                to figure out image paths. Defaults to None.
            transform (callable, optional): Transform to be called upon loaded image.
                Defaults to None.
            loader (callable, optional): Custom loader for image which given a path
                returns a PIL Image. Defaults to torchvision's default loader.
            is_valid_file (callable, optional): Custom callable to filter out invalid
                files. If an image is invalid, {"images": []} will be returned, which
                you can filter out in your dataset. Defaults to None.
            image_key (str, optional): Key that points to image path in annotation db.
                If not specified, ImageDatabase will make some intelligent guesses
                about the possible key. Defaults to None.
        """
        super().__init__()
        self.config = config
        self.base_path = get_absolute_path(path)
        self.transform = transform
        self.annotation_db = annotation_db
        self.loader = loader
        self.image_key = config.get("image_key", None)
        self.image_key = image_key if image_key else self.image_key
        self.is_valid_file = is_valid_file

    @property
    def annotation_db(self):
        return self._annotation_db

    @annotation_db.setter
    def annotation_db(self, annotation_db):
        self._annotation_db = annotation_db

    @property
    def transform(self):
        return self._transform

    @transform.setter
    def transform(self, transform):
        if isinstance(transform, collections.abc.MutableSequence):
            transform = torchvision.transforms.Compose(transform)
        self._transform = transform

    def __len__(self):
        self._check_annotation_db_present()
        return len(self.annotation_db)

    def __getitem__(self, idx):
        self._check_annotation_db_present()
        item = self.annotation_db[idx]
        return self.get(item)

    def _check_annotation_db_present(self):
        if not self.annotation_db:
            raise AttributeError(
                "'annotation_db' must be set for the database to use __getitem__."
                " Use image_database.annotation_db to set it."
            )

    def get(self, item):
        possible_images = self._get_attrs(item)
        return self.from_path(possible_images)

    def from_path(self, paths, use_transforms=True):
        if isinstance(paths, str):
            paths = [paths]

        assert isinstance(
            paths, collections.abc.Iterable
        ), "Path needs to be a string or an iterable"

        loaded_images = []
        for image in paths:
            image = os.path.join(self.base_path, image)
            path = get_possible_image_paths(image)
            valid = self.is_valid_file(path) if self.is_valid_file is not None else True

            if not valid:
                continue

            if not path:
                # Create the full path without extension so it can be printed
                # for the error
                possible_path = ".".join(image.split(".")[:-1])
                raise RuntimeError(
                    "Image not found at path {}.{{jpeg|jpg|svg|png}}.".format(
                        possible_path
                    )
                )
            image = self.open_image(path)
            if self.transform and use_transforms:
                image = self.transform(image)
            loaded_images.append(image)

        return {"images": loaded_images}

    def open_image(self, path):
        return self.loader(path)

    def _get_attrs(self, item):
        """Returns possible attribute that can point to image id

        Args:
            item (Object): Object from the DB

        Returns:
            List[str]: List of possible images that will be copied later
        """
        if self.image_key:
            image = item[self.image_key]
            if isinstance(image, str):
                image = [image]
            return image

        image = None
        pick = None
        attrs = self._get_possible_attrs()

        for attr in attrs:
            image = item.get(attr, None)
            if image is not None:
                pick = attr
                break

        if pick == "identifier" and "left_url" in item and "right_url" in item:
            return [image + "-img0", image + "-img1"]
        else:
            return [image]

    def _get_possible_attrs(self):
        return [
            "Flickr30kID",
            "Flikr30kID",
            "identifier",
            "image_path",
            "image_name",
            "img",
            "image_id",
        ]
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/image_database.py
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from multiprocessing.pool import ThreadPool

import tqdm
from mmf.datasets.databases.image_database import ImageDatabase
from mmf.datasets.databases.readers.feature_readers import FeatureReader
from mmf.utils.distributed import is_main
from mmf.utils.general import get_absolute_path

logger = logging.getLogger(__name__)


class FeaturesDatabase(ImageDatabase):
    def __init__(
        self, config, path, annotation_db=None, feature_key=None, *args, **kwargs
    ):
        super().__init__(config, path, annotation_db, *args, **kwargs)
        self.feature_readers = []
        self.feature_dict = {}
        self.feature_key = config.get("feature_key", "feature_path")
        self.feature_key = feature_key if feature_key else self.feature_key
        self._fast_read = config.get("fast_read", False)

        path = path.split(",")

        for image_feature_dir in path:
            feature_reader = FeatureReader(
                base_path=get_absolute_path(image_feature_dir),
                depth_first=config.get("depth_first", False),
                max_features=config.get("max_features", 100),
            )
            self.feature_readers.append(feature_reader)

        self.paths = path
        self.annotation_db = annotation_db
        self._should_return_info = config.get("return_features_info", True)

        if self._fast_read:
            path = ", ".join(path)
            logger.info(f"Fast reading features from {path}")
            logger.info("Hold tight, this may take a while...")
            self._threaded_read()

    def _threaded_read(self):
        elements = [idx for idx in range(1, len(self.annotation_db))]
        pool = ThreadPool(processes=4)

        with tqdm.tqdm(total=len(elements), disable=not is_main()) as pbar:
            for i, _ in enumerate(pool.imap_unordered(self._fill_cache, elements)):
                if i % 100 == 0:
                    pbar.update(100)
        pool.close()

    def _fill_cache(self, idx):
        feat_file = self.annotation_db[idx]["feature_path"]
        features, info = self._read_features_and_info(feat_file)
        self.feature_dict[feat_file] = (features, info)

    def _read_features_and_info(self, feat_file):
        features = []
        infos = []
        for feature_reader in self.feature_readers:
            feature, info = feature_reader.read(feat_file)
            # feature = torch.from_numpy(feature).share_memory_()
            features.append(feature)
            infos.append(info)

        if not self._should_return_info:
            infos = None
        return features, infos

    def _get_image_features_and_info(self, feat_file):
        assert isinstance(feat_file, str)
        image_feats, infos = self.feature_dict.get(feat_file, (None, None))

        if image_feats is None:
            image_feats, infos = self._read_features_and_info(feat_file)

        return image_feats, infos

    def __len__(self):
        self._check_annotation_db_present()
        return len(self.annotation_db)

    def __getitem__(self, idx):
        self._check_annotation_db_present()
        image_info = self.annotation_db[idx]
        return self.get(image_info)

    def get(self, item):
        feature_path = item.get(self.feature_key, None)

        if feature_path is None:
            feature_path = self._get_feature_path_based_on_image(item)

        return self.from_path(feature_path)

    def from_path(self, path):
        assert isinstance(path, str)

        if "genome" in path and path.endswith(".npy"):
            path = str(int(path.split("_")[-1].split(".")[0])) + ".npy"

        features, infos = self._get_image_features_and_info(path)

        item = {}
        for idx, image_feature in enumerate(features):
            item["image_feature_%s" % idx] = image_feature
            if infos is not None:
                # infos[idx].pop("cls_prob", None)
                item["image_info_%s" % idx] = infos[idx]

        return item

    def _get_feature_path_based_on_image(self, item):
        image_path = self._get_attrs(item)[0]
        if isinstance(image_path, int):
            return f"{image_path}.npy"
        feature_path = ".".join(image_path.split(".")[:-1]) + ".npy"
        return feature_path
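

if __name__ == "__main__":
    # Self-contained illustration (added here; not part of MMF) of the
    # Visual Genome filename normalization performed in from_path above: the
    # trailing numeric id is extracted and used as the feature filename.
    # The input path is hypothetical.
    path = "genome_v1_2410123.npy"
    normalized = str(int(path.split("_")[-1].split(".")[0])) + ".npy"
    print(normalized)  # -> "2410123.npy"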
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/features_database.py
# Copyright (c) Facebook, Inc. and its affiliates.
import mmf.datasets.databases.readers  # noqa

from .annotation_database import AnnotationDatabase
from .features_database import FeaturesDatabase
from .image_database import ImageDatabase
from .scene_graph_database import SceneGraphDatabase

__all__ = [
    "AnnotationDatabase",
    "FeaturesDatabase",
    "ImageDatabase",
    "SceneGraphDatabase",
]
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.databases.annotation_database import AnnotationDatabase


class SceneGraphDatabase(AnnotationDatabase):
    def __init__(self, config, scene_graph_path, *args, **kwargs):
        super().__init__(config, scene_graph_path, *args, **kwargs)
        self.data_dict = {}
        for item in self.data:
            self.data_dict[item["image_id"]] = item

    def __getitem__(self, idx):
        return self.data_dict[idx]
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/scene_graph_database.py
# Copyright (c) Facebook, Inc. and its affiliates.
import json

import numpy as np
import torch
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path


class AnnotationDatabase(torch.utils.data.Dataset):
    """
    Dataset for Annotations used in MMF

    TODO: Update on docs sprint
    """

    def __init__(self, config, path, *args, **kwargs):
        super().__init__()
        self.metadata = {}
        self.config = config
        self.start_idx = 0
        path = get_absolute_path(path)
        self.load_annotation_db(path)

    def load_annotation_db(self, path):
        if path.find("visdial") != -1 or path.find("visual_dialog") != -1:
            self._load_visual_dialog(path)
        elif path.endswith(".npy"):
            self._load_npy(path)
        elif path.endswith(".jsonl"):
            self._load_jsonl(path)
        elif path.endswith(".json"):
            self._load_json(path)
        else:
            raise ValueError("Unknown file format for annotation db")

    def _load_jsonl(self, path):
        with PathManager.open(path, "r") as f:
            db = f.readlines()
            for idx, line in enumerate(db):
                db[idx] = json.loads(line.strip("\n"))
            self.data = db
            self.start_idx = 0

    def _load_npy(self, path):
        with PathManager.open(path, "rb") as f:
            self.db = np.load(f, allow_pickle=True)
        self.start_idx = 0

        if type(self.db) == dict:
            self.metadata = self.db.get("metadata", {})
            self.data = self.db.get("data", [])
        else:
            # TODO: Deprecate support for this
            self.metadata = {"version": 1}
            self.data = self.db
            # Handle old imdb support
            if "image_id" not in self.data[0]:
                self.start_idx = 1

        if len(self.data) == 0:
            self.data = self.db

    def _load_json(self, path):
        with PathManager.open(path, "r") as f:
            data = json.load(f)
        self.metadata = data.get("metadata", {})
        self.data = data.get("data", [])

        if len(self.data) == 0:
            raise RuntimeError("Dataset is empty")

    def _load_visual_dialog(self, path):
        from mmf.datasets.builders.visual_dialog.database import VisualDialogDatabase

        self.data = VisualDialogDatabase(path)
        self.metadata = self.data.metadata
        self.start_idx = 0

    def __len__(self):
        return len(self.data) - self.start_idx

    def __getitem__(self, idx):
        data = self.data[idx + self.start_idx]

        # Hacks for older IMDBs
        if "answers" not in data:
            if "all_answers" in data and "valid_answers" not in data:
                data["answers"] = data["all_answers"]
            if "valid_answers" in data:
                data["answers"] = data["valid_answers"]

        # TODO: Clean up VizWiz IMDB from copy tokens
        if "answers" in data and data["answers"][-1] == "<copy>":
            data["answers"] = data["answers"][:-1]

        return data

    def get_version(self):
        return self.metadata.get("version", None)
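

if __name__ == "__main__":
    # Self-contained illustration (added here; not part of MMF) of the
    # .jsonl parsing done by _load_jsonl above: one JSON object per line,
    # with the trailing newline stripped. The records are synthetic.
    import io

    fake_jsonl = io.StringIO(
        '{"image_id": 1, "answers": ["cat"]}\n{"image_id": 2, "answers": ["dog"]}\n'
    )
    db = [json.loads(line.strip("\n")) for line in fake_jsonl.readlines()]
    print(db[0]["answers"])  # -> ['cat']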
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/annotation_database.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/readers/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import pickle
from typing import Any

import lmdb
import numpy as np
import torch
from mmf.utils.file_io import PathManager


def load_feat(feat_path: str, convert_to_tensor: bool = False) -> Any:
    with PathManager.open(feat_path, "rb") as f:
        if feat_path.endswith("npy"):
            feat = np.load(f, allow_pickle=True)
            if convert_to_tensor:
                feat = torch.from_numpy(feat)
        elif feat_path.endswith("pth"):
            feat = torch.load(f, map_location=torch.device("cpu"))
        else:
            raise AssertionError("Unknown feature type")
    return feat


class FeatureReader:
    def __init__(self, base_path, depth_first, max_features=None):
        """Feature Reader class for reading features.

        Note: Deprecation: ndim and image_feature will be deprecated later
        and the format will be standardized using features from detectron.

        Parameters
        ----------
        ndim : int
            Number of expected dimensions in features
        depth_first : bool
            CHW vs HWC
        max_features : int
            Number of maximum bboxes to keep
        """
        self.base_path = base_path
        ndim = None
        self.feat_reader = None
        self.depth_first = depth_first
        self.max_features = max_features
        self.ndim = ndim

    def _init_reader(self):
        # Currently all lmdb features are with ndim == 2
        if self.base_path.endswith(".lmdb"):
            self.feat_reader = LMDBFeatureReader(self.max_features, self.base_path)
        elif self.ndim == 2 or self.ndim == 0:
            if self.max_features is None:
                self.feat_reader = FasterRCNNFeatureReader()
            else:
                # TODO: Fix later when we move to proper standardized features
                # if isinstance(self.image_feature.item(0), dict):
                #     self.feat_reader = \
                #         PaddedFeatureRCNNWithBBoxesFeatureReader(
                #             self.max_features
                #         )
                # else:
                self.feat_reader = PaddedFasterRCNNFeatureReader(self.max_features)
        elif self.ndim == 3 and not self.depth_first:
            self.feat_reader = Dim3FeatureReader()
        elif self.ndim == 4 and self.depth_first:
            self.feat_reader = CHWFeatureReader(self.max_features)
        elif self.ndim == 4 and not self.depth_first:
            self.feat_reader = HWCFeatureReader()
        else:
            raise TypeError("unknown image feature format")

    def read(self, image_feat_path):
        if not image_feat_path.endswith("npy") and not image_feat_path.endswith("pth"):
            return None

        image_feat_path = os.path.join(self.base_path, image_feat_path)

        if self.feat_reader is None:
            # Currently all lmdb features are with ndim == 2 so we are
            # avoiding loading the lmdb to determine feature ndim
            if not self.base_path.endswith(".lmdb") and self.ndim is None:
                feat = load_feat(image_feat_path)
                self.ndim = feat.ndim
            self._init_reader()

        return self.feat_reader.read(image_feat_path)


class FasterRCNNFeatureReader:
    def read(self, image_feat_path):
        feat = load_feat(image_feat_path, convert_to_tensor=True)
        return feat, None


class CHWFeatureReader:
    def __init__(self, max_features=None):
        self.max_features = max_features
        if self.max_features:
            patch_dim = math.ceil(math.sqrt(self.max_features))
            self.img_h = patch_dim
            self.img_w = patch_dim

    def read(self, image_feat_path):
        feat = load_feat(image_feat_path, convert_to_tensor=True)
        assert feat.shape[0] == 1, "batch is not 1"
        b, c, h, w = feat.shape
        if self.max_features:
            padded_feat = torch.zeros(
                (b, c, self.img_h, self.img_w), dtype=torch.float
            )
            padded_feat[:, :, :h, :w] = feat
            feat = padded_feat
        feat = feat.squeeze(0)
        return feat, None


class Dim3FeatureReader:
    def read(self, image_feat_path):
        tmp = load_feat(image_feat_path)
        _, _, c_dim = tmp.shape
        image_feature = torch.from_numpy(np.reshape(tmp, (-1, c_dim)))
        return image_feature, None


class HWCFeatureReader:
    def read(self, image_feat_path):
        tmp = load_feat(image_feat_path)
        assert tmp.shape[0] == 1, "batch is not 1"
        _, _, _, c_dim = tmp.shape
        image_feature = torch.from_numpy(np.reshape(tmp, (-1, c_dim)))
        return image_feature, None


class PaddedFasterRCNNFeatureReader:
    def __init__(self, max_loc):
        self.max_loc = max_loc
        self.first = True
        self.take_item = False

    def _load(self, image_feat_path):
        image_info = {}
        image_info["features"] = load_feat(image_feat_path)

        info_path = "{}_info.npy".format(image_feat_path.split(".npy")[0])
        if PathManager.exists(info_path):
            image_info.update(load_feat(info_path).item())

        return image_info

    def read(self, image_feat_path):
        image_info = self._load(image_feat_path)
        if self.first:
            self.first = False
            if (
                image_info["features"].size == 1
                and "image_feat" in image_info["features"].item()
            ):
                self.take_item = True

        image_feature = image_info["features"]

        if self.take_item:
            item = image_info["features"].item()
            if "image_text" in item:
                image_info["image_text"] = item["image_text"]
                image_info["is_ocr"] = item["image_bbox_source"]
                image_feature = item["image_feat"]

            if "info" in item:
                if "image_text" in item["info"]:
                    image_info.update(item["info"])
                image_feature = item["feature"]

        # Handle case of features with class probs
        if (
            image_info["features"].size == 1
            and "features" in image_info["features"].item()
        ):
            item = image_info["features"].item()
            image_feature = item["features"]
            image_info["image_height"] = item["image_height"]
            image_info["image_width"] = item["image_width"]

            # Resize these to self.max_loc
            image_loc, _ = image_feature.shape
            image_info["cls_prob"] = np.zeros(
                (self.max_loc, item["cls_prob"].shape[1]), dtype=np.float32
            )
            image_info["cls_prob"][0:image_loc] = item["cls_prob"][: self.max_loc, :]
            image_info["bbox"] = np.zeros(
                (self.max_loc, item["bbox"].shape[1]), dtype=np.float32
            )
            image_info["bbox"][0:image_loc] = item["bbox"][: self.max_loc, :]
            image_info["num_boxes"] = item["num_boxes"]

        # Handle the case of ResNet152 features
        if len(image_feature.shape) > 2:
            shape = image_feature.shape
            image_feature = image_feature.reshape(-1, shape[-1])

        image_loc, image_dim = image_feature.shape
        tmp_image_feat = np.zeros((self.max_loc, image_dim), dtype=np.float32)
        tmp_image_feat[0:image_loc] = image_feature[: self.max_loc, :]  # noqa
        image_feature = torch.from_numpy(tmp_image_feat)

        del image_info["features"]
        image_info["max_features"] = torch.tensor(image_loc, dtype=torch.long)
        return image_feature, image_info


class LMDBFeatureReader(PaddedFasterRCNNFeatureReader):
    def __init__(self, max_loc, base_path):
        super().__init__(max_loc)
        self.db_path = base_path

        if not PathManager.exists(self.db_path):
            raise RuntimeError(
                "{} path specified for LMDB features doesn't exist.".format(
                    self.db_path
                )
            )
        self.env = None

    def _init_db(self):
        self.env = lmdb.open(
            self.db_path,
            subdir=os.path.isdir(self.db_path),
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
        )
        with self.env.begin(write=False, buffers=True) as txn:
            self.image_ids = pickle.loads(txn.get(b"keys"))
            self.image_id_indices = {
                self.image_ids[i]: i for i in range(0, len(self.image_ids))
            }

    def _load(self, image_file_path):
        if self.env is None:
            self._init_db()

        split = os.path.relpath(image_file_path, self.db_path).split(".npy")[0]

        try:
            image_id = int(split.split("_")[-1])
            # Try fetching to see if it actually exists, otherwise fall back
            # to the default
            img_id_idx = self.image_id_indices[str(image_id).encode()]
        except (ValueError, KeyError):
            # The image id is complex or involves a folder, use it directly
            image_id = str(split).encode()
            img_id_idx = self.image_id_indices[image_id]

        with self.env.begin(write=False, buffers=True) as txn:
            image_info = pickle.loads(txn.get(self.image_ids[img_id_idx]))

        return image_info


class PaddedFeatureRCNNWithBBoxesFeatureReader:
    def __init__(self, max_loc):
        self.max_loc = max_loc

    def read(self, image_feat_path):
        image_feat_bbox = load_feat(image_feat_path)
        image_boxes = image_feat_bbox.item().get("image_bboxes")
        tmp_image_feat = image_feat_bbox.item().get("image_feature")
        image_loc, image_dim = tmp_image_feat.shape
        tmp_image_feat_2 = np.zeros((self.max_loc, image_dim), dtype=np.float32)
        tmp_image_feat_2[0:image_loc] = tmp_image_feat  # noqa
        tmp_image_feat_2 = torch.from_numpy(tmp_image_feat_2)
        tmp_image_box = np.zeros((self.max_loc, 4), dtype=np.int32)
        tmp_image_box[0:image_loc] = image_boxes
        tmp_image_box = torch.from_numpy(tmp_image_box)
        image_info = {
            "image_bbox": tmp_image_box,
            "max_features": torch.tensor(image_loc, dtype=torch.int),
        }

        return tmp_image_feat_2, image_info
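

if __name__ == "__main__":
    # Self-contained illustration (added here; not part of MMF) of the
    # zero-padding applied by PaddedFasterRCNNFeatureReader above: a
    # (num_boxes, dim) feature matrix is padded (or truncated) to exactly
    # max_loc rows, and the true box count is carried alongside as
    # "max_features". The feature matrix is synthetic.
    max_loc = 100
    image_feature = np.random.rand(36, 2048).astype(np.float32)
    image_loc, image_dim = image_feature.shape
    padded = np.zeros((max_loc, image_dim), dtype=np.float32)
    padded[0:image_loc] = image_feature[:max_loc, :]
    feature_tensor = torch.from_numpy(padded)
    num_boxes = torch.tensor(image_loc, dtype=torch.long)
    print(feature_tensor.shape, num_boxes)  # torch.Size([100, 2048]) tensor(36)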
EXA-1-master
exa/models/mmf-main/mmf/datasets/databases/readers/feature_readers.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.coco import MaskedCOCOBuilder

from .masked_dataset import MaskedConceptualCaptionsDataset


@registry.register_builder("masked_conceptual_captions")
class MaskedConceptualCaptionsBuilder(MaskedCOCOBuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_conceptual_captions"
        self.set_dataset_class(MaskedConceptualCaptionsDataset)

    @classmethod
    def config_path(cls):
        return "configs/datasets/conceptual_captions/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/conceptual_captions/masked_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.builders.coco import MaskedCOCODataset


class MaskedConceptualCaptionsDataset(MaskedCOCODataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "masked_conceptual_captions"
        self._two_sentence = config.get("two_sentence", True)
        self._false_caption = config.get("false_caption", True)
        self._two_sentence_probability = config.get("two_sentence_probability", 0.5)
        self._false_caption_probability = config.get("false_caption_probability", 0.5)
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/conceptual_captions/masked_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
__all__ = [
    "ConceptualCaptionsBuilder",
    "ConceptualCaptionsDataset",
    "MaskedConceptualCaptionsBuilder",
    "MaskedConceptualCaptionsDataset",
]

from .builder import ConceptualCaptionsBuilder
from .dataset import ConceptualCaptionsDataset
from .masked_builder import MaskedConceptualCaptionsBuilder
from .masked_dataset import MaskedConceptualCaptionsDataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/conceptual_captions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.coco import COCOBuilder

from .dataset import ConceptualCaptionsDataset


@registry.register_builder("conceptual_captions")
class ConceptualCaptionsBuilder(COCOBuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "conceptual_captions"
        self.set_dataset_class(ConceptualCaptionsDataset)

    @classmethod
    def config_path(cls):
        return "configs/datasets/conceptual_captions/defaults.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/conceptual_captions/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.sample import Sample
from mmf.datasets.builders.coco import COCODataset


class ConceptualCaptionsDataset(COCODataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "conceptual_captions"

    def load_item(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        processed_caption = self.text_processor({"text": sample_info["captions"][0]})
        current_sample.text = processed_caption["text"]
        current_sample.caption_len = torch.tensor(
            len(processed_caption["text"]), dtype=torch.int
        )

        if isinstance(sample_info["image_id"], int):
            current_sample.image_id = torch.tensor(
                sample_info["image_id"], dtype=torch.int
            )
        else:
            current_sample.image_id = sample_info["image_id"]

        if self._use_features is True:
            features = self.features_db[idx]
            current_sample.update(features)

        current_sample.answers = torch.stack([processed_caption["text"]])

        return current_sample
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/conceptual_captions/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/glue/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
import logging

import torch
from mmf.common.registry import registry
from mmf.common.sample import Sample
from mmf.datasets.base_dataset import BaseDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from mmf.utils.general import retry_n

MAX_RETRIES = 9

logger = logging.getLogger()


class GLUEDataset(BaseDataset):
    DATASET_KEY_MAP = {
        "text_a": {
            "glue_mnli_mismatched": "premise",
            "glue_qnli": "question",
            "glue_sst2": "sentence",
            "glue_qqp": "question1",
        },
        "text_b": {
            "glue_mnli_mismatched": "hypothesis",
            "glue_qnli": "sentence",
            "glue_sst2": None,
            "glue_qqp": "question2",
        },
    }

    def __init__(self, config, dataset_type, imdb_idx):
        try:
            from datasets import load_dataset
        except ModuleNotFoundError:
            logger.error(
                "Please install 'datasets' library by running `pip install datasets`."
            )
            raise

        dataset_name = f"glue_{config.task}"
        super().__init__(dataset_name, config, dataset_type, imdb_idx)

        if dataset_type == "val":
            # datasets library uses validation as val set.
            dataset_type = "validation"

        # For MNLI-MM, set train to MNLI-M train
        task = config.task
        if config.task.startswith("mnli") and dataset_type == "train":
            task = "mnli"

        self.dataset = retry_n(
            MAX_RETRIES, load_dataset, "glue", task, split=dataset_type
        )

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        annotation = self.dataset[idx]
        current_sample = Sample()
        text_processor_input = {
            "text_a": annotation[self.DATASET_KEY_MAP["text_a"][self.dataset_name]]
        }
        text_b = annotation.get(
            self.DATASET_KEY_MAP["text_b"][self.dataset_name], None
        )
        if text_b is not None:
            text_processor_input["text_b"] = text_b
        current_sample.update(self.text_processor(text_processor_input))
        current_sample.targets = torch.tensor(annotation["label"], dtype=torch.long)
        return current_sample


@registry.register_builder("glue_sst2")
@registry.register_builder("glue_mnli_mismatched")
@registry.register_builder("glue_qqp")
@registry.register_builder("glue_qnli")
class GLUEBuilder(MMFDatasetBuilder):
    def __init__(
        self, dataset_name="glue", dataset_class=GLUEDataset, *args, **kwargs
    ):
        super().__init__(dataset_name, dataset_class)
        self.dataset_name = dataset_name
        self.set_dataset_class(dataset_class)

    @classmethod
    def config_path(cls):
        return "configs/datasets/glue/defaults.yaml"

    def build(self, *args, **kwargs):
        # Will be built automatically by datasets library
        return

    def load(self, config, dataset_type, *args, **kwargs):
        self.dataset_name = f"{self.dataset_name}_{config.task}"
        return super().load(config, dataset_type)
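

if __name__ == "__main__":
    # Illustration (added here; not part of MMF) of how DATASET_KEY_MAP
    # above translates a raw GLUE annotation into text processor inputs.
    # The annotation dict is a hypothetical QQP row.
    annotation = {"question1": "Is it raining?", "question2": "Will it rain?", "label": 1}
    dataset_name = "glue_qqp"
    text_processor_input = {
        "text_a": annotation[GLUEDataset.DATASET_KEY_MAP["text_a"][dataset_name]]
    }
    text_b = annotation.get(GLUEDataset.DATASET_KEY_MAP["text_b"][dataset_name], None)
    if text_b is not None:
        text_processor_input["text_b"] = text_b
    print(text_processor_input)  # {'text_a': 'Is it raining?', 'text_b': 'Will it rain?'}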
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/glue/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_entailment/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from mmf.common.registry import registry
from mmf.datasets.builders.visual_entailment.dataset import VisualEntailmentDataset
from mmf.datasets.builders.vqa2.builder import VQA2Builder


@registry.register_builder("visual_entailment")
class VisualEntailmentBuilder(VQA2Builder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "visual_entailment"
        self.dataset_class = VisualEntailmentDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/visual_entailment/defaults.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_entailment/builder.py
import copy
import json

import torch
from mmf.common.sample import Sample
from mmf.datasets.builders.vqa2 import VQA2Dataset

LABEL_TO_INT_MAPPING = {"entailment": 0, "neutral": 1, "contradiction": 2}


class VisualEntailmentDataset(VQA2Dataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            config,
            dataset_type,
            imdb_file_index,
            dataset_name="visual_entailment",
            *args,
            **kwargs,
        )

    def load_item(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        processed_sentence = self.text_processor({"text": sample_info["sentence2"]})

        current_sample.text = processed_sentence["text"]
        if "input_ids" in processed_sentence:
            current_sample.update(processed_sentence)

        if self._use_features is True:
            # Remove sentence id from end
            identifier = sample_info["Flikr30kID"].split(".")[0]
            # Load img0 and img1 features
            sample_info["feature_path"] = "{}.npy".format(identifier)
            features = self.features_db[idx]
            if hasattr(self, "transformer_bbox_processor"):
                features["image_info_0"] = self.transformer_bbox_processor(
                    features["image_info_0"]
                )
            current_sample.update(features)
        else:
            image_path = sample_info["Flikr30kID"]
            current_sample.image = self.image_db.from_path(image_path)["images"][0]

        label = LABEL_TO_INT_MAPPING[sample_info["gold_label"]]
        current_sample.targets = torch.tensor(label, dtype=torch.long)

        return current_sample

    def format_for_prediction(self, report):
        return []
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_entailment/dataset.py
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/clevr/__init__.py
import json
import logging
import math
import os
import zipfile
from collections import Counter

from mmf.common.constants import CLEVR_DOWNLOAD_URL
from mmf.common.registry import registry
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from mmf.datasets.builders.clevr.dataset import CLEVRDataset
from mmf.utils.download import download
from mmf.utils.general import get_mmf_root

logger = logging.getLogger(__name__)


@registry.register_builder("clevr")
class CLEVRBuilder(BaseDatasetBuilder):
    def __init__(self):
        super().__init__("clevr")
        self.dataset_class = CLEVRDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/clevr/defaults.yaml"

    def build(self, config, dataset_type):
        download_folder = os.path.join(
            get_mmf_root(), config.data_dir, config.data_folder
        )

        file_name = CLEVR_DOWNLOAD_URL.split("/")[-1]
        local_filename = os.path.join(download_folder, file_name)

        extraction_folder = os.path.join(
            download_folder, ".".join(file_name.split(".")[:-1])
        )
        self.data_folder = extraction_folder

        # Skip the download if the zip file is already present, or if the
        # extraction folder already contains files
        if os.path.exists(local_filename):
            logger.info("CLEVR dataset is already present. Skipping download.")
            return

        if (
            os.path.exists(extraction_folder)
            and len(os.listdir(extraction_folder)) != 0
        ):
            return

        logger.info("Downloading the CLEVR dataset now")
        download(CLEVR_DOWNLOAD_URL, download_folder, CLEVR_DOWNLOAD_URL.split("/")[-1])

        logger.info("Downloaded. Extracting now. This can take time.")
        with zipfile.ZipFile(local_filename, "r") as zip_ref:
            zip_ref.extractall(download_folder)

    def load(self, config, dataset_type, *args, **kwargs):
        self.dataset = CLEVRDataset(config, dataset_type, data_folder=self.data_folder)
        return self.dataset

    def update_registry_for_model(self, config):
        registry.register(
            self.dataset_name + "_text_vocab_size",
            self.dataset.text_processor.get_vocab_size(),
        )
        registry.register(
            self.dataset_name + "_num_final_outputs",
            self.dataset.answer_processor.get_vocab_size(),
        )
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/clevr/builder.py
import json
import os

import numpy as np
import torch
from mmf.common.sample import Sample
from mmf.datasets.base_dataset import BaseDataset
from mmf.utils.distributed import is_main, synchronize
from mmf.utils.general import get_mmf_root
from mmf.utils.text import tokenize, VocabFromText
from PIL import Image

_CONSTANTS = {
    "questions_folder": "questions",
    "dataset_key": "clevr",
    "empty_folder_error": "CLEVR dataset folder is empty.",
    "questions_key": "questions",
    "question_key": "question",
    "answer_key": "answer",
    "train_dataset_key": "train",
    "images_folder": "images",
    "vocabs_folder": "vocabs",
}

_TEMPLATES = {
    "data_folder_missing_error": "Data folder {} for CLEVR is not present.",
    "question_json_file": "CLEVR_{}_questions.json",
    "vocab_file_template": "{}_{}_vocab.txt",
}


class CLEVRDataset(BaseDataset):
    """Dataset for CLEVR. CLEVR is a reasoning task where, given an image with
    some 3D shapes, you have to answer basic questions.

    Args:
        dataset_type (str): type of dataset, train|val|test
        config (DictConfig): Configuration Node representing all of the data
                             necessary to initialize CLEVR dataset class
        data_folder: Root folder in which all of the data will be present.
                     If passed, replaces the default based on data_dir and
                     data_folder in config.
    """

    def __init__(self, config, dataset_type, data_folder=None, *args, **kwargs):
        super().__init__(_CONSTANTS["dataset_key"], config, dataset_type)

        self._data_folder = data_folder
        self._data_dir = os.path.join(get_mmf_root(), config.data_dir)

        if not self._data_folder:
            self._data_folder = os.path.join(self._data_dir, config.data_folder)

        if not os.path.exists(self._data_folder):
            raise RuntimeError(
                _TEMPLATES["data_folder_missing_error"].format(self._data_folder)
            )

        # Check if the folder was actually extracted in the subfolder
        if config.data_folder in os.listdir(self._data_folder):
            self._data_folder = os.path.join(self._data_folder, config.data_folder)

        if len(os.listdir(self._data_folder)) == 0:
            raise FileNotFoundError(_CONSTANTS["empty_folder_error"])

        self.load()

    def load(self):
        self.image_path = os.path.join(
            self._data_folder, _CONSTANTS["images_folder"], self._dataset_type
        )

        with open(
            os.path.join(
                self._data_folder,
                _CONSTANTS["questions_folder"],
                _TEMPLATES["question_json_file"].format(self._dataset_type),
            )
        ) as f:
            self.questions = json.load(f)[_CONSTANTS["questions_key"]]

            # The vocab should only be built in the main process, as otherwise
            # every process would repeat the same work
            if is_main():
                self._build_vocab(self.questions, _CONSTANTS["question_key"])
                self._build_vocab(self.questions, _CONSTANTS["answer_key"])
            synchronize()

    def __len__(self):
        return len(self.questions)

    def _get_vocab_path(self, attribute):
        return os.path.join(
            self._data_dir,
            _CONSTANTS["vocabs_folder"],
            _TEMPLATES["vocab_file_template"].format(self.dataset_name, attribute),
        )

    def _build_vocab(self, questions, attribute):
        # The vocab should only be built from "train", as val and test must
        # not be observed during training
        if self._dataset_type != _CONSTANTS["train_dataset_key"]:
            return

        vocab_file = self._get_vocab_path(attribute)

        # Already exists, no need to recreate
        if os.path.exists(vocab_file):
            return

        # Create the necessary dirs if not present
        os.makedirs(os.path.dirname(vocab_file), exist_ok=True)

        sentences = [question[attribute] for question in questions]
        build_attributes = self.config.build_attributes

        # Regex is the default one in tokenize, i.e. space
        kwargs = {
            "min_count": build_attributes.get("min_count", 1),
            "keep": build_attributes.get("keep", [";", ","]),
            "remove": build_attributes.get("remove", ["?", "."]),
        }

        if attribute == _CONSTANTS["answer_key"]:
            kwargs["only_unk_extra"] = False

        vocab = VocabFromText(sentences, **kwargs)

        with open(vocab_file, "w") as f:
            f.write("\n".join(vocab.word_list))

    def __getitem__(self, idx):
        data = self.questions[idx]

        # Each call to __getitem__ from the dataloader returns a Sample class
        # object, which is collated by our special batch collator into a
        # SampleList, which is basically an attribute-based batch in layman's
        # terms
        current_sample = Sample()

        question = data["question"]
        tokens = tokenize(question, keep=[";", ","], remove=["?", "."])
        processed = self.text_processor({"tokens": tokens})
        current_sample.text = processed["text"]

        processed = self.answer_processor({"answers": [data["answer"]]})
        current_sample.answers = processed["answers"]
        current_sample.targets = processed["answers_scores"]

        image_path = os.path.join(self.image_path, data["image_filename"])
        image = np.true_divide(Image.open(image_path).convert("RGB"), 255)
        image = image.astype(np.float32)
        current_sample.image = torch.from_numpy(image.transpose(2, 0, 1))

        return current_sample
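

if __name__ == "__main__":
    # Self-contained illustration (added here; not part of MMF) of the image
    # preprocessing in __getitem__ above: scale uint8 RGB values to [0, 1]
    # floats and convert from HWC to the CHW layout expected by torch. The
    # image array is synthetic.
    fake_image = np.random.randint(0, 256, (480, 320, 3), dtype=np.uint8)
    image = np.true_divide(fake_image, 255).astype(np.float32)
    tensor = torch.from_numpy(image.transpose(2, 0, 1))
    print(tensor.shape)  # torch.Size([3, 480, 320])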
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/clevr/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.visual_genome.builder import VisualGenomeBuilder
from mmf.datasets.builders.visual_genome.masked_dataset import MaskedVisualGenomeDataset


@registry.register_builder("masked_visual_genome")
class MaskedVisualGenomeBuilder(VisualGenomeBuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_visual_genome"
        self.dataset_class = MaskedVisualGenomeDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/visual_genome/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/masked_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset


class MaskedVisualGenomeDataset(MMFDataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            "masked_visual_genome",
            config,
            dataset_type,
            imdb_file_index,
            *args,
            **kwargs,
        )
        self._add_answer = config.get("add_answer", True)

    def __getitem__(self, idx):
        sample_info = self.annotation_db[idx]
        sample_info = self._preprocess_answer(sample_info)
        sample_info["question_id"] = sample_info["id"]
        current_sample = Sample()

        if self._use_features is True:
            features = self.features_db[idx]
            if hasattr(self, "transformer_bbox_processor"):
                features["image_info_0"] = self.transformer_bbox_processor(
                    features["image_info_0"]
                )

            if self.config.get("use_image_feature_masks", False):
                current_sample.update(
                    {
                        "image_labels": self.masked_region_processor(
                            features["image_feature_0"]
                        )
                    }
                )

            current_sample.update(features)

        current_sample = self._add_masked_question(sample_info, current_sample)
        if self._add_answer:
            current_sample = self.add_answer_info(sample_info, current_sample)

        return current_sample

    def _preprocess_answer(self, sample_info):
        sample_info["answers"] = [
            self.vg_answer_preprocessor(
                {"text": sample_info["answers"][0]},
                remove=["?", ",", ".", "a", "an", "the"],
            )["text"]
        ]
        return sample_info

    def add_answer_info(self, sample_info, sample):
        if "answers" in sample_info:
            answers = sample_info["answers"]
            answer_processor_arg = {"answers": answers}
            processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
            sample.targets = processed_soft_copy_answers["answers_scores"]

        return sample

    def _add_masked_question(self, sample_info, current_sample):
        question = sample_info["question"]
        processed = self.masked_token_processor(
            {"text_a": question, "text_b": None, "is_correct": -1}
        )
        processed.pop("tokens")
        current_sample.update(processed)

        return current_sample
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/masked_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import shutil

from mmf.common.constants import VISUAL_GENOME_CONSTS
from mmf.common.registry import registry
from mmf.datasets.builders.visual_genome.dataset import VisualGenomeDataset
from mmf.datasets.builders.vqa2.builder import VQA2Builder
from mmf.utils.download import decompress, download
from mmf.utils.general import get_mmf_root

logger = logging.getLogger(__name__)


@registry.register_builder("visual_genome")
class VisualGenomeBuilder(VQA2Builder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "visual_genome"
        self.dataset_proper_name = "Visual Genome"
        self.dataset_class = VisualGenomeDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/visual_genome/defaults.yaml"

    def build(self, config, dataset_type):
        self._dataset_type = dataset_type
        self._config = config
        data_folder = os.path.join(get_mmf_root(), self._config.data_dir)

        # Since the imdb tar file contains all of the sets, we won't download
        # them except in case of train
        if self._dataset_type != "train":
            return

        self._download_and_extract_imdb(data_folder)
        self._download_and_extract_features(data_folder)

    def _download_and_extract_imdb(self, data_folder):
        download_folder = os.path.join(data_folder, "imdb")
        vocab_folder = os.path.join(data_folder, "vocabs")
        vocab_file = os.path.join(vocab_folder, VISUAL_GENOME_CONSTS["synset_file"])
        os.makedirs(vocab_folder, exist_ok=True)

        self._download_and_extract(
            "vocabs", VISUAL_GENOME_CONSTS["vocabs"], data_folder
        )
        extraction_folder = self._download_and_extract(
            "imdb_url", VISUAL_GENOME_CONSTS["imdb_url"], download_folder
        )

        if not os.path.exists(vocab_file):
            shutil.move(
                os.path.join(extraction_folder, VISUAL_GENOME_CONSTS["synset_file"]),
                vocab_file,
            )

    def _download_and_extract_features(self, data_folder):
        self._download_and_extract(
            "features_url", VISUAL_GENOME_CONSTS["features_url"], data_folder
        )

    def _download_and_extract(self, key, url, download_folder):
        file_type = key.split("_")[0]
        os.makedirs(download_folder, exist_ok=True)
        local_filename = url.split("/")[-1]
        extraction_folder = os.path.join(download_folder, local_filename.split(".")[0])
        local_filename = os.path.join(download_folder, local_filename)

        if os.path.exists(local_filename) or (
            os.path.exists(extraction_folder)
            and len(os.listdir(extraction_folder)) != 0
        ):
            logger.info(
                f"{self.dataset_proper_name} {file_type} already present. "
                "Skipping download."
            )
            return extraction_folder

        logger.info(f"Downloading the {self.dataset_proper_name} {file_type} now.")
        download(url, download_folder, url.split("/")[-1])

        logger.info(
            f"Extracting the {self.dataset_proper_name} {file_type} now. "
            "This may take time."
        )
        decompress(download_folder, url.split("/")[-1])

        return extraction_folder
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json

import torch
from mmf.common.sample import Sample, SampleList
from mmf.datasets.builders.vqa2 import VQA2Dataset
from mmf.datasets.databases.scene_graph_database import SceneGraphDatabase

_CONSTANTS = {"image_id_key": "image_id"}


class VisualGenomeDataset(VQA2Dataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            config,
            dataset_type,
            imdb_file_index,
            dataset_name="visual_genome",
            *args,
            **kwargs,
        )

        self._return_scene_graph = config.return_scene_graph
        self._return_objects = config.return_objects
        self._return_relationships = config.return_relationships
        self._no_unk = config.get("no_unk", False)
        self.scene_graph_db = None

        build_scene_graph_db = (
            self._return_scene_graph
            or self._return_objects
            or self._return_relationships
        )

        if build_scene_graph_db:
            scene_graph_file = config.scene_graph_files[dataset_type][imdb_file_index]
            scene_graph_file = self._get_absolute_path(scene_graph_file)
            self.scene_graph_db = SceneGraphDatabase(config, scene_graph_file)

    def load_item(self, idx):
        sample_info = self.annotation_db[idx]
        sample_info = self._preprocess_answer(sample_info)
        sample_info["question_id"] = sample_info["id"]
        if self._check_unk(sample_info):
            return self.load_item((idx + 1) % len(self.annotation_db))

        current_sample = super().load_item(idx)
        current_sample = self._load_scene_graph(idx, current_sample)

        return current_sample

    def _get_image_id(self, idx):
        return self.annotation_db[idx][_CONSTANTS["image_id_key"]]

    def _get_image_info(self, idx):
        # Deep copy so that we can directly update the nested dicts
        return copy.deepcopy(self.scene_graph_db[self._get_image_id(idx)])

    def _preprocess_answer(self, sample_info):
        sample_info["answers"] = [
            self.vg_answer_preprocessor(
                {"text": sample_info["answers"][0]},
                remove=["?", ",", ".", "a", "an", "the"],
            )["text"]
        ]
        return sample_info

    def _check_unk(self, sample_info):
        if not self._no_unk:
            return False
        else:
            index = self.answer_processor.word2idx(sample_info["answers"][0])
            return index == self.answer_processor.answer_vocab.UNK_INDEX

    def _load_scene_graph(self, idx, sample):
        if self.scene_graph_db is None:
            return sample

        image_info = self._get_image_info(idx)
        regions = image_info["regions"]

        objects, object_map = self._load_objects(idx)

        if self._return_objects:
            sample.objects = objects

        relationships, relationship_map = self._load_relationships(idx, object_map)

        if self._return_relationships:
            sample.relationships = relationships

        regions, _ = self._load_regions(idx, object_map, relationship_map)

        if self._return_scene_graph:
            sample.scene_graph = regions

        return sample

    def _load_objects(self, idx):
        image_info = self._get_image_info(idx)
        image_height = image_info["height"]
        image_width = image_info["width"]
        object_map = {}
        objects = []

        for obj in image_info["objects"]:
            obj["synsets"] = self.synset_processor({"tokens": obj["synsets"]})["text"]
            obj["names"] = self.name_processor({"tokens": obj["names"]})["text"]
            obj["height"] = obj["h"] / image_height
            obj.pop("h")
            obj["width"] = obj["w"] / image_width
            obj.pop("w")
            obj["y"] /= image_height
            obj["x"] /= image_width
            obj["attributes"] = self.attribute_processor(
                {"tokens": obj["attributes"]}
            )["text"]
            obj = Sample(obj)
            object_map[obj["object_id"]] = obj
            objects.append(obj)
        objects = SampleList(objects)

        return objects, object_map

    def _load_relationships(self, idx, object_map):
        if self._return_relationships is None and self._return_scene_graph is None:
            return None, None

        image_info = self._get_image_info(idx)
        relationship_map = {}
        relationships = []

        for relationship in image_info["relationships"]:
            relationship["synsets"] = self.synset_processor(
                {"tokens": relationship["synsets"]}
            )["text"]
            relationship["predicate"] = self.predicate_processor(
                {"tokens": relationship["predicate"]}
            )["text"]
            relationship["object"] = object_map[relationship["object_id"]]
            relationship["subject"] = object_map[relationship["subject_id"]]

            relationship = Sample(relationship)
            relationship_map[relationship["relationship_id"]] = relationship
            relationships.append(relationship)

        relationships = SampleList(relationships)

        return relationships, relationship_map

    def _load_regions(self, idx, object_map, relationship_map):
        if self._return_scene_graph is None:
            return None, None

        image_info = self._get_image_info(idx)
        image_height = image_info["height"]
        image_width = image_info["width"]
        region_map = {}
        regions = []

        for region in image_info["regions"]:
            for synset in region["synsets"]:
                synset["entity_name"] = self.name_processor(
                    {"tokens": [synset["entity_name"]]}
                )["text"]
                synset["synset_name"] = self.synset_processor(
                    {"tokens": [synset["synset_name"]]}
                )["text"]

            region["height"] /= image_height
            region["width"] /= image_width
            region["y"] /= image_height
            region["x"] /= image_width

            relationships = []
            objects = []

            for relationship_idx in region["relationships"]:
                relationships.append(relationship_map[relationship_idx])

            for object_idx in region["objects"]:
                objects.append(object_map[object_idx])

            region["relationships"] = relationships
            region["objects"] = objects
            region["phrase"] = self.text_processor({"text": region["phrase"]})["text"]

            region = Sample(region)
            region_map[region["region_id"]] = region
            regions.append(region)

        regions = SampleList(regions)

        return regions, region_map
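

if __name__ == "__main__":
    # Illustration (added here; not part of MMF) of the coordinate
    # normalization done in _load_objects above: pixel-space box attributes
    # are divided by the image height/width so all geometry lies in [0, 1].
    # The object and image dimensions are synthetic.
    image_height, image_width = 600, 800
    obj = {"x": 200, "y": 150, "w": 400, "h": 300}
    normalized = {
        "x": obj["x"] / image_width,
        "y": obj["y"] / image_height,
        "width": obj["w"] / image_width,
        "height": obj["h"] / image_height,
    }
    print(normalized)  # {'x': 0.25, 'y': 0.25, 'width': 0.5, 'height': 0.5}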
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.builders.coco.detection_dataset import DetectionCOCODataset


class DetectionVisualGenomeDataset(DetectionCOCODataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        if "name" in kwargs:
            name = kwargs["name"]
        elif "dataset_name" in kwargs:
            name = kwargs["dataset_name"]
        else:
            name = "detection_visual_genome"
        self.dataset_name = name
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/detection_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.visual_genome.detection_dataset import (
    DetectionVisualGenomeDataset,
)
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("detection_visual_genome")
class DetectionVisualGenomeBuilder(MMFDatasetBuilder):
    def __init__(self):
        super().__init__(
            dataset_name="detection_visual_genome",
            dataset_class=DetectionVisualGenomeDataset,
        )

    @classmethod
    def config_path(cls):
        return "configs/datasets/visual_genome/detection.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_genome/detection_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from mmf.common.registry import registry
from mmf.datasets.builders.mmimdb.masked_dataset import MaskedMMImdbDataset
from mmf.datasets.builders.vqa2.builder import VQA2Builder


@registry.register_builder("masked_mmimdb")
class MaskedMMImdbBuilder(VQA2Builder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_mmimdb"
        self.dataset_class = MaskedMMImdbDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/mmimdb/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/mmimdb/masked_builder.py
import random

from mmf.common.sample import Sample
from mmf.datasets.builders.vqa2.dataset import VQA2Dataset


class MaskedMMImdbDataset(VQA2Dataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            config,
            dataset_type,
            imdb_file_index,
            dataset_name="masked_mmimdb",
            *args,
            **kwargs,
        )
        self._add_answer = config.get("add_answer", True)

    def load_item(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        if self._use_features is True:
            features = self.features_db[idx]
            if hasattr(self, "transformer_bbox_processor"):
                features["image_info_0"] = self.transformer_bbox_processor(
                    features["image_info_0"]
                )

            if self.config.get("use_image_feature_masks", False):
                current_sample.update(
                    {
                        "image_labels": self.masked_region_processor(
                            features["image_feature_0"]
                        )
                    }
                )

            current_sample.update(features)

        current_sample = self._add_masked_question(sample_info, current_sample)

        return current_sample

    def _add_masked_question(self, sample_info, current_sample):
        plot = sample_info["plot"]
        if isinstance(plot, list):
            plot = plot[0]
        question = plot
        random_answer = random.choice(sample_info["genres"])

        processed = self.masked_token_processor(
            {"text_a": question, "text_b": random_answer, "is_correct": -1}
        )
        processed.pop("tokens")
        current_sample.update(processed)

        return current_sample
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/mmimdb/masked_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/mmimdb/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from mmf.common.registry import registry
from mmf.datasets.builders.mmimdb.dataset import (
    MMIMDbFeaturesDataset,
    MMIMDbImageDataset,
)
from mmf.datasets.builders.vqa2.builder import VQA2Builder


@registry.register_builder("mmimdb")
class MMIMDbBuilder(VQA2Builder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "mmimdb"
        self.dataset_class = MMIMDbImageDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/mmimdb/defaults.yaml"

    def load(self, config, dataset_type, *args, **kwargs):
        if config.use_features:
            self.dataset_class = MMIMDbFeaturesDataset

        self.dataset = super().load(config, dataset_type, *args, **kwargs)

        return self.dataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/mmimdb/builder.py
import copy
import json

import torch
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset


class MMIMDbFeaturesDataset(MMFDataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            "mmimdb", config, dataset_type, imdb_file_index, *args, **kwargs
        )
        assert (
            self._use_features
        ), "config's 'use_features' must be true to use feature dataset"

    def __getitem__(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        plot = sample_info["plot"]
        if isinstance(plot, list):
            plot = plot[0]
        processed_sentence = self.text_processor({"text": plot})

        current_sample.text = processed_sentence["text"]
        if "input_ids" in processed_sentence:
            current_sample.update(processed_sentence)

        if self._use_features is True:
            features = self.features_db[idx]
            if hasattr(self, "transformer_bbox_processor"):
                features["image_info_0"] = self.transformer_bbox_processor(
                    features["image_info_0"]
                )
            current_sample.update(features)

        processed = self.answer_processor({"answers": sample_info["genres"]})
        current_sample.answers = processed["answers"]
        current_sample.targets = processed["answers_scores"]

        return current_sample


class MMIMDbImageDataset(MMFDataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            "mmimdb", config, dataset_type, imdb_file_index, *args, **kwargs
        )
        assert (
            self._use_images
        ), "config's 'use_images' must be true to use image dataset"

    def init_processors(self):
        super().init_processors()
        # Assign transforms to the image_db
        self.image_db.transform = self.image_processor

    def __getitem__(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        plot = sample_info["plot"]
        if isinstance(plot, list):
            plot = plot[0]
        processed_sentence = self.text_processor({"text": plot})

        current_sample.text = processed_sentence["text"]
        if "input_ids" in processed_sentence:
            current_sample.update(processed_sentence)

        if self._use_images is True:
            current_sample.image = self.image_db[idx]["images"][0]

        processed = self.answer_processor({"answers": sample_info["genres"]})
        current_sample.answers = processed["answers"]
        current_sample.targets = processed["answers_scores"]

        return current_sample
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/mmimdb/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.datasets.builders.coco import MaskedCOCOBuilder

from .masked_dataset import MaskedSBUDataset


@registry.register_builder("masked_sbu")
class MaskedSBUBuilder(MaskedCOCOBuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_sbu"
        self.set_dataset_class(MaskedSBUDataset)

    @classmethod
    def config_path(cls):
        return "configs/datasets/sbu_captions/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/sbu_captions/masked_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.datasets.builders.coco import MaskedCOCODataset


class MaskedSBUDataset(MaskedCOCODataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "masked_sbu"
        self._two_sentence = config.get("two_sentence", True)
        self._false_caption = config.get("false_caption", True)
        self._two_sentence_probability = config.get("two_sentence_probability", 0.5)
        self._false_caption_probability = config.get("false_caption_probability", 0.5)
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/sbu_captions/masked_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = ["MaskedSBUBuilder", "MaskedSBUDataset"]

from .masked_builder import MaskedSBUBuilder
from .masked_dataset import MaskedSBUDataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/sbu_captions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/ocrvqa/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import Registry
from mmf.datasets.builders.ocrvqa.dataset import OCRVQADataset
from mmf.datasets.builders.textvqa.builder import TextVQABuilder


@Registry.register_builder("ocrvqa")
class OCRVQABuilder(TextVQABuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "ocrvqa"
        self.set_dataset_class(OCRVQADataset)

    @classmethod
    def config_path(cls):
        return "configs/datasets/ocrvqa/defaults.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/ocrvqa/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.datasets.builders.textvqa.dataset import TextVQADataset


class OCRVQADataset(TextVQADataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "ocrvqa"

    def preprocess_sample_info(self, sample_info):
        # Do nothing in this case
        return sample_info
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/ocrvqa/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = ["VinVLBuilder", "VinVLDataset"]

from .builder import VinVLBuilder
from .dataset import VinVLDataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/vinvl/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from mmf.common.registry import registry
from mmf.datasets.builders.vinvl.dataset import VinVLDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from omegaconf import open_dict


@registry.register_builder("vinvl")
class VinVLBuilder(MMFDatasetBuilder):
    def __init__(
        self, dataset_name="vinvl", dataset_class=VinVLDataset, *args, **kwargs
    ):
        super().__init__(dataset_name, dataset_class, dataset_type="train_val")
        self.dataset_class = VinVLDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/vinvl/defaults.yaml"

    def load(self, config, dataset_type, *args, **kwargs):
        """The VinVL dataset is a dataset that augments an existing
        dataset within MMF. VinVL requires unique inputs for
        finetuning and pretraining that are unsupported by general datasets.
        To enable this functionality on arbitrary datasets,
        the VinVL dataset contains a base dataset,
        and returns an augmented version of samples from the
        base dataset. For more details, read the VinVL dataset docstring.

        The Builder:
        This class is a builder for the VinVL dataset.
        As the VinVL dataset must be constructed with an instance of a
        base dataset, configured by the client in the VinVL configs yaml,
        this builder class instantiates 2 datasets, then passes the
        base dataset to the VinVL dataset instance.

        The VinVL config is expected to have the following structure,
        ```yaml
        dataset_config:
            vinvl:
                base_dataset_name: vqa2
                label_map: <path to label map>
                base_dataset: ${dataset_config.vqa2}
                processors:
                    text_processor:
                        type: vinvl_text_tokenizer
                        params:
                            ...
        ```
        Where base_dataset is the yaml config for the base dataset,
        in this example vqa2, and base_dataset_name is vqa2.

        Returns:
            VinVLDataset: Instance of the VinVLDataset class which contains
            a base dataset instance.
        """
        base_dataset_name = config.get("base_dataset_name", "vqa2")
        base_dataset_config = config.get("base_dataset", config)

        # instantiate base dataset builder
        base_dataset_builder_class = registry.get_builder_class(base_dataset_name)
        base_dataset_builder_instance = base_dataset_builder_class()

        # build base dataset instance
        base_dataset_builder_instance.build_dataset(base_dataset_config)
        base_dataset = base_dataset_builder_instance.load_dataset(
            base_dataset_config, dataset_type
        )
        if hasattr(base_dataset_builder_instance, "update_registry_for_model"):
            base_dataset_builder_instance.update_registry_for_model(
                base_dataset_config
            )

        # instantiate vinvl dataset
        vinvl_text_processor = config["processors"]["text_processor"]
        with open_dict(base_dataset_config):
            base_dataset_config["processors"]["text_processor"] = vinvl_text_processor
            base_dataset_config["label_map"] = config["label_map"]

        vinvl_dataset = super().load(base_dataset_config, dataset_type, *args, **kwargs)
        vinvl_dataset.set_base_dataset(base_dataset)
        return vinvl_dataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/vinvl/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

import json
import logging
import random

from mmf.datasets.mmf_dataset import MMFDataset

logger = logging.getLogger(__name__)


class VinVLDataset(MMFDataset):
    """The VinVL dataset is a dataset that augments an existing
    dataset within MMF. VinVL requires unique inputs for
    finetuning and pretraining that are unsupported by general datasets.
    To enable this functionality on arbitrary datasets,
    the VinVL dataset contains a base dataset,
    and returns an augmented version of samples from the
    base dataset.

    For example, the VQA2 dataset may return a sample {image, text}.
    When asked for a sample, the VinVL dataset will return
    {image, text', rand_caption, rand_label}
        text' = text + labels
        rand_caption = text from a random example
        rand_label = object detection label text for a random example

    Why does this exist?
    VinVL samples contain rand_caption and rand_label, which require a
    random choice from the annotations db and features_db.
    Currently, general text_processors do not have access to these
    databases; instead, randomness like mismatched_captions in
    masked coco is implemented on the dataset level.
    To support VinVL finetuning and pretraining on general datasets
    without a major refactor, the VinVL builder and dataset introduce
    a new design pattern to enable processor access to databases.

    Interface and Assumptions:
    The VinVL dataset assumes:
    The sample returned by the base dataset contains a key "text"
    with string text.
    There exists a label_map json file path in the dataset config
    for a json obj containing idx_to_attribute and idx_to_label maps.
    VinVL OD uses VG labels, and this map can be downloaded from
    https://penzhanwu2.blob.core.windows.net/sgg/
    sgg_benchmark/vinvl_model_zoo/VG-SGG-dicts-vgoi6-clipped.json
    The features_db points to features generated from the VinVL feature
    extraction script; consult the VinVL feature extraction tutorial
    for more details.
    """

    def __init__(self, config, dataset_type, *args, **kwargs):
        if "name" in kwargs:
            name = kwargs["name"]
        elif "dataset_name" in kwargs:
            name = kwargs["dataset_name"]
        else:
            name = "vinvl"
        super().__init__(name, config, dataset_type, *args, **kwargs)
        self.add_tags = self._dataset_type != "test"
        self.label_map = self.load_label_map(config.get("label_map"))

    def set_base_dataset(self, base_dataset):
        self.base_dataset = base_dataset

    def init_processors(self):
        super().init_processors()

    def __len__(self):
        return len(self.annotation_db)

    def __getitem__(self, idx):
        return self.load_item(idx)

    def load_item(self, idx):
        base_sample = self.base_dataset.load_item(idx)
        # assumes sample contains key "text" that is the string text;
        # when using on vqa2, which returns tokens under key "text",
        # change the vqa2 dataset class to return "text"
        text_processor_argument = {"text": base_sample["text"]}
        if self.add_tags:
            text_processor_argument["text_b"] = self.get_label_str(base_sample)
            random_caption_idx = random.randint(0, len(self.annotation_db) - 1)
            random_caption_sample = self.base_dataset.load_item(random_caption_idx)
            random_caption = random_caption_sample["text"]
            text_processor_argument["random_captions"] = [random_caption]

            random_labels_idx = random.randint(0, len(self.annotation_db) - 1)
            random_labels_sample = self.base_dataset.load_item(random_labels_idx)
            random_image_tags_str = self.get_label_str(random_labels_sample)
            text_processor_argument["random_labels"] = [random_image_tags_str]

        processed_caption = self.text_processor(text_processor_argument)
        base_sample.update(processed_caption)
        return base_sample

    def load_label_map(self, map_path):
        with open(map_path) as f:
            return json.loads(f.read())

    def get_label_str(self, sample):
        image_labels = sample["image_info_0"].get("labels", [])
        label_map = self.label_map.get("idx_to_label", {})
        label_str = " ".join([label_map.get(str(id), "") for id in image_labels])
        image_attr_labels = sample["image_info_0"].get("attr_labels", [])
        attr_map = self.label_map.get("idx_to_attribute", {})
        attr_str = " ".join([attr_map.get(str(id), "") for id in image_attr_labels])
        accum_str = label_str + " " + attr_str
        return accum_str
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/vinvl/dataset.py
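The tag-string construction in get_label_str above is easy to check in isolation. A minimal sketch, assuming a toy label_map shaped like the VG-SGG dict the docstring references; the ids and the sample contents below are made up for illustration:

# Self-contained check of the VinVL tag-string logic from get_label_str.
# label_map mirrors the idx_to_label / idx_to_attribute structure with
# string keys; all ids and values here are hypothetical.
label_map = {
    "idx_to_label": {"1": "dog", "2": "ball"},
    "idx_to_attribute": {"1": "brown", "2": "round"},
}
sample = {"image_info_0": {"labels": [1, 2], "attr_labels": [1, 2]}}

image_labels = sample["image_info_0"].get("labels", [])
label_str = " ".join(label_map["idx_to_label"].get(str(i), "") for i in image_labels)
attr_labels = sample["image_info_0"].get("attr_labels", [])
attr_str = " ".join(label_map["idx_to_attribute"].get(str(i), "") for i in attr_labels)
print(label_str + " " + attr_str)  # -> "dog ball brown round"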
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/textvqa/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.datasets.builders.textvqa.dataset import TextVQADataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("textvqa")
class TextVQABuilder(MMFDatasetBuilder):
    def __init__(
        self, dataset_name="textvqa", dataset_class=TextVQADataset, *args, **kwargs
    ):
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        return "configs/datasets/textvqa/defaults.yaml"

    # TODO: Deprecate this method and move configuration updates directly
    # to processors
    def update_registry_for_model(self, config):
        if hasattr(self.dataset, "text_processor"):
            registry.register(
                self.dataset_name + "_text_vocab_size",
                self.dataset.text_processor.get_vocab_size(),
            )
            registry.register(
                f"{self.dataset_name}_text_processor", self.dataset.text_processor
            )
        if hasattr(self.dataset, "answer_processor"):
            registry.register(
                self.dataset_name + "_num_final_outputs",
                self.dataset.answer_processor.get_vocab_size(),
            )
            registry.register(
                f"{self.dataset_name}_answer_processor", self.dataset.answer_processor
            )
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/textvqa/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

import numpy as np
import torch
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset
from mmf.utils.distributed import byte_tensor_to_object, object_to_byte_tensor
from mmf.utils.text import word_tokenize


class TextVQADataset(MMFDataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__("textvqa", config, dataset_type, index=imdb_file_index)
        self.use_ocr = self.config.use_ocr
        self.use_ocr_info = self.config.use_ocr_info

    def preprocess_sample_info(self, sample_info):
        path = self._get_path_based_on_index(self.config, "annotations", self._index)
        # NOTE, TODO: Code duplication w.r.t. STVQA, revisit
        # during dataset refactor to support variable dataset classes
        if "stvqa" in path:
            feature_path = sample_info["feature_path"]
            append = "train"

            if self.dataset_type == "test":
                append = "test_task3"

            if not feature_path.startswith(append):
                feature_path = append + "/" + feature_path

            sample_info["feature_path"] = feature_path
            return sample_info
        # COCO Annotation DBs have correct feature_path
        elif "COCO" not in sample_info["feature_path"]:
            sample_info["feature_path"] = sample_info["image_path"].replace(
                ".jpg", ".npy"
            )
        return sample_info

    def postprocess_evalai_entry(self, entry):
        return entry  # Do nothing

    def format_for_prediction(self, report):
        answer_processor = self.answer_processor

        batch_size = len(report.question_id)
        pred_answers = report.scores.argmax(dim=-1).view(batch_size, -1)
        answer_space_size = answer_processor.get_true_vocab_size()

        image_ids = report.image_id.cpu().numpy()
        context_tokens = report.context_tokens.cpu().numpy()
        predictions = []
        for idx, question_id in enumerate(report.question_id):
            # collect VQA answers
            image_id = byte_tensor_to_object(image_ids[idx])
            tokens = byte_tensor_to_object(context_tokens[idx])
            answer_words = []
            pred_source = []
            for answer_id in pred_answers[idx].tolist():
                if answer_id >= answer_space_size:
                    answer_id -= answer_space_size
                    answer_words.append(word_tokenize(tokens[answer_id]))
                    pred_source.append("OCR")
                else:
                    if answer_id == answer_processor.EOS_IDX:
                        break
                    answer_words.append(
                        answer_processor.answer_vocab.idx2word(answer_id)
                    )
                    pred_source.append("VOCAB")

            # join all the answer tokens with space
            # (this should be correct for almost all cases)
            pred_answer = " ".join(answer_words).replace(" 's", "'s")
            entry = {
                "question_id": question_id.item(),
                "image_id": image_id,
                "answer": pred_answer,
                "pred_source": pred_source,
            }
            entry = self.postprocess_evalai_entry(entry)

            predictions.append(entry)

        return predictions

    def __getitem__(self, idx):
        sample_info = self.annotation_db[idx]
        sample_info = self.preprocess_sample_info(sample_info)
        current_sample = Sample()

        # breaking change from VQA2Dataset: load question_id
        current_sample.question_id = torch.tensor(
            sample_info["question_id"], dtype=torch.int
        )

        if isinstance(sample_info["image_id"], int):
            current_sample.image_id = str(sample_info["image_id"])
        else:
            current_sample.image_id = sample_info["image_id"]

        if self._use_features is True:
            features = self.features_db[idx]
            current_sample.update(features)

        current_sample = self.add_sample_details(sample_info, current_sample)
        current_sample = self.add_answer_info(sample_info, current_sample)

        # only the 'max_features' key is needed
        # pop other keys to minimize data loading overhead
        if hasattr(current_sample, "image_info_0"):
            for k in list(current_sample.image_info_0):
                if k != "max_features":
                    current_sample.image_info_0.pop(k)
        if hasattr(current_sample, "image_info_1"):
            for k in list(current_sample.image_info_1):
                if k != "max_features":
                    current_sample.image_info_1.pop(k)

        return current_sample

    def add_sample_details(self, sample_info, sample):
        sample.image_id = object_to_byte_tensor(sample.image_id)

        # 1. Load text (question words)
        question_str = (
            sample_info["question"]
            if "question" in sample_info
            else sample_info["question_str"]
        )
        text_processor_args = {"text": question_str}

        if "question_tokens" in sample_info:
            text_processor_args["tokens"] = sample_info["question_tokens"]

        processed_question = self.text_processor(text_processor_args)

        if "input_ids" in processed_question:
            sample.text = processed_question["input_ids"]
            sample.text_len = torch.tensor(
                len(processed_question["tokens"]), dtype=torch.long
            )
        else:
            # For GLoVe based processors
            sample.text = processed_question["text"]
            sample.text_len = processed_question["length"]

        # 2. Load object
        # object bounding box information
        if "obj_normalized_boxes" in sample_info and hasattr(self, "copy_processor"):
            sample.obj_bbox_coordinates = self.copy_processor(
                {"blob": sample_info["obj_normalized_boxes"]}
            )["blob"]

        # 3. Load OCR
        if not self.use_ocr:
            # remove all OCRs from the sample
            # (i.e. make an empty OCR list)
            sample_info["ocr_tokens"] = []
            sample_info["ocr_info"] = []
            if "ocr_normalized_boxes" in sample_info:
                sample_info["ocr_normalized_boxes"] = np.zeros((0, 4), np.float32)
            # clear OCR visual features
            if "image_feature_1" in sample:
                sample.image_feature_1 = torch.zeros_like(sample.image_feature_1)
            return sample

        # Preprocess OCR tokens
        if hasattr(self, "ocr_token_processor"):
            ocr_tokens = [
                self.ocr_token_processor({"text": token})["text"]
                for token in sample_info["ocr_tokens"]
            ]
        else:
            ocr_tokens = sample_info["ocr_tokens"]
        # Get FastText embeddings for OCR tokens
        context = self.context_processor({"tokens": ocr_tokens})
        sample.context = context["text"]
        sample.ocr_tokens = context["tokens"]
        sample.context_tokens = object_to_byte_tensor(context["tokens"])
        sample.context_feature_0 = context["text"]
        sample.context_info_0 = Sample()
        sample.context_info_0.max_features = context["length"]
        # Get PHOC embeddings for OCR tokens
        if hasattr(self, "phoc_processor"):
            context_phoc = self.phoc_processor({"tokens": ocr_tokens})
            sample.context_feature_1 = context_phoc["text"]
            sample.context_info_1 = Sample()
            sample.context_info_1.max_features = context_phoc["length"]

        # OCR order vectors
        if self.config.get("use_order_vectors", False):
            order_vectors = np.eye(len(sample.ocr_tokens), dtype=np.float32)
            order_vectors = torch.from_numpy(order_vectors)
            order_vectors[context["length"] :] = 0
            sample.order_vectors = order_vectors

        # OCR bounding box information
        if "ocr_normalized_boxes" in sample_info and hasattr(self, "copy_processor"):
            # New imdb format: OCR bounding boxes are already pre-computed
            max_len = self.config.processors.answer_processor.params.max_length
            sample.ocr_bbox_coordinates = self.copy_processor(
                {"blob": sample_info["ocr_normalized_boxes"]}
            )["blob"][:max_len]
        elif self.use_ocr_info and "ocr_info" in sample_info:
            # Old imdb format: OCR bounding boxes are computed on-the-fly
            # from ocr_info
            sample.ocr_bbox_coordinates = self.bbox_processor(
                {"info": sample_info["ocr_info"]}
            )["bbox"].coordinates

        return sample

    def add_answer_info(self, sample_info, sample):
        # Load real answers from sample_info
        answers = sample_info.get("answers", [])
        answer_processor_arg = {"answers": answers}

        answer_processor_arg["tokens"] = sample.pop("ocr_tokens", [])

        processed_answers = self.answer_processor(answer_processor_arg)

        assert not self.config.fast_read, (
            "In TextVQADataset, online OCR sampling is incompatible "
            "with fast_read, so fast_read is currently not supported."
        )

        sample.update(processed_answers)
        sample.answers = object_to_byte_tensor(answers)

        if "answers_scores" in sample:
            sample.targets = sample.pop("answers_scores")

        return sample
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/textvqa/dataset.py
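The decoding scheme in format_for_prediction above treats the score vector as a fixed answer vocabulary followed by dynamic OCR copy slots: an argmax index below the true vocab size selects a vocab word, and anything at or above it selects the OCR token at index minus vocab size. A minimal sketch of that split; the vocab, OCR tokens, and predicted ids below are made up:

# Toy illustration of the vocab-vs-OCR answer split; EOS is assumed to
# sit at index 0, standing in for answer_processor.EOS_IDX.
vocab = ["<eos>", "yes", "no", "stop"]
ocr_tokens = ["exit", "35"]
answer_space_size = len(vocab)

pred_ids = [3, 4, 5, 0]  # ids >= answer_space_size point into ocr_tokens
answer_words, pred_source = [], []
for answer_id in pred_ids:
    if answer_id >= answer_space_size:
        answer_words.append(ocr_tokens[answer_id - answer_space_size])
        pred_source.append("OCR")
    else:
        if answer_id == 0:  # EOS index
            break
        answer_words.append(vocab[answer_id])
        pred_source.append("VOCAB")

print(" ".join(answer_words), pred_source)
# -> "stop exit 35" ['VOCAB', 'OCR', 'OCR']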
import json

import torch


class VisualDialogDatabase(torch.utils.data.Dataset):
    def __init__(self, imdb_path):
        super().__init__()
        self._load_json(imdb_path)
        self._metadata = {}

    @property
    def metadata(self):
        return self._metadata

    @metadata.setter
    def metadata(self, x):
        self._metadata = x

    def _load_json(self, imdb_path):
        with open(imdb_path, "r") as f:
            data = json.load(f)

        self._is_test = data["split"] == "test"
        self._questions = data["questions"]
        self._answers = data["answers"]
        self._dialogs = data["dialogs"]

        # Test has only one round per dialog
        self._multiplier = 1 if self._is_test else 10
        self._qa_length = len(self._dialogs) * self._multiplier

    def __len__(self):
        return self._qa_length

    def __getitem__(self, idx):
        data = {}
        dialog_id = idx // self._multiplier
        round_id = idx % self._multiplier
        dialog = self._dialogs[dialog_id]

        data["id"] = idx
        data["dialog_id"] = dialog_id
        data["round_id"] = round_id

        round = dialog["dialog"][round_id]
        data["question"] = self._questions[round["question"]]
        # data["answers"] = [self.]

        return data
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_dialog/database.py
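The database above flattens 10 dialog rounds per example into a single index space; mapping an index back to a dialog and round is just integer division and modulo. A quick check, using multiplier 10 as in the non-test splits:

# divmod(idx, multiplier) recovers (dialog_id, round_id) from a flat index.
multiplier = 10  # rounds per dialog outside the test split
for idx in (0, 9, 23):
    dialog_id, round_id = divmod(idx, multiplier)
    print(idx, "->", (dialog_id, round_id))
# 0 -> (0, 0), 9 -> (0, 9), 23 -> (2, 3)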
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_dialog/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

import logging
import os

from mmf.common.constants import VISUAL_DIALOG_CONSTS
from mmf.common.registry import registry
from mmf.datasets.builders.visual_dialog.dataset import VisualDialogDataset
from mmf.datasets.builders.visual_genome.builder import VisualGenomeBuilder
from mmf.utils.general import get_mmf_root

logger = logging.getLogger(__name__)


@registry.register_builder("visual_dialog")
class VisualDialogBuilder(VisualGenomeBuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "visual_dialog"
        self.dataset_class = VisualDialogDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/visual_dialog/defaults.yaml"

    def build(self, config, dataset_type):
        self._dataset_type = dataset_type
        self._config = config
        data_folder = os.path.join(get_mmf_root(), self._config.data_dir)

        self._download_and_extract_imdb(data_folder)

        if self._dataset_type != "train":
            return

        self._download_and_extract(
            "vocabs", VISUAL_DIALOG_CONSTS["vocabs"], data_folder
        )
        self._download_and_extract_features(data_folder)

    def _download_and_extract_imdb(self, data_folder):
        download_folder = os.path.join(data_folder, "imdb")
        self._download_and_extract(
            "imdb_url",
            VISUAL_DIALOG_CONSTS["imdb_url"][self._dataset_type],
            download_folder,
        )

    def _download_and_extract_features(self, data_folder):
        # Visual Dialog features will contain val and test
        self._download_and_extract(
            "features_url",
            VISUAL_DIALOG_CONSTS["features_url"]["visual_dialog"],
            data_folder,
        )
        # But since train is same as COCO, we reuse those features
        # if already downloaded
        self._download_and_extract(
            "features_url", VISUAL_DIALOG_CONSTS["features_url"]["coco"], data_folder
        )
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_dialog/builder.py
from mmf.datasets.builders.vqa2 import VQA2Dataset


class VisualDialogDataset(VQA2Dataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            config,
            dataset_type,
            imdb_file_index,
            dataset_name="visual_dialog",
            *args,
            **kwargs,
        )

        discriminative = config.discriminative
        self._discriminative = discriminative.enabled
        self._return_indices = discriminative.return_indices
        self._no_unk = config.no_unk
        self._return_history = config.return_history
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/visual_dialog/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.datasets.builders.coco2017.masked_dataset import MaskedCoco2017Dataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("masked_coco2017")
class MaskedCoco2017Builder(MMFDatasetBuilder):
    def __init__(
        self,
        dataset_name="masked_coco2017",
        dataset_class=MaskedCoco2017Dataset,
        *args,
        **kwargs,
    ):
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        return "configs/datasets/coco2017/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco2017/masked_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.typings import MMFDatasetConfigType
from mmf.datasets.builders.localized_narratives.masked_dataset import (
    MaskedLocalizedNarrativesDatasetMixin,
)
from mmf.datasets.mmf_dataset import MMFDataset


class MaskedCoco2017Dataset(MaskedLocalizedNarrativesDatasetMixin, MMFDataset):
    def __init__(
        self,
        config: MMFDatasetConfigType,
        dataset_type: str,
        index: int,
        *args,
        **kwargs,
    ):
        super().__init__(
            "masked_coco2017", config, dataset_type, index, *args, **kwargs
        )
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco2017/masked_dataset.py
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco2017/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.datasets.builders.localized_narratives.masked_dataset import (
    MaskedLocalizedNarrativesDataset,
)
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("masked_localized_narratives")
class MaskedLocalizedNarrativesBuilder(MMFDatasetBuilder):
    def __init__(
        self,
        dataset_name="masked_localized_narratives",
        dataset_class=MaskedLocalizedNarrativesDataset,
        *args,
        **kwargs,
    ):
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        return "configs/datasets/localized_narratives/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/localized_narratives/masked_builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

from abc import ABC

from mmf.common.sample import Sample
from mmf.common.typings import MMFDatasetConfigType
from mmf.datasets.builders.localized_narratives.database import (
    LocalizedNarrativesAnnotationDatabase,
)
from mmf.datasets.mmf_dataset import MMFDataset


class MaskedLocalizedNarrativesDatasetMixin(ABC):
    def build_annotation_db(self) -> LocalizedNarrativesAnnotationDatabase:
        annotation_path = self._get_path_based_on_index(
            self.config, "annotations", self._index
        )
        return LocalizedNarrativesAnnotationDatabase(self.config, annotation_path)

    def __getitem__(self, idx: int) -> Sample:
        sample_info = self.annotation_db[idx]
        current_sample = Sample()
        processed_caption = self.masked_token_processor(
            {"text_a": sample_info["caption"], "text_b": "", "is_correct": True}
        )
        current_sample.update(processed_caption)
        current_sample.image_id = sample_info["image_id"]
        current_sample.feature_path = sample_info["feature_path"]

        # Get the image features
        if self._use_features:
            features = self.features_db[idx]
            image_info_0 = features["image_info_0"]
            if image_info_0 and "image_id" in image_info_0.keys():
                image_info_0["feature_path"] = image_info_0["image_id"]
                image_info_0.pop("image_id")
            current_sample.update(features)
        elif self._use_images:
            image_id = sample_info["image_id"]
            dataset = sample_info["dataset_id"]
            if "mscoco" in dataset:
                image_id = image_id.rjust(12, "0")
            assert (
                len(self.image_db.from_path(image_id)["images"]) != 0
            ), f"image id: {image_id} not found"
            current_sample.image = self.image_db.from_path(image_id)["images"][0]

        return current_sample


class MaskedLocalizedNarrativesDataset(
    MaskedLocalizedNarrativesDatasetMixin, MMFDataset
):
    def __init__(
        self,
        config: MMFDatasetConfigType,
        dataset_type: str,
        index: int,
        *args,
        **kwargs,
    ):
        super().__init__(
            "masked_localized_narratives", config, dataset_type, index, *args, **kwargs
        )

    def init_processors(self):
        super().init_processors()
        if self._use_images:
            self.image_db.transform = self.image_processor
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/localized_narratives/masked_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

import json
from typing import List, NamedTuple, Optional

from mmf.datasets.databases.annotation_database import AnnotationDatabase


class TimedPoint(NamedTuple):
    x: float
    y: float
    t: float


class TimedUtterance(NamedTuple):
    utterance: str
    start_time: float
    end_time: float


class LocalizedNarrative(NamedTuple):
    dataset_id: str
    image_id: str
    annotator_id: int
    caption: str
    timed_caption: Optional[List[TimedUtterance]] = None
    traces: Optional[List[List[TimedPoint]]] = None
    voice_recording: Optional[str] = None

    def __repr__(self):
        truncated_caption = (
            self.caption[:60] + "..." if len(self.caption) > 63 else self.caption
        )
        truncated_timed_caption = self.timed_caption[0].__str__()
        truncated_traces = self.traces[0][0].__str__()
        return (
            f"{{\n"
            f" dataset_id: {self.dataset_id},\n"
            f" image_id: {self.image_id},\n"
            f" annotator_id: {self.annotator_id},\n"
            f" caption: {truncated_caption},\n"
            f" timed_caption: [{truncated_timed_caption}, ...],\n"
            f" traces: [[{truncated_traces}, ...], ...],\n"
            f" voice_recording: {self.voice_recording}\n"
            f"}}"
        )


class LocalizedNarrativesAnnotationDatabase(AnnotationDatabase):
    def __init__(self, config, path, *args, **kwargs):
        super().__init__(config, path, *args, **kwargs)

    def load_annotation_db(self, path):
        data = []
        with open(path) as f:
            for line in f:
                annotation = json.loads(line)
                loc_narr = LocalizedNarrative(**annotation)
                data.append(
                    {
                        "dataset_id": loc_narr.dataset_id,
                        "image_id": loc_narr.image_id,
                        "caption": loc_narr.caption,
                        "feature_path": self._feature_path(
                            loc_narr.dataset_id, loc_narr.image_id
                        ),
                    }
                )
        self.data = data

    def _feature_path(self, dataset_id, image_id):
        if "mscoco" in dataset_id.lower():
            return image_id.rjust(12, "0") + ".npy"
        return image_id + ".npy"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/localized_narratives/database.py
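Each line of a Localized Narratives annotation file is one JSON object that maps directly onto the LocalizedNarrative NamedTuple above. A minimal sketch with a made-up jsonl line, also showing the mscoco zero-padding used by _feature_path:

import json
from typing import NamedTuple, Optional

# Trimmed-down stand-in for the NamedTuple above; the example line is
# fabricated for illustration.
class LocalizedNarrative(NamedTuple):
    dataset_id: str
    image_id: str
    annotator_id: int
    caption: str
    timed_caption: Optional[list] = None
    traces: Optional[list] = None
    voice_recording: Optional[str] = None

line = (
    '{"dataset_id": "mscoco_val2017", "image_id": "137576", '
    '"annotator_id": 93, "caption": "In this image there is a dog."}'
)
loc_narr = LocalizedNarrative(**json.loads(line))
# mscoco ids are zero-padded to 12 chars for feature lookup
print(loc_narr.image_id.rjust(12, "0") + ".npy")  # -> 000000137576.npy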
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/localized_narratives/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/hateful_memes/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

import os

from mmf.common.registry import registry
from mmf.datasets.builders.hateful_memes.dataset import (
    HatefulMemesFeaturesDataset,
    HatefulMemesImageDataset,
)
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from mmf.utils.configuration import get_mmf_env
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path


@registry.register_builder("hateful_memes")
class HatefulMemesBuilder(MMFDatasetBuilder):
    def __init__(
        self,
        dataset_name="hateful_memes",
        dataset_class=HatefulMemesImageDataset,
        *args,
        **kwargs,
    ):
        super().__init__(dataset_name, dataset_class, *args, **kwargs)
        self.dataset_class = HatefulMemesImageDataset

    @classmethod
    def config_path(cls):
        return "configs/datasets/hateful_memes/defaults.yaml"

    def load(self, config, dataset_type, *args, **kwargs):
        if config.use_features:
            self.dataset_class = HatefulMemesFeaturesDataset

        self.dataset = super().load(config, dataset_type, *args, **kwargs)

        return self.dataset

    def build(self, config, *args, **kwargs):
        # First, check whether manual downloads have been performed
        data_dir = get_mmf_env(key="data_dir")
        test_path = get_absolute_path(
            os.path.join(
                data_dir,
                "datasets",
                self.dataset_name,
                "defaults",
                "annotations",
                "train.jsonl",
            )
        )
        # NOTE: This doesn't check for files, but that is a fine assumption
        # for now
        assert PathManager.exists(test_path), (
            "Hateful Memes Dataset doesn't do automatic downloads; please "
            + "follow instructions at https://fb.me/hm_prerequisites"
        )
        super().build(config, *args, **kwargs)

    def update_registry_for_model(self, config):
        if hasattr(self.dataset, "text_processor") and hasattr(
            self.dataset.text_processor, "get_vocab_size"
        ):
            registry.register(
                self.dataset_name + "_text_vocab_size",
                self.dataset.text_processor.get_vocab_size(),
            )
        registry.register(self.dataset_name + "_num_final_outputs", 2)
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/hateful_memes/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

import numpy as np
import torch
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset
from mmf.utils.visualize import visualize_images


class HatefulMemesFeaturesDataset(MMFDataset):
    def __init__(self, config, *args, dataset_name="hateful_memes", **kwargs):
        super().__init__(dataset_name, config, *args, **kwargs)
        assert (
            self._use_features
        ), "config's 'use_features' must be true to use feature dataset"
        self.is_multilabel = self.config.get("is_multilabel", False)

    def preprocess_sample_info(self, sample_info):
        image_path = sample_info["img"]
        # img/02345.png -> 02345
        feature_path = image_path.split("/")[-1].split(".")[0]
        # Add feature_path key for feature_database access
        sample_info["feature_path"] = f"{feature_path}.npy"
        return sample_info

    def __getitem__(self, idx):
        sample_info = self.annotation_db[idx]
        sample_info = self.preprocess_sample_info(sample_info)
        current_sample = Sample()

        processed_text = self.text_processor({"text": sample_info["text"]})
        current_sample.text = processed_text["text"]
        if "input_ids" in processed_text:
            current_sample.update(processed_text)

        current_sample.id = torch.tensor(int(sample_info["id"]), dtype=torch.int)

        # Instead of using idx directly here, use sample_info to fetch
        # the features as feature_path has been dynamically added
        features = self.features_db.get(sample_info)
        if hasattr(self, "transformer_bbox_processor"):
            features["image_info_0"] = self.transformer_bbox_processor(
                features["image_info_0"]
            )
        current_sample.update(features)

        fg_dataset_type = self.config.get("fg_dataset_type", None)
        if fg_dataset_type:
            current_sample = self.process_fg_labels(
                fg_dataset_type=fg_dataset_type,
                sample_info=sample_info,
                current_sample=current_sample,
            )
        else:
            if "label" in sample_info:
                current_sample.targets = torch.tensor(
                    sample_info["label"], dtype=torch.long
                )

        return current_sample

    def process_fg_labels(self, fg_dataset_type, sample_info, current_sample):
        """
        If fg_dataset_type is present, it means we are using the Hateful
        Memes Fine Grained datasets. It is the same hateful memes dataset,
        but with additional labels for protected groups and attack vectors.

        For more details, see:
        https://github.com/facebookresearch/fine_grained_hateful_memes
        """
        ds_type_to_label = {
            "attack": sample_info["top_attacks"],
            "pc": sample_info["top_protectedcats"],
            "pc_attack": sample_info["top_protectedcats"]
            + sample_info["top_attacks"],
            "hateful_pc_attack": sample_info["top_protectedcats"]
            + sample_info["top_attacks"]
            + ["hateful" if int(sample_info["label"]) == 1 else "not_hateful"],
        }
        processed = self.answer_processor(
            {"answers": ds_type_to_label[fg_dataset_type]}
        )
        current_sample.answers = processed["answers"]
        current_sample.targets = processed["answers_scores"]
        return current_sample

    def format_for_prediction(self, report):
        if self.is_multilabel:
            return generate_multilabel_prediction(report)
        else:
            return generate_binary_prediction(report)


class HatefulMemesImageDataset(MMFDataset):
    def __init__(self, config, *args, dataset_name="hateful_memes", **kwargs):
        super().__init__(dataset_name, config, *args, **kwargs)
        assert (
            self._use_images
        ), "config's 'use_images' must be true to use image dataset"
        self.is_multilabel = self.config.get("is_multilabel", False)

    def init_processors(self):
        super().init_processors()
        # Assign transforms to the image_db
        self.image_db.transform = self.image_processor

    def __getitem__(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        processed_text = self.text_processor({"text": sample_info["text"]})
        current_sample.text = processed_text["text"]
        if "input_ids" in processed_text:
            current_sample.update(processed_text)

        current_sample.id = torch.tensor(int(sample_info["id"]), dtype=torch.int)

        # Get the first image from the set of images returned from the image_db
        current_sample.image = self.image_db[idx]["images"][0]

        if "label" in sample_info:
            current_sample.targets = torch.tensor(
                sample_info["label"], dtype=torch.long
            )

        return current_sample

    def format_for_prediction(self, report):
        if self.is_multilabel:
            return generate_multilabel_prediction(report)
        else:
            return generate_binary_prediction(report)

    def visualize(self, num_samples=1, use_transforms=False, *args, **kwargs):
        image_paths = []
        random_samples = np.random.randint(0, len(self), size=num_samples)

        for idx in random_samples:
            image_paths.append(self.annotation_db[idx]["img"])

        images = self.image_db.from_path(image_paths, use_transforms=use_transforms)
        visualize_images(images["images"], *args, **kwargs)


def generate_binary_prediction(report):
    scores = torch.nn.functional.softmax(report.scores, dim=1)
    _, labels = torch.max(scores, 1)
    # Probability that the meme is hateful (class 1)
    probabilities = scores[:, 1]

    predictions = []
    for idx, image_id in enumerate(report.id):
        proba = probabilities[idx].item()
        label = labels[idx].item()
        predictions.append({"id": image_id.item(), "proba": proba, "label": label})

    return predictions


def generate_multilabel_prediction(report):
    scores = torch.sigmoid(report.scores)
    return [
        {"id": image_id.item(), "scores": scores[idx].tolist()}
        for idx, image_id in enumerate(report.id)
    ]
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/hateful_memes/dataset.py
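generate_binary_prediction above reads class 1 of the softmaxed scores as the probability that a meme is hateful. A minimal sketch with a fabricated two-row report object (the ids and logits are made up; SimpleNamespace stands in for the real report):

from types import SimpleNamespace

import torch

# Two memes, raw 2-class logits; all values hypothetical.
report = SimpleNamespace(
    id=torch.tensor([10423, 87231]),
    scores=torch.tensor([[2.0, -1.0], [0.5, 1.5]]),
)

scores = torch.nn.functional.softmax(report.scores, dim=1)
_, labels = torch.max(scores, 1)
probabilities = scores[:, 1]  # P(hateful), class index 1

for idx, image_id in enumerate(report.id):
    print(
        {
            "id": image_id.item(),
            "proba": probabilities[idx].item(),
            "label": labels[idx].item(),
        }
    )
# first row -> label 0 with low proba, second row -> label 1 with high proba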
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/stvqa/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import Registry
from mmf.datasets.builders.stvqa.dataset import STVQADataset
from mmf.datasets.builders.textvqa.builder import TextVQABuilder


@Registry.register_builder("stvqa")
class STVQABuilder(TextVQABuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "stvqa"
        self.set_dataset_class(STVQADataset)

    @classmethod
    def config_path(cls):
        return "configs/datasets/stvqa/defaults.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/stvqa/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.datasets.builders.textvqa.dataset import TextVQADataset


class STVQADataset(TextVQADataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "stvqa"

    def preprocess_sample_info(self, sample_info):
        feature_path = sample_info["feature_path"]
        append = "train"

        if self.dataset_type == "test":
            append = "test_task3"

        if not feature_path.startswith(append):
            feature_path = append + "/" + feature_path

        sample_info["feature_path"] = feature_path
        return sample_info
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/stvqa/dataset.py
from mmf.common.registry import registry
from mmf.datasets.builders.coco.builder import COCOBuilder

from .masked_dataset import MaskedCOCODataset


@registry.register_builder("masked_coco")
class MaskedCOCOBuilder(COCOBuilder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "masked_coco"
        self.set_dataset_class(MaskedCOCODataset)

    def update_registry_for_model(self, config):
        registry.register(
            self.dataset_name + "_text_vocab_size",
            self.dataset.masked_token_processor.get_vocab_size(),
        )

    @classmethod
    def config_path(cls):
        return "configs/datasets/coco/masked.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/masked_builder.py
import random

from mmf.common.sample import Sample
from mmf.datasets.builders.coco import COCODataset


class MaskedCOCODataset(COCODataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
        self.dataset_name = "masked_coco"
        self._two_sentence = config.get("two_sentence", True)
        self._false_caption = config.get("false_caption", True)
        self._two_sentence_probability = config.get("two_sentence_probability", 0.5)
        self._false_caption_probability = config.get("false_caption_probability", 0.5)

    def load_item(self, idx):
        sample_info = self.annotation_db[idx]
        current_sample = Sample()

        if self._use_features:
            features = self.features_db[idx]
            if hasattr(self, "transformer_bbox_processor"):
                features["image_info_0"] = self.transformer_bbox_processor(
                    features["image_info_0"]
                )

            if self.config.get("use_image_feature_masks", False):
                current_sample.update(
                    {
                        "image_labels": self.masked_region_processor(
                            features["image_feature_0"]
                        )
                    }
                )

            current_sample.update(features)
        else:
            image_path = str(sample_info["image_name"]) + ".jpg"
            current_sample.image = self.image_db.from_path(image_path)["images"][0]

        current_sample = self._add_masked_caption(sample_info, current_sample)
        return current_sample

    def _add_masked_caption(self, sample_info, current_sample):
        captions = sample_info["captions"]
        image_id = sample_info["image_id"]
        num_captions = len(captions)

        selected_caption_index = random.randint(0, num_captions - 1)
        other_caption_indices = [
            i for i in range(num_captions) if i != selected_caption_index
        ]
        selected_caption = captions[selected_caption_index]
        other_caption = None
        is_correct = -1

        if self._two_sentence:
            if random.random() > self._two_sentence_probability:
                other_caption = self._get_mismatching_caption(image_id)
                is_correct = False
            else:
                other_caption = captions[random.choice(other_caption_indices)]
                is_correct = True
        elif self._false_caption:
            if random.random() < self._false_caption_probability:
                selected_caption = self._get_mismatching_caption(image_id)
                is_correct = False
            else:
                is_correct = True

        processed = self.masked_token_processor(
            {
                "text_a": selected_caption,
                "text_b": other_caption,
                "is_correct": is_correct,
            }
        )
        processed.pop("tokens")
        current_sample.update(processed)

        return current_sample

    def _get_mismatching_caption(self, image_id):
        other_item = self.annotation_db[random.randint(0, len(self.annotation_db) - 1)]

        while other_item["image_id"] == image_id:
            other_item = self.annotation_db[
                random.randint(0, len(self.annotation_db) - 1)
            ]

        other_caption = other_item["captions"][
            random.randint(0, len(other_item["captions"]) - 1)
        ]
        return other_caption
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/masked_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = [
    "COCOBuilder",
    "COCODataset",
    "DetectionCOCOBuilder",
    "DetectionCOCODataset",
    "MaskedCOCOBuilder",
    "MaskedCOCODataset",
]

from .builder import COCOBuilder
from .dataset import COCODataset
from .detection_builder import DetectionCOCOBuilder
from .detection_dataset import DetectionCOCODataset
from .masked_builder import MaskedCOCOBuilder
from .masked_dataset import MaskedCOCODataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from mmf.common.registry import registry
from mmf.datasets.builders.coco.dataset import COCODataset
from mmf.datasets.builders.textcaps.dataset import TextCapsDataset
from mmf.datasets.builders.vqa2 import VQA2Builder


@registry.register_builder("coco")
class COCOBuilder(VQA2Builder):
    def __init__(self):
        super().__init__()
        self.dataset_name = "coco"
        self.set_dataset_class(COCODataset)

    # TODO: Deprecate this method and move configuration updates directly
    # to processors
    def update_registry_for_model(self, config):
        registry.register(
            self.dataset_name + "_text_vocab_size",
            self.dataset.text_processor.get_vocab_size(),
        )
        if hasattr(self.dataset, "answer_processor"):
            registry.register(
                self.dataset_name + "_num_final_outputs",
                self.dataset.answer_processor.get_vocab_size(),
            )
            registry.register(
                self.dataset_name + "_answer_processor", self.dataset.answer_processor
            )

    @classmethod
    def config_path(cls):
        return "configs/datasets/coco/defaults.yaml"

    def load(self, config, *args, **kwargs):
        annotation_style = config.get("annotation_style", self.dataset_name)
        if annotation_style == "textcaps":
            self.dataset_class = TextCapsDataset

        dataset = super().load(config, *args, **kwargs)
        dataset.dataset_name = self.dataset_name
        return dataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/builder.py
# Copyright (c) Facebook, Inc. and its affiliates.

import torch
from mmf.common.sample import Sample
from mmf.datasets.builders.vqa2 import VQA2Dataset
from mmf.utils.distributed import byte_tensor_to_object, object_to_byte_tensor


class COCODataset(VQA2Dataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        super().__init__(
            config, dataset_type, imdb_file_index, dataset_name="coco", *args, **kwargs
        )

    def preprocess_sample_info(self, sample_info):
        # COCO Annotation DBs have correct feature_path
        if "COCO" not in sample_info["feature_path"]:
            sample_info["feature_path"] = sample_info["image_path"].replace(
                ".jpg", ".npy"
            )
        return sample_info

    def load_item(self, idx):
        sample_info = self.annotation_db[idx]
        sample_info = self.preprocess_sample_info(sample_info)
        current_sample = Sample()

        if self._dataset_type != "test":
            text_processor_argument = {"tokens": sample_info["caption_tokens"]}
            processed_caption = self.text_processor(text_processor_argument)
            current_sample.text = processed_caption["text"]
            current_sample.caption_id = torch.tensor(
                sample_info["caption_id"], dtype=torch.int
            )
            current_sample.caption_len = torch.tensor(
                len(sample_info["caption_tokens"]), dtype=torch.int
            )

        current_sample.image_id = object_to_byte_tensor(sample_info["image_id"])

        if self._use_features:
            features = self.features_db[idx]
            current_sample.update(features)
        else:
            image_path = str(sample_info["image_name"]) + ".jpg"
            current_sample.image = self.image_db.from_path(image_path)["images"][0]

        # Add reference captions to sample
        current_sample = self.add_reference_caption(sample_info, current_sample)

        return current_sample

    def add_reference_caption(self, sample_info, sample):
        reference_list = []
        for reference in sample_info["reference_tokens"]:
            text_processor_argument = {"tokens": reference}
            processed_reference = self.text_processor(text_processor_argument)
            reference_list.append(processed_reference["text"])

        # Restrict to minimum reference captions available per image
        sample.answers = torch.stack(reference_list)[
            : self.config.min_captions_per_img
        ]

        return sample

    def format_for_prediction(self, report):
        captions = report.captions.tolist()
        predictions = []
        remove_unk_from_caption_prediction = getattr(
            self.config, "remove_unk_from_caption_prediction", False
        )
        for idx, image_id in enumerate(report.image_id):
            image_id = byte_tensor_to_object(image_id)
            caption = self.caption_processor(captions[idx])["caption"]
            if remove_unk_from_caption_prediction:
                caption = caption.replace("<unk>", "")
                caption = caption.replace("  ", " ").strip()
            if isinstance(image_id, torch.Tensor):
                image_id = image_id.item()
            predictions.append({"image_id": image_id, "caption": caption})

        return predictions
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.

import os
from typing import Dict

import torch
import torch.nn.functional as F
import torchvision
from mmf.common.sample import Sample
from mmf.datasets.base_dataset import BaseDataset
from mmf.utils.box_ops import box_cxcywh_to_xyxy
from mmf.utils.distributed import gather_tensor_along_batch, object_to_byte_tensor
from torch import nn, Tensor


class DetectionCOCODataset(BaseDataset):
    def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
        if "name" in kwargs:
            name = kwargs["name"]
        elif "dataset_name" in kwargs:
            name = kwargs["dataset_name"]
        else:
            name = "detection_coco"
        super().__init__(name, config, dataset_type, *args, **kwargs)
        self.dataset_name = name

        image_dir = self.config.images[self._dataset_type][imdb_file_index]
        self.image_dir = os.path.join(self.config.data_dir, image_dir)
        coco_json = self.config.annotations[self._dataset_type][imdb_file_index]
        self.coco_json = os.path.join(self.config.data_dir, coco_json)

        self.coco_dataset = torchvision.datasets.CocoDetection(
            self.image_dir, self.coco_json
        )
        self.postprocessors = {"bbox": PostProcess()}

    def __getitem__(self, idx):
        img, target = self.coco_dataset[idx]
        image_id = self.coco_dataset.ids[idx]
        target = {"image_id": image_id, "annotations": target}
        img, target = self._load_coco_annotations(
            img, target, load_attributes=self.config.load_attributes
        )
        transform_out = self.detection_image_and_target_processor(
            {"img": img, "target": target, "dataset_type": self._dataset_type}
        )
        img = transform_out["img"]
        target = transform_out["target"]

        current_sample = Sample()
        current_sample.image_id = torch.tensor(image_id, dtype=torch.long)
        current_sample.image = img
        current_sample.targets_enc = object_to_byte_tensor(
            target, max_size=self.config.max_target_enc_size
        )
        current_sample.orig_size = target["orig_size"].clone().detach()

        return current_sample

    def __len__(self):
        return len(self.coco_dataset)

    def format_for_prediction(self, report):
        # gather detection output keys across processes
        pred_boxes = gather_tensor_along_batch(report.pred_boxes)
        pred_logits = gather_tensor_along_batch(report.pred_logits)
        orig_size = gather_tensor_along_batch(report.orig_size)
        outputs = {"pred_logits": pred_logits, "pred_boxes": pred_boxes}
        if hasattr(report, "attr_logits"):
            attr_logits = gather_tensor_along_batch(report.attr_logits)
            outputs["attr_logits"] = attr_logits

        image_ids = report.image_id.tolist()
        results = self.postprocessors["bbox"](outputs, orig_size)
        predictions = []
        for image_id, r in zip(image_ids, results):
            scores = r["scores"].tolist()
            labels = r["labels"].tolist()
            # convert boxes from xyxy to xywh
            xmin, ymin, xmax, ymax = r["boxes"].unbind(1)
            boxes_xywh = torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
            boxes_xywh = boxes_xywh.tolist()
            # group the boxes by image_id for de-duplication in
            # `on_prediction_end` (duplication is introduced by
            # DistributedSampler)
            predictions.append(
                (
                    image_id,
                    [
                        {
                            "image_id": image_id,
                            "category_id": labels[k],
                            "bbox": box_xywh,
                            "score": scores[k],
                        }
                        for k, box_xywh in enumerate(boxes_xywh)
                    ],
                )
            )
            if "attr_scores" in r:
                attr_scores = r["attr_scores"].tolist()
                attr_labels = r["attr_labels"].tolist()
                for k in range(len(boxes_xywh)):
                    predictions[-1][1][k]["attr_score"] = attr_scores[k]
                    predictions[-1][1][k]["attr_label"] = attr_labels[k]

        return predictions

    def on_prediction_end(self, predictions):
        # de-duplicate the predictions (duplication is introduced by
        # DistributedSampler)
        prediction_dict = {image_id: entries for image_id, entries in predictions}
        unique_entries = []
        for image_id in sorted(prediction_dict):
            unique_entries.extend(prediction_dict[image_id])

        return unique_entries

    def _load_coco_annotations(self, image, target, load_attributes=False):
        w, h = image.size

        image_id = target["image_id"]
        image_id = torch.tensor([image_id])

        anno = target["annotations"]
        anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)

        classes = [obj["category_id"] for obj in anno]
        classes = torch.tensor(classes, dtype=torch.int64)

        attributes = None
        if load_attributes:
            # load the attribute annotations in the Visual Genome dataset
            # following vqa-maskrcnn-benchmark, -1 will be used as ignore label
            # (https://gitlab.com/meetshah1995/vqa-maskrcnn-benchmark)
            MAX_ATTR_NUM = 16
            attributes = -torch.ones(len(classes), MAX_ATTR_NUM, dtype=torch.int64)
            for n_obj, obj in enumerate(anno):
                attributes[n_obj] = torch.as_tensor(
                    obj["attribute_ids_max16"], dtype=torch.int64
                )

        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        if attributes is not None:
            attributes = attributes[keep]

        target = {}
        target["boxes"] = boxes
        target["orig_boxes"] = boxes
        target["labels"] = classes
        if attributes is not None:
            target["attributes"] = attributes
        target["image_id"] = image_id

        # for conversion to coco api
        area = torch.tensor([obj["area"] for obj in anno])
        target["area"] = area[keep]
        target["orig_area"] = target["area"]
        iscrowd = torch.tensor([obj.get("iscrowd", 0) for obj in anno])
        target["iscrowd"] = iscrowd[keep]

        target["orig_size"] = torch.as_tensor([int(h), int(w)])
        target["size"] = torch.as_tensor([int(h), int(w)])

        return image, target


class PostProcess(nn.Module):
    # Mostly copy-pasted from
    # https://github.com/facebookresearch/detr/blob/master/models/detr.py
    @torch.no_grad()
    def forward(self, outputs: Dict[str, Tensor], target_sizes: Tensor):
        out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]

        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        prob = F.softmax(out_logits, -1)
        scores, labels = prob[..., :-1].max(-1)

        # convert to [x0, y0, x1, y1] format
        boxes = box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        results = [
            {"scores": s, "labels": l, "boxes": b}
            for s, l, b in zip(scores, labels, boxes)
        ]
        if "attr_logits" in outputs:
            assert len(outputs["attr_logits"]) == len(results)
            attr_scores, attr_labels = outputs["attr_logits"].max(-1)
            for idx, r in enumerate(results):
                r["attr_scores"] = attr_scores[idx]
                r["attr_labels"] = attr_labels[idx]

        return results
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/detection_dataset.py
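COCO's evaluation format expects [x, y, width, height] boxes, so format_for_prediction above converts from the model's xyxy corners. The same three-line conversion in isolation; the box values here are fabricated:

import torch

# One box in [x0, y0, x1, y1] format
boxes_xyxy = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
xmin, ymin, xmax, ymax = boxes_xyxy.unbind(1)
boxes_xywh = torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
print(boxes_xywh.tolist())  # -> [[10.0, 20.0, 100.0, 200.0]]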
# Copyright (c) Facebook, Inc. and its affiliates.

from mmf.common.registry import registry
from mmf.datasets.builders.coco.detection_dataset import DetectionCOCODataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("detection_coco")
class DetectionCOCOBuilder(MMFDatasetBuilder):
    def __init__(self):
        super().__init__(
            dataset_name="detection_coco", dataset_class=DetectionCOCODataset
        )

    @classmethod
    def config_path(cls):
        return "configs/datasets/coco/detection.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/coco/detection_builder.py
import json

import pandas as pd
import torch
from mmf.utils.file_io import PathManager


class CaptionsDatabase(torch.utils.data.Dataset):
    """
    Dataset for Flickr Annotations
    """

    SPLITS = {"train": ["train"], "val": ["val"], "test": ["test"]}

    def __init__(self, config, splits_path, dataset_type: str, *args, **kwargs):
        super().__init__()
        self.config = config
        self.dataset_type = dataset_type
        self.splits = self.SPLITS[self.dataset_type]

        self._load_annotation_db(splits_path)

    def _load_annotation_db(self, splits_path):
        data = []

        with PathManager.open(splits_path, "r") as f:
            annotations_json = json.load(f)

        for image in annotations_json["images"]:
            if image["split"] in self.splits:
                data.append(
                    {
                        "image_path": image["filename"],
                        "sentences": [s["raw"] for s in image["sentences"]],
                    }
                )

        if len(data) == 0:
            raise RuntimeError("Dataset is empty")

        self.samples_factor = len(data[0]["sentences"])
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


class COCOAnnotationDatabase(CaptionsDatabase):
    """
    Dataset for COCO Annotations with extra 30K samples for training
    """

    SPLITS = {"train": ["train", "restval"], "val": ["val"], "test": ["test"]}

    def _load_annotation_db(self, splits_path):
        data = []

        with PathManager.open(splits_path, "r") as f:
            annotations_json = json.load(f)

        for image in annotations_json["images"]:
            if image["split"] in self.splits:
                image_path = image["filename"]
                # hard-fix for the extra images from val
                if image["split"] == "train":
                    image_path = "../train2014/" + image_path
                elif image["split"] == "restval":
                    image_path = "../val2014/" + image_path
                elif image["split"] == "val":
                    image_path = "../val2014/" + image_path
                elif image["split"] == "test":
                    image_path = "../val2014/" + image_path
                else:
                    raise NotImplementedError

                data.append(
                    {
                        "image_path": image_path,
                        # Cap for 5 captions
                        "sentences": [s["raw"] for s in image["sentences"][:5]],
                    }
                )

        if len(data) == 0:
            raise RuntimeError("Dataset is empty")

        self.samples_factor = len(data[0]["sentences"])
        self.data = data


class ConceptualCaptionsDatabase(CaptionsDatabase):
    """
    Dataset for conceptual caption database
    """

    SPLITS = {"train": ["train"], "val": ["val"], "test": ["test"]}

    def _load_annotation_db(self, splits_path):
        df = pd.read_csv(
            splits_path, compression="gzip", sep="\t", names=["caption", "file"]
        )
        self.data = df
        self.samples_factor = 1

        if len(self.data) == 0:
            raise RuntimeError("Dataset is empty")

    def __getitem__(self, idx):
        df_i = self.data.iloc[idx]
        data = {"sentences": [df_i["caption"]], "image_path": df_i["file"]}
        return data
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/retrieval/datasets.py
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = ["RetrievalDataset", "RetrievalBuilder"]

from .builder import RetrievalBuilder
from .dataset import RetrievalDataset
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/retrieval/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from mmf.common.registry import registry
from mmf.datasets.builders.retrieval.dataset import RetrievalDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("retrieval")
class RetrievalBuilder(MMFDatasetBuilder):
    def __init__(
        self, dataset_name="retrieval", dataset_class=RetrievalDataset, *args, **kwargs
    ):
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        return "configs/datasets/retrieval/flickr30k_defaults.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/retrieval/builder.py
import random

import torch
from mmf.common.sample import Sample, SampleList
from mmf.common.typings import MMFDatasetConfigType
from mmf.datasets.builders.retrieval.datasets import (
    CaptionsDatabase,
    COCOAnnotationDatabase,
    ConceptualCaptionsDatabase,
)
from mmf.datasets.mmf_dataset import MMFDataset


ANNOTATIONS_DATABASE = {
    "flickr": CaptionsDatabase,
    "coco": COCOAnnotationDatabase,
    "cc": ConceptualCaptionsDatabase,
}


class RetrievalDataset(MMFDataset):
    def __init__(
        self,
        config: MMFDatasetConfigType,
        dataset_type: str,
        index: int,
        *args,
        **kwargs,
    ):
        self.annotation_class = config.get("annotations_parser", "flickr")
        super().__init__(
            "retrieval",
            config,
            dataset_type,
            index,
            ANNOTATIONS_DATABASE[self.annotation_class],
            *args,
            **kwargs,
        )

    def init_processors(self):
        super().init_processors()

        # Assign transforms to the image_db
        if self._dataset_type == "train":
            self.image_db.transform = self.train_image_processor
        else:
            self.image_db.transform = self.eval_image_processor

    def _get_valid_text_attribute(self, sample_info):
        if "captions" in sample_info:
            return "captions"

        if "sentences" in sample_info:
            return "sentences"

        raise AttributeError("No valid text attribution was found")

    def __getitem__(self, idx):
        if self._dataset_type == "train":
            sample_info = self.annotation_db[idx]
            text_attr = self._get_valid_text_attribute(sample_info)

            current_sample = Sample()
            sentence = random.sample(sample_info[text_attr], 1)[0]
            processed_sentence = self.text_processor({"text": sentence})

            current_sample.text = processed_sentence["text"]
            if "input_ids" in processed_sentence:
                current_sample.update(processed_sentence)

            current_sample.image = self.image_db[idx]["images"][0]
            current_sample.ann_idx = torch.tensor(idx, dtype=torch.long)
        else:
            sample_info = self.annotation_db[idx]
            text_attr = self._get_valid_text_attribute(sample_info)

            sample_list = []
            for s_idx, sentence in enumerate(sample_info[text_attr]):
                sentence_sample = Sample()
                processed_sentence = self.text_processor({"text": sentence})

                sentence_sample.raw_text = sentence
                sentence_sample.text = processed_sentence["text"]
                if "input_ids" in processed_sentence:
                    sentence_sample.update(processed_sentence)

                sentence_sample.text_index = (
                    idx * self.annotation_db.samples_factor + s_idx
                )

                sample_list.append(sentence_sample)

            current_sample = SampleList(sample_list)
            current_sample.image = self.image_db[idx]["images"][0]
            current_sample.image_path = self.annotation_db[idx]["image_path"]
            current_sample.image_index = idx

        current_sample.targets = None  # Dummy for Loss

        return current_sample
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/retrieval/dataset.py
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/charades/__init__.py
from mmf.common.registry import registry
from mmf.datasets.builders.charades.dataset import CharadesDataset
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder


@registry.register_builder("charades")
class CharadesBuilder(MMFDatasetBuilder):
    def __init__(
        self, dataset_name="charades", dataset_class=CharadesDataset, *args, **kwargs
    ):
        super().__init__(dataset_name, dataset_class, *args, **kwargs)

    @classmethod
    def config_path(cls):
        return "configs/datasets/charades/defaults.yaml"
EXA-1-master
exa/models/mmf-main/mmf/datasets/builders/charades/builder.py