"""Type symbols class."""
import copy
import os
from typing import Dict, List, Optional, Set, Union
from tqdm.auto import tqdm
from bootleg.symbols.constants import edit_op
from bootleg.utils import utils
from bootleg.utils.classes.nested_vocab_tries import TwoLayerVocabularyScoreTrie
def _convert_to_trie(qid2typenames, max_types):
all_typenames = set()
qid2typenames_filt = {}
for q, typs in qid2typenames.items():
all_typenames.update(set(typs))
qid2typenames_filt[q] = typs[:max_types]
qid2typenames_trie = TwoLayerVocabularyScoreTrie(
input_dict=qid2typenames_filt,
vocabulary=all_typenames,
max_value=max_types,
)
return qid2typenames_trie
class TypeSymbols:
"""Type Symbols class for managing type metadata."""
def __init__(
self,
qid2typenames: Union[Dict[str, List[str]], TwoLayerVocabularyScoreTrie],
max_types: Optional[int] = 10,
edit_mode: Optional[bool] = False,
verbose: Optional[bool] = False,
):
"""Type Symbols initializer."""
if max_types <= 0:
raise ValueError("max_types must be greater than 0")
self.max_types = max_types
self.edit_mode = edit_mode
self.verbose = verbose
if self.edit_mode:
self._load_edit_mode(
qid2typenames,
)
else:
self._load_non_edit_mode(
qid2typenames,
)
def _load_edit_mode(
self, qid2typenames: Union[Dict[str, List[str]], TwoLayerVocabularyScoreTrie]
):
"""Load qid to type mappings in edit mode."""
if isinstance(qid2typenames, TwoLayerVocabularyScoreTrie):
self._qid2typenames: Union[
Dict[str, List[str]], TwoLayerVocabularyScoreTrie
] = qid2typenames.to_dict(keep_score=False)
else:
self._qid2typenames: Union[
Dict[str, List[str]], TwoLayerVocabularyScoreTrie
] = {q: typs[: self.max_types] for q, typs in qid2typenames.items()}
self._all_typenames: Union[Set[str], None] = set(
[t for typeset in self._qid2typenames.values() for t in typeset]
)
self._typename2qids: Union[Dict[str, set], None] = {}
for qid in tqdm(
self._qid2typenames,
total=len(self._qid2typenames),
desc="Building edit mode objs",
disable=not self.verbose,
):
for typname in self._qid2typenames[qid]:
if typname not in self._typename2qids:
self._typename2qids[typname] = set()
self._typename2qids[typname].add(qid)
# In case extra types in vocab without qids
for typname in self._all_typenames:
if typname not in self._typename2qids:
self._typename2qids[typname] = set()
def _load_non_edit_mode(
self, qid2typenames: Union[Dict[str, List[str]], TwoLayerVocabularyScoreTrie]
):
"""Load qid to type mappings in non edit mode (read only mode)."""
if isinstance(qid2typenames, dict):
self._qid2typenames: Union[
Dict[str, List[str]], TwoLayerVocabularyScoreTrie
] = _convert_to_trie(qid2typenames, self.max_types)
else:
self._qid2typenames: Union[
Dict[str, List[str]], TwoLayerVocabularyScoreTrie
] = qid2typenames
self._all_typenames: Union[Set[str], None] = None
self._typename2qids: Union[Dict[str, set], None] = None
def save(self, save_dir, prefix=""):
"""Dump the type symbols.
Args:
save_dir: directory string to save
            prefix: prefix to add to the beginning of the file name
"""
utils.ensure_dir(str(save_dir))
utils.dump_json_file(
filename=os.path.join(save_dir, "config.json"),
contents={
"max_types": self.max_types,
},
)
if isinstance(self._qid2typenames, dict):
qid2typenames = _convert_to_trie(self._qid2typenames, self.max_types)
qid2typenames.dump(os.path.join(save_dir, f"{prefix}qid2typenames"))
else:
self._qid2typenames.dump(os.path.join(save_dir, f"{prefix}qid2typenames"))
@classmethod
def load_from_cache(cls, load_dir, prefix="", edit_mode=False, verbose=False):
"""Load type symbols from load_dir.
Args:
load_dir: directory to load from
            prefix: prefix to add to the beginning of the file name
edit_mode: edit mode flag
verbose: verbose flag
Returns: TypeSymbols
"""
config = utils.load_json_file(filename=os.path.join(load_dir, "config.json"))
max_types = config["max_types"]
# For backwards compatibility, check if trie directory exists, otherwise load from json
type_load_dir = os.path.join(load_dir, f"{prefix}qid2typenames")
if not os.path.exists(type_load_dir):
qid2typenames: Union[
Dict[str, List[str]], TwoLayerVocabularyScoreTrie
] = utils.load_json_file(
filename=os.path.join(load_dir, f"{prefix}qid2typenames.json")
)
else:
qid2typenames: Union[
Dict[str, List[str]], TwoLayerVocabularyScoreTrie
] = TwoLayerVocabularyScoreTrie(load_dir=type_load_dir, max_value=max_types)
return cls(qid2typenames, max_types, edit_mode, verbose)
def get_all_types(self):
"""Return all typenames."""
if isinstance(self._qid2typenames, dict):
return self._all_typenames
else:
return set(self._qid2typenames.vocab_keys())
def get_types(self, qid):
"""Get the type names associated with the given QID.
Args:
qid: QID
Returns: list of typename strings
"""
if isinstance(self._qid2typenames, dict):
types = self._qid2typenames.get(qid, [])
else:
if self._qid2typenames.is_key_in_trie(qid):
# TwoLayerVocabularyScoreTrie assumes values are list of pairs - we only want type name which is first
types = self._qid2typenames.get_value(qid, keep_score=False)
else:
types = []
return types
def get_qid2typename_dict(self):
"""Return dictionary of qid to typenames.
Returns: Dict of QID to list of typenames.
"""
if isinstance(self._qid2typenames, dict):
return copy.deepcopy(self._qid2typenames)
else:
return self._qid2typenames.to_dict(keep_score=False)
# ============================================================
# EDIT MODE OPERATIONS
# ============================================================
@edit_op
def get_entities_of_type(self, typename):
"""Get all entity QIDs of type ``typename``.
Args:
typename: typename
Returns: List
"""
if typename not in self._all_typenames:
raise ValueError(f"{typename} is not a type in the typesystem")
# This will not be None as we are in edit mode
return self._typename2qids.get(typename, [])
@edit_op
def add_type(self, qid, typename):
"""Add the type to the QID.
If the QID already has maximum types, the
last type is removed and replaced by ``typename``.
Args:
qid: QID
typename: type name
"""
if typename not in self._all_typenames:
self._all_typenames.add(typename)
self._typename2qids[typename] = set()
# Update qid->type mappings
if typename not in self._qid2typenames[qid]:
# Remove last type if too many types
if len(self._qid2typenames[qid]) >= self.max_types:
type_to_remove = self._qid2typenames[qid][-1]
self.remove_type(qid, type_to_remove)
self._qid2typenames[qid].append(typename)
# As we are in edit mode, self._typename2qids will not be None
self._typename2qids[typename].add(qid)
return
@edit_op
def remove_type(self, qid, typename):
"""Remove the type from the QID.
Args:
qid: QID
typename: type name to remove
"""
if typename not in self._all_typenames:
raise ValueError(
f"The type {typename} is not in our vocab. We only support adding types in our vocab."
)
if typename not in self._qid2typenames[qid]:
return
        assert (
            typename in self._typename2qids
        ), f"Invalid state: {typename} is missing from self._typename2qids for qid {qid}"
self._qid2typenames[qid].remove(typename)
# As we are in edit mode, self._typename2qids will not be None
# Further, we want to keep the typename even if list is empty as our type system doesn't change
self._typename2qids[typename].remove(qid)
return
@edit_op
def add_entity(self, qid, types):
"""
Add an entity QID with its types to our mappings.
Args:
qid: QID
types: list of type names
"""
for typename in types:
if typename not in self._all_typenames:
self._all_typenames.add(typename)
self._typename2qids[typename] = set()
# Add the qid to the qid dicts so we can call the add/remove functions
self._qid2typenames[qid] = []
for typename in types:
self._qid2typenames[qid].append(typename)
# Cutdown to max types
self._qid2typenames[qid] = self._qid2typenames[qid][: self.max_types]
# Add to typenames to qids
for typename in self._qid2typenames[qid]:
self._typename2qids[typename].add(qid)
return
@edit_op
def reidentify_entity(self, old_qid, new_qid):
"""Rename ``old_qid`` to ``new_qid``.
Args:
old_qid: old QID
new_qid: new QID
"""
assert (
old_qid in self._qid2typenames and new_qid not in self._qid2typenames
), f"Internal Error: checks on existing versus new qid for {old_qid} and {new_qid} failed"
# Update qid2typenames
self._qid2typenames[new_qid] = self._qid2typenames[old_qid]
del self._qid2typenames[old_qid]
        # Update typename2qids
for typename in self._qid2typenames[new_qid]:
self._typename2qids[typename].remove(old_qid)
self._typename2qids[typename].add(new_qid)
@edit_op
def prune_to_entities(self, entities_to_keep):
"""Remove all entities except those in ``entities_to_keep``.
Args:
entities_to_keep: Set of entities to keep
"""
# Update qid2typenames
self._qid2typenames = {
k: v for k, v in self._qid2typenames.items() if k in entities_to_keep
}
        # Update typename2qids, keeping the typenames even if their qid sets become empty
for typename in self._typename2qids:
self._typename2qids[typename] = self._typename2qids[typename].intersection(
entities_to_keep
)
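# --------------------------------------------------------------------------
# Illustrative, hedged usage sketch added for clarity; it is not part of the
# original Bootleg module. QIDs and type names below are made up, and only the
# dictionary-backed (edit mode) code path shown above is exercised.
def _example_type_symbols_usage():  # pragma: no cover
    qid2typenames = {"Q123": ["person", "politician"], "Q456": ["city"]}
    type_symbols = TypeSymbols(qid2typenames, max_types=3, edit_mode=True)
    # In edit mode, mappings are plain dictionaries and can be modified.
    assert type_symbols.get_types("Q123") == ["person", "politician"]
    type_symbols.add_type("Q456", "capital")
    assert type_symbols.get_types("Q456") == ["city", "capital"]
    # get_entities_of_type is only available in edit mode (see @edit_op).
    assert type_symbols.get_entities_of_type("person") == {"Q123"}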
| bootleg-master | bootleg/symbols/type_symbols.py |
"""Constants."""
import logging
import os
from distutils.util import strtobool
from functools import wraps
from bootleg import log_rank_0_info
logger = logging.getLogger(__name__)
USE_STRIP = strtobool(os.environ.get("BOOTLEG_STRIP", "true"))
USE_LOWER = strtobool(os.environ.get("BOOTLEG_LOWER", "true"))
LANG_CODE = os.environ.get("BOOTLEG_LANG_CODE", "en")
log_rank_0_info(
logger,
f"Setting BOOTLEG_STRIP to {USE_STRIP} and BOOTLEG_LOWER to {USE_LOWER} and BOOTLEG_LANG_CODE to {LANG_CODE}. "
f"Set these enviorn variables to change behavior.",
)
PAD = "<pad>"
UNK_ID = 0
PAD_ID = -1
CLS_BERT = "[CLS]"
SEP_BERT = "[SEP]"
PAD_BERT = "[PAD]"
BERT_WORD_DIM = 768
MAX_BERT_TOKEN_LEN = 512
SPECIAL_TOKENS = {
"additional_special_tokens": [
"[ent_start]",
"[ent_end]",
"[ent_desc]",
"[ent_kg]",
"[ent_type]",
]
}
FINAL_LOSS = "final_loss"
TRAIN_SPLIT = "train"
DEV_SPLIT = "dev"
TEST_SPLIT = "test"
# dataset keys
ANCHOR_KEY = "gold"
STOP_WORDS = {
"for",
"this",
"haven",
"her",
"are",
"s",
"don't",
"ll",
"isn't",
"been",
"themselves",
"it's",
"needn't",
"haven't",
"shouldn",
"ours",
"d",
"than",
"only",
"ma",
"me",
"after",
"which",
"under",
"then",
"both",
"as",
"can",
"yours",
"hers",
"their",
"hadn't",
"we",
"in",
"off",
"having",
"t",
"up",
"re",
"needn",
"she's",
"below",
"over",
"from",
"all",
"an",
"did",
"most",
"weren't",
"your",
"couldn",
"you've",
"because",
"same",
"didn",
"shouldn't",
"about",
"aren",
"myself",
"while",
"so",
"mightn't",
"very",
"what",
"aren't",
"other",
"won",
"or",
"should've",
"out",
"when",
"doesn",
"of",
"am",
"doing",
"nor",
"above",
"shan't",
"with",
"isn",
"that",
"is",
"yourself",
"him",
"had",
"those",
"just",
"more",
"ain",
"my",
"it",
"won't",
"you",
"yourselves",
"at",
"being",
"between",
"be",
"some",
"o",
"where",
"weren",
"has",
"will",
"wasn't",
"that'll",
"against",
"during",
"ve",
"wouldn't",
"herself",
"such",
"m",
"doesn't",
"itself",
"here",
"and",
"were",
"didn't",
"own",
"through",
"they",
"do",
"you'd",
"once",
"the",
"couldn't",
"hasn't",
"before",
"who",
"any",
"our",
"hadn",
"too",
"no",
"he",
"hasn",
"if",
"why",
"wouldn",
"its",
"on",
"mustn't",
"now",
"again",
"to",
"each",
"whom",
"i",
"by",
"have",
"how",
"theirs",
"not",
"don",
"but",
"there",
"shan",
"ourselves",
"until",
"down",
"mightn",
"wasn",
"few",
"mustn",
"his",
"y",
"you're",
"should",
"does",
"himself",
"was",
"you'll",
"them",
"these",
"she",
"into",
"further",
"a",
}
# profile constants/utils wrappers
def edit_op(func):
"""Edit op."""
@wraps(func)
def wrapper_check_edit_mode(obj, *args, **kwargs):
if obj.edit_mode is False:
raise AttributeError("You must load object in edit_mode=True")
return func(obj, *args, **kwargs)
return wrapper_check_edit_mode
def check_qid_exists(func):
"""Check QID exists."""
@wraps(func)
def wrapper_check_qid(obj, *args, **kwargs):
if len(args) > 0:
qid = args[0]
else:
qid = kwargs["qid"]
if not obj._entity_symbols.qid_exists(qid):
raise ValueError(f"The entity {qid} is not in our dump")
return func(obj, *args, **kwargs)
return wrapper_check_qid
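# --------------------------------------------------------------------------
# Illustrative, hedged sketch (not part of the original module): it shows how
# the edit_op decorator above guards methods so they only run when the owning
# object was loaded with edit_mode=True. The tiny class below is a stand-in for
# TypeSymbols / EntitySymbols, not a real Bootleg class.
class _EditOpExample:
    def __init__(self, edit_mode=False):
        self.edit_mode = edit_mode
    @edit_op
    def rename(self, value):
        # Only reachable when edit_mode is True.
        return value
def _example_edit_op_usage():  # pragma: no cover
    assert _EditOpExample(edit_mode=True).rename("ok") == "ok"
    try:
        _EditOpExample(edit_mode=False).rename("fails")
    except AttributeError:
        pass  # edit_op raises because the object was not loaded in edit mode.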
| bootleg-master | bootleg/symbols/constants.py |
"""Symbols init."""
| bootleg-master | bootleg/symbols/__init__.py |
"""Entity symbols."""
import copy
import logging
import os
from typing import Callable, Dict, Optional, Union
from tqdm.auto import tqdm
import bootleg.utils.utils as utils
from bootleg.symbols.constants import edit_op
from bootleg.utils.classes.nested_vocab_tries import (
TwoLayerVocabularyScoreTrie,
VocabularyTrie,
)
logger = logging.getLogger(__name__)
class EntitySymbols:
"""Entity Symbols class for managing entity metadata."""
def __init__(
self,
alias2qids: Union[Dict[str, list], TwoLayerVocabularyScoreTrie],
qid2title: Dict[str, str],
qid2desc: Union[Dict[str, str]] = None,
qid2eid: Optional[VocabularyTrie] = None,
alias2id: Optional[VocabularyTrie] = None,
max_candidates: int = 30,
alias_cand_map_dir: str = "alias2qids",
alias_idx_dir: str = "alias2id",
edit_mode: Optional[bool] = False,
verbose: Optional[bool] = False,
):
"""Entity symbols initializer."""
# We support different candidate mappings for the same set of entities
self.alias_cand_map_dir = alias_cand_map_dir
self.alias_idx_dir = alias_idx_dir
self.max_candidates = max_candidates
self.edit_mode = edit_mode
self.verbose = verbose
if qid2eid is None:
# +1 as 0 is reserved for not-in-cand list entity
qid2eid = {q: i + 1 for i, q in enumerate(qid2title.keys())}
if alias2id is None:
alias2id = {a: i for i, a in enumerate(alias2qids.keys())}
# If edit mode is ON, we must load everything as a dictionary
if self.edit_mode:
self._load_edit_mode(
alias2qids,
qid2title,
qid2desc,
qid2eid,
alias2id,
)
else:
self._load_non_edit_mode(
alias2qids,
qid2title,
qid2desc,
qid2eid,
alias2id,
)
# This assumes that eid of 0 is NO_CAND and eid of -1 is NULL entity; neither are in dict
self.num_entities = len(self._qid2eid)
self.num_entities_with_pad_and_nocand = self.num_entities + 2
def _load_edit_mode(
self,
alias2qids: Union[Dict[str, list], TwoLayerVocabularyScoreTrie],
qid2title: Dict[str, str],
qid2desc: Union[Dict[str, str]],
qid2eid: Union[Dict[str, int], VocabularyTrie],
alias2id: Union[Dict[str, int], VocabularyTrie],
):
"""Load in edit mode.
Loading in edit mode requires all inputs be cast to dictionaries. Tries do not allow value changes.
"""
# Convert to dict for editing
if isinstance(alias2qids, TwoLayerVocabularyScoreTrie):
alias2qids = alias2qids.to_dict()
self._alias2qids: Union[
Dict[str, list], TwoLayerVocabularyScoreTrie
] = alias2qids
self._qid2title: Dict[str, str] = qid2title
self._qid2desc: Dict[str, str] = qid2desc
# Sort by score and filter to max candidates
self._sort_alias_cands(
self._alias2qids, truncate=True, max_cands=self.max_candidates
)
# Cast to dicts in edit mode
if isinstance(qid2eid, VocabularyTrie):
self._qid2eid: Union[Dict[str, int], VocabularyTrie] = qid2eid.to_dict()
else:
self._qid2eid: Union[Dict[str, int], VocabularyTrie] = qid2eid
if isinstance(alias2id, VocabularyTrie):
self._alias2id: Union[Dict[str, int], VocabularyTrie] = alias2id.to_dict()
else:
self._alias2id: Union[Dict[str, int], VocabularyTrie] = alias2id
# Generate reverse indexes for fast editing
self._id2alias: Union[Dict[int, str], Callable[[int], str]] = {
id: al for al, id in self._alias2id.items()
}
self._eid2qid: Union[Dict[int, str], Callable[[int], str]] = {
eid: qid for qid, eid in self._qid2eid.items()
}
self._qid2aliases: Union[Dict[str, set], None] = {}
for al in tqdm(
self._alias2qids,
total=len(self._alias2qids),
desc="Building edit mode objs",
disable=not self.verbose,
):
for qid_pair in self._alias2qids[al]:
if qid_pair[0] not in self._qid2aliases:
self._qid2aliases[qid_pair[0]] = set()
self._qid2aliases[qid_pair[0]].add(al)
assert len(self._qid2eid) == len(self._eid2qid), (
"The qid2eid mapping is not invertable. "
"This means there is a duplicate id value."
)
assert -1 not in self._eid2qid, "-1 can't be an eid"
assert (
0 not in self._eid2qid
), "0 can't be an eid. It's reserved for null candidate"
# For when we need to add new entities
self.max_eid = max(self._eid2qid.keys())
self.max_alid = max(self._id2alias.keys())
def _load_non_edit_mode(
self,
alias2qids: Union[Dict[str, list], TwoLayerVocabularyScoreTrie],
qid2title: Dict[str, str],
qid2desc: Union[Dict[str, str]],
qid2eid: Optional[VocabularyTrie],
alias2id: Optional[VocabularyTrie],
):
"""Load items in read-only Trie mode."""
# Convert to record trie
if isinstance(alias2qids, dict):
self._sort_alias_cands(
alias2qids, truncate=True, max_cands=self.max_candidates
)
alias2qids = TwoLayerVocabularyScoreTrie(
input_dict=alias2qids,
vocabulary=qid2title,
max_value=self.max_candidates,
)
self._alias2qids: Union[
Dict[str, list], TwoLayerVocabularyScoreTrie
] = alias2qids
self._qid2title: Dict[str, str] = qid2title
self._qid2desc: Dict[str, str] = qid2desc
# Convert to Tries for non edit mode
if isinstance(qid2eid, dict):
self._qid2eid: Union[Dict[str, int], VocabularyTrie] = VocabularyTrie(
input_dict=qid2eid
)
else:
self._qid2eid: Union[Dict[str, int], VocabularyTrie] = qid2eid
if isinstance(alias2id, dict):
self._alias2id: Union[Dict[str, int], VocabularyTrie] = VocabularyTrie(
input_dict=alias2id
)
else:
self._alias2id: Union[Dict[str, int], VocabularyTrie] = alias2id
        # Make reverse lookup functions for ease of use
self._id2alias: Union[
Dict[int, str], Callable[[int], str]
] = lambda x: self._alias2id.get_key(x)
self._eid2qid: Union[
Dict[int, str], Callable[[int], str]
] = lambda x: self._qid2eid.get_key(x)
self._qid2aliases: Union[Dict[str, set], None] = None
assert not self._qid2eid.is_value_in_trie(
0
), "0 can't be an eid. It's reserved for null candidate"
# For when we need to add new entities
self.max_eid = self._qid2eid.get_max_id()
self.max_alid = self._alias2id.get_max_id()
def save(self, save_dir):
"""Dump the entity symbols.
Args:
save_dir: directory string to save
"""
utils.ensure_dir(save_dir)
utils.dump_json_file(
filename=os.path.join(save_dir, "config.json"),
contents={
"max_candidates": self.max_candidates,
},
)
        # If in edit mode, must convert back to tries for saving
if isinstance(self._alias2qids, dict):
alias2qids = TwoLayerVocabularyScoreTrie(
input_dict=self._alias2qids,
vocabulary=self._qid2title,
max_value=self.max_candidates,
)
alias2qids.dump(os.path.join(save_dir, self.alias_cand_map_dir))
else:
self._alias2qids.dump(os.path.join(save_dir, self.alias_cand_map_dir))
if isinstance(self._alias2id, dict):
alias2id = VocabularyTrie(input_dict=self._alias2id)
alias2id.dump(os.path.join(save_dir, self.alias_idx_dir))
else:
self._alias2id.dump(os.path.join(save_dir, self.alias_idx_dir))
if isinstance(self._qid2eid, dict):
qid2eid = VocabularyTrie(input_dict=self._qid2eid)
qid2eid.dump(os.path.join(save_dir, "qid2eid"))
else:
self._qid2eid.dump(os.path.join(save_dir, "qid2eid"))
utils.dump_json_file(
filename=os.path.join(save_dir, "qid2title.json"), contents=self._qid2title
)
if self._qid2desc is not None:
utils.dump_json_file(
filename=os.path.join(save_dir, "qid2desc.json"),
contents=self._qid2desc,
)
@classmethod
def load_from_cache(
cls,
load_dir,
alias_cand_map_dir="alias2qids",
alias_idx_dir="alias2id",
edit_mode=False,
verbose=False,
):
"""Load entity symbols from load_dir.
Args:
load_dir: directory to load from
alias_cand_map_dir: alias2qid directory
alias_idx_dir: alias2id directory
edit_mode: edit mode flag
verbose: verbose flag
"""
config = utils.load_json_file(filename=os.path.join(load_dir, "config.json"))
max_candidates = config["max_candidates"]
# For backwards compatibility, check if folder exists - if not, load from json
# Future versions will assume folders exist
alias_load_dir = os.path.join(load_dir, alias_cand_map_dir)
if not os.path.exists(alias_load_dir):
alias2qids: Dict[str, list] = utils.load_json_file(
filename=os.path.join(load_dir, "alias2qids.json")
)
else:
alias2qids: TwoLayerVocabularyScoreTrie = TwoLayerVocabularyScoreTrie(
load_dir=alias_load_dir
)
alias_id_load_dir = os.path.join(load_dir, alias_idx_dir)
alias2id = None
if os.path.exists(alias_id_load_dir):
alias2id: VocabularyTrie = VocabularyTrie(load_dir=alias_id_load_dir)
eid_load_dir = os.path.join(load_dir, "qid2eid")
qid2eid = None
if os.path.exists(eid_load_dir):
qid2eid: VocabularyTrie = VocabularyTrie(load_dir=eid_load_dir)
qid2title: Dict[str, str] = utils.load_json_file(
filename=os.path.join(load_dir, "qid2title.json")
)
qid2desc = None
if os.path.exists(os.path.join(load_dir, "qid2desc.json")):
qid2desc: Dict[str, str] = utils.load_json_file(
filename=os.path.join(load_dir, "qid2desc.json")
)
return cls(
alias2qids,
qid2title,
qid2desc,
qid2eid,
alias2id,
max_candidates,
alias_cand_map_dir,
alias_idx_dir,
edit_mode,
verbose,
)
def _sort_alias_cands(
self, alias2qids: Dict[str, list], truncate: bool = False, max_cands: int = 30
):
"""Sort the candidates for each alias from largest to smallest score, truncating if desired."""
for alias in alias2qids:
# Add second key for determinism in case of same counts
alias2qids[alias] = sorted(
alias2qids[alias], key=lambda x: (x[1], x[0]), reverse=True
)
if truncate:
alias2qids[alias] = alias2qids[alias][:max_cands]
return alias2qids
def get_qid2eid_dict(self):
"""
Get the qid2eid mapping.
Returns: Dict qid2eid mapping
"""
if isinstance(self._qid2eid, dict):
return copy.deepcopy(self._qid2eid)
else:
return self._qid2eid.to_dict()
def get_alias2qids_dict(self):
"""
Get the alias2qids mapping.
        Key is alias; value is a list of [QID, sort_value] candidate pairs.
Returns: Dict alias2qids mapping
"""
if isinstance(self._alias2qids, dict):
return copy.deepcopy(self._alias2qids)
else:
return self._alias2qids.to_dict()
def get_qid2title_dict(self):
"""
Get the qid2title mapping.
Returns: Dict qid2title mapping
"""
return copy.deepcopy(self._qid2title)
def get_all_alias_vocabtrie(self):
"""
Get a trie of all aliases.
Returns: Vocab trie of all aliases.
"""
if isinstance(self._alias2id, VocabularyTrie):
return self._alias2id
else:
return VocabularyTrie(input_dict=self._alias2id)
def get_all_qids(self):
"""
Get all QIDs.
Returns: Dict_keys of all QIDs
"""
return self._qid2eid.keys()
def get_all_aliases(self):
"""
Get all aliases.
Returns: Dict_keys of all aliases
"""
return self._alias2qids.keys()
def get_all_titles(self):
"""
Get all QID titles.
Returns: Dict_values of all titles
"""
return self._qid2title.values()
def get_qid(self, id):
"""Get the QID associated with EID.
Args:
id: EID
Returns: QID string
"""
if isinstance(self._eid2qid, dict):
return self._eid2qid[id]
else:
return self._eid2qid(id)
def alias_exists(self, alias):
"""Check alias existance.
Args:
alias: alias string
Returns: boolean
"""
if isinstance(self._alias2qids, dict):
return alias in self._alias2id
else:
return self._alias2qids.is_key_in_trie(alias)
def qid_exists(self, qid):
"""Check QID existance.
Args:
alias: QID string
Returns: boolean
"""
if isinstance(self._qid2eid, dict):
return qid in self._qid2eid
else:
return self._qid2eid.is_key_in_trie(qid)
def get_eid(self, id):
"""Get the QID for the EID.
Args:
id: EID int
Returns: QID string
"""
return self._qid2eid[id]
def _get_qid_pairs(self, alias):
"""Get the qid pairs for an alias.
Args:
alias: alias
Returns: List of QID pairs
"""
if isinstance(self._alias2qids, dict):
qid_pairs = self._alias2qids[alias]
else:
qid_pairs = self._alias2qids.get_value(alias)
return qid_pairs
def get_qid_cands(self, alias, max_cand_pad=False):
"""Get the QID candidates for an alias.
Args:
alias: alias
max_cand_pad: whether to pad with '-1' or not if fewer than max_candidates candidates
Returns: List of QID strings
"""
qid_pairs = self._get_qid_pairs(alias)
res = [qid_pair[0] for qid_pair in qid_pairs]
if max_cand_pad:
res = res + ["-1"] * (self.max_candidates - len(res))
return res
def get_qid_count_cands(self, alias, max_cand_pad=False):
"""Get the [QID, sort_value] candidates for an alias.
Args:
alias: alias
max_cand_pad: whether to pad with ['-1',-1] or not if fewer than max_candidates candidates
Returns: List of [QID, sort_value]
"""
qid_pairs = self._get_qid_pairs(alias)
res = qid_pairs
if max_cand_pad:
res = res + ["-1", -1] * (self.max_candidates - len(res))
return res
def get_eid_cands(self, alias, max_cand_pad=False):
"""Get the EID candidates for an alias.
Args:
alias: alias
max_cand_pad: whether to pad with -1 or not if fewer than max_candidates candidates
Returns: List of EID ints
"""
qid_pairs = self._get_qid_pairs(alias)
res = [self._qid2eid[qid_pair[0]] for qid_pair in qid_pairs]
if max_cand_pad:
res = res + [-1] * (self.max_candidates - len(res))
return res
def get_title(self, id):
"""Get title for QID.
Args:
id: QID string
Returns: title string
"""
return self._qid2title[id]
def get_desc(self, id):
"""Get description for QID.
Args:
id: QID string
        Returns: description string
"""
if self._qid2desc is None:
return ""
return self._qid2desc.get(id, "")
def get_alias_idx(self, alias):
"""Get the numeric index of an alias.
Args:
alias: alias
Returns: integer representation of alias
"""
return self._alias2id[alias]
def get_alias_from_idx(self, alias_idx):
"""Get the alias from the numeric index.
Args:
alias_idx: alias numeric index
Returns: alias string
"""
if isinstance(self._id2alias, dict):
alias = self._id2alias[alias_idx]
else:
alias = self._id2alias(alias_idx)
return alias
# ============================================================
# EDIT MODE OPERATIONS
# ============================================================
@edit_op
def set_title(self, qid: str, title: str):
"""Set the title for a QID.
Args:
qid: QID
title: title
"""
assert qid in self._qid2eid
self._qid2title[qid] = title
@edit_op
def set_desc(self, qid: str, desc: str):
"""Set the description for a QID.
Args:
qid: QID
desc: description
"""
assert qid in self._qid2eid
self._qid2desc[qid] = desc
@edit_op
def set_score(self, qid: str, mention: str, score: float):
"""Change the mention QID score and resorts candidates.
Highest score is first.
Args:
qid: QID
mention: mention
score: score
"""
if mention not in self._alias2qids:
raise ValueError(f"The mention {mention} is not in our mapping")
qids_only = list(map(lambda x: x[0], self._alias2qids[mention]))
if qid not in set(qids_only):
raise ValueError(
f"The qid {qid} is not already associated with that mention."
)
qid_idx = qids_only.index(qid)
assert self._alias2qids[mention][qid_idx][0] == qid
self._alias2qids[mention][qid_idx][1] = score
self._alias2qids[mention] = sorted(
self._alias2qids[mention], key=lambda x: x[1], reverse=True
)
return
@edit_op
def add_mention(self, qid: str, mention: str, score: float):
"""Add mention to QID with the associated score.
        If the mention-QID pair already exists, a warning is logged and nothing is
        changed; call ``set_score`` to update the score instead. If the mention already
        has the maximum number of candidates, its last candidate is removed to make room for the QID.
Args:
qid: QID
mention: mention
score: score
"""
# Cast to lower and stripped for aliases
mention = utils.get_lnrm(mention)
# If mention is in mapping, make sure the qid is not
if mention in self._alias2qids:
if qid in set(map(lambda x: x[0], self._alias2qids[mention])):
logger.warning(
f"The QID {qid} is already associated with {mention}. Use set_score if you want to change "
f"the score of an existing mention-qid pair"
)
return
# If mention is not in mapping, add it
if mention not in self._alias2qids:
self._alias2qids[mention] = []
new_al_id = self.max_alid + 1
self.max_alid += 1
assert (
new_al_id not in self._id2alias
), f"{new_al_id} already in self_id2alias"
self._alias2id[mention] = new_al_id
self._id2alias[new_al_id] = mention
# msg = f"You have added a new mention to the dataset. You MUST reprep you data for this to take effect.
# Set data_config.overwrite_preprocessed_data to be True. This warning will now be supressed."
# logger.warning(msg)
# warnings.filterwarnings("ignore", message=msg)
assert (
mention not in self._qid2aliases[qid]
), f"{mention} was a mention for {qid} despite the alias mapping saying otherwise"
# If adding will go beyond max candidates, remove the last candidate. Even if the score is higher,
# the user still wants this mention added.
if len(self._alias2qids[mention]) >= self.max_candidates:
qid_to_remove = self._alias2qids[mention][-1][0]
self.remove_mention(qid_to_remove, mention)
assert (
len(self._alias2qids[mention]) < self.max_candidates
), f"Invalid state: {mention} still has more than {self.max_candidates} candidates after removal"
# Add pair
self._alias2qids[mention].append([qid, score])
self._alias2qids[mention] = sorted(
self._alias2qids[mention], key=lambda x: x[1], reverse=True
)
self._qid2aliases[qid].add(mention)
@edit_op
def remove_mention(self, qid, mention):
"""Remove the mention from those associated with the QID.
Args:
qid: QID
mention: mention to remove
"""
# Make sure the mention and qid pair is already in the mapping
if mention not in self._alias2qids:
return
qids_only = list(map(lambda x: x[0], self._alias2qids[mention]))
if qid not in set(qids_only):
return
# Remove the QID
idx_to_remove = qids_only.index(qid)
self._alias2qids[mention].pop(idx_to_remove)
# If the mention has NO candidates, remove it as a possible mention
if len(self._alias2qids[mention]) == 0:
del self._alias2qids[mention]
al_id = self._alias2id[mention]
del self._alias2id[mention]
del self._id2alias[al_id]
assert (
mention not in self._alias2qids and mention not in self._alias2id
), f"Removal of no candidates mention {mention} failed"
# msg = f"You have removed all candidates for an existing mention, which will now be removed.
# You MUST reprep you data for this to take effect. Set data_config.overwrite_preprocessed_data to be
# True. This warning will now be supressed."
# logger.warning(msg)
# warnings.filterwarnings("ignore", message=msg)
# Remove mention from inverse mapping (will be not None in edit mode)
assert (
mention in self._qid2aliases[qid]
), f"{mention} was not a mention for {qid} despite the reverse being true"
self._qid2aliases[qid].remove(mention)
return
@edit_op
def add_entity(self, qid, mentions, title, desc=""):
"""Add entity QID to our mappings with its mentions and title.
Args:
qid: QID
mentions: List of tuples [mention, score]
title: title
desc: description
"""
assert (
qid not in self._qid2eid
), "Something went wrong with the qid check that this entity doesn't exist"
# Update eid
new_eid = self.max_eid + 1
assert new_eid not in self._eid2qid
self._qid2eid[qid] = new_eid
self._eid2qid[new_eid] = qid
# Update title
self._qid2title[qid] = title
# Update description
self._qid2desc[qid] = desc
# Make empty list to add in add_mention
self._qid2aliases[qid] = set()
# Update mentions
for mention_pair in mentions:
self.add_mention(qid, mention_pair[0], mention_pair[1])
# Update metrics at the end in case of failure
self.max_eid += 1
self.num_entities += 1
self.num_entities_with_pad_and_nocand += 1
@edit_op
def reidentify_entity(self, old_qid, new_qid):
"""Rename ``old_qid`` to ``new_qid``.
Args:
old_qid: old QID
new_qid: new QID
"""
assert (
old_qid in self._qid2eid and new_qid not in self._qid2eid
), f"Internal Error: checks on existing versus new qid for {old_qid} and {new_qid} failed"
# Save state
eid = self._qid2eid[old_qid]
mentions = self.get_mentions(old_qid)
# Update qid2eid
self._qid2eid[new_qid] = self._qid2eid[old_qid]
del self._qid2eid[old_qid]
# Reassign eid
self._eid2qid[eid] = new_qid
# Update qid2title
self._qid2title[new_qid] = self._qid2title[old_qid]
del self._qid2title[old_qid]
# Update qid2desc
self._qid2desc[new_qid] = self.get_desc(old_qid)
del self._qid2desc[old_qid]
# Update qid2aliases
self._qid2aliases[new_qid] = self._qid2aliases[old_qid]
del self._qid2aliases[old_qid]
# Update alias2qids
for mention in mentions:
for i in range(len(self._alias2qids[mention])):
if self._alias2qids[mention][i][0] == old_qid:
self._alias2qids[mention][i][0] = new_qid
break
@edit_op
def prune_to_entities(self, entities_to_keep):
"""Remove all entities except those in ``entities_to_keep``.
Args:
entities_to_keep: Set of entities to keep
"""
# Update qid based dictionaries
self._qid2title = {
k: v for k, v in self._qid2title.items() if k in entities_to_keep
}
if self._qid2desc is not None:
self._qid2desc = {
k: v for k, v in self._qid2desc.items() if k in entities_to_keep
}
self._qid2aliases = {
k: v for k, v in self._qid2aliases.items() if k in entities_to_keep
}
        # Reindex the entities to compress the embedding matrix (when the model is updated)
self._qid2eid = {k: i + 1 for i, k in enumerate(sorted(entities_to_keep))}
self._eid2qid = {eid: qid for qid, eid in self._qid2eid.items()}
# Extract mentions to keep
mentions_to_keep = set().union(*self._qid2aliases.values())
# Reindex aliases
self._alias2id = {v: i for i, v in enumerate(sorted(mentions_to_keep))}
self._id2alias = {id: al for al, id in self._alias2id.items()}
# Rebuild self._alias2qids
new_alias2qids = {}
for al in mentions_to_keep:
new_alias2qids[al] = [
pair for pair in self._alias2qids[al] if pair[0] in entities_to_keep
][: self.max_candidates]
assert len(new_alias2qids[al]) > 0
self._alias2qids = new_alias2qids
self.num_entities = len(self._qid2eid)
self.num_entities_with_pad_and_nocand = self.num_entities + 2
assert self.num_entities == len(entities_to_keep)
# For when we need to add new entities
self.max_eid = max(self._eid2qid.keys())
self.max_alid = max(self._id2alias.keys())
@edit_op
def get_mentions(self, qid):
"""Get the mentions for the QID.
Args:
qid: QID
Returns: List of mentions
"""
# qid2aliases is only created in edit mode to allow for removal of mentions associated with a qid
return self._qid2aliases[qid]
@edit_op
def get_mentions_with_scores(self, qid):
"""Get the mentions and the associated score for the QID.
Args:
qid: QID
Returns: List of tuples [mention, score]
"""
mentions = self._qid2aliases[qid]
res = []
for men in mentions:
for qid_pair in self._alias2qids[men]:
if qid_pair[0] == qid:
res.append([men, qid_pair[1]])
break
return list(sorted(res, key=lambda x: x[1], reverse=True))
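# --------------------------------------------------------------------------
# Illustrative, hedged usage sketch added for clarity; not part of the original
# Bootleg module. QIDs, aliases, titles, and scores below are made up, and only
# the dictionary-backed (edit mode) code path shown above is exercised.
def _example_entity_symbols_usage():  # pragma: no cover
    alias2qids = {"lincoln": [["Q91", 100.0], ["Q28765", 3.0]]}
    qid2title = {"Q91": "Abraham Lincoln", "Q28765": "Lincoln, Nebraska"}
    entity_symbols = EntitySymbols(
        alias2qids, qid2title, max_candidates=2, edit_mode=True
    )
    # Candidates come back sorted by score (highest first).
    assert entity_symbols.get_qid_cands("lincoln") == ["Q91", "Q28765"]
    # Mentions can only be added in edit mode (see @edit_op).
    entity_symbols.add_mention("Q91", "honest abe", 10.0)
    assert "honest abe" in entity_symbols.get_mentions("Q91")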
| bootleg-master | bootleg/symbols/entity_symbols.py |
"""Bootleg slice dataset."""
import hashlib
import logging
import multiprocessing
import os
import shutil
import time
import traceback
from collections import defaultdict
import numpy as np
import ujson
from tqdm.auto import tqdm
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.symbols.constants import ANCHOR_KEY, FINAL_LOSS
from bootleg.utils import data_utils, utils
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example."""
def __init__(self, sent_idx, subslice_idx, anchor, num_alias2pred, slices):
"""Input example initializer."""
self.sent_idx = sent_idx
self.subslice_idx = subslice_idx
self.anchor = anchor
self.num_alias2pred = num_alias2pred
self.slices = slices
def to_dict(self):
"""Turn object to dictionary."""
return {
"sent_idx": self.sent_idx,
"subslice_idx": self.subslice_idx,
"anchor": self.anchor,
"num_alias2pred": self.num_alias2pred,
"slices": self.slices,
}
@classmethod
def from_dict(cls, in_dict):
"""Create object from dictionary."""
return cls(
in_dict["sent_idx"],
in_dict["subslice_idx"],
in_dict["anchor"],
in_dict["num_alias2pred"],
in_dict["slices"],
)
def __repr__(self):
"""Repr."""
return (
f"Sent: {self.sent_idx} Subsent: {self.subslice_idx} Anchors: {self.anchor} "
f"Num Alias2Pred: {self.num_alias2pred} Slices: {self.slices}"
)
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, sent_idx, subslice_idx, alias_slice_incidence, alias2pred_probs):
"""Input feature initializer."""
self.sent_idx = sent_idx
self.subslice_idx = subslice_idx
self.alias_slice_incidence = alias_slice_incidence
self.alias2pred_probs = alias2pred_probs
def to_dict(self):
"""Object to dictionary."""
return {
"sent_idx": self.sent_idx,
"subslice_idx": self.subslice_idx,
"alias_slice_incidence": self.alias_slice_incidence,
"alias2pred_probs": self.alias2pred_probs,
}
@classmethod
def from_dict(cls, in_dict):
"""Create object from dictionary."""
return cls(
in_dict["sent_idx"],
in_dict["subslice_idx"],
in_dict["alias_slice_incidence"],
in_dict["alias2pred_probs"],
)
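# --------------------------------------------------------------------------
# Illustrative, hedged sketch (not part of the original module): the values are
# made up and simply show the dict round trip used when examples are written to
# and read back from the intermediate json files during prep.
def _example_input_example_roundtrip():  # pragma: no cover
    ex = InputExample(
        sent_idx=7,
        subslice_idx=0,
        anchor=[True, False],
        num_alias2pred=2,
        slices={"my_slice": {"0": 1.0, "1": 0.0}},
    )
    assert InputExample.from_dict(ex.to_dict()).sent_idx == 7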
def get_slice_values(slice_names, line):
"""
    Return a dictionary of all slice values for an input example.
Any mention with a slice value of > 0.5 gets assigned that slice. If some
slices are missing from the input, we assign all mentions as not being in
that slice (getting a 0 label value). We also check that slices are
formatted correctly.
Args:
slice_names: slice names to evaluate on
line: input data json line
Returns: Dict of slice name to alias index string to float value of if mention is in a slice or not.
"""
slices = {}
if "slices" in line:
assert type(line["slices"]) is dict
aliases = line["aliases"]
slices = line["slices"]
# remove slices that we don't need
for slice_name in list(slices.keys()):
if slice_name not in slice_names:
del slices[slice_name]
else:
assert len(slices[slice_name]) == len(
aliases
), "Must have a prob label for each mention"
# FINAL_LOSS and BASE_SLICE are in slice_names but are generated by us so we do not want them to be in slices
assert (
FINAL_LOSS not in slices
), f"You can't have {FINAL_LOSS} be slice names. You have {slices.keys()}. {FINAL_LOSS} is used internally."
for slice_name in slice_names:
if slice_name in [FINAL_LOSS]:
continue
# Add slices that are empty
if slice_name not in slices:
slices[slice_name] = {str(i): 0.0 for i in range(len(aliases))}
return slices
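# --------------------------------------------------------------------------
# Illustrative, hedged sketch (not part of the original module): slice names,
# aliases, and probabilities below are made up. It shows that requested slices
# missing from the input line are filled with 0.0 for every mention.
def _example_get_slice_values():  # pragma: no cover
    line = {
        "aliases": ["abraham lincoln", "usa"],
        "slices": {"hard_slice": {"0": 0.9, "1": 0.1}},
    }
    slices = get_slice_values(["hard_slice", "other_slice"], line)
    assert slices["hard_slice"] == {"0": 0.9, "1": 0.1}
    assert slices["other_slice"] == {"0": 0.0, "1": 0.0}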
def create_examples_initializer(
data_config, slice_names, use_weak_label, split, train_in_candidates
):
"""Create example multiprocessing initialiezr."""
global constants_global
constants_global = {
"slice_names": slice_names,
"use_weak_label": use_weak_label,
"split": split,
"train_in_candidates": train_in_candidates,
}
def create_examples(
dataset,
create_ex_indir,
create_ex_outdir,
meta_file,
data_config,
dataset_threads,
slice_names,
use_weak_label,
split,
):
"""Create examples from the raw input data.
Args:
dataset: dataset file
create_ex_indir: temporary directory where input files are stored
create_ex_outdir: temporary directory to store output files from method
meta_file: metadata file to save the file names/paths for the next step in prep pipeline
data_config: data config
dataset_threads: number of threads
slice_names: list of slices to evaluate on
use_weak_label: whether to use weak labeling or not
split: data split
"""
log_rank_0_debug(logger, "Starting to extract subsentences")
start = time.time()
num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
log_rank_0_debug(logger, "Counting lines")
total_input = sum(1 for _ in open(dataset))
if num_processes == 1:
out_file_name = os.path.join(create_ex_outdir, os.path.basename(dataset))
constants_dict = {
"slice_names": slice_names,
"use_weak_label": use_weak_label,
"split": split,
"train_in_candidates": data_config.train_in_candidates,
}
files_and_counts = {}
res = create_examples_single(
dataset, total_input, out_file_name, constants_dict
)
total_output = res["total_lines"]
max_alias2pred = res["max_alias2pred"]
files_and_counts[res["output_filename"]] = res["total_lines"]
else:
log_rank_0_info(
logger, f"Strating to extract examples with {num_processes} threads"
)
log_rank_0_debug(
logger, "Parallelizing with " + str(num_processes) + " threads."
)
chunk_input = int(np.ceil(total_input / num_processes))
log_rank_0_debug(
logger,
f"Chunking up {total_input} lines into subfiles of size {chunk_input} lines",
)
total_input_from_chunks, input_files_dict = utils.chunk_file(
dataset, create_ex_indir, chunk_input
)
input_files = list(input_files_dict.keys())
input_file_lines = [input_files_dict[k] for k in input_files]
output_files = [
in_file_name.replace(create_ex_indir, create_ex_outdir)
for in_file_name in input_files
]
assert (
total_input == total_input_from_chunks
), f"Lengths of files {total_input} doesn't mathc {total_input_from_chunks}"
log_rank_0_debug(logger, "Done chunking files")
pool = multiprocessing.Pool(
processes=num_processes,
initializer=create_examples_initializer,
initargs=[
data_config,
slice_names,
use_weak_label,
split,
data_config.train_in_candidates,
],
)
total_output = 0
max_alias2pred = 0
input_args = list(zip(input_files, input_file_lines, output_files))
# Store output files and counts for saving in next step
files_and_counts = {}
for res in pool.imap_unordered(create_examples_hlp, input_args, chunksize=1):
total_output += res["total_lines"]
max_alias2pred = max(max_alias2pred, res["max_alias2pred"])
files_and_counts[res["output_filename"]] = res["total_lines"]
pool.close()
pool.join()
utils.dump_json_file(
meta_file,
{
"num_mentions": total_output,
"files_and_counts": files_and_counts,
"max_alias2pred": max_alias2pred,
},
)
log_rank_0_debug(
logger,
f"Done with extracting examples in {time.time()-start}. Total lines seen {total_input}. "
f"Total lines kept {total_output}.",
)
return
def create_examples_hlp(args):
"""Create examples wrapper helper."""
in_file_name, in_file_lines, out_file_name = args
return create_examples_single(
in_file_name, in_file_lines, out_file_name, constants_global
)
def create_examples_single(in_file_name, in_file_lines, out_file_name, constants_dict):
"""Create examples multiprocessing helper."""
split = constants_dict["split"]
use_weak_label = constants_dict["use_weak_label"]
slice_names = constants_dict["slice_names"]
with open(out_file_name, "w") as out_f:
total_subsents = 0
# The memmap stores things differently when you have two integers and we want to keep a2p as an array
        # Therefore we force the minimum max_a2p to be 2
max_a2pred = 2
for ex in tqdm(
open(in_file_name), total=in_file_lines, desc=f"Reading in {in_file_name}"
):
line = ujson.loads(ex)
assert "sent_idx_unq" in line
assert "aliases" in line
assert ANCHOR_KEY in line
sent_idx = line["sent_idx_unq"]
# aliases are assumed to be lower-cased in candidate map
aliases = [alias.lower() for alias in line["aliases"]]
num_alias2pred = len(aliases)
slices = get_slice_values(slice_names, line)
# We need to only use True anchors for eval
anchor = [True for i in range(len(aliases))]
if ANCHOR_KEY in line:
anchor = line[ANCHOR_KEY]
assert len(aliases) == len(anchor)
assert all(isinstance(a, bool) for a in anchor)
if split != "train":
# Reindex aliases to predict to be where anchor == True because we only ever want to predict
# those (it will see all aliases in the forward pass but we will only score the True anchors)
for slice_name in slices:
aliases_to_predict = slices[slice_name]
slices[slice_name] = {
i: aliases_to_predict[i]
for i in aliases_to_predict
if anchor[int(i)] is True
}
# Add in FINAL LOSS slice
if split != "train":
slices[FINAL_LOSS] = {
str(i): 1.0 for i in range(len(aliases)) if anchor[i] is True
}
else:
slices[FINAL_LOSS] = {str(i): 1.0 for i in range(len(aliases))}
# If not use_weak_label, only the anchor is True aliases will be given to the model
# We must re-index alias to predict to be in terms of anchors == True
# Ex: anchors = [F, T, T, F, F, T]
# If dataset_is_eval, let
# a2p = [2,5] (a2p must only be for T anchors)
# AFTER NOT USE_WEAK_LABEL, DATA WILL BE ONLY THE TRUE ANCHORS
# a2p needs to be [1, 2] for the 3rd and 6th true become the 2nd and 3rd after not weak labelling
# If dataset_is_eval is False, let
# a2p = [0,2,4,5] (a2p can be anything)
if not use_weak_label:
assert (
ANCHOR_KEY in line
), "Cannot toggle off data weak labelling without anchor info"
# The number of aliases will be reduced to the number of true anchors
num_alias2pred = sum(anchor)
# We must correct this mapping because the indexing will change when we remove False anchors (see
# comment example above)
slices = data_utils.correct_not_augmented_dict_values(anchor, slices)
# print("ANCHOR", anchor, "LINE", line, "SLICeS", slices)
# Remove slices that have no aliases to predict
for slice_name in list(slices.keys()):
if len(slices[slice_name]) <= 0:
del slices[slice_name]
all_false_anchors = all([anc is False for anc in anchor])
# For nicer code downstream, we make sure FINAL_LOSS is in here
# Only cases where it won't be is if use_weak_labels is True and the split is train
# (then we may have all false anchors)
if FINAL_LOSS not in slices:
assert (
all_false_anchors
), f"If {FINAL_LOSS} isn't in slice, it must be that all anchors are False. This is not true"
assert (
split != "train" or not use_weak_label
), "As all anchors are false, this must happen if you are evaling or training and using weak labels"
# TODO: optimizer here
# for i in range(0, num_alias2pred, 1):
# subset_slices = {}
# for slice_name in list(slices.keys()):
# subset_slices[slice_name] = dict(str(j):slice[slice_name][str(j)] for
# j in range(i:i+1))
# ex = InputExample(
# sent_idx=sent_idx,
# subslice_idx=i,
# anchor=anchor,
# num_alias2pred=num_alias2pred,
# slices=slices)
# examples.append(ex)
subslice_idx = 0
total_subsents += 1
max_a2pred = max(max_a2pred, num_alias2pred)
out_f.write(
ujson.dumps(
InputExample(
sent_idx=sent_idx,
subslice_idx=subslice_idx,
anchor=anchor,
num_alias2pred=num_alias2pred,
slices=slices,
).to_dict()
)
+ "\n"
)
return {
"total_lines": total_subsents,
"output_filename": out_file_name,
"max_alias2pred": max_a2pred,
}
def convert_examples_to_features_and_save_initializer(save_dataset_name, storage):
"""Convert to features multiprocessing initializer."""
global mmap_file_global
mmap_file_global = np.memmap(save_dataset_name, dtype=storage, mode="r+")
def convert_examples_to_features_and_save(
meta_file, dataset_threads, slice_names, save_dataset_name, storage
):
"""Convert the prepped examples into input features.
Saves in memmap files. These are used in the __get_item__ method.
Args:
meta_file: metadata file where input file paths are saved
dataset_threads: number of threads
slice_names: list of slice names to evaluation on
save_dataset_name: data file name to save
storage: data storage type (for memmap)
"""
log_rank_0_debug(logger, "Starting to extract subsentences")
start = time.time()
num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
log_rank_0_info(
logger, f"Starting to build and save features with {num_processes} threads"
)
log_rank_0_debug(logger, "Counting lines")
total_input = utils.load_json_file(meta_file)["num_mentions"]
max_alias2pred = utils.load_json_file(meta_file)["max_alias2pred"]
files_and_counts = utils.load_json_file(meta_file)["files_and_counts"]
# IMPORTANT: for distributed writing to memmap files, you must create them in w+ mode before
# being opened in r+ mode by workers
memmap_file = np.memmap(
save_dataset_name, dtype=storage, mode="w+", shape=(total_input,), order="C"
)
# Save -1 in sent_idx to check that things are loaded correctly later
memmap_file[slice_names[0]]["sent_idx"][:] = -1
input_args = []
# Saves where in memap file to start writing
offset = 0
for i, in_file_name in enumerate(files_and_counts.keys()):
input_args.append(
{
"file_name": in_file_name,
"in_file_lines": files_and_counts[in_file_name],
"save_file_offset": offset,
"ex_print_mod": int(np.ceil(total_input / 20)),
"slice_names": slice_names,
"max_alias2pred": max_alias2pred,
}
)
offset += files_and_counts[in_file_name]
if num_processes == 1:
assert len(input_args) == 1
total_output = convert_examples_to_features_and_save_single(
input_args[0], memmap_file
)
else:
log_rank_0_debug(
logger,
"Initializing pool. This make take a few minutes.",
)
pool = multiprocessing.Pool(
processes=num_processes,
initializer=convert_examples_to_features_and_save_initializer,
initargs=[save_dataset_name, storage],
)
total_output = 0
for res in pool.imap_unordered(
convert_examples_to_features_and_save_hlp, input_args, chunksize=1
):
total_output += res
pool.close()
pool.join()
# Verify that sentences are unique and saved correctly
mmap_file = np.memmap(save_dataset_name, dtype=storage, mode="r")
all_uniq_ids = set()
for i in tqdm(range(total_input), desc="Checking sentence uniqueness"):
assert (
mmap_file[slice_names[0]]["sent_idx"][i] != -1
), f"Index {i} has -1 sent idx"
uniq_id = str(
f"{mmap_file[slice_names[0]]['sent_idx'][i]}.{mmap_file[slice_names[0]]['subslice_idx'][i]}"
)
assert (
uniq_id not in all_uniq_ids
), f"Idx {uniq_id} is not unique and already in data"
all_uniq_ids.add(uniq_id)
log_rank_0_debug(
logger,
f"Done with extracting examples in {time.time() - start}. Total lines seen {total_input}. "
f"Total lines kept {total_output}",
)
return
def convert_examples_to_features_and_save_hlp(input_dict):
"""Convert to features helper."""
return convert_examples_to_features_and_save_single(input_dict, mmap_file_global)
def convert_examples_to_features_and_save_single(input_dict, mmap_file):
"""Convert examples to features multiprocessing helper."""
file_name = input_dict["file_name"]
in_file_lines = input_dict["in_file_lines"]
save_file_offset = input_dict["save_file_offset"]
ex_print_mod = input_dict["ex_print_mod"]
max_alias2pred = input_dict["max_alias2pred"]
slice_names = input_dict["slice_names"]
total_saved_features = 0
for idx, in_line in tqdm(
enumerate(open(file_name)),
total=in_file_lines,
desc=f"Processing slice {file_name}",
):
example = InputExample.from_dict(ujson.loads(in_line))
example_idx = save_file_offset + idx
sent_idx = example.sent_idx
subslice_idx = example.subslice_idx
slices = example.slices
row_data = {}
for slice_name in slice_names:
# We use the information in "slices" key to generate two pieces of info
# 1. Binary info for if a mention is in the slice
# 2. Probabilistic info for the prob the mention is in the slice, this is used to train indicator heads
if slice_name in slices:
# Set indices of aliases to predict relevant to slice to 1-hot vector
slice_indexes = np.array([0] * (max_alias2pred))
for idx in slices[slice_name]:
# We consider an example as "in the slice" if it's probability is greater than 0.5
slice_indexes[int(idx)] = slices[slice_name][idx] > 0.5
alias_slice_incidence = slice_indexes
else:
# Set to zero for all aliases if no aliases in example occur in the slice
alias_slice_incidence = np.array([0] * max_alias2pred)
# Add probabilistic labels for training indicators
if slice_name in slices:
# padded values are -1 so they are masked in score function
slices_padded = np.array([-1.0] * (max_alias2pred))
for idx in slices[slice_name]:
# The indexes needed to be string for json
slices_padded[int(idx)] = slices[slice_name][idx]
alias2pred_probs = slices_padded
else:
alias2pred_probs = np.array([-1] * max_alias2pred)
total_saved_features += 1
# Write slice indices into record array
feature = InputFeatures(
sent_idx=sent_idx,
subslice_idx=subslice_idx,
alias_slice_incidence=alias_slice_incidence,
alias2pred_probs=alias2pred_probs,
)
# We are storing mmap file in column format, so column name first
mmap_file[slice_name]["sent_idx"][example_idx] = feature.sent_idx
mmap_file[slice_name]["subslice_idx"][example_idx] = feature.subslice_idx
mmap_file[slice_name]["alias_slice_incidence"][
example_idx
] = feature.alias_slice_incidence
mmap_file[slice_name]["prob_labels"][example_idx] = feature.alias2pred_probs
if example_idx % ex_print_mod == 0:
for slice_name in row_data:
# Make one string for distributed computation consistency
output_str = ""
output_str += f'*** Example Slice "{slice_name}" ***' + "\n"
output_str += (
f"sent_idx: {example.sent_idx}" + "\n"
)
output_str += (
f"subslice_idx: {example.subslice_idx}" + "\n"
)
output_str += (
f"anchor: {example.anchor}" + "\n"
)
output_str += (
f"slices: {example.slices.get(slice_name, {})}"
+ "\n"
                )  # Sometimes slices are empty if all anchors are false
output_str += "*** Feature ***" + "\n"
output_str += (
f"alias_slice_incidence: {row_data[slice_name].alias_slice_incidence}"
+ "\n"
)
output_str += (
f"alias2pred_probs: {row_data[slice_name].alias2pred_probs}"
+ "\n"
)
print(output_str)
mmap_file.flush()
return total_saved_features
class BootlegSliceDataset:
"""
Slice dataset class.
Our dataset class for holding data slices (or subpopulations).
Each mention can be part of 0 or more slices. When running eval, we use
the SliceDataset to determine which mentions are part of what slices. Importantly, although the model
"sees" all mentions, only GOLD anchor links are evaluated for eval (splits of test/dev).
Args:
main_args: main arguments
dataset: dataset file
use_weak_label: whether to use weak labeling or not
entity_symbols: entity symbols
dataset_threads: number of processes to use
split: data split
"""
def __init__(
self,
main_args,
dataset,
use_weak_label,
entity_symbols,
dataset_threads,
split="train",
):
"""Slice dataset initializer."""
global_start = time.time()
log_rank_0_info(logger, f"Building slice dataset for {split} from {dataset}.")
spawn_method = main_args.run_config.spawn_method
data_config = main_args.data_config
orig_spawn = multiprocessing.get_start_method()
multiprocessing.set_start_method(spawn_method, force=True)
self.slice_names = data_utils.get_eval_slices(data_config.eval_slices)
self.get_slice_dt = lambda max_a2p: np.dtype(
[
("sent_idx", int),
("subslice_idx", int),
("alias_slice_incidence", int, (max_a2p,)),
("prob_labels", float, (max_a2p,)),
]
)
self.get_storage = lambda max_a2p: np.dtype(
[
(slice_name, self.get_slice_dt(max_a2p))
for slice_name in self.slice_names
]
)
        # Folder for all mmap saved files -> if called from cand_gen code, get_save_data_folder will raise AttributeError
try:
save_dataset_folder = data_utils.get_save_data_folder(
data_config, use_weak_label, dataset
)
except AttributeError:
save_dataset_folder = data_utils.get_save_data_folder_candgen(
data_config, use_weak_label, dataset
)
utils.ensure_dir(save_dataset_folder)
# Folder for temporary output files
temp_output_folder = os.path.join(
data_config.data_dir, data_config.data_prep_dir, f"prep_{split}_slice_files"
)
utils.ensure_dir(temp_output_folder)
# Input step 1
create_ex_indir = os.path.join(temp_output_folder, "create_examples_input")
utils.ensure_dir(create_ex_indir)
# Input step 2
create_ex_outdir = os.path.join(temp_output_folder, "create_examples_output")
utils.ensure_dir(create_ex_outdir)
# Meta data saved files
meta_file = os.path.join(temp_output_folder, "meta_data.json")
# File for standard training data
hash = hashlib.sha1(str(self.slice_names).encode("UTF-8")).hexdigest()[:10]
self.save_dataset_name = os.path.join(
save_dataset_folder, f"ned_slices_{hash}.bin"
)
self.save_data_config_name = os.path.join(
save_dataset_folder, "ned_slices_config.json"
)
# =======================================================================================
# SLICE DATA
# =======================================================================================
log_rank_0_debug(logger, "Loading dataset...")
log_rank_0_debug(logger, f"Seeing if {self.save_dataset_name} exists")
if data_config.overwrite_preprocessed_data or (
not os.path.exists(self.save_dataset_name)
):
st_time = time.time()
try:
log_rank_0_info(
logger,
f"Building dataset from scratch. Saving to {save_dataset_folder}",
)
create_examples(
dataset,
create_ex_indir,
create_ex_outdir,
meta_file,
data_config,
dataset_threads,
self.slice_names,
use_weak_label,
split,
)
max_alias2pred = utils.load_json_file(meta_file)["max_alias2pred"]
convert_examples_to_features_and_save(
meta_file,
dataset_threads,
self.slice_names,
self.save_dataset_name,
self.get_storage(max_alias2pred),
)
utils.dump_json_file(
self.save_data_config_name, {"max_alias2pred": max_alias2pred}
)
log_rank_0_debug(
logger, f"Finished prepping data in {time.time() - st_time}"
)
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
logger.error(e)
logger.error("\n".join(tb.stack.format()))
shutil.rmtree(save_dataset_folder, ignore_errors=True)
raise
log_rank_0_info(
logger,
f"Loading data from {self.save_dataset_name} and {self.save_data_config_name}",
)
max_alias2pred = utils.load_json_file(self.save_data_config_name)[
"max_alias2pred"
]
self.data, self.sent_to_row_id_dict = self.build_data_dict(
self.save_dataset_name, self.get_storage(max_alias2pred)
)
assert len(self.data) > 0
assert len(self.sent_to_row_id_dict) > 0
log_rank_0_debug(logger, "Removing temporary output files")
shutil.rmtree(temp_output_folder, ignore_errors=True)
# Set spawn back to original/default, which is "fork" or "spawn". This is needed for the Meta.config to
# be correctly passed in the collate_fn.
multiprocessing.set_start_method(orig_spawn, force=True)
log_rank_0_info(
logger,
f"Final slice data initialization time from {split} is {time.time() - global_start}s",
)
@classmethod
def build_data_dict(cls, save_dataset_name, storage):
"""
Build the slice dataset from saved file.
Loads the memmap slice dataset and create a mapping from sentence
index to row index.
Args:
save_dataset_name: saved memmap file name
storage: storage type of memmap file
Returns: numpy memmap data, Dict of sentence index to row in data
"""
sent_to_row_id_dict = defaultdict(list)
data = np.expand_dims(
np.memmap(save_dataset_name, dtype=storage, mode="r").view(np.recarray),
axis=1,
)
# Get any slice name for getting the sentence index
slice_name = data[0].dtype.names[0]
for i in tqdm(range(len(data)), desc="Building sent idx to row idx mapping"):
sent_idx = data[i][slice_name]["sent_idx"][0]
sent_to_row_id_dict[sent_idx].append(i)
return data, dict(sent_to_row_id_dict)
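    # Illustrative sketch (not part of the original code): the memmap built above is a
    # structured record array with one field per slice name, and ``sent_to_row_id_dict``
    # maps each sentence index to the rows derived from it. With hypothetical file and
    # class names, usage looks roughly like:
    #   data, sent_map = SomeSliceDataset.build_data_dict("ned_slices_ab12cd34ef.bin", storage)
    #   rows = sent_map[12]                                  # all rows for sentence 12
    #   inc = data[rows[0]][data.dtype.names[0]]["alias_slice_incidence"]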
def contains_sentidx(self, sent_idx):
"""Return true if the sentence index is in the dataset.
Args:
sent_idx: sentence index
Returns: bool whether in dataset or not
"""
return sent_idx in self.sent_to_row_id_dict
def get_slice_incidence_arr(self, sent_idx, alias_orig_list_pos):
"""
        Get slice incidence array.
Given the sentence index and the list of aliases to get slice
indices for (may have -1 indicating no alias), return a dictionary of
        slice_name -> 0/1 incidence array indicating whether each alias in
        alias_orig_list_pos was in the slice (-1 for no alias).
Args:
sent_idx: sentence index
alias_orig_list_pos: list of alias positions in input data list
(due to sentence splitting, aliases may be split up)
Returns: Dict of slice name -> 0/1 incidence array
"""
assert (
sent_idx in self.sent_to_row_id_dict
), f"Sentence {sent_idx} not in {self.save_dataset_name}"
alias_orig_list_pos = np.array(alias_orig_list_pos)
row_ids = self.sent_to_row_id_dict[sent_idx]
slices_to_return = {}
for row_i in row_ids:
for slice_name in self.slice_names:
slices_to_return[slice_name] = self.data[row_i][slice_name][
"alias_slice_incidence"
][0][alias_orig_list_pos]
slices_to_return[slice_name][alias_orig_list_pos == -1] = -1
return slices_to_return
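    # Example of the returned structure (a sketch, not taken from the original source):
    # with slice_names = ["slice_a", "slice_b"] and alias_orig_list_pos = [0, 1, -1],
    # the result would look like
    #   {"slice_a": array([1, 0, -1]), "slice_b": array([0, 1, -1])}
    # where 1/0 marks whether the alias is in the slice and -1 marks missing aliases.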
| bootleg-master | bootleg/slicing/slice_dataset.py |
"""Slicing initializer."""
| bootleg-master | bootleg/slicing/__init__.py |
import re
import tempfile
from pathlib import Path
from subprocess import call
import argh
from rich.console import Console
from bootleg.utils.utils import load_yaml_file
console = Console(soft_wrap=True)
bert_dir = tempfile.TemporaryDirectory().name
checkpoint_regex = re.compile(r"checkpoint_(\d+\.{0,1}\d*).model.pth")
def find_latest_checkpoint(path):
path = Path(path)
possible_checkpoints = []
for fld in path.iterdir():
res = checkpoint_regex.match(fld.name)
if res:
possible_checkpoints.append([res.group(1), fld])
if len(possible_checkpoints) <= 0:
return possible_checkpoints
newest_sort = sorted(possible_checkpoints, key=lambda x: float(x[0]), reverse=True)
return newest_sort[0][-1]
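# A quick sketch of what the checkpoint regex accepts (illustrative, not part of the script):
#   checkpoint_regex.match("checkpoint_1000.0.model.pth").group(1)  # -> "1000.0"
#   checkpoint_regex.match("checkpoint_250.model.pth").group(1)     # -> "250"
# find_latest_checkpoint sorts these captured step counts numerically and returns the path
# of the newest checkpoint, or an empty list when no checkpoint files are present.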
@argh.arg("--config", help="Path for config")
@argh.arg("--num_gpus", help="Num gpus")
@argh.arg("--batch", help="Batch size")
@argh.arg("--grad_accum", help="Grad accum")
@argh.arg("--cand_gen_run", help="Launch cand get")
def main(
config="configs/gcp/bootleg_test.yaml",
num_gpus=4,
batch=None,
grad_accum=None,
cand_gen_run=False,
):
config = Path(config)
config_d = load_yaml_file(config)
save_path = Path(config_d["emmental"]["log_path"])
seed = config_d["emmental"].get("seed", 1234)
call_file = "bootleg/run.py" if not cand_gen_run else "cand_gen/train.py"
to_call = [
"python3",
"-m",
"torch.distributed.run",
f"--nproc_per_node={num_gpus}",
call_file,
"--config",
str(config),
]
# if this is a second+ run, log path will be {log_path}_{num_steps_trained}
possible_save_paths = save_path.parent.glob(f"{save_path.name}*")
latest_save_path = sorted(
possible_save_paths,
key=lambda x: int(x.name.split("_")[-1])
if x.name.split("_")[-1].isnumeric()
else 0,
reverse=True,
)
save_path = latest_save_path[0] if len(latest_save_path) > 0 else None
if save_path is not None and save_path.exists() and save_path.is_dir():
last_checkpoint = find_latest_checkpoint(save_path)
if last_checkpoint is not None:
to_call.append("--emmental.model_path")
to_call.append(str(save_path / last_checkpoint.name))
num_steps_trained = int(
checkpoint_regex.match(last_checkpoint.name).group(1)
)
assert num_steps_trained == int(
float(checkpoint_regex.match(last_checkpoint.name).group(1))
)
optimizer_path = str(
save_path / last_checkpoint.name.replace("model", "optimizer")
)
scheduler_path = str(
save_path / last_checkpoint.name.replace("model", "scheduler")
)
to_call.append("--emmental.optimizer_path")
to_call.append(optimizer_path)
to_call.append("--emmental.scheduler_path")
to_call.append(scheduler_path)
to_call.append("--emmental.steps_learned")
to_call.append(str(num_steps_trained))
            # In case we didn't get through a full epoch, change the seed so that the data is reshuffled
to_call.append("--emmental.seed")
to_call.append(str(seed + num_steps_trained))
to_call.append("--emmental.log_path")
to_call.append(
str(save_path.parent / f"{save_path.name}_{num_steps_trained}")
)
if batch is not None:
to_call.append("--train_config.batch_size")
to_call.append(str(batch))
if grad_accum is not None:
to_call.append("--emmental.gradient_accumulation_steps")
to_call.append(str(grad_accum))
print(f"CALLING...{' '.join(to_call)}")
call(to_call)
if __name__ == "__main__":
argh.dispatch_command(main)
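# Example invocation (a sketch; the config path and flag values are hypothetical):
#   python3 configs/gcp/launch_gcp.py --config configs/gcp/bootleg_test.yaml \
#       --num_gpus 4 --batch 32 --grad_accum 2
# On a re-run, the script finds the latest checkpoint under the configured log path and
# resumes training through torch.distributed.run with the model, optimizer, and scheduler
# state restored.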
| bootleg-master | configs/gcp/launch_gcp.py |
import os
import jsonlines
import numpy as np
import pandas as pd
import requests
import tagme
import ujson
from tqdm.auto import tqdm
from bootleg.symbols.entity_profile import EntityProfile
pd.options.display.max_colwidth = 500
def load_train_data(train_file, title_map, entity_profile=None):
"""Loads a jsonl file and creates a pandas DataFrame. Adds candidates, types, and KGs if available."""
num_lines = sum(1 for _ in open(train_file))
rows = []
with jsonlines.open(train_file) as f:
for line in tqdm(f, total=num_lines):
gold_qids = line["qids"]
# for each alias, append a row in the merged result table
for alias_idx in range(len(gold_qids)):
sent_toks = line["sentence"].split()
res = {
"sentence": line["sentence"],
"sent_idx": line["sent_idx_unq"],
"aliases": line["aliases"],
"span": line["spans"][alias_idx],
"slices": line.get("slices", {}),
"alias": line["aliases"][alias_idx],
"real_alias": " ".join(
sent_toks[
line["spans"][alias_idx][0] : line["spans"][alias_idx][1]
]
),
"alias_idx": alias_idx,
"is_gold_label": line["gold"][alias_idx],
"gold_qid": gold_qids[alias_idx],
"gold_title": title_map[gold_qids[alias_idx]]
if gold_qids[alias_idx] != "Q-1"
else "Q-1",
"all_gold_qids": gold_qids,
"gold_label_aliases": [
al
for i, al in enumerate(line["aliases"])
if line["gold"][i] is True
],
"all_is_gold_labels": line["gold"],
"all_spans": line["spans"],
}
slices = []
if "slices" in line:
for sl_name in line["slices"]:
if (
str(alias_idx) in line["slices"][sl_name]
and line["slices"][sl_name][str(alias_idx)] > 0.5
):
slices.append(sl_name)
res["slices"] = slices
if entity_profile is not None:
res["cand_names"] = [
title_map[q[0]]
for i, q in enumerate(
entity_profile.get_qid_count_cands(
line["aliases"][alias_idx]
)
)
]
res["cand_qids"] = [
q[0]
for i, q in enumerate(
entity_profile.get_qid_count_cands(
line["aliases"][alias_idx]
)
)
]
for type_sym in entity_profile.get_all_typesystems():
gold_types = entity_profile.get_types(
gold_qids[alias_idx], type_sym
)
res[f"{type_sym}_gld"] = gold_types
connected_pairs_gld = []
for alias_idx2 in range(len(gold_qids)):
if entity_profile.is_connected(
gold_qids[alias_idx], gold_qids[alias_idx2]
):
connected_pairs_gld.append(gold_qids[alias_idx2])
res["kg_gld"] = connected_pairs_gld
rows.append(res)
return pd.DataFrame(rows)
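# Illustrative usage (hypothetical paths; a sketch rather than the canonical tutorial call):
#   title_map = load_title_map("data/entity_db")
#   train_df = load_train_data("data/train.jsonl", title_map)
#   train_df[["sentence", "alias", "gold_qid", "gold_title"]].head()
# Passing an EntityProfile via `entity_profile` additionally fills the candidate, type,
# and KG columns for each mention.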
def load_title_map(entity_dir, entity_mapping_dir="entity_mappings"):
return ujson.load(
open(os.path.join(entity_dir, entity_mapping_dir, "qid2title.json"))
)
def load_cand_map(entity_dir, alias_map_file, entity_mapping_dir="entity_mappings"):
return ujson.load(
open(os.path.join(entity_dir, entity_mapping_dir, alias_map_file))
)
def load_predictions(file):
lines = {}
with jsonlines.open(file) as f:
for line in f:
lines[line["sent_idx_unq"]] = line
return lines
def score_predictions(
orig_file,
pred_file,
title_map,
entity_profile: EntityProfile = None,
abbr: bool = False,
add_type: bool = True,
add_kg: bool = False,
):
"""Loads a jsonl file and joins with the results from dump_preds"""
num_lines = sum(1 for _ in open(orig_file))
preds = load_predictions(pred_file)
rows = []
with jsonlines.open(orig_file) as f:
for line in tqdm(f, total=num_lines):
sub_rows = score_line(
line, abbr, preds, title_map, entity_profile, add_type, add_kg
)
rows.extend(sub_rows)
return pd.DataFrame(rows)
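# Illustrative usage (hypothetical paths; a sketch only):
#   title_map = load_title_map("data/entity_db")
#   scored_df = score_predictions("data/test.jsonl", "bootleg_labels.jsonl", title_map)
#   errors_df = scored_df[scored_df["gold_qid"] != scored_df["pred_qid"]]
# Each row pairs one gold mention with the model's prediction, so filtering on
# gold_qid != pred_qid surfaces the disambiguation errors.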
def score_line(
line, abbr, preds, title_map, entity_profile, add_type=True, add_kg=True
):
rows = []
sent_idx = line["sent_idx_unq"]
gold_qids = line["qids"]
pred_qids = preds[sent_idx]["qids"]
gold_label_aliases = [
al for i, al in enumerate(line["aliases"]) if line["gold"][i] is True
]
assert len(gold_qids) == len(pred_qids), "Gold and pred QIDs have different lengths"
# for each alias, append a row in the merged result table
for alias_idx in range(len(gold_qids)):
res = {
"sentence": line["sentence"],
"sent_idx": line["sent_idx_unq"],
"aliases": line["aliases"],
"span": list(line["spans"][alias_idx]),
"slices": line.get("slices", {}),
"alias": line["aliases"][alias_idx],
"alias_idx": alias_idx,
"is_gold_label": line["gold"][alias_idx],
"gold_qid": gold_qids[alias_idx],
"pred_qid": pred_qids[alias_idx],
"gold_title": title_map[gold_qids[alias_idx]]
if gold_qids[alias_idx] not in {"Q-1", "-1"}
else "Q-1",
"pred_title": title_map.get(pred_qids[alias_idx], "CouldnotFind")
if pred_qids[alias_idx] != "NC"
else "NC",
"expandedAbbr": line["expandedAbbr"][alias_idx] if abbr else "",
"all_gold_qids": gold_qids,
"all_pred_qids": pred_qids,
"gold_label_aliases": gold_label_aliases,
"all_is_gold_labels": line["gold"],
"all_spans": line["spans"],
}
slices = []
if "slices" in line:
for sl_name in line["slices"]:
if (
str(alias_idx) in line["slices"][sl_name]
and line["slices"][sl_name][str(alias_idx)] > 0.5
):
slices.append(sl_name)
res["slices"] = slices
if entity_profile is not None:
res["cand_qids"] = []
res["cand_probs"] = []
res["cand_names"] = []
for i, q in enumerate(
entity_profile.get_qid_count_cands(line["aliases"][alias_idx])
):
res["cand_qids"].append(q[0])
res["cand_probs"].append(preds[sent_idx]["cand_probs"][alias_idx][i])
res["cand_names"].append(title_map[q[0]])
if add_type:
for type_sym in entity_profile.get_all_typesystems():
gold_types = entity_profile.get_types(
gold_qids[alias_idx], type_sym
)
pred_types = entity_profile.get_types(
pred_qids[alias_idx], type_sym
)
res[f"{type_sym}_gld"] = gold_types
res[f"{type_sym}_pred"] = pred_types
if add_kg:
connected_pairs_gld = []
connected_pairs_pred = []
for alias_idx2 in range(len(gold_qids)):
if entity_profile.is_connected(
gold_qids[alias_idx], gold_qids[alias_idx2]
):
connected_pairs_gld.append(gold_qids[alias_idx2])
if entity_profile.is_connected(
pred_qids[alias_idx], pred_qids[alias_idx2]
):
connected_pairs_pred.append(pred_qids[alias_idx2])
res["kg_gld"] = connected_pairs_gld
res["kg_pred"] = connected_pairs_pred
rows.append(res)
return rows
def load_mentions(file):
lines = []
with jsonlines.open(file) as f:
for line in f:
new_line = {
"sentence": line["sentence"],
"aliases": line.get("aliases", []),
"spans": line.get("spans", []),
}
lines.append(new_line)
return pd.DataFrame(lines)
def enwiki_title_to_wikidata_id(title: str) -> str:
protocol = "https"
base_url = "en.wikipedia.org/w/api.php"
params = f"action=query&prop=pageprops&format=json&titles={title}"
url = f"{protocol}://{base_url}?{params}"
response = requests.get(url)
json = response.json()
for pages in json["query"]["pages"].values():
wikidata_id = pages["pageprops"]["wikibase_item"]
return wikidata_id
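# For example (assuming the live Wikipedia API is reachable):
#   enwiki_title_to_wikidata_id("Barack Obama")  # -> "Q76"
# The query requests the page's `pageprops` from the MediaWiki API and reads the
# `wikibase_item` property, which holds the Wikidata QID.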
def tagme_annotate(in_file, out_file, threshold=0.1):
with jsonlines.open(in_file) as f_in, jsonlines.open(out_file, "w") as f_out:
for line in f_in:
aliases = []
spans = []
qids = []
probs = []
text = line["sentence"]
text_spans = text.split()
text_span_indices = []
total_len = 0
# get word boundaries for converting char spans to word spans
for i, t in enumerate(text_spans):
text_span_indices.append(total_len)
total_len += len(t) + 1
lunch_annotations = tagme.annotate(text)
# as the threshold increases, the precision increases, but the recall decreases
for ann in lunch_annotations.get_annotations(threshold):
mention = ann.mention
try:
qid = enwiki_title_to_wikidata_id(ann.entity_title)
except Exception:
print(f"No wikidata id found for {ann.entity_title}")
continue
span_start = text_span_indices.index(ann.begin)
try:
span_end = text_span_indices.index(ann.end + 1)
except Exception:
span_end = len(text_spans)
aliases.append(mention)
spans.append([span_start, span_end])
qids.append(qid)
probs.append(ann.score)
line["aliases"] = aliases
line["qids"] = qids
line["spans"] = spans
line["probs"] = probs
line["gold"] = [True for _ in aliases]
f_out.write(line)
# modified from https://github.com/facebookresearch/BLINK/blob/master/elq/vcg_utils/measures.py
def entity_linking_tp_with_overlap(gold, predicted, ignore_entity=False):
"""
    Partially adapted from: https://github.com/UKPLab/starsem2018-entity-linking
Counts weak and strong matches
:param gold:
:param predicted:
:return:
>>> entity_linking_tp_with_overlap([('Q7366', 14, 18),('Q780394', 19, 35)],[('Q7366', 14, 16),('Q780394', 19, 35)])
2, 1
>>> entity_linking_tp_with_overlap([('Q7366', 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16)])
1, 0
>>> entity_linking_tp_with_overlap([(None, 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16)])
0, 0
>>> entity_linking_tp_with_overlap([(None, 14, 18), (None, )], [(None,)])
1, 0
>>> entity_linking_tp_with_overlap([('Q7366', ), ('Q780394', )], [('Q7366', 14, 16)])
1, 0
>>> entity_linking_tp_with_overlap([], [('Q7366', 14, 16)])
0, 0
"""
if not gold or not predicted:
return 0, 0
    # Add dummy spans; if no spans are given, everything is overlapping by default
if any(len(e) != 3 for e in gold):
gold = [(e[0], 0, 1) for e in gold]
predicted = [(e[0], 0, 1) for e in predicted]
# Replace None KB ids with empty strings
gold = [("",) + e[1:] if e[0] is None else e for e in gold]
predicted = [("",) + e[1:] if e[0] is None else e for e in predicted]
# ignore_entity for computing mention precision and recall without the entity prediction
if ignore_entity:
gold = [("",) + e[1:] for e in gold]
predicted = [("",) + e[1:] for e in predicted]
gold = sorted(gold, key=lambda x: x[2])
predicted = sorted(predicted, key=lambda x: x[2])
# tracks weak matches
lcs_matrix_weak = np.zeros((len(gold), len(predicted)), dtype=np.int16)
# tracks strong matches
lcs_matrix_strong = np.zeros((len(gold), len(predicted)), dtype=np.int16)
for g_i in range(len(gold)):
for p_i in range(len(predicted)):
gm = gold[g_i]
pm = predicted[p_i]
# increment lcs_matrix_weak
if not (gm[1] >= pm[2] or pm[1] >= gm[2]) and (
gm[0].lower() == pm[0].lower()
):
if g_i == 0 or p_i == 0:
lcs_matrix_weak[g_i, p_i] = 1
else:
lcs_matrix_weak[g_i, p_i] = 1 + lcs_matrix_weak[g_i - 1, p_i - 1]
else:
if g_i == 0 and p_i == 0:
lcs_matrix_weak[g_i, p_i] = 0
elif g_i == 0 and p_i != 0:
lcs_matrix_weak[g_i, p_i] = max(0, lcs_matrix_weak[g_i, p_i - 1])
elif g_i != 0 and p_i == 0:
lcs_matrix_weak[g_i, p_i] = max(lcs_matrix_weak[g_i - 1, p_i], 0)
elif g_i != 0 and p_i != 0:
lcs_matrix_weak[g_i, p_i] = max(
lcs_matrix_weak[g_i - 1, p_i], lcs_matrix_weak[g_i, p_i - 1]
)
# increment lcs_matrix_strong
if (gm[1] == pm[1] and pm[2] == gm[2]) and (gm[0].lower() == pm[0].lower()):
if g_i == 0 or p_i == 0:
lcs_matrix_strong[g_i, p_i] = 1
else:
lcs_matrix_strong[g_i, p_i] = (
1 + lcs_matrix_strong[g_i - 1, p_i - 1]
)
else:
if g_i == 0 and p_i == 0:
lcs_matrix_strong[g_i, p_i] = 0
elif g_i == 0 and p_i != 0:
lcs_matrix_strong[g_i, p_i] = max(
0, lcs_matrix_strong[g_i, p_i - 1]
)
elif g_i != 0 and p_i == 0:
lcs_matrix_strong[g_i, p_i] = max(
lcs_matrix_strong[g_i - 1, p_i], 0
)
elif g_i != 0 and p_i != 0:
lcs_matrix_strong[g_i, p_i] = max(
lcs_matrix_strong[g_i - 1, p_i], lcs_matrix_strong[g_i, p_i - 1]
)
weak_match_count = lcs_matrix_weak[len(gold) - 1, len(predicted) - 1]
strong_match_count = lcs_matrix_strong[len(gold) - 1, len(predicted) - 1]
return weak_match_count, strong_match_count
def convert_line_tuple(line):
qids = line["qids"]
spans = line["spans"]
pairs = zip(qids, spans)
pairs = [(pair[0], pair[1][0], pair[1][1]) for pair in pairs]
return pairs
# modified from https://github.com/facebookresearch/BLINK
def compute_metrics(pred_file, gold_file, md_step_only=False, threshold=0.0):
# align by sentence index
pred_results = {}
with jsonlines.open(pred_file) as f:
for line in f:
pred_results[line["sent_idx_unq"]] = line
gold_results = {}
with jsonlines.open(gold_file) as f:
for line in f:
gold_results[line["sent_idx_unq"]] = line
assert len(pred_results) == len(
gold_results
), f"{len(pred_results)} {len(gold_results)}"
num_mentions_actual = 0
num_mentions_pred = 0
weak_match_total = 0
strong_match_total = 0
errors = []
for sent_idx in pred_results:
gold_line = gold_results[sent_idx]
pred_line = pred_results[sent_idx]
pred_triples = convert_line_tuple(pred_results[sent_idx])
gold_triples = convert_line_tuple(gold_results[sent_idx])
# filter out triples below the threshold
if len(pred_triples) > 0 and "probs" in pred_line:
assert len(pred_triples) == len(pred_line["probs"])
pred_triples = [
pt
for (pt, prob) in zip(pred_triples, pred_line["probs"])
if prob > threshold
]
weak_match_count, strong_match_count = entity_linking_tp_with_overlap(
pred_triples, gold_triples, ignore_entity=md_step_only
)
num_mentions_actual += len(gold_triples)
num_mentions_pred += len(pred_triples)
weak_match_total += weak_match_count
strong_match_total += strong_match_count
if weak_match_count != len(gold_triples) or weak_match_count != len(
pred_triples
):
pred_qids = [p[0] for p in pred_triples]
pred_spans = [[p[1], p[2]] for p in pred_triples]
pred_probs = []
if "probs" in pred_line:
pred_probs = [p for p in pred_line["probs"] if p > threshold]
assert len(pred_qids) == len(pred_probs)
errors.append(
{
"sent_idx": sent_idx,
"text": gold_line["sentence"],
"gold_aliases": gold_line["aliases"],
"gold_qids": gold_line["qids"],
"gold_spans": gold_line["spans"],
"pred_aliases": pred_line["aliases"],
"pred_qids": pred_qids,
"pred_spans": pred_spans,
"pred_probs": pred_probs,
}
)
print("WEAK MATCHING")
precision = weak_match_total / num_mentions_pred
recall = weak_match_total / num_mentions_actual
print(f"precision = {weak_match_total} / {num_mentions_pred} = {precision}")
print(f"recall = {weak_match_total} / {num_mentions_actual} = {recall}")
print(f"f1 = {precision*recall*2/(precision+recall)}")
print("\nEXACT MATCHING")
precision = strong_match_total / num_mentions_pred
recall = strong_match_total / num_mentions_actual
print(f"precision = {strong_match_total} / {num_mentions_pred} = {precision}")
print(f"recall = {strong_match_total} / {num_mentions_actual} = {recall}")
print(f"f1 = {precision*recall*2/(precision+recall)}")
return pd.DataFrame(errors)
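# Illustrative call (hypothetical file names; a sketch only):
#   errors_df = compute_metrics("bootleg_labels.jsonl", "data/test.jsonl")
# Weak matches require overlapping spans with the same QID, exact matches additionally
# require identical span boundaries, and precision/recall/F1 are printed for both.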
| bootleg-master | tutorials/utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
from monodepth.depth_model_registry import get_depth_model, get_depth_model_list
from depth_fine_tuning import DepthFineTuningParams
from scale_calibration import ScaleCalibrationParams
from utils import frame_sampling, frame_range
from tools.colmap_processor import COLMAPParams
from tools.make_video import MakeVideoParams
class Video3dParamsParser:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
self.parser.add_argument("--op",
choices=["all", "extract_frames"], default="all")
self.parser.add_argument("--path", type=str,
help="Path to all the input (except for the video) and output files "
" are stored.")
self.parser.add_argument("--video_file", type=str,
help="Path to input video file. Will be ignored if `color_full` and "
"`frames.txt` are already present.")
self.parser.add_argument("--configure",
choices=["default", "kitti"], default="default")
self.add_video_args()
self.add_flow_args()
self.add_calibration_args()
self.add_fine_tuning_args()
self.add_make_video_args()
self.initialized = True
def add_video_args(self):
self.parser.add_argument("--size", type=int, default=384,
help="Size of the (long image dimension of the) output depth maps.")
self.parser.add_argument("--align", type=int, default=0,
help="Alignment requirement of the depth size (i.e, forcing each"
" image dimension to be an integer multiple). If set <= 0 it will"
" be set automatically, based on the requirements of the depth network.")
def add_flow_args(self):
self.parser.add_argument(
"--flow_ops",
nargs="*",
help="optical flow operation: exhausted optical flow for all the pairs in"
" dense_frame_range or consective that computes forward backward flow"
" between consecutive frames.",
choices=frame_sampling.SamplePairsMode.names(),
default=["hierarchical2"],
)
self.parser.add_argument(
"--flow_checkpoint", choices=["FlowNet2", "FlowNet2-KITTI"],
default="FlowNet2"
)
self.parser.add_argument("--overlap_ratio", type=float, default=0.2)
def add_calibration_args(self):
COLMAPParams.add_arguments(self.parser)
ScaleCalibrationParams.add_arguments(self.parser)
def add_fine_tuning_args(self):
DepthFineTuningParams.add_arguments(self.parser)
self.parser.add_argument(
"--model_type", type=str, choices=get_depth_model_list(),
default="mc"
)
self.parser.add_argument(
"--frame_range", default="",
type=frame_range.parse_frame_range,
help="Range of depth to fine-tune, e.g., 0,2-10,21-40."
)
def add_make_video_args(self):
self.parser.add_argument("--make_video", action="store_true")
MakeVideoParams.add_arguments(self.parser)
def print(self):
print("------------ Parameters -------------")
args = vars(self.params)
for k, v in sorted(args.items()):
if type(v) == frame_range.NamedOptionalSet:
print(f"{k}: '{v.name}'")
else:
print(f"{k}: {v}")
print("-------------------------------------")
def parse(self, args=None, namespace=None):
if not self.initialized:
self.initialize()
self.params = self.parser.parse_args(args, namespace=namespace)
if self.params.configure == "kitti":
self.params.flow_checkpoint = "FlowNet2-KITTI"
self.params.model_type = "monodepth2"
self.params.overlap_ratio = 0.5
if 'matcher' in self.params:
self.params.matcher = 'sequential'
# Resolve unspecified parameters
model = get_depth_model(self.params.model_type)
if self.params.align <= 0:
self.params.align = model.align
if self.params.learning_rate <= 0:
self.params.learning_rate = model.learning_rate
if self.params.lambda_view_baseline < 0:
self.params.lambda_view_baseline = model.lambda_view_baseline
self.print()
return self.params
| consistent_depth-main | params.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import numpy as np
import os
from os.path import join as pjoin
import logging
from typing import Optional, Set
import torch
from utils.helpers import SuppressedStdout
from loaders.video_dataset import _dtype, load_color
from tools.colmap_processor import COLMAPParams, COLMAPProcessor
from utils import (
image_io,
geometry,
load_colmap,
visualization,
)
from utils.helpers import print_banner
from utils.torch_helpers import _device
class ScaleCalibrationParams:
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--dense_frame_ratio", type=float, default=0.95,
help="threshold on percentage of successully computed dense depth frames."
)
parser.add_argument("--dense_pixel_ratio", type=float, default=0.3,
help="ratio of valid dense depth pixels for that frame to valid")
def prepare_colmap_color(video):
"""
If there is no dynamic object mask (in `mask_dynamic`) then just
use `color_full` to do colmap so return `color_full`. Otherwise, set
dynamic part to be black. `mask_dynamic` is 1 in static part
and 0 in dynamic part. So in this case, return 'color_colmap_dense'
Returns:
output_directory
"""
    print('Preparing color input for COLMAP...')
out_dir = pjoin(video.path, 'color_colmap_dense')
dynamic_mask_dir = pjoin(video.path, 'mask_dynamic')
color_src_dir = pjoin(video.path, 'color_full')
if not os.path.isdir(dynamic_mask_dir):
return color_src_dir
if video.check_frames(out_dir, 'png'):
return out_dir
name_fmt = 'frame_{:06d}.png'
os.makedirs(out_dir, exist_ok=True)
for i in range(video.frame_count):
name = name_fmt.format(i)
im = cv2.imread(pjoin(color_src_dir, name))
seg_fn = pjoin(dynamic_mask_dir, name)
seg = (cv2.imread(seg_fn, 0) > 0)[..., np.newaxis]
masked = im * seg
cv2.imwrite(pjoin(out_dir, name), masked)
assert video.check_frames(out_dir, 'png')
return out_dir
def make_camera_params_from_colmap(path, sparse_dir):
cameras, images, points3D = load_colmap.read_model(path=sparse_dir, ext=".bin")
size_new = image_io.load_raw_float32_image(
pjoin(path, "color_down", "frame_{:06d}.raw".format(0))
).shape[:2][::-1]
intrinsics, extrinsics = load_colmap.convert_calibration(
cameras, images, size_new
)
return intrinsics, extrinsics
def visualize_calibration_pair(
extrinsics, intrinsics, depth_fmt, color_fmt, id_pair, vis_dir
):
assert len(id_pair) == 2
depth_fns = [depth_fmt.format(id) for id in id_pair]
if any(not os.path.isfile(fn) for fn in depth_fns):
return
color_fns = [color_fmt.format(id) for id in id_pair]
colors = [load_color(fn, channels_first=True) for fn in color_fns]
colors = torch.stack(colors, dim=0).to(_device)
inv_depths = [image_io.load_raw_float32_image(fn) for fn in depth_fns]
depths = 1.0 / torch.tensor(inv_depths, device=_device).unsqueeze(-3)
def select_tensor(x):
return torch.tensor(x[list(id_pair)], device=_device, dtype=_dtype)
extr = select_tensor(extrinsics)
intr = select_tensor(intrinsics)
colors_warped_to_ref = geometry.warp_image(colors, depths, extr, intr, [1, 0])
def vis(x):
x = np.clip(x.permute(1, 2, 0).cpu().numpy(), a_min=0, a_max=1)
x = x[..., ::-1] * 255 # RGB to BGR, [0, 1] to [0, 255]
return x
os.makedirs(vis_dir, exist_ok=True)
for id, tgt_id, color_warped, color in zip(
id_pair, id_pair[::-1], colors_warped_to_ref, colors
):
cv2.imwrite(pjoin(vis_dir, "frame_{:06d}.png".format(id)), vis(color))
cv2.imwrite(
pjoin(vis_dir, "frame_{:06d}_warped_to_{:06d}.png".format(tgt_id, id)),
vis(color_warped),
)
def visualize_all_calibration(
extrinsics, intrinsics, depth_fmt, color_fmt, frame_range, vis_dir
):
id_pairs = [
(frame_range.index_to_frame[i], frame_range.index_to_frame[0])
for i in range(1, len(frame_range))
]
for id_pair in id_pairs:
visualize_calibration_pair(
extrinsics, intrinsics, depth_fmt, color_fmt, id_pair, vis_dir
)
def check_frames(
src_dir, src_ext, dst_dir, dst_ext,
frame_names: Optional[Set[str]] = None
):
if not os.path.isdir(src_dir):
assert frame_names is not None
names = list(frame_names)
else:
names = [n.replace(src_ext, dst_ext)
for n in os.listdir(src_dir) if n.endswith(src_ext)]
names = [n for n in names if frame_names is None or n in frame_names]
return all(
os.path.isfile(pjoin(dst_dir, n))
for n in names
)
def calibrate_scale(video, out_dir, frame_range, args):
# COLMAP reconstruction.
print_banner("COLMAP reconstruction")
colmap_dir = pjoin(video.path, 'colmap_dense')
src_meta_file = pjoin(colmap_dir, "metadata.npz")
colmap = COLMAPProcessor(args.colmap_bin_path)
dense_dir = colmap.dense_dir(colmap_dir, 0)
if os.path.isfile(src_meta_file):
print("Checked metadata file exists.")
else:
color_dir = prepare_colmap_color(video)
if not colmap.check_dense(
dense_dir, color_dir, valid_ratio=args.dense_frame_ratio
):
path_args = [color_dir, colmap_dir]
mask_path = pjoin(video.path, 'colmap_mask')
if os.path.isdir(mask_path):
path_args.extend(['--mask_path', mask_path])
colmap_args = COLMAPParams().parse_args(
args=path_args + ['--dense_max_size', str(args.size)],
namespace=args
)
colmap.process(colmap_args)
intrinsics, extrinsics = make_camera_params_from_colmap(
video.path, colmap.sparse_dir(colmap_dir, 0)
)
np.savez(src_meta_file, intrinsics=intrinsics, extrinsics=extrinsics)
# Convert COLMAP dense depth maps to .raw file format.
print_banner("Convert COLMAP depth maps")
converted_depth_fmt = pjoin(
video.path, "depth_colmap_dense", "depth", "frame_{:06d}.raw"
)
# convert colmap dense depths to .raw
converted_depth_dir = os.path.dirname(converted_depth_fmt)
dense_depth_dir = pjoin(dense_dir, "stereo", "depth_maps")
frames = frame_range.frames()
if not check_frames(
dense_depth_dir, colmap.dense_depth_suffix(), converted_depth_dir, "",
frame_names={f"frame_{i:06d}.png" for i in frames},
):
os.makedirs(converted_depth_dir, exist_ok=True)
colmap_depth_fmt = pjoin(
dense_depth_dir, "frame_{:06d}.png" + colmap.dense_depth_suffix()
)
for i in frames:
colmap_depth_fn = colmap_depth_fmt.format(i)
if not os.path.isfile(colmap_depth_fn):
logging.warning(
"[SCALE CALIBRATION] %s does not exist.",
colmap_depth_fn
)
continue
cmp_depth = load_colmap.read_array(colmap_depth_fn)
inv_cmp_depth = 1.0 / cmp_depth
ix = np.isinf(inv_cmp_depth) | (inv_cmp_depth < 0)
inv_cmp_depth[ix] = float("nan")
image_io.save_raw_float32_image(
converted_depth_fmt.format(i), inv_cmp_depth
)
with SuppressedStdout():
visualization.visualize_depth_dir(
converted_depth_dir, converted_depth_dir,
force=True, min_percentile=0, max_percentile=99,
)
# Compute scaled depth maps
print_banner("Compute per-frame scales")
scaled_depth_dir = pjoin(out_dir, "depth_scaled_by_colmap_dense", "depth")
scaled_depth_fmt = pjoin(scaled_depth_dir, "frame_{:06d}.raw")
scales_file = pjoin(out_dir, "scales.csv")
src_depth_fmt = pjoin(
video.path, f"depth_{args.model_type}", "depth", "frame_{:06d}.raw"
)
frames = frame_range.frames()
if (
check_frames(
converted_depth_dir, ".png",
os.path.dirname(scaled_depth_fmt), ".raw"
)
and os.path.isfile(scales_file)
):
src_to_colmap_scales = np.loadtxt(scales_file, delimiter=',')
assert src_to_colmap_scales.shape[0] >= len(frames) * args.dense_frame_ratio \
and src_to_colmap_scales.shape[1] == 2, \
(f"scales shape is {src_to_colmap_scales.shape} does not match "
+ f"({len(frames)}, 2) with threshold {args.dense_frame_ratio}")
print("Existing scales file loaded.")
else:
# Scale depth maps
os.makedirs(scaled_depth_dir, exist_ok=True)
src_to_colmap_scales_map = {}
for i in frames:
converted_depth_fn = converted_depth_fmt.format(i)
if not os.path.isfile(converted_depth_fn):
logging.warning("[SCALE CALIBRATION] %s does not exist",
converted_depth_fn)
continue
            # load the COLMAP depth that was already converted to .raw above
inv_cmp_depth = image_io.load_raw_float32_image(converted_depth_fn)
# compute scale for init depths
inv_src_depth = image_io.load_raw_float32_image(src_depth_fmt.format(i))
# src_depth * scale = (1/inv_src_depth) * scale == cmp_depth
inv_cmp_depth = cv2.resize(
inv_cmp_depth, inv_src_depth.shape[:2][::-1],
interpolation=cv2.INTER_NEAREST
)
ix = np.isfinite(inv_cmp_depth)
if np.sum(ix) / ix.size < args.dense_pixel_ratio:
# not enough pixels are valid and hence the frame is invalid.
continue
scales = (inv_src_depth / inv_cmp_depth)[ix]
scale = np.median(scales)
print(f"Scale[{i}]: median={scale}, std={np.std(scales)}")
# scale = np.median(inv_depth) * np.median(cmp_depth)
src_to_colmap_scales_map[i] = float(scale)
scaled_inv_src_depth = inv_src_depth / scale
image_io.save_raw_float32_image(
scaled_depth_fmt.format(i), scaled_inv_src_depth
)
with SuppressedStdout():
visualization.visualize_depth_dir(
scaled_depth_dir, scaled_depth_dir, force=True
)
# Write scales.csv
xs = sorted(src_to_colmap_scales_map.keys())
ys = [src_to_colmap_scales_map[x] for x in xs]
src_to_colmap_scales = np.stack((np.array(xs), np.array(ys)), axis=-1)
np.savetxt(scales_file, src_to_colmap_scales, delimiter=",")
valid_frames = {int(s) for s in src_to_colmap_scales[:, 0]}
# Scale the extrinsics' translations
scaled_meta_file = pjoin(out_dir, "metadata_scaled.npz")
if os.path.isfile(scaled_meta_file):
print("Scaled metadata file exists.")
else:
scales = src_to_colmap_scales[:, 1]
mean_scale = scales.mean()
print(f"[scales] mean={mean_scale}, std={np.std(scales)}")
with np.load(src_meta_file) as meta_colmap:
intrinsics = meta_colmap["intrinsics"]
extrinsics = meta_colmap["extrinsics"]
extrinsics[..., -1] /= mean_scale
np.savez(
scaled_meta_file,
intrinsics=intrinsics,
extrinsics=extrinsics,
scales=src_to_colmap_scales,
)
color_fmt = pjoin(video.path, "color_down", "frame_{:06d}.raw")
vis_dir = pjoin(out_dir, "vis_calibration_dense")
visualize_all_calibration(
extrinsics, intrinsics, scaled_depth_fmt,
color_fmt, frame_range, vis_dir,
)
return valid_frames
| consistent_depth-main | scale_calibration.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import copy
import cv2
import numpy as np
import os
import torch
from third_party.flownet2.models import FlowNet2
from third_party.OpticalFlowToolkit.lib.flowlib import flow_to_image
from utils.image_io import save_raw_float32_image
class FlowInfer(torch.utils.data.Dataset):
def __init__(self, list_file, size=None, isRGB=True, start_pos=0):
super(FlowInfer, self).__init__()
self.size = size
txt_file = open(list_file, "r")
self.frame1_list = []
self.frame2_list = []
self.output_list = []
self.isRGB = isRGB
for line in txt_file:
line = line.strip(" ")
line = line.strip("\n")
line_split = line.split(" ")
self.frame1_list.append(line_split[0])
self.frame2_list.append(line_split[1])
self.output_list.append(line_split[2])
if start_pos > 0:
self.frame1_list = self.frame1_list[start_pos:]
self.frame2_list = self.frame2_list[start_pos:]
self.output_list = self.output_list[start_pos:]
txt_file.close()
def __len__(self):
return len(self.frame1_list)
def __getitem__(self, idx):
frame1 = cv2.imread(self.frame1_list[idx])
frame2 = cv2.imread(self.frame2_list[idx])
if self.isRGB:
frame1 = frame1[:, :, ::-1]
frame2 = frame2[:, :, ::-1]
output_path = self.output_list[idx]
frame1 = self._img_tf(frame1)
frame2 = self._img_tf(frame2)
frame1_tensor = torch.from_numpy(frame1).permute(2, 0, 1).contiguous().float()
frame2_tensor = torch.from_numpy(frame2).permute(2, 0, 1).contiguous().float()
return frame1_tensor, frame2_tensor, output_path
def _img_tf(self, img):
img = cv2.resize(img, (self.size[1], self.size[0]))
return img
def detectAndDescribe(image):
# detect and extract features from the image
descriptor = cv2.xfeatures2d.SURF_create()
(kps, features) = descriptor.detectAndCompute(image, None)
# convert the keypoints from KeyPoint objects to NumPy
# arrays
kps = np.float32([kp.pt for kp in kps])
# return a tuple of keypoints and features
return (kps, features)
def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio=0.75, reprojThresh=4.0):
# compute the raw matches and initialize the list of actual
# matches
matcher = cv2.DescriptorMatcher_create("BruteForce")
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
matches = []
# loop over the raw matches
for m in rawMatches:
# ensure the distance is within a certain ratio of each
# other (i.e. Lowe's ratio test)
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
matches.append((m[0].trainIdx, m[0].queryIdx))
# computing a homography requires at least 4 matches
if len(matches) > 4:
# construct the two sets of points
ptsA = np.float32([kpsA[i] for (_, i) in matches])
ptsB = np.float32([kpsB[i] for (i, _) in matches])
# compute the homography between the two sets of points
(H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)
        # return the matches along with the homography matrix
# and status of each matched point
return (matches, H, status)
    # otherwise, no homography could be computed
return None
def parse_args():
parser = argparse.ArgumentParser("Compute optical flow from im1 to im2")
parser.add_argument("--im1", nargs="+")
parser.add_argument("--im2", nargs="+")
parser.add_argument("--out", nargs="+")
parser.add_argument(
"--pretrained_model_flownet2",
type=str,
default="./pretrained_models/FlowNet2_checkpoint.pth.tar",
)
# parser.add_argument('--img_size', type=list, default=(512, 1024, 3))
parser.add_argument("--rgb_max", type=float, default=255.0)
parser.add_argument("--fp16", action="store_true")
parser.add_argument("--homography", type=bool, default=1)
parser.add_argument(
"--size",
type=int,
nargs=2,
default=None,
help="If size is not None, resize the flow to size."
+ " O.w., resize based on max_size and divide.",
)
parser.add_argument("--max_size", type=int, default=None)
parser.add_argument("--divide", type=int, default=1)
parser.add_argument("--visualize", type=bool, default=False)
args = parser.parse_args()
return args
def getimage(img1_path, img2_path, size=None):
frame1 = cv2.imread(img1_path)
frame2 = cv2.imread(img2_path)
if size is not None:
frame1 = cv2.resize(frame1[:, :, ::-1], (size[1], size[0]))
frame2 = cv2.resize(frame2[:, :, ::-1], (size[1], size[0]))
imgH, imgW, _ = frame1.shape
(kpsA, featuresA) = detectAndDescribe(frame1)
(kpsB, featuresB) = detectAndDescribe(frame2)
try:
(_, H_BA, _) = matchKeypoints(kpsB, kpsA, featuresB, featuresA)
except Exception:
H_BA = np.array([1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]).reshape(3, 3)
    if H_BA is None:
        H_BA = np.array([1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]).reshape(3, 3)
try:
np.linalg.inv(H_BA)
except Exception:
H_BA = np.array([1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]).reshape(3, 3)
img2_registered = cv2.warpPerspective(frame2, H_BA, (imgW, imgH))
frame1_tensor = torch.from_numpy(frame1).permute(2, 0, 1).contiguous().float()
frame2_tensor = torch.from_numpy(frame2).permute(2, 0, 1).contiguous().float()
frame2_reg_tensor = (
torch.from_numpy(img2_registered).permute(2, 0, 1).contiguous().float()
)
return frame1_tensor, frame2_tensor, frame2_reg_tensor, H_BA
def infer(args, Flownet, device, img1_name, img2_name):
img1, img2, img2_reg, H_BA = getimage(img1_name, img2_name)
_, imgH, imgW = img1.shape
img1 = img1[None, :, :]
img2 = img2[None, :, :]
img2_reg = img2_reg[None, :, :]
img1 = img1.to(device)
img2 = img2.to(device)
img2_reg = img2_reg.to(device)
if args.homography != 1:
sz = img1.size()
img1_view = img1.view(sz[0], sz[1], 1, sz[2], sz[3])
img2_view = img2.view(sz[0], sz[1], 1, sz[2], sz[3])
inputs = torch.cat((img1_view, img2_view), dim=2)
flow = Flownet(inputs)[0].permute(1, 2, 0).data.cpu().numpy()
else:
sz = img1.size()
img1_view = img1.view(sz[0], sz[1], 1, sz[2], sz[3])
img2_reg_view = img2_reg.view(sz[0], sz[1], 1, sz[2], sz[3])
inputs = torch.cat((img1_view, img2_reg_view), dim=2)
flow = Flownet(inputs)[0].permute(1, 2, 0).data.cpu().numpy()
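    # The flow above is defined on the (possibly homography-registered) second frame, so
    # the end points (fxx, fyy) computed below are mapped back through inv(H_BA) in
    # homogeneous coordinates, re-expressing the flow on the original frame2 pixel grid.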
(fy, fx) = np.mgrid[0:imgH, 0:imgW].astype(np.float32)
fxx = copy.deepcopy(fx) + flow[:, :, 0]
fyy = copy.deepcopy(fy) + flow[:, :, 1]
(fxxx, fyyy, fz) = np.linalg.inv(H_BA).dot(
np.concatenate(
(
fxx.reshape(1, -1),
fyy.reshape(1, -1),
np.ones_like(fyy).reshape(1, -1),
),
axis=0,
)
)
fxxx, fyyy = fxxx / fz, fyyy / fz
flow = np.concatenate(
(
fxxx.reshape(imgH, imgW, 1) - fx.reshape(imgH, imgW, 1),
fyyy.reshape(imgH, imgW, 1) - fy.reshape(imgH, imgW, 1),
),
axis=2,
)
return flow
def resize_flow(flow, size):
resized_width, resized_height = size
H, W = flow.shape[:2]
scale = np.array((resized_width / float(W), resized_height / float(H))).reshape(
1, 1, -1
)
resized = cv2.resize(
flow, dsize=(resized_width, resized_height), interpolation=cv2.INTER_CUBIC
)
resized *= scale
return resized
def process(args):
N = len(args.im1)
assert N == len(args.im2) and N == len(args.out)
device = torch.device("cuda:0")
Flownet = FlowNet2(args)
print(f"Loading pretrained model from '{args.pretrained_model_flownet2}'.")
flownet2_ckpt = torch.load(args.pretrained_model_flownet2)
Flownet.load_state_dict(flownet2_ckpt["state_dict"])
Flownet.to(device)
Flownet.eval()
for im1, im2, out in zip(args.im1, args.im2, args.out):
if os.path.isfile(out):
continue
flow = infer(args, Flownet, device, im1, im2)
flow = resize_flow(flow, args.size)
os.makedirs(os.path.dirname(out), exist_ok=True)
save_raw_float32_image(out, flow)
if args.visualize:
vis = flow_to_image(flow)
cv2.imwrite(os.path.splitext(out)[0] + ".png", vis)
if __name__ == "__main__":
args = parse_args()
process(args)
| consistent_depth-main | optical_flow_flownet2_homography.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import itertools
import json
import math
import os
from os.path import join as pjoin
import time
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision.utils as vutils
from typing import Dict
from utils.helpers import SuppressedStdout
from monodepth.depth_model_registry import get_depth_model
import optimizer
from loaders.video_dataset import VideoDataset, VideoFrameDataset
from loss.joint_loss import JointLoss
from loss.loss_params import LossParams
from utils import image_io, visualization
from utils.torch_helpers import to_device
class DepthFineTuningParams:
"""Options about finetune parameters.
"""
@staticmethod
def add_arguments(parser):
parser = LossParams.add_arguments(parser)
parser.add_argument(
"--optimizer",
default="Adam",
choices=optimizer.OPTIMIZER_NAMES,
help="optimizer to train the network",
)
parser.add_argument(
"--val_epoch_freq",
type=int,
default=1,
help="validation epoch frequency.",
)
parser.add_argument("--learning_rate", type=float, default=0,
help="Learning rate for the training. If <= 0 it will be set"
" automatically to the default for the specified model adapter.")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--num_epochs", type=int, default=20)
parser.add_argument("--log_dir", help="folder to log tensorboard summary")
parser.add_argument('--display_freq', type=int, default=100,
help='frequency of showing training results on screen')
parser.add_argument('--print_freq', type=int, default=1,
help='frequency of showing training results on console')
parser.add_argument('--save_epoch_freq', type=int, default=1,
help='frequency of saving checkpoints at the end of epochs')
return parser
def log_loss_stats(
writer: SummaryWriter,
name_prefix: str,
loss_meta: Dict[str, torch.Tensor],
n: int,
log_histogram: bool = False,
):
"""
    loss_meta: dict mapping sub_loss_name to a tensor of individual losses.
"""
for sub_loss_name, loss_value in loss_meta.items():
sub_loss_full_name = name_prefix + "/" + sub_loss_name
writer.add_scalar(
sub_loss_full_name + "/max", loss_value.max(), n,
)
writer.add_scalar(
sub_loss_full_name + "/min", loss_value.min(), n,
)
writer.add_scalar(
sub_loss_full_name + "/mean", loss_value.mean(), n,
)
if log_histogram:
writer.add_histogram(sub_loss_full_name, loss_value, n)
def write_summary(
writer, mode_name, input_images, depth, metadata, n_iter
):
DIM = -3
B = depth.shape[0]
inv_depth_pred = depth.unsqueeze(DIM)
mask = torch.stack(metadata['geometry_consistency']['masks'], dim=1)
def to_vis(x):
return x[:8].transpose(0, 1).reshape((-1,) + x.shape[DIM:])
writer.add_image(
mode_name + '/image',
vutils.make_grid(to_vis(input_images), nrow=B, normalize=True), n_iter)
writer.add_image(
mode_name + '/pred_full',
vutils.make_grid(to_vis(1.0 / inv_depth_pred), nrow=B, normalize=True), n_iter)
writer.add_image(
mode_name + '/mask',
vutils.make_grid(to_vis(mask), nrow=B, normalize=True), n_iter)
def log_loss(
writer: SummaryWriter,
mode_name: str,
loss: torch.Tensor,
loss_meta: Dict[str, torch.Tensor],
niters: int,
):
main_loss_name = mode_name + "/loss"
writer.add_scalar(main_loss_name, loss, niters)
log_loss_stats(writer, main_loss_name, loss_meta, niters)
def make_tag(params):
return (
LossParams.make_str(params)
+ f"_LR{params.learning_rate}"
+ f"_BS{params.batch_size}"
+ f"_O{params.optimizer.lower()}"
)
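# For example (values are hypothetical): with learning_rate=0.0004, batch_size=4, and
# optimizer="Adam", the tag ends in "_LR0.0004_BS4_Oadam", appended to whatever prefix
# LossParams.make_str(params) produces.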
class DepthFineTuner:
def __init__(self, range_dir, frames, params):
self.frames = frames
self.params = params
self.base_dir = params.path
self.range_dir = range_dir
self.out_dir = pjoin(self.range_dir, make_tag(params))
os.makedirs(self.out_dir, exist_ok=True)
print(f"Fine-tuning directory: '{self.out_dir}'")
self.checkpoints_dir = pjoin(self.out_dir, "checkpoints")
os.makedirs(self.checkpoints_dir, exist_ok=True)
model = get_depth_model(params.model_type)
self.model = model()
num_gpus = torch.cuda.device_count()
print(f"Using {num_gpus} GPUs.")
if num_gpus > 1:
self.params.batch_size *= num_gpus
print(f"Adjusting batch size to {self.params.batch_size}.")
self.reference_disparity = {}
self.vis_depth_scale = None
def save_depth(self, dir: str = None, frames=None):
if dir is None:
dir = self.out_dir
if frames is None:
frames = self.frames
color_fmt = pjoin(self.base_dir, "color_down", "frame_{:06d}.raw")
depth_dir = pjoin(dir, "depth")
depth_fmt = pjoin(depth_dir, "frame_{:06d}")
dataset = VideoFrameDataset(color_fmt, frames)
data_loader = DataLoader(
dataset, batch_size=1, shuffle=False, num_workers=4
)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
self.model.eval()
os.makedirs(depth_dir, exist_ok=True)
for data in data_loader:
data = to_device(data)
stacked_images, metadata = data
frame_id = metadata["frame_id"][0]
depth = self.model.forward(stacked_images, metadata)
depth = depth.detach().cpu().numpy().squeeze()
inv_depth = 1.0 / depth
image_io.save_raw_float32_image(
depth_fmt.format(frame_id) + ".raw", inv_depth)
with SuppressedStdout():
visualization.visualize_depth_dir(depth_dir, depth_dir, force=True)
def fine_tune(self, writer=None):
meta_file = pjoin(self.range_dir, "metadata_scaled.npz")
dataset = VideoDataset(self.base_dir, meta_file)
train_data_loader = DataLoader(
dataset,
batch_size=self.params.batch_size,
shuffle=True,
num_workers=4,
pin_memory=torch.cuda.is_available(),
)
val_data_loader = DataLoader(
dataset,
batch_size=self.params.batch_size,
shuffle=False,
num_workers=4,
pin_memory=torch.cuda.is_available(),
)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
criterion = JointLoss(self.params,
parameters_init=[p.clone() for p in self.model.parameters()])
if writer is None:
log_dir = pjoin(self.out_dir, "tensorboard")
os.makedirs(log_dir, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir)
opt = optimizer.create(
self.params.optimizer,
self.model.parameters(),
self.params.learning_rate,
betas=(0.9, 0.999)
)
eval_dir = pjoin(self.out_dir, "eval")
os.makedirs(eval_dir, exist_ok=True)
self.model.train()
def suffix(epoch, niters):
return "_e{:04d}_iter{:06d}".format(epoch, niters)
def validate(epoch, niters):
loss_meta = self.eval_and_save(
criterion, val_data_loader, suffix(epoch, niters)
)
if writer is not None:
log_loss_stats(
writer, "validation", loss_meta, epoch, log_histogram=True
)
print(f"Done Validation for epoch {epoch} ({niters} iterations)")
self.vis_depth_scale = None
validate(0, 0)
# Training loop.
total_iters = 0
for epoch in range(self.params.num_epochs):
epoch_start_time = time.perf_counter()
for data in train_data_loader:
data = to_device(data)
stacked_img, metadata = data
depth = self.model(stacked_img, metadata)
opt.zero_grad()
loss, loss_meta = criterion(
depth, metadata, parameters=self.model.parameters())
pairs = metadata['geometry_consistency']['indices']
pairs = pairs.cpu().numpy().tolist()
print(f"Epoch = {epoch}, pairs = {pairs}, loss = {loss[0]}")
if torch.isnan(loss):
print("Loss is NaN. Skipping.")
continue
loss.backward()
opt.step()
total_iters += stacked_img.shape[0]
if writer is not None and total_iters % self.params.print_freq == 0:
log_loss(writer, 'Train', loss, loss_meta, total_iters)
if writer is not None and total_iters % self.params.display_freq == 0:
write_summary(
writer, 'Train', stacked_img, depth, metadata, total_iters
)
epoch_end_time = time.perf_counter()
epoch_duration = epoch_end_time - epoch_start_time
print(f"Epoch {epoch} took {epoch_duration:.2f}s.")
if (epoch + 1) % self.params.val_epoch_freq == 0:
validate(epoch + 1, total_iters)
if (epoch + 1) % self.params.save_epoch_freq == 0:
file_name = pjoin(self.checkpoints_dir, f"{epoch + 1:04d}.pth")
self.model.save(file_name)
# Validate the last epoch, unless it was just done in the loop above.
if self.params.num_epochs % self.params.val_epoch_freq != 0:
validate(self.params.num_epochs, total_iters)
print("Finished Training")
def eval_and_save(self, criterion, data_loader, suf) -> Dict[str, torch.Tensor]:
"""
        Note this function assumes the structure of the data produced by data_loader
"""
N = len(data_loader.dataset)
loss_dict = {}
saved_frames = set()
total_index = 0
max_frame_index = 0
all_pairs = []
for _, data in zip(range(N), data_loader):
data = to_device(data)
stacked_img, metadata = data
with torch.no_grad():
depth = self.model(stacked_img, metadata)
batch_indices = (
metadata["geometry_consistency"]["indices"].cpu().numpy().tolist()
)
# Update the maximum frame index and pairs list.
max_frame_index = max(max_frame_index, max(itertools.chain(*batch_indices)))
all_pairs += batch_indices
# Compute and store losses.
_, loss_meta = criterion(
depth, metadata, parameters=self.model.parameters(),
)
for loss_name, losses in loss_meta.items():
if loss_name not in loss_dict:
loss_dict[loss_name] = {}
for indices, loss in zip(batch_indices, losses):
loss_dict[loss_name][str(indices)] = loss.item()
# Save depth maps.
inv_depths_batch = 1.0 / depth.cpu().detach().numpy()
if self.vis_depth_scale is None:
# Single scale for the whole dataset.
self.vis_depth_scale = inv_depths_batch.max()
for inv_depths, indices in zip(inv_depths_batch, batch_indices):
for inv_depth, index in zip(inv_depths, indices):
# Only save frames not saved before.
if index in saved_frames:
continue
saved_frames.add(index)
fn_pre = pjoin(
self.out_dir, "eval", "depth_{:06d}{}".format(index, suf)
)
image_io.save_raw_float32_image(fn_pre + ".raw", inv_depth)
inv_depth_vis = visualization.visualize_depth(
inv_depth, depth_min=0, depth_max=self.vis_depth_scale
)
cv2.imwrite(fn_pre + ".png", inv_depth_vis)
total_index += 1
loss_meta = {
loss_name: torch.tensor(tuple(loss.values()))
for loss_name, loss in loss_dict.items()
}
loss_dict["mean"] = {k: v.mean().item() for k, v in loss_meta.items()}
with open(pjoin(self.out_dir, "eval", "loss{}.json".format(suf)), "w") as f:
json.dump(loss_dict, f)
# Print verbose summary to stdout.
index_width = int(math.ceil(math.log10(max_frame_index)))
loss_names = list(loss_dict.keys())
loss_names.remove("mean")
loss_format = {}
for name in loss_names:
max_value = max(loss_dict[name].values())
width = math.ceil(math.log10(max_value))
loss_format[name] = f"{width+7}.6f"
for pair in sorted(all_pairs):
line = f"({pair[0]:{index_width}d}, {pair[1]:{index_width}d}): "
line += ", ".join(
[f"{name}: {loss_dict[name][str(pair)]:{loss_format[name]}}"
for name in loss_names]
)
print(line)
print("Mean: " + " " * (2 * index_width) + ", ".join(
[f"{name}: {loss_dict[name][str(pair)]:{loss_format[name]}}"
for name in loss_names]
))
return loss_meta
| consistent_depth-main | depth_fine_tuning.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import json
import numpy as np
import os
from os.path import join as pjoin
import torch
from third_party.OpticalFlowToolkit.lib import flowlib
from utils.url_helpers import get_model_from_url
import optical_flow_flownet2_homography
from utils import (
consistency, geometry, image_io, visualization
)
from utils.helpers import dotdict, mkdir_ifnotexists
from utils.torch_helpers import _device
def warp_by_flow(color, flow):
def to_tensor(x):
return torch.tensor(x.reshape((-1,) + x.shape)).to(_device).permute(0, 3, 1, 2)
color = to_tensor(color)
flow = to_tensor(flow)
N, _, H, W = flow.shape
pixel = geometry.pixel_grid(1, (H, W))
uv = pixel + flow
warped = geometry.sample(color, uv)
return warped.permute(0, 2, 3, 1).squeeze().detach().cpu().numpy()
class Flow:
def __init__(self, path, out_path):
self.path = path
self.out_path = out_path
# Max size at which flow can be computed.
@staticmethod
def max_size():
return 1024
def check_good_flow_pairs(self, frame_pairs, overlap_ratio):
flow_list_path = pjoin(self.out_path, "flow_list_%.2f.json" % overlap_ratio)
if os.path.isfile(flow_list_path):
return flow_list_path
def ratio(mask):
return np.sum(mask > 0) / np.prod(mask.shape[:2])
mask_fmt = pjoin(self.path, "mask", "mask_{:06d}_{:06d}.png")
result_pairs = []
checked_pairs = set()
for pair in frame_pairs:
if pair in checked_pairs:
continue
cur_pairs = [pair, pair[::-1]]
checked_pairs.update(cur_pairs)
mask_fns = [mask_fmt.format(*ids) for ids in cur_pairs]
masks = [cv2.imread(fn, 0) for fn in mask_fns]
mask_ratios = [ratio(m) for m in masks]
if all(r >= overlap_ratio for r in mask_ratios):
result_pairs.extend(cur_pairs)
else:
print("Bad frame pair(%d, %d). Overlap_ratio=" % (pair[0], pair[1]),
mask_ratios)
print(f"Filtered {len(result_pairs)} / {len(frame_pairs)} good frame pairs")
if len(result_pairs) == 0:
raise Exception("No good frame pairs are found.")
frame_dists = np.array([np.abs(i - j) for (i, j) in result_pairs])
print(
"Frame distance statistics: max = %d, mean = %d, median = %d" %
(np.amax(frame_dists), np.mean(frame_dists), np.median(frame_dists))
)
with open(flow_list_path, "w") as f:
json.dump(list(result_pairs), f)
return flow_list_path
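    # For instance (a sketch, not from the original source): with overlap_ratio=0.2, a
    # pair (i, j) is kept only if both mask_{i,j} and mask_{j,i} mark at least 20% of
    # their pixels as consistent; otherwise both directions of the pair are dropped.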
def check_flow_files(self, index_pairs):
flow_dir = "%s/flow" % self.path
for (i, j) in index_pairs:
file = "%s/flow_%06d_%06d.raw" % (flow_dir, i, j)
if not os.path.exists(file):
return False
return True
def compute_flow(self, index_pairs, checkpoint):
"""Run Flownet2 with specific <checkpoint> (FlowNet2 or finetuned on KITTI)
Note that we don't fit homography first for FlowNet2-KITTI model.
"""
model_name = checkpoint.lower()
if model_name == "flownet2-kitti":
model_file = get_model_from_url(
"https://www.dropbox.com/s/mme80czrpbqal7k/flownet2-kitti.pth.tar?dl=1",
model_name + ".pth",
)
else:
model_file = f"checkpoints/{model_name}.pth"
mkdir_ifnotexists("%s/flow" % self.path)
if self.check_flow_files(index_pairs):
return
frame_dir = "%s/color_flow" % self.path
frame1_fns = [
"%s/frame_%06d.png" % (frame_dir, pair[0]) for pair in index_pairs
]
frame2_fns = [
"%s/frame_%06d.png" % (frame_dir, pair[1]) for pair in index_pairs
]
out_fns = [
"%s/flow/flow_%06d_%06d.raw" % (self.path, i, j)
for (i, j) in index_pairs
]
tmp = image_io.load_raw_float32_image(
pjoin(self.path, "color_down", "frame_{:06d}.raw".format(0))
)
size = tmp.shape[:2][::-1]
print("Resizing flow to", size)
args = dotdict()
args.pretrained_model_flownet2 = model_file
args.im1 = list(frame1_fns)
args.im2 = list(frame2_fns)
args.out = list(out_fns)
args.size = size
args.fp16 = False
args.homography = 'KITTI' not in checkpoint
args.rgb_max = 255.0
args.visualize = False
optical_flow_flownet2_homography.process(args)
self.check_flow_files(index_pairs)
def visualize_flow(self, warp=False):
flow_fmt = pjoin(self.path, "flow", "flow_{:06d}_{:06d}.raw")
mask_fmt = pjoin(self.path, "mask", "mask_{:06d}_{:06d}.png")
color_fmt = pjoin(self.path, "color_down", "frame_{:06d}.raw")
vis_fmt = pjoin(self.path, "vis_flow", "frame_{:06d}_{:06d}.png")
warp_fmt = pjoin(self.path, "vis_flow_warped", "frame_{:06d}_{:06d}_warped.png")
def get_indices(name):
strs = os.path.splitext(name)[0].split("_")[1:]
return sorted((int(s) for s in strs))
for fmt in (vis_fmt, warp_fmt):
os.makedirs(os.path.dirname(fmt), exist_ok=True)
flow_names = os.listdir(os.path.dirname(flow_fmt))
for flow_name in flow_names:
indices = get_indices(flow_name)
if os.path.isfile(vis_fmt.format(*indices)) and (
not warp or os.path.isfile(warp_fmt.format(*indices))
):
continue
indices_pair = [indices, indices[::-1]]
flow_fns = [flow_fmt.format(*idxs) for idxs in indices_pair]
mask_fns = [mask_fmt.format(*idxs) for idxs in indices_pair]
color_fns = [color_fmt.format(idx) for idx in indices]
flows = [image_io.load_raw_float32_image(fn) for fn in flow_fns]
flow_ims = [flowlib.flow_to_image(np.copy(flow)) for flow in flows]
colors = [image_io.load_raw_float32_image(fn) * 255 for fn in color_fns]
masks = [cv2.imread(fn, 0) for fn in mask_fns]
masked_colors = [
visualization.apply_mask(im, mask) for im, mask in zip(colors, masks)
]
masked_flows = [
visualization.apply_mask(im, mask) for im, mask in zip(flow_ims, masks)
]
masked = np.hstack(masked_colors + masked_flows)
original = np.hstack(colors + flow_ims)
visual = np.vstack((original, masked))
cv2.imwrite(vis_fmt.format(*indices), visual)
if warp:
warped = [
warp_by_flow(color, flow)
for color, flow in zip(colors[::-1], flows)
]
for idxs, im in zip([indices, indices[::-1]], warped):
cv2.imwrite(warp_fmt.format(*idxs), im)
def mask_valid_correspondences(self, flow_thresh=1, color_thresh=1):
flow_fmt = pjoin(self.path, "flow", "flow_{:06d}_{:06d}.raw")
mask_fmt = pjoin(self.path, "mask", "mask_{:06d}_{:06d}.png")
color_fmt = pjoin(self.path, "color_down", "frame_{:06d}.raw")
def get_indices(name):
strs = os.path.splitext(name)[0].split("_")[1:]
return [int(s) for s in strs]
os.makedirs(os.path.dirname(mask_fmt), exist_ok=True)
flow_names = os.listdir(os.path.dirname(flow_fmt))
for flow_name in flow_names:
indices = get_indices(flow_name)
if os.path.isfile(mask_fmt.format(*indices)):
continue
indices_pair = [indices, indices[::-1]]
flow_fns = [flow_fmt.format(*idxs) for idxs in indices_pair]
mask_fns = [mask_fmt.format(*idxs) for idxs in indices_pair]
color_fns = [color_fmt.format(idx) for idx in indices]
flows = [image_io.load_raw_float32_image(fn) for fn in flow_fns]
colors = [image_io.load_raw_float32_image(fn) for fn in color_fns]
masks = consistency.consistent_flow_masks(
flows, colors, flow_thresh, color_thresh
)
for mask, mask_fn in zip(masks, mask_fns):
cv2.imwrite(mask_fn, mask * 255)
| consistent_depth-main | flow.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
from os.path import join as pjoin
import shutil
from depth_fine_tuning import DepthFineTuner
from flow import Flow
from scale_calibration import calibrate_scale
from tools import make_video as mkvid
from utils.frame_range import FrameRange, OptionalSet
from utils.helpers import print_banner, print_title
from video import (Video, sample_pairs)
class DatasetProcessor:
def __init__(self, writer=None):
self.writer = writer
def create_output_path(self, params):
range_tag = f"R{params.frame_range.name}"
flow_ops_tag = "-".join(params.flow_ops)
name = f"{range_tag}_{flow_ops_tag}_{params.model_type}"
out_dir = pjoin(self.path, name)
os.makedirs(out_dir, exist_ok=True)
return out_dir
def extract_frames(self, params):
print_banner("Extracting PTS")
self.video.extract_pts()
print_banner("Extracting frames")
self.video.extract_frames()
def pipeline(self, params):
self.extract_frames(params)
print_banner("Downscaling frames (raw)")
self.video.downscale_frames("color_down", params.size, "raw")
print_banner("Downscaling frames (png)")
self.video.downscale_frames("color_down_png", params.size, "png")
print_banner("Downscaling frames (for flow)")
self.video.downscale_frames("color_flow", Flow.max_size(), "png", align=64)
frame_range = FrameRange(
frame_range=params.frame_range.set, num_frames=self.video.frame_count,
)
frames = frame_range.frames()
print_banner("Compute initial depth")
ft = DepthFineTuner(self.out_dir, frames, params)
initial_depth_dir = pjoin(self.path, f"depth_{params.model_type}")
if not self.video.check_frames(pjoin(initial_depth_dir, "depth"), "raw"):
ft.save_depth(initial_depth_dir)
valid_frames = calibrate_scale(self.video, self.out_dir, frame_range, params)
# frame range for finetuning:
ft_frame_range = frame_range.intersection(OptionalSet(set(valid_frames)))
print("Filtered out frames",
sorted(set(frame_range.frames()) - set(ft_frame_range.frames())))
print_banner("Compute flow")
frame_pairs = sample_pairs(ft_frame_range, params.flow_ops)
self.flow.compute_flow(frame_pairs, params.flow_checkpoint)
print_banner("Compute flow masks")
self.flow.mask_valid_correspondences()
flow_list_path = self.flow.check_good_flow_pairs(
frame_pairs, params.overlap_ratio
)
shutil.copyfile(flow_list_path, pjoin(self.path, "flow_list.json"))
print_banner("Visualize flow")
self.flow.visualize_flow(warp=True)
print_banner("Fine-tuning")
ft.fine_tune(writer=self.writer)
print_banner("Compute final depth")
if not self.video.check_frames(pjoin(ft.out_dir, "depth"), "raw", frames):
ft.save_depth(ft.out_dir, frames)
if params.make_video:
print_banner("Export visualization videos")
self.make_videos(params, ft.out_dir)
return initial_depth_dir, ft.out_dir, frame_range.frames()
def process(self, params):
self.path = params.path
os.makedirs(self.path, exist_ok=True)
self.video_file = params.video_file
self.out_dir = self.create_output_path(params)
self.video = Video(params.path, params.video_file)
self.flow = Flow(params.path, self.out_dir)
print_title(f"Processing dataset '{self.path}'")
print(f"Output directory: {self.out_dir}")
if params.op == "all":
return self.pipeline(params)
elif params.op == "extract_frames":
return self.extract_frames(params)
else:
raise RuntimeError("Invalid operation specified.")
def make_videos(self, params, ft_depth_dir):
args = [
"--color_dir", pjoin(self.path, "color_down_png"),
"--out_dir", pjoin(self.out_dir, "videos"),
"--depth_dirs",
pjoin(self.path, f"depth_{params.model_type}"),
pjoin(self.path, "depth_colmap_dense"),
pjoin(ft_depth_dir, "depth"),
]
gt_dir = pjoin(self.path, "depth_gt")
if os.path.isdir(gt_dir):
args.append(gt_dir)
vid_params = mkvid.MakeVideoParams().parser.parse_args(
args,
namespace=params
)
logging.info("Make videos {}".format(vid_params))
mkvid.main(vid_params)
| consistent_depth-main | process.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from params import Video3dParamsParser
from process import DatasetProcessor
if __name__ == "__main__":
parser = Video3dParamsParser()
params = parser.parse()
dp = DatasetProcessor()
dp.process(params)
| consistent_depth-main | main.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import logging
import os
from os.path import join as pjoin
import sys
import tempfile
from utils import (frame_sampling, image_io)
from utils.helpers import mkdir_ifnotexists
ffmpeg = "ffmpeg"
ffprobe = "ffprobe"
def sample_pairs(frame_range, flow_ops):
#TODO: update the frame range with reconstruction range
name_mode_map = frame_sampling.SamplePairsMode.name_mode_map()
opts = [
frame_sampling.SamplePairsOptions(mode=name_mode_map[op]) for op in flow_ops
]
pairs = frame_sampling.SamplePairs.sample(
opts, frame_range=frame_range, two_way=True
)
print(f"Sampled {len(pairs)} frame pairs.")
return pairs
class Video:
def __init__(self, path, video_file=None):
self.path = path
self.video_file = video_file
def check_extracted_pts(self):
pts_file = "%s/frames.txt" % self.path
if not os.path.exists(pts_file):
return False
with open(pts_file, "r") as file:
lines = file.readlines()
self.frame_count = int(lines[0])
width = int(lines[1])
height = int(lines[2])
print("%d frames detected (%d x %d)." % (self.frame_count, width, height))
if len(lines) != self.frame_count + 3:
sys.exit("frames.txt has wrong number of lines")
print("frames.txt exists, checked OK.")
return True
return False
def extract_pts(self):
if self.check_extracted_pts():
# frames.txt exists and checked OK.
return
if not os.path.exists(self.video_file):
sys.exit("ERROR: input video file '%s' not found.", self.video_file)
# Get width and height
tmp_file = tempfile.mktemp(".png")
cmd = "%s -i %s -vframes 1 %s" % (ffmpeg, self.video_file, tmp_file)
print(cmd)
res = os.popen(cmd).read()
image = image_io.load_image(tmp_file)
height = image.shape[0]
width = image.shape[1]
os.remove(tmp_file)
if os.path.exists(tmp_file):
sys.exit("ERROR: unable to remove '%s'" % tmp_file)
# Get PTS
def parse_line(line, token):
if line[: len(token)] != token:
sys.exit("ERROR: record is malformed, expected to find '%s'." % token)
return line[len(token) :]
ffprobe_cmd = "%s %s -select_streams v:0 -show_frames" % (
ffprobe,
self.video_file,
)
cmd = ffprobe_cmd + " | grep pkt_pts_time"
print(cmd)
res = os.popen(cmd).read()
pts = []
for line in res.splitlines():
pts.append(parse_line(line, "pkt_pts_time="))
self.frame_count = len(pts)
print("%d frames detected." % self.frame_count)
pts_file = "%s/frames.txt" % self.path
with open(pts_file, "w") as file:
file.write("%d\n" % len(pts))
file.write("%s\n" % width)
file.write("%s\n" % height)
for t in pts:
file.write("%s\n" % t)
self.check_extracted_pts()
def check_frames(self, frame_dir, extension, frames=None):
if not os.path.isdir(frame_dir):
return False
files = os.listdir(frame_dir)
files = [n for n in files if n.endswith(extension)]
if len(files) == 0:
return False
if frames is None:
frames = range(self.frame_count)
if len(files) != len(frames):
sys.exit(
"ERROR: expected to find %d files but found %d in '%s'"
                % (len(frames), len(files), frame_dir)
)
for i in frames:
frame_file = "%s/frame_%06d.%s" % (frame_dir, i, extension)
if not os.path.exists(frame_file):
sys.exit("ERROR: did not find expected file '%s'" % frame_file)
print("Frames found, checked OK.")
return True
def extract_frames(self):
frame_dir = "%s/color_full" % self.path
mkdir_ifnotexists(frame_dir)
if self.check_frames(frame_dir, "png"):
# Frames are already extracted and checked OK.
return
if not os.path.exists(self.video_file):
sys.exit("ERROR: input video file '%s' not found.", self.video_file)
cmd = "%s -i %s -start_number 0 -vsync 0 %s/frame_%%06d.png" % (
ffmpeg,
self.video_file,
frame_dir,
)
print(cmd)
os.popen(cmd).read()
count = len(os.listdir(frame_dir))
if count != self.frame_count:
sys.exit(
"ERROR: %d frames extracted, but %d PTS entries."
% (count, self.frame_count)
)
self.check_frames(frame_dir, "png")
def downscale_frames(
self, subdir, max_size, ext, align=16, full_subdir="color_full"
):
full_dir = pjoin(self.path, full_subdir)
down_dir = pjoin(self.path, subdir)
mkdir_ifnotexists(down_dir)
if self.check_frames(down_dir, ext):
# Frames are already extracted and checked OK.
return
for i in range(self.frame_count):
full_file = "%s/frame_%06d.png" % (full_dir, i)
down_file = ("%s/frame_%06d." + ext) % (down_dir, i)
suppress_messages = (i > 0)
image = image_io.load_image(
full_file, max_size=max_size, align=align,
suppress_messages=suppress_messages
)
image = image[..., ::-1] # Channel swizzle
if ext == "raw":
image_io.save_raw_float32_image(down_file, image)
else:
cv2.imwrite(down_file, image * 255)
self.check_frames(down_dir, ext)
| consistent_depth-main | video.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import logging
import sys
import os
from os.path import join as pjoin
import shutil
import subprocess
from typing import Tuple, Optional, List
import cv2
LOG = logging.getLogger()
LOG.setLevel("INFO")
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
LOG.handlers = []
LOG.addHandler(ch)
class MakeVideoParams:
def __init__(self):
        self.parser = argparse.ArgumentParser(
            description="Create videos from color and depth frames. "
            "If <video3d_dir> is specified, <color_dir> and <depth_dirs> "
            "only need to be directories relative to <video3d_dir>. "
            "<color_dir> and each of the <depth_dirs> should contain "
            "the same number of frames."
)
self.parser.add_argument(
"--color_dir", default="color_down_png", help="directory of color images"
)
self.parser.add_argument(
"--depth_dirs", nargs="*", help="directory of depth images"
)
self.parser.add_argument("--out_dir", help="output directory for the video")
self.parser.add_argument("--ext", help="video extension", default=".mp4")
self.parser.add_argument(
"--frame_fmt", help="frame format", default="frame_%06d.png"
)
self.parser.add_argument(
"--video3d_dir", help="directory for the 3D video", default=None
)
self.add_arguments(self.parser)
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--ffmpeg", help="specify the path to the ffmpeg bin", default="ffmpeg"
)
def parse_args():
return MakeVideoParams().parser.parse_args()
def num_frames(dir, ext):
return len([fn for fn in os.listdir(dir) if os.path.splitext(fn)[-1] == ext])
def augment_args(args):
if args.video3d_dir is not None:
args.color_dir = pjoin(args.video3d_dir, args.color_dir)
args.depth_dirs = [pjoin(args.video3d_dir, dir) for dir in args.depth_dirs]
args.out_dir = pjoin(args.video3d_dir, args.out_dir)
# depth_dir can include or omit the "depth" suffix
# number of frames should be equal
frame_ext = os.path.splitext(args.frame_fmt)[-1]
n = num_frames(args.color_dir, frame_ext)
assert n > 0
DEPTH = "depth"
args.depth_names = []
valid_depth_dirs = []
for depth_dir in args.depth_dirs:
names = os.listdir(depth_dir)
if DEPTH in names and len(names) == 1:
depth_dir = pjoin(depth_dir, DEPTH)
depth_frames = num_frames(depth_dir, frame_ext)
if depth_frames == n:
valid_depth_dirs.append(depth_dir)
else:
logging.warning("[Warning] %d vs. %d in %s" % (depth_frames, n, depth_dir))
continue
p_head, p_tail = os.path.split(depth_dir)
if p_tail == DEPTH:
p_head, p_tail = os.path.split(p_head)
args.depth_names.append(p_tail)
args.depth_dirs = valid_depth_dirs
return args
def frame_size(frame_fmt: str, frame_index: int = 0):
im_fn = frame_fmt % frame_index
return cv2.imread(im_fn).shape[:2][::-1]
def make_resized_filename(prefix: str, size: Tuple[int, int], ext: str):
return prefix + ("_" + str(size)) + ext
def make_resized_filename_if_exists(
prefix: str, ext: str, size: Optional[Tuple[int, int]] = None
) -> str:
unsized_fn = prefix + ext
if size is None:
return unsized_fn
sized_fn = make_resized_filename(prefix, size, ext)
if os.path.isfile(sized_fn):
return sized_fn
return unsized_fn
def make_video(
ffmpeg: str,
frame_fmt: str,
out_prefix: str,
ext: str = ".mp4",
size: Optional[Tuple[int, int]] = None,
crf: int = 1,
) -> None:
out_fn = out_prefix + ext
if not os.path.isfile(out_fn):
cmd = [
ffmpeg,
"-r", "30",
"-i", frame_fmt,
"-vcodec", "libx264",
"-pix_fmt", "yuv420p",
"-crf", str(crf),
"-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
out_fn,
]
print(subprocess.run(cmd, check=True))
# resize the video if size is specified
if size is None:
return
in_size = frame_size(frame_fmt)
if in_size == size:
return
resized_out_fn = make_resized_filename(out_prefix, size, ext)
if os.path.isfile(resized_out_fn):
return
resize_cmd = [
ffmpeg,
"-i",
out_fn,
"-vf",
"scale=" + ":".join([str(x) for x in size]),
resized_out_fn,
]
print(subprocess.run(resize_cmd, check=True))
def make_overlay(depth_fmt: str, color_fmt: str, overlay_fmt: str) -> None:
n = num_frames(os.path.dirname(color_fmt), os.path.splitext(color_fmt)[-1])
for i in range(n):
color = cv2.imread(color_fmt % i)
depth = cv2.imread(depth_fmt % i)
if depth.shape != color.shape:
depth = cv2.resize(depth, color.shape[:2][::-1])
gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
overlay = gray.reshape(gray.shape[:2] + (-1,)) / 2.0 + depth / 2.0
cv2.imwrite(overlay_fmt % i, overlay)
def stack_videos(
ffmpeg: str,
fn_prefixes: List[str],
out_dir: str,
ext: str = ".mp4",
size: Optional[Tuple[int, int]] = None,
crf: int = 1,
) -> str:
out_pre = "_".join([os.path.basename(pre) for pre in fn_prefixes])
out_fn = pjoin(out_dir, out_pre + ext)
if os.path.isfile(out_fn):
return out_fn
vid_fns = [
make_resized_filename_if_exists(pre, ext, size=size) for pre in fn_prefixes
]
cmd = [ffmpeg]
for vid_fn in vid_fns:
cmd.extend(["-i", vid_fn])
cmd.extend(["-filter_complex", "hstack=inputs=" + str(len(vid_fns))])
cmd.extend(["-crf", str(crf)])
cmd.append(out_fn)
print(subprocess.run(cmd, check=True))
return out_fn
def make_depth_videos(
ffmpeg: str,
depth_fmt: str,
color_fmt: str,
out_prefix: str,
ext: str = ".mp4",
size: Optional[Tuple[int, int]] = None,
) -> None:
# make a video using the depth frames
make_video(ffmpeg, depth_fmt, out_prefix, ext=ext, size=size)
# color depth overlay
overlay_prefix = out_prefix + "-overlay"
overlay_fn = overlay_prefix + ext
if os.path.isfile(overlay_fn):
return
overlay_dir = out_prefix
os.makedirs(overlay_dir, exist_ok=True)
overlay_fmt = pjoin(overlay_dir, os.path.basename(depth_fmt))
make_overlay(depth_fmt, color_fmt, overlay_fmt)
make_video(ffmpeg, overlay_fmt, overlay_prefix, ext=ext, size=size)
shutil.rmtree(overlay_dir)
stack_videos(
ffmpeg,
[out_prefix, overlay_prefix],
os.path.dirname(out_prefix),
ext=ext,
size=size,
)
def main(args):
COLOR_NAME = "color"
args = augment_args(args)
size = frame_size(pjoin(args.color_dir, args.frame_fmt))
os.makedirs(args.out_dir, exist_ok=True)
color_video_prefix = pjoin(args.out_dir, COLOR_NAME)
make_video(
args.ffmpeg,
pjoin(args.color_dir, args.frame_fmt),
color_video_prefix,
ext=args.ext,
)
depth_video_prefixes = [pjoin(args.out_dir, name) for name in args.depth_names]
for depth_dir, prefix in zip(args.depth_dirs, depth_video_prefixes):
make_depth_videos(
args.ffmpeg,
pjoin(depth_dir, args.frame_fmt),
pjoin(args.color_dir, args.frame_fmt),
prefix,
size=size,
ext=args.ext,
)
if len(args.depth_dirs) > 0:
stack_videos(
args.ffmpeg,
[color_video_prefix] + depth_video_prefixes,
args.out_dir,
size=size,
ext=args.ext,
)
# merge overlay videos
overlay_video_prefixes = []
for pre in depth_video_prefixes:
overlay_video_prefixes.extend([pre, pre + "-overlay"])
stack_videos(
args.ffmpeg, overlay_video_prefixes, args.out_dir, size=size, ext=args.ext
)
return 0
if __name__ == "__main__":
sys.exit(main(parse_args()))
| consistent_depth-main | tools/make_video.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import logging
import os
from os.path import join as pjoin
import subprocess
import sys
import numpy as np
class COLMAPParams:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument("image_path", help="image path")
self.parser.add_argument("workspace_path", help="workspace path")
self.parser.add_argument(
"--mask_path",
help="path for mask to exclude feature extration from those regions",
default=None,
)
self.parser.add_argument(
"--dense_max_size", type=int, help='Max size for dense COLMAP', default=384,
)
self.add_arguments(self.parser)
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--colmap_bin_path",
help="path to colmap bin. COLMAP 3.6 is required to enable mask_path",
default='colmap'
)
parser.add_argument(
"--sparse", help="disable dense reconstruction", action='store_true'
)
parser.add_argument(
"--initialize_pose", help="Intialize Pose", action='store_true'
)
parser.add_argument(
"--camera_params", help="prior camera parameters", default=None
)
parser.add_argument(
"--camera_model", help="camera_model", default='SIMPLE_PINHOLE'
)
parser.add_argument(
"--refine_intrinsics",
help="refine camera parameters. Not used when camera_params is None",
action="store_true"
)
parser.add_argument(
"--matcher", choices=["exhaustive", "sequential"], default="exhaustive",
help="COLMAP matcher ('exhaustive' or 'sequential')"
)
def parse_args(self, args=None, namespace=None):
return self.parser.parse_args(args, namespace=namespace)
class COLMAPProcessor:
def __init__(self, colmap_bin: str = 'colmap'):
self.colmap_bin = colmap_bin
def process(self, args):
os.makedirs(args.workspace_path, exist_ok=True)
self.extract_features(args)
self.match(args)
if args.initialize_pose:
self.triangulate(args)
else:
self.map(args)
models = os.listdir(self.sparse_dir(args.workspace_path))
num_models = len(models)
logging.info('#models = %d', num_models)
if num_models > 1:
logging.error(
"COLMAP reconstructs more than one model (#models=%d)",
num_models
)
if 'sparse' not in vars(args) or not args.sparse:
for sub_model in models:
self.dense(sub_model, args)
def extract_features(self, args):
cmd = [
self.colmap_bin,
'feature_extractor',
'--database_path', self.db_path(args.workspace_path),
'--image_path', args.image_path,
'--ImageReader.camera_model', args.camera_model,
'--ImageReader.single_camera', '1'
]
if args.camera_params:
cmd.extend(['--ImageReader.camera_params', args.camera_params])
if args.mask_path:
cmd.extend(['--ImageReader.mask_path', args.mask_path])
if args.initialize_pose:
cmd.extend(['--SiftExtraction.num_threads', '1'])
cmd.extend(['--SiftExtraction.gpu_index', '0'])
run(cmd)
def match(self, args):
cmd = [
self.colmap_bin,
f'{args.matcher}_matcher',
'--database_path', self.db_path(args.workspace_path),
'--SiftMatching.guided_matching', '1',
]
if args.matcher == "sequential":
cmd.extend([
'--SequentialMatching.overlap', '50',
'--SequentialMatching.quadratic_overlap', '0',
])
run(cmd)
def triangulate(self, args):
if self.check_sparse(self.sparse_dir(args.workspace_path, model_index=0)):
return
pose_init_dir = self.pose_init_dir(args.workspace_path)
assert self.check_sparse(pose_init_dir)
sparse_dir = self.sparse_dir(args.workspace_path, model_index=0)
os.makedirs(sparse_dir, exist_ok=True)
cmd = [
self.colmap_bin,
'point_triangulator',
'--database_path', self.db_path(args.workspace_path),
'--image_path', args.image_path,
'--output_path', sparse_dir,
'--input_path', pose_init_dir,
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_local_max_num_iterations', '0',
'--Mapper.ba_global_max_num_iterations', '1',
]
run(cmd)
def map(self, args):
if self.check_sparse(self.sparse_dir(args.workspace_path, model_index=0)):
return
sparse_dir = self.sparse_dir(args.workspace_path)
os.makedirs(sparse_dir, exist_ok=True)
cmd = [
self.colmap_bin,
'mapper',
'--database_path', self.db_path(args.workspace_path),
'--image_path', args.image_path,
'--output_path', sparse_dir,
# add the following options for KITTI evaluation. Should help in general.
'--Mapper.abs_pose_min_inlier_ratio', '0.5',
'--Mapper.abs_pose_min_num_inliers', '50',
'--Mapper.init_max_forward_motion', '1',
'--Mapper.ba_local_num_images', '15',
]
if args.camera_params and not args.refine_intrinsics:
cmd.extend([
'--Mapper.ba_refine_focal_length', '0',
'--Mapper.ba_refine_extra_params', '0',
])
run(cmd)
def dense(self, recon_model: str, args):
dense_dir = self.dense_dir(args.workspace_path, model_index=recon_model)
if self.check_dense(dense_dir, args.image_path):
return
os.makedirs(dense_dir, exist_ok=True)
cmd = [
self.colmap_bin,
'image_undistorter',
'--image_path', args.image_path,
'--input_path',
self.sparse_dir(args.workspace_path, model_index=recon_model),
'--output_path', dense_dir,
'--output_type', "COLMAP",
'--max_image_size', str(args.dense_max_size),
]
run(cmd)
cmd = [
self.colmap_bin,
'patch_match_stereo',
'--workspace_path', dense_dir,
'--workspace_format', "COLMAP",
'--PatchMatchStereo.max_image_size', str(args.dense_max_size),
]
run(cmd)
@staticmethod
def dense_depth_suffix():
return ".geometric.bin"
@staticmethod
def db_path(workspace):
return pjoin(workspace, 'database.db')
@staticmethod
def sparse_dir(workspace, model_index=None):
p = pjoin(workspace, 'sparse')
if model_index is None:
return p
return pjoin(p, str(model_index))
@staticmethod
def dense_dir(workspace, model_index=None):
p = pjoin(workspace, 'dense')
if model_index is None:
return p
return pjoin(p, str(model_index))
@staticmethod
def pose_init_dir(workspace):
return pjoin(workspace, 'pose_init')
@staticmethod
def check_sparse(sparse_model_dir: str):
return any(
all(
(os.path.isfile(pjoin(sparse_model_dir, name))
for name in ["cameras" + ext, "images" + ext])
)
for ext in ['.bin', '.txt']
)
@classmethod
def check_dense(cls, dense_model_dir: str, image_path: str, valid_ratio=1):
assert valid_ratio <= 1
depth_fmt = pjoin(
dense_model_dir, "stereo", "depth_maps", "{}" + cls.dense_depth_suffix()
)
color_names = os.listdir(image_path)
        num_valid = sum(os.path.isfile(depth_fmt.format(n)) for n in color_names)
return (num_valid / len(color_names)) >= valid_ratio
def run(cmd):
print(' '.join(cmd))
subprocess.run(cmd)
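# Minimal usage sketch (illustrative only): run the sparse-only pipeline on
# hypothetical image/workspace paths. Requires a local COLMAP installation;
# the paths below are placeholders.
def _example_run_sparse_reconstruction():
    params = COLMAPParams().parse_args(
        ["path/to/images", "path/to/workspace", "--sparse"]
    )
    COLMAPProcessor(params.colmap_bin_path).process(params)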
def main(args):
processor = COLMAPProcessor(args.colmap_bin)
processor.process(args)
return 0
def parse_args():
return COLMAPParams().parser.parse_args()
if __name__ == '__main__':
sys.exit(main(parse_args()))
| consistent_depth-main | tools/colmap_processor.py |
#!/usr/bin/env python3
from torch.optim.optimizer import Optimizer
from torch.optim import Adam
OPTIMIZER_MAP = {
"Adam": Adam,
}
OPTIMIZER_NAMES = OPTIMIZER_MAP.keys()
OPTIMIZER_CLASSES = OPTIMIZER_MAP.values()
def create(optimizer_name: str, *args, **kwargs) -> Optimizer:
return OPTIMIZER_MAP[optimizer_name](*args, **kwargs)
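# Minimal usage sketch (illustrative only): build an optimizer by name for an
# arbitrary model's parameters. The toy model and learning rate below are
# placeholders, not part of the fine-tuning pipeline.
def _example_create_optimizer() -> Optimizer:
    import torch
    model = torch.nn.Linear(4, 1)
    return create("Adam", model.parameters(), lr=1e-4)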
| consistent_depth-main | optimizer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
from utils.torch_helpers import _device
from utils.geometry import (
pixel_grid,
focal_length,
project,
pixels_to_points,
reproject_points,
sample,
)
def select_tensors(x):
"""
x (B, N, C, H, W) -> (N, B, C, H, W)
Each batch (B) is composed of a pair or more samples (N).
"""
return x.transpose(0, 1)
def weighted_mse_loss(input, target, weights, dim=1, eps=1e-6):
"""
Args:
input (B, C, H, W)
target (B, C, H, W)
weights (B, 1, H, W)
Returns:
        (B): per-batch weighted mean (over pixels) of the squared error summed along dim
"""
assert (
input.ndimension() == target.ndimension()
and input.ndimension() == weights.ndimension()
)
# normalize to sum=1
B = weights.shape[0]
weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
weights_sum = torch.clamp(weights_sum, min=eps)
weights_n = weights / weights_sum
sq_error = torch.sum((input - target) ** 2, dim=dim, keepdim=True) # BHW
return torch.sum((weights_n * sq_error).reshape(B, -1), dim=1)
def weighted_rmse_loss(input, target, weights, dim=1, eps=1e-6):
"""
Args:
input (B, C, H, W)
target (B, C, H, W)
weights (B, 1, H, W)
Returns:
        (B): per-batch weighted mean (over pixels) of the L2 norm of (input - target) along dim
"""
assert (
input.ndimension() == target.ndimension()
and input.ndimension() == weights.ndimension()
)
# normalize to sum=1
B = weights.shape[0]
weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
weights_sum = torch.clamp(weights_sum, min=eps)
weights_n = weights / weights_sum
diff = torch.norm(input - target, dim=dim, keepdim=True)
return torch.sum((weights_n * diff).reshape(B, -1), dim=1)
def weighted_mean_loss(x, weights, eps=1e-6):
"""
Args:
x (B, ...)
weights (B, ...)
Returns:
        (B): per-batch weighted mean of x
"""
assert x.ndimension() == weights.ndimension() and x.shape[0] == weights.shape[0]
# normalize to sum=1
B = weights.shape[0]
weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
weights_sum = torch.clamp(weights_sum, min=eps)
weights_n = weights / weights_sum
return torch.sum((weights_n * x).reshape(B, -1), dim=1)
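# Minimal sanity-check sketch (illustrative only): with uniform weights the
# weighted MSE reduces to the ordinary per-batch error, returned as a tensor
# of shape (B,). The tensor sizes below are arbitrary.
def _example_weighted_mse():
    B, C, H, W = 2, 3, 4, 4
    pred = torch.zeros(B, C, H, W)
    target = torch.ones(B, C, H, W)
    weights = torch.ones(B, 1, H, W)
    # Each pixel contributes C * 1.0 of squared error, averaged over pixels.
    return weighted_mse_loss(pred, target, weights)  # -> tensor([3., 3.])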
class ConsistencyLoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.dist = torch.abs
def geometry_consistency_loss(self, points_cam, metadata, pixels):
"""Geometry Consistency Loss.
        For each frame pair specified by indices,
            geom_consistency = reprojection_error + disparity_error,
        where reprojection_error is measured in the screen space of each camera
        in the pair.
Args:
points_cam (B, N, 3, H, W): points in local camera coordinate.
pixels (B, N, 2, H, W)
metadata: dictionary of related metadata to compute the loss. Here assumes
metadata include entries as below.
{
'extrinsics': torch.tensor (B, N, 3, 4), # extrinsics of each frame.
Each (3, 4) = [R, t]
'intrinsics': torch.tensor (B, N, 4), # (fx, fy, cx, cy)
'geometry_consistency':
{
'flows': (B, 2, H, W),) * 2 in pixels.
For k in range(2) (ref or tgt),
pixel p = pixels[indices[b, k]][:, i, j]
correspond to
p + flows[k][b, :, i, j]
in frame indices[b, (k + 1) % 2].
'masks': ((B, 1, H, W),) * 2. Masks of valid flow
matches. Values are 0 or 1.
}
}
"""
geom_meta = metadata["geometry_consistency"]
points_cam_pair = select_tensors(points_cam)
extrinsics = metadata["extrinsics"]
extrinsics_pair = select_tensors(extrinsics)
intrinsics = metadata["intrinsics"]
intrinsics_pair = select_tensors(intrinsics)
pixels_pair = select_tensors(pixels)
flows_pair = (flows for flows in geom_meta["flows"])
masks_pair = (masks for masks in geom_meta["masks"])
reproj_losses, disp_losses = [], []
inv_idxs = [1, 0]
for (
points_cam_ref,
tgt_points_cam_tgt,
pixels_ref,
flows_ref,
masks_ref,
intrinsics_ref,
intrinsics_tgt,
extrinsics_ref,
extrinsics_tgt,
) in zip(
points_cam_pair,
points_cam_pair[inv_idxs],
pixels_pair,
flows_pair,
masks_pair,
intrinsics_pair,
intrinsics_pair[inv_idxs],
extrinsics_pair,
extrinsics_pair[inv_idxs],
):
# change to camera space for target_camera
points_cam_tgt = reproject_points(
points_cam_ref, extrinsics_ref, extrinsics_tgt
)
matched_pixels_tgt = pixels_ref + flows_ref
pixels_tgt = project(points_cam_tgt, intrinsics_tgt)
if self.opt.lambda_reprojection > 0:
reproj_dist = torch.norm(pixels_tgt - matched_pixels_tgt,
dim=1, keepdim=True)
reproj_losses.append(
weighted_mean_loss(self.dist(reproj_dist), masks_ref)
)
if self.opt.lambda_view_baseline > 0:
# disparity consistency
f = torch.mean(focal_length(intrinsics_ref))
# warp points in target image grid target camera coordinates to
# reference image grid
warped_tgt_points_cam_tgt = sample(
tgt_points_cam_tgt, matched_pixels_tgt
)
disp_diff = 1.0 / points_cam_tgt[:, -1:, ...] \
- 1.0 / warped_tgt_points_cam_tgt[:, -1:, ...]
disp_losses.append(
f * weighted_mean_loss(self.dist(disp_diff), masks_ref)
)
B = points_cam_pair[0].shape[0]
dtype = points_cam_pair[0].dtype
reproj_loss = (
self.opt.lambda_reprojection
* torch.mean(torch.stack(reproj_losses, dim=-1), dim=-1)
if len(reproj_losses) > 0
else torch.zeros(B, dtype=dtype, device=_device)
)
disp_loss = (
self.opt.lambda_view_baseline
* torch.mean(torch.stack(disp_losses, dim=-1), dim=-1)
if len(disp_losses) > 0
else torch.zeros(B, dtype=dtype, device=_device)
)
batch_losses = {"reprojection": reproj_loss, "disparity": disp_loss}
return torch.mean(reproj_loss + disp_loss), batch_losses
def __call__(
self,
depths,
metadata,
):
"""Compute total loss.
        The network predicts a set of depth results. The number of samples, N,
        is not the batch size, but is determined by the losses in use.
        For instance, geometry_consistency_loss requires pairs as samples, so
        N = 2.
        With an additional loss, say a triplet loss from
        temporal_consistency_loss, N = 2 + 3.
Args:
depths (B, N, H, W): predicted_depths
metadata: dictionary of related metadata to compute the loss. Here assumes
metadata include data as below. But each loss assumes more.
{
'extrinsics': torch.tensor (B, N, 3, 4), # extrinsics of each frame.
Each (3, 4) = [R, t]
'intrinsics': torch.tensor (B, N, 4),
# (fx, fy, cx, cy) for each frame in pixels
}
Returns:
            (loss, batch_losses): total loss as a scalar tensor and a dict of
                per-batch loss terms.
"""
def squeeze(x):
return x.reshape((-1,) + x.shape[2:])
def unsqueeze(x, N):
return x.reshape((-1, N) + x.shape[1:])
depths = depths.unsqueeze(-3)
intrinsics = metadata["intrinsics"]
B, N, C, H, W = depths.shape
pixels = pixel_grid(B * N, (H, W))
points_cam = pixels_to_points(squeeze(intrinsics), squeeze(depths), pixels)
pixels = unsqueeze(pixels, N)
points_cam = unsqueeze(points_cam, N)
return self.geometry_consistency_loss(points_cam, metadata, pixels)
| consistent_depth-main | loss/consistency_loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
class LossParams:
"""
Loss related parameters
"""
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--lambda_view_baseline",
type=float,
default=-1,
help="The baseline to define weight to penalize disparity difference."
" If < 0 it will be set automatically to the default for the"
" specified model adapter.",
)
parser.add_argument(
"--lambda_reprojection",
type=float,
default=1.0,
help="weight for reprojection loss.",
)
parser.add_argument(
"--lambda_parameter",
type=float,
default=0,
help="weight for network parameter regularization loss.",
)
return parser
@staticmethod
def make_str(opt):
return (
"B{}".format(opt.lambda_view_baseline)
+ "_R{}".format(opt.lambda_reprojection)
+ '_PL1-{}'.format(opt.lambda_parameter)
)
| consistent_depth-main | loss/loss_params.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
class ParameterLoss(torch.nn.Module):
    def __init__(self, parameters_init, opt):
        super().__init__()
        self.parameters_init = parameters_init
        self.opt = opt
        assert opt.lambda_parameter > 0
def __call__(self, parameters):
        # L1 penalty on the deviation from the initial network parameters.
        abs_diff = [torch.abs(p - pi.data)
                    for p, pi in zip(parameters, self.parameters_init)]
        abs_sum = torch.sum(torch.cat([d.flatten() for d in abs_diff]))
        loss = self.opt.lambda_parameter * abs_sum
return loss, {"parameter_loss": loss.reshape(1, -1)}
| consistent_depth-main | loss/parameter_loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional
import torch
from torch.nn import Parameter
from .parameter_loss import ParameterLoss
from .consistency_loss import ConsistencyLoss
from utils.torch_helpers import _device
from loaders.video_dataset import _dtype
class JointLoss(torch.nn.Module):
def __init__(self, opt, parameters_init=None):
super().__init__()
self.opt = opt
if opt.lambda_parameter > 0:
assert parameters_init is not None
self.parameter_loss = ParameterLoss(parameters_init, opt)
if opt.lambda_view_baseline > 0 or opt.lambda_reprojection > 0:
self.consistency_loss = ConsistencyLoss(opt)
def __call__(
self,
depths,
metadata,
parameters: Optional[List[Parameter]] = None,
):
loss = torch.zeros(1, dtype=_dtype, device=_device)
batch_losses = {}
if self.opt.lambda_parameter > 0:
assert parameters is not None
para_loss, para_batch_losses = self.parameter_loss(parameters)
loss += para_loss
batch_losses.update(para_batch_losses)
if self.opt.lambda_view_baseline > 0 or self.opt.lambda_reprojection > 0:
consis_loss, consis_batch_losses = self.consistency_loss(
depths, metadata,
)
loss += consis_loss
batch_losses.update(consis_batch_losses)
return loss, batch_losses
| consistent_depth-main | loss/joint_loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import namedtuple
from enum import Enum, unique, auto
from typing import Iterable, NamedTuple, Dict, Any, Set
import numpy as np
from .frame_range import FrameRange
@unique
class SamplePairsMode(Enum):
EXHAUSTED = 0
CONSECUTIVE = auto()
HIERARCHICAL = auto()
HIERARCHICAL2 = auto()
@classmethod
def name_mode_map(cls):
return {v.name.lower(): v for v in cls}
@classmethod
def names(cls):
return [v.name.lower() for v in cls]
# param is default to {} while mode is required
class SamplePairsOptions(NamedTuple):
mode: SamplePairsMode
params: Dict[str, Any] = {}
Pair = namedtuple("Pair", ["first", "second"])
Pairs_t = Set[Pair]
class SamplePairs:
@classmethod
def sample(
cls,
opts: Iterable[SamplePairsOptions],
frame_range: FrameRange,
two_way=False,
) -> Pairs_t:
num_frames = len(frame_range)
rel_pairs = set()
for opt in opts:
rel_pairs = rel_pairs.union(cls.factory(num_frames, opt, two_way))
pairs = set()
for rel_pair in rel_pairs:
pair = Pair(
frame_range.index_to_frame[rel_pair[0]],
frame_range.index_to_frame[rel_pair[1]]
)
# Filter out pairs where no end is in depth_frames. Can be optimized
# when constructing these pairs
if (pair[0] in frame_range.frames() or pair[1] in frame_range.frames()):
pairs.add(pair)
return pairs
@classmethod
def factory(
cls, num_frames: int, opt: SamplePairsOptions, two_way: bool
) -> Pairs_t:
funcs = {
SamplePairsMode.EXHAUSTED: cls.sample_exhausted,
SamplePairsMode.CONSECUTIVE: cls.sample_consecutive,
SamplePairsMode.HIERARCHICAL: cls.sample_hierarchical,
SamplePairsMode.HIERARCHICAL2: cls.sample_hierarchical2,
}
return funcs[opt.mode](num_frames, two_way, **opt.params)
@staticmethod
def sample_hierarchical(
num_frames: int,
two_way: bool,
min_dist=1,
max_dist=None,
include_mid_point=False,
) -> Pairs_t:
"""
Args:
min_dist, max_dist: minimum and maximum distance to the neighbour
"""
assert min_dist >= 1
if max_dist is None:
max_dist = num_frames - 1
min_level = np.ceil(np.log2(min_dist)).astype(int)
max_level = np.floor(np.log2(max_dist)).astype(int)
step_level = (lambda l: max(0, l - 1)) if include_mid_point else (lambda l: l)
signs = (-1, 1) if two_way else (1,)
pairs = set()
for level in range(min_level, max_level + 1):
dist = 1 << level
step = 1 << step_level(level)
for start in range(0, num_frames, step):
for sign in signs:
end = start + sign * dist
if end < 0 or end >= num_frames:
continue
pairs.add(Pair(start, end))
return pairs
@classmethod
def sample_hierarchical2(
cls, num_frames: int, two_way: bool, min_dist=1, max_dist=None
) -> Pairs_t:
return cls.sample_hierarchical(
num_frames,
two_way,
min_dist=min_dist,
max_dist=max_dist,
include_mid_point=True,
)
@classmethod
def sample_consecutive(cls, num_frames: int, two_way: bool) -> Pairs_t:
return cls.sample_hierarchical(num_frames, two_way, min_dist=1, max_dist=1)
    @classmethod
def sample_exhausted(cls, num_frames: int, two_way: bool) -> Pairs_t:
second_frame_range = (
(lambda i, N: range(N)) if two_way else (lambda i, N: range(i + 1, N))
)
pairs = set()
for i in range(num_frames):
for j in second_frame_range(i, num_frames):
if i != j:
pairs.add(Pair(i, j))
return pairs
@classmethod
def to_one_way(cls, pairs) -> Pairs_t:
def ordered(pair):
if pair[0] > pair[1]:
return Pair(*pair[::-1])
return Pair(*pair)
return {ordered(p) for p in pairs}
def to_in_range(pairs, frame_range=None):
if frame_range is None:
return pairs
def in_range(idx):
return frame_range[0] <= idx and idx < frame_range[1]
return [pair for pair in pairs if all(in_range(i) for i in pair)]
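# Minimal usage sketch (illustrative only): sample consecutive and hierarchical
# pairs over a hypothetical 8-frame range; the frame count is arbitrary.
def _example_sample_pairs() -> Pairs_t:
    from .frame_range import OptionalSet
    frame_range = FrameRange(OptionalSet(set(range(8))))
    opts = [
        SamplePairsOptions(mode=SamplePairsMode.CONSECUTIVE),
        SamplePairsOptions(mode=SamplePairsMode.HIERARCHICAL),
    ]
    return SamplePairs.sample(opts, frame_range, two_way=True)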
| consistent_depth-main | utils/frame_sampling.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Set, Optional
from collections import namedtuple
# set is an OptionalSet as below
NamedOptionalSet = namedtuple("NamedOptionalSet", ["name", "set"])
class OptionalSet:
def __init__(self, set: Optional[Set] = None):
self.set = set
def intersection(self, other):
if self.set is None:
return other
if other.set is None:
return self
return OptionalSet(set=self.set.intersection(other.set))
def __str__(self):
return str(self.set)
class FrameRange:
"""
Compute the indices of frames we are interested in from the specified range.
"""
def __init__(
self,
frame_range: OptionalSet,
num_frames: int = None,
):
full_range = OptionalSet(set=set(range(num_frames))
if num_frames is not None else None)
self.update(frame_range.intersection(full_range))
def intersection(self, other: OptionalSet):
return FrameRange(self.frame_range.intersection(other))
def update(self, frame_range: OptionalSet):
assert frame_range.set is not None
self.frame_range = frame_range
# Continuous index of all frames in the range.
all_frames = sorted(self.frame_range.set)
self.index_to_frame = {i: f for i, f in enumerate(all_frames)}
def frames(self):
return sorted(self.index_to_frame.values())
def __len__(self):
return len(self.index_to_frame)
def parse_frame_range(frame_range_str: str) -> NamedOptionalSet:
"""
Create a frame range from a string, e.g.: 1-10,15,21-40,51-62.
"""
if len(frame_range_str) == 0:
return NamedOptionalSet(name=frame_range_str, set=OptionalSet())
range_strs = frame_range_str.split(',')
def parse_sub_range(sub_range_str: str):
splits = [int(s) for s in sub_range_str.split('-', maxsplit=1)]
if len(splits) == 1:
return splits
start, end = splits
assert start <= end
return range(start, end + 1)
frame_range = set()
for range_str in range_strs:
frame_range.update(parse_sub_range(range_str))
# Convert the range to a friendly string representation, e.g.,
# 6,6,5,8,0,2-4,5-6,10,9 -> "0,2-6,8-10"
it = iter(sorted(frame_range))
ranges = []
start = next(it)
last_index = start
def add_range(ranges):
if last_index == start:
ranges.append(f"{start}")
else:
ranges.append(f"{start}-{last_index}")
for i in it:
if i < 0:
raise ValueError("Frame indices must be positive.")
assert(i > last_index)
if i - last_index > 1:
add_range(ranges)
start = i
last_index = i
add_range(ranges)
name = ",".join(ranges)
return NamedOptionalSet(name=name, set=OptionalSet(frame_range))
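# Minimal usage sketch (illustrative only): the range string below is a
# hypothetical example showing how unsorted, overlapping inputs are normalized.
def _example_parse_frame_range():
    named = parse_frame_range("6,5,8,0,2-4,5-6,10,9")
    assert named.name == "0,2-6,8-10"
    assert named.set.set == {0, 2, 3, 4, 5, 6, 8, 9, 10}
    return named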
| consistent_depth-main | utils/frame_range.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import numpy as np
import os
from PIL import Image
import cv2
import struct
from subprocess import call
import warnings
import six
if six.PY2:
class ResourceWarning(RuntimeWarning):
pass
# Needed to suppress ResourceWarning for unclosed image file on dev server.
warnings.simplefilter("ignore", ResourceWarning)
warnings.simplefilter("ignore", UserWarning)
# resizes the image
def resize_to_target(image, max_size, align=1, suppress_messages=False):
if not suppress_messages:
print("Original size: %d x %d" % (image.shape[1], image.shape[0]))
H, W = image.shape[:2]
long_side = float(max(W, H))
scale = min(1.0, max_size / long_side)
resized_height = int(H * scale)
resized_width = int(W * scale)
if resized_width % align != 0:
resized_width = align * round(resized_width / align)
if not suppress_messages:
print("Rounding width to closest multiple of %d." % align)
if resized_height % align != 0:
resized_height = align * round(resized_height / align)
if not suppress_messages:
print("Rounding height to closest multiple of %d." % align)
if not suppress_messages:
print("Resized: %d x %d" % (resized_width, resized_height))
image = cv2.resize(
image, (resized_width, resized_height), interpolation=cv2.INTER_AREA
)
return image
# Reads an image and returns a normalized float buffer (0-1 range). Corrects
# rotation based on EXIF tags.
def load_image(file_name, max_size=None, align=1, suppress_messages=False):
img, angle = load_image_angle(
file_name, max_size, align=align, suppress_messages=suppress_messages
)
return img
def load_image_angle(
file_name, max_size=None, min_size=None,
angle=0, align=1, suppress_messages=False
):
with Image.open(file_name) as img:
if hasattr(img, "_getexif") and img._getexif() is not None:
# orientation tag in EXIF data is 274
exif = dict(img._getexif().items())
# adjust the rotation
if 274 in exif:
if exif[274] == 8:
angle = 90
elif exif[274] == 6:
angle = 270
elif exif[274] == 3:
angle = 180
if angle != 0:
img = img.rotate(angle, expand=True)
img = np.float32(img) / 255.0
if max_size is not None:
if min_size is not None:
img = cv2.resize(
img, (max_size, min_size), interpolation=cv2.INTER_AREA)
else:
img = resize_to_target(
img, max_size, align=align, suppress_messages=suppress_messages
)
return img, angle
return [[]], 0.0
# Load image from binary file in the same way as read in C++ with
# #include "compphotolib/core/CvUtil.h"
# freadimg(fileName, image);
def load_raw_float32_image(file_name):
with open(file_name, "rb") as f:
CV_CN_MAX = 512
CV_CN_SHIFT = 3
CV_32F = 5
I_BYTES = 4
Q_BYTES = 8
h = struct.unpack("i", f.read(I_BYTES))[0]
w = struct.unpack("i", f.read(I_BYTES))[0]
cv_type = struct.unpack("i", f.read(I_BYTES))[0]
pixel_size = struct.unpack("Q", f.read(Q_BYTES))[0]
d = ((cv_type - CV_32F) >> CV_CN_SHIFT) + 1
assert d >= 1
d_from_pixel_size = pixel_size // 4
if d != d_from_pixel_size:
raise Exception(
"Incompatible pixel_size(%d) and cv_type(%d)" % (pixel_size, cv_type)
)
if d > CV_CN_MAX:
raise Exception("Cannot save image with more than 512 channels")
data = np.frombuffer(f.read(), dtype=np.float32)
result = data.reshape(h, w) if d == 1 else data.reshape(h, w, d)
return result
# Save image to binary file, so that it can be read in C++ with
# #include "compphotolib/core/CvUtil.h"
# freadimg(fileName, image);
def save_raw_float32_image(file_name, image):
with open(file_name, "wb") as f:
CV_CN_MAX = 512
CV_CN_SHIFT = 3
CV_32F = 5
dims = image.shape
h = 0
w = 0
d = 1
if len(dims) == 2:
h, w = image.shape
float32_image = np.transpose(image).astype(np.float32)
else:
h, w, d = image.shape
float32_image = np.transpose(image, [2, 1, 0]).astype("float32")
cv_type = CV_32F + ((d - 1) << CV_CN_SHIFT)
pixel_size = d * 4
if d > CV_CN_MAX:
raise Exception("Cannot save image with more than 512 channels")
f.write(struct.pack("i", h))
f.write(struct.pack("i", w))
f.write(struct.pack("i", cv_type))
f.write(struct.pack("Q", pixel_size)) # Write size_t ~ uint64_t
# Set buffer size to 16 MiB to hide the Python loop overhead.
buffersize = max(16 * 1024 ** 2 // image.itemsize, 1)
for chunk in np.nditer(
float32_image,
flags=["external_loop", "buffered", "zerosize_ok"],
buffersize=buffersize,
order="F",
):
f.write(chunk.tobytes("C"))
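# Minimal round-trip sketch (illustrative only): save a float32 image in the
# raw format above and read it back. The temporary path and array shape are
# arbitrary.
def _example_raw_roundtrip():
    import tempfile
    image = np.random.rand(4, 5, 3).astype(np.float32)
    with tempfile.TemporaryDirectory() as tmp_dir:
        raw_path = os.path.join(tmp_dir, "example.raw")
        save_raw_float32_image(raw_path, image)
        loaded = load_raw_float32_image(raw_path)
    assert loaded.shape == image.shape and np.allclose(loaded, image)
    return loaded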
def save_image(file_name, image):
ext = os.path.splitext(file_name)[1].lower()
if ext == ".raw":
save_raw_float32_image(file_name, image)
else:
image = 255.0 * image
image = Image.fromarray(image.astype("uint8"))
image.save(file_name)
def save_depth_map_colored(file_name, depth_map, color_binary):
save_image(file_name, depth_map)
color_depth_name = os.path.splitext(file_name)[0] + "_color.jpg"
if color_binary != "":
call([color_binary, "--inputFile", file_name, "--outputFile", color_depth_name])
# main function
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_image", type=str, help="input image")
parser.add_argument("--output_image", type=str, help="output image")
parser.add_argument(
"--max_size", type=int, default=768, help="max size of long image dimension"
)
args, unknown = parser.parse_known_args()
img = load_image(args.input_image, int(args.max_size))
save_image(args.output_image, img)
| consistent_depth-main | utils/image_io.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from third_party.colmap.scripts.python.read_write_model import (
CAMERA_MODELS,
rotmat2qvec,
Camera,
BaseImage,
write_model
)
# for exporting these functions to the rest of the code
from third_party.colmap.scripts.python.read_dense import read_array
from third_party.colmap.scripts.python.read_write_model import (
qvec2rotmat,
read_images_binary,
read_points3d_binary,
read_cameras_binary,
read_model,
)
CAMERA_NAME_TO_IDS = {
c.model_name: c.model_id for c in CAMERA_MODELS
}
# maps colmap point xc to normal coordinate frame x
# x = ROT_COLMAP_TO_NORMAL * x
ROT_COLMAP_TO_NORMAL = np.diag([1, -1, -1])
def intrinsics_to_camera(intrinsics, src_im_size=None, dst_im_size=None, eps=0.01):
"""Convert metadata intrinsics to COLMAP Camera.
    Only supports a single shared SIMPLE_PINHOLE or PINHOLE camera.
Args:
intrinsics: (N, 4) where each row is fx, fy, cx, cy.
Assume intrinsics is the same across all frames.
src_im_size: image size corresponding to intrinsics
dst_im_size: the image size we want to convert to
"""
fxy, cxy = intrinsics[0][:2], intrinsics[0][-2:]
if src_im_size is None:
src_im_size = (2 * cxy).astype(int)
if dst_im_size is None:
dst_im_size = src_im_size
ratio = np.array(dst_im_size) / np.array(src_im_size).astype(float)
fxy *= ratio
cxy *= ratio
if np.abs(fxy[0] - fxy[1]) < eps:
model = 'SIMPLE_PINHOLE'
params = np.array((fxy[0], cxy[0], cxy[1]))
else:
model = 'PINHOLE'
params = np.array((fxy[0], fxy[1], cxy[0], cxy[1]))
camera = Camera(
id=1, model=model,
width=dst_im_size[0], height=dst_im_size[1],
params=params
)
return {camera.id: camera}
def extrinsics_to_images(extrinsics):
images = {}
for i, extr in enumerate(extrinsics):
R, t = extr[:, :3], extr[:, -1:]
Rc = ROT_COLMAP_TO_NORMAL.dot(R.T).dot(ROT_COLMAP_TO_NORMAL.T)
tc = -Rc.dot(ROT_COLMAP_TO_NORMAL.T).dot(t)
frame_id = i + 1
image = BaseImage(
id=frame_id, qvec=rotmat2qvec(Rc), tvec=tc.flatten(),
camera_id=1, name="frame_%06d.png" % i,
xys=[], point3D_ids=[]
)
images[image.id] = image
return images
def to_colmap(intrinsics, extrinsics, src_im_size=None, dst_im_size=None):
"""Convert Extrinsics and intrinsics to an empty COLMAP project with no points.
"""
cameras = intrinsics_to_camera(
intrinsics, src_im_size=src_im_size, dst_im_size=dst_im_size
)
images = extrinsics_to_images(extrinsics)
points3D = {}
return cameras, images, points3D
def save_colmap(
path, intrinsics, extrinsics, src_im_size=None, dst_im_size=None, ext=".txt"
):
cameras, images, points3D = to_colmap(intrinsics, extrinsics,
src_im_size=src_im_size, dst_im_size=dst_im_size)
write_model(cameras, images, points3D, path, ext)
def cameras_to_intrinsics(cameras, camera_ids, size_new):
"""
Args:
size_new: image size after resizing and produce equivalent intrinsics
for this size
"""
# params = f, cx, cy
assert all(
(c.model == "SIMPLE_PINHOLE" or c.model == "PINHOLE"
or c.model == "SIMPLE_RADIAL"
for c in cameras.values()))
intrinsics = []
for id in camera_ids:
c = cameras[id]
if c.model == "SIMPLE_PINHOLE":
f, cx, cy = c.params
fxy = np.array([f, f])
elif c.model == "PINHOLE":
fx, fy, cx, cy = c.params
fxy = np.array([fx, fy])
elif c.model == "SIMPLE_RADIAL":
f, cx, cy, r = c.params
fxy = np.array([f, f])
else:
raise AssertionError()
ratio = np.array(size_new) / np.array((c.width, c.height))
fxy = fxy * ratio
cxy = np.array((cx, cy)) * ratio
intrinsics.append(np.concatenate((fxy, cxy)))
return np.stack(intrinsics, axis=0)
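# Minimal usage sketch (illustrative only): convert a single hypothetical
# SIMPLE_PINHOLE camera to a (fx, fy, cx, cy) row for a downscaled image size.
def _example_cameras_to_intrinsics():
    cameras = {
        1: Camera(id=1, model="SIMPLE_PINHOLE", width=640, height=480,
                  params=np.array([500.0, 320.0, 240.0]))
    }
    # Halving the resolution halves focal length and principal point:
    # -> [[250., 250., 160., 120.]]
    return cameras_to_intrinsics(cameras, [1], size_new=(320, 240))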
def images_to_extrinsics(images, image_ids):
"""Let p be in local camera coordinates. x in global coordinates.
Rc, tc be rotation and translation from colmap
p = Rc * x + tc, i.e., x = Rc^T * p - Rc^T * tc
But we want to generate R, t, s.t.,
x = Rx+t,
so R = Rc^T, t = - Rc^T * tc
Note that colmap uses a different coordinate system where y points down and
z points to the world.
"""
extrinsics = []
for id in image_ids:
im = images[id]
Rc, tc = im.qvec2rotmat(), im.tvec
R, t = Rc.T, -Rc.T.dot(tc.reshape(-1, 1))
R = ROT_COLMAP_TO_NORMAL.dot(R).dot(ROT_COLMAP_TO_NORMAL.T)
t = ROT_COLMAP_TO_NORMAL.dot(t)
extrinsics.append(np.concatenate([R, t], axis=1))
return np.stack(extrinsics, axis=0)
def convert_points3D(pts3D: np.ndarray):
"""
points (3, N)
"""
return ROT_COLMAP_TO_NORMAL.dot(pts3D)
def ordered_image_ids(images):
return sorted(images.keys(), key=lambda id: images[id].name)
def convert_calibration(cameras, images, size_new):
sorted_im_ids = ordered_image_ids(images)
sorted_cam_ids = [images[id].camera_id for id in sorted_im_ids]
intrinsics = cameras_to_intrinsics(cameras, sorted_cam_ids, size_new)
extrinsics = images_to_extrinsics(images, sorted_im_ids)
return intrinsics, extrinsics
| consistent_depth-main | utils/load_colmap.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch.nn
def sample(data, uv):
"""Sample data (H, W, <C>) by uv (H, W, 2) (in pixels). """
shape = data.shape
# data from (H, W, <C>) to (1, C, H, W)
data = data.reshape(data.shape[:2] + (-1,))
data = torch.tensor(data).permute(2, 0, 1)[None, ...]
# (H, W, 2) -> (1, H, W, 2)
uv = torch.tensor(uv)[None, ...]
H, W = shape[:2]
# grid needs to be in [-1, 1] and (B, H, W, 2)
size = torch.tensor((W, H), dtype=uv.dtype).view(1, 1, 1, -1)
grid = (2 * uv / size - 1).to(data.dtype)
tensor = torch.nn.functional.grid_sample(data, grid, padding_mode="border")
# from (1, C, H, W) to (H, W, <C>)
return tensor.permute(0, 2, 3, 1).reshape(shape).numpy()
def sse(x, y, axis=-1):
"""Sum of suqare error"""
d = x - y
return np.sum(d * d, axis=axis)
def consistency_mask(im_ref, im_tgt, flow, threshold, diff_func=sse):
H, W = im_ref.shape[:2]
im_ref = im_ref.reshape(H, W, -1)
im_tgt = im_tgt.reshape(H, W, -1)
x, y = np.arange(W), np.arange(H)
X, Y = np.meshgrid(x, y)
u, v = flow[..., 0], flow[..., 1]
idx_x, idx_y = u + X, v + Y
# first constrain to within the image
mask = np.all(
np.stack((idx_x >= 0, idx_x <= W - 1, 0 <= idx_y, idx_y <= H - 1), axis=-1),
axis=-1,
)
im_tgt_to_ref = sample(im_tgt, np.stack((idx_x, idx_y), axis=-1))
mask = np.logical_and(mask, diff_func(im_ref, im_tgt_to_ref) < threshold)
return mask
def consistent_flow_masks(flows, colors, flow_thresh, color_thresh):
# mask from flow consistency
masks_flow = [
consistency_mask(flow_ref, -flow_tgt, flow_ref, flow_thresh ** 2)
for flow_ref, flow_tgt in zip(flows, flows[::-1])
]
# mask from photometric consistency
C = colors[0].shape[-1]
masks_photo = [
consistency_mask(c_ref, c_tgt, flow_ref, C * (color_thresh ** 2))
for c_ref, c_tgt, flow_ref in zip(colors, colors[::-1], flows)
]
# merge the two
masks = [np.logical_and(mf, mp) for mf, mp in zip(masks_flow, masks_photo)]
return masks
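# Minimal sanity-check sketch (illustrative only): with zero flow and identical
# colors, every pixel is flow- and photo-consistent, so the masks are all True.
# The image size below is arbitrary.
def _example_consistent_flow_masks():
    H, W = 8, 8
    flows = [np.zeros((H, W, 2), dtype=np.float32) for _ in range(2)]
    colors = [np.ones((H, W, 3), dtype=np.float32) for _ in range(2)]
    masks = consistent_flow_masks(flows, colors, flow_thresh=1, color_thresh=1)
    assert all(m.all() for m in masks)
    return masks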
| consistent_depth-main | utils/consistency.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from os.path import join as pjoin
import wget
from zipfile import ZipFile
def get_model_from_url(
url: str, local_path: str, is_zip: bool = False, path_root: str = "checkpoints"
) -> str:
local_path = pjoin(path_root, local_path)
if os.path.exists(local_path):
print(f"Found cache {local_path}")
return local_path
# download
local_path = local_path.rstrip(os.sep)
download_path = local_path if not is_zip else f"{local_path}.zip"
os.makedirs(os.path.dirname(download_path), exist_ok=True)
if os.path.isfile(download_path):
print(f"Found cache {download_path}")
else:
print(f"Dowloading {url} to {download_path} ...")
wget.download(url, download_path)
if is_zip:
print(f"Unziping {download_path} to {local_path}")
with ZipFile(download_path, 'r') as f:
f.extractall(local_path)
os.remove(download_path)
return local_path
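# Minimal usage sketch (illustrative only): the URL and cache path below are
# hypothetical placeholders, not checkpoints shipped with this code.
def _example_get_model() -> str:
    url = "https://example.com/models/example_weights.zip"  # hypothetical
    return get_model_from_url(url, "example_model/", is_zip=True)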
| consistent_depth-main | utils/url_helpers.py |
| consistent_depth-main | utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import numpy
import os
import subprocess
import sys
import logging
from matplotlib.cm import get_cmap
from . import image_io
CM_MAGMA = (numpy.array([get_cmap('magma').colors]).
transpose([1, 0, 2]) * 255)[..., ::-1].astype(numpy.uint8)
def visualize_depth(depth, depth_min=None, depth_max=None):
"""Visualize the depth map with colormap.
Rescales the values so that depth_min and depth_max map to 0 and 1,
respectively.
"""
if depth_min is None:
depth_min = numpy.amin(depth)
if depth_max is None:
depth_max = numpy.amax(depth)
depth_scaled = (depth - depth_min) / (depth_max - depth_min)
depth_scaled = depth_scaled ** 0.5
depth_scaled_uint8 = numpy.uint8(depth_scaled * 255)
return ((cv2.applyColorMap(
depth_scaled_uint8, CM_MAGMA) / 255) ** 2.2) * 255
def visualize_depth_dir(
src_dir: str, dst_dir: str, force: bool = False, extension: str = ".raw",
min_percentile: float = 0, max_percentile: float = 100,
):
src_files = []
dst_files = []
for file in sorted(os.listdir(src_dir)):
base, ext = os.path.splitext(file)
if ext.lower() == extension:
src_files.append(file)
dst_files.append(f"{base}.png")
if len(src_files) == 0:
return
# Check if all dst_files already exist
dst_exists = True
for file in dst_files:
if not os.path.exists(f"{dst_dir}/{file}"):
dst_exists = False
break
if not force and dst_exists:
return
d_min = sys.float_info.max
    d_max = -sys.float_info.max
for src_file in src_files:
print("reading '%s'." % src_file)
if extension == ".raw":
disparity = image_io.load_raw_float32_image(f"{src_dir}/{src_file}")
else:
disparity = cv2.imread(f"{src_dir}/{src_file}")
ix = numpy.isfinite(disparity)
if numpy.sum(ix) == 0:
logging.warning(f"{src_file} has 0 valid depth")
continue
valid_disp = disparity[ix]
d_min = min(d_min, numpy.percentile(valid_disp, min_percentile))
d_max = max(d_max, numpy.percentile(valid_disp, max_percentile))
for i in range(len(src_files)):
src_file = src_files[i]
dst_file = dst_files[i]
print(f"reading '{src_file}'.")
if os.path.exists(f"{dst_dir}/{dst_file}") and not force:
print(f"skipping existing file '{dst_file}'.")
else:
if extension == ".raw":
disparity = image_io.load_raw_float32_image(
f"{src_dir}/{src_file}")
else:
disparity = cv2.imread(f"{src_dir}/{src_file}")
disparity_vis = visualize_depth(disparity, d_min, d_max)
print(f"writing '{dst_file}'.")
cv2.imwrite(f"{dst_dir}/{dst_file}", disparity_vis)
def create_video(pattern: str, output_file: str, ffmpeg_bin: str = 'ffmpeg'):
if not os.path.exists(output_file):
cmd = [ffmpeg_bin, "-r", "30",
"-i", pattern,
"-c:v", "libx264",
"-crf", "27",
"-pix_fmt", "yuv420p",
output_file]
subprocess.call(cmd)
def apply_mask(im, mask, mask_color=None):
im = im.reshape(im.shape[:2] + (-1,))
C = im.shape[-1]
mask = mask.reshape(mask.shape[:2] + (-1,)) > 0
if mask_color is None:
mask_color = numpy.array([0, 255, 0] if C == 3 else 1)
mask_color = mask_color.reshape(1, 1, C)
inv_mask = (1 - mask) * mask_color
result = 0.7 * im + 0.3 * inv_mask
return result.squeeze()
| consistent_depth-main | utils/visualization.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from typing import Tuple
def reproject(pts3d: np.ndarray, extr: np.ndarray) -> np.ndarray:
assert pts3d.shape[0] == extr.shape[0] and pts3d.shape[0] == 3
p_dim, _ = pts3d.shape
R, t = extr[:, :p_dim], extr[:, -1:]
return R.T.dot(pts3d - t)
def focal_length(intr: np.ndarray):
return intr[:2]
def principal_point(intrinsics):
"""
Args:
intrinsics: (fx, fy, cx, cy)
"""
return intrinsics[2:]
# # center version
# H, W = shape
# return torch.tensor(((W - 1) / 2.0, (H - 1) / 2.0), device=_device)
def project(pts3d: np.ndarray, intr: np.ndarray) -> np.ndarray:
"""
Args:
pts3d (3, N)
intr (4)
Returns:
pixels (2, N)
"""
rays = pts3d / -pts3d[-1:]
fxy = focal_length(intr)
uvs = rays[:2] * fxy.reshape(-1, 1)
cs = principal_point(intr)
# to pixels: (i, j) = (u, -v) + (cx, cy)
uvs[1] = -uvs[1]
pixels = uvs + cs.reshape(-1, 1)
return pixels
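# Minimal sanity-check sketch (illustrative only): a point on the optical axis
# one unit in front of the camera projects to the principal point. The
# intrinsics below are hypothetical.
def _example_project() -> np.ndarray:
    intr = np.array([500.0, 500.0, 320.0, 240.0])  # fx, fy, cx, cy
    pts3d = np.array([[0.0], [0.0], [-1.0]])       # camera looks down -z
    return project(pts3d, intr)                    # -> [[320.], [240.]]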
def sample(depth: np.ndarray, pixels: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Args:
depth (H, W)
pixels (2, N)
Returns:
depths (M): depths at corresponding pixels with nearest neighbour sampling,
M <= N, because some depth can be invalid
ix (N): whether a pixels[:, i] is inside the image
"""
pixels_nn = (pixels + 0.5).astype(int)
H, W = depth.shape
ix = np.all(
(
0 <= pixels_nn[0], pixels_nn[0] <= W - 1,
0 <= pixels_nn[1], pixels_nn[1] <= H - 1,
),
axis=0,
)
pixels_valid = pixels_nn[:, ix]
indices = pixels_valid[1] * W + pixels_valid[0]
ds = depth.flatten()[indices]
return ds, ix
| consistent_depth-main | utils/geometry_np.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def to_device(data):
if isinstance(data, torch.Tensor):
data = data.to(_device, non_blocking=True)
return data
if isinstance(data, dict):
for k, v in data.items():
data[k] = to_device(v)
return data
    if isinstance(data, tuple):
        return tuple(to_device(v) for v in data)
    # list: convert elements in place
    for i, v in enumerate(data):
        data[i] = to_device(v)
    return data
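# Minimal usage sketch (illustrative only): nested containers of tensors are
# moved to the default device recursively. The tensor shapes are arbitrary.
def _example_to_device():
    batch = {"depth": torch.zeros(1, 1, 4, 4), "flows": [torch.ones(2, 4, 4)]}
    batch = to_device(batch)
    assert batch["depth"].device.type == _device.type
    return batch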
| consistent_depth-main | utils/torch_helpers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from .torch_helpers import _device
from typing import List
def pixel_grid(batch_size, shape):
"""Returns pixel grid of size (batch_size, 2, H, W).
pixel positions (x, y) are in range [0, W-1] x [0, H-1]
top left is (0, 0).
"""
H, W = shape
x = torch.linspace(0, W - 1, W, device=_device)
y = torch.linspace(0, H - 1, H, device=_device)
Y, X = torch.meshgrid(y, x)
pixels = torch.stack((X, Y), dim=0)[None, ...]
return pixels.expand(batch_size, -1, -1, -1)
def principal_point(intrinsics, shape):
"""
Args:
intrinsics: (fx, fy, cx, cy)
shape: (H, W)
"""
return intrinsics[:, 2:]
# # center version
# H, W = shape
# return torch.tensor(((W - 1) / 2.0, (H - 1) / 2.0), device=_device)
def focal_length(intrinsics):
return intrinsics[:, :2]
def pixels_to_rays(pixels, intrinsics):
"""Convert pixels to rays in camera space using intrinsics.
Args:
pixels (B, 2, H, W)
intrinsics (B, 4): (fx, fy, cx, cy)
Returns:
rays: (B, 3, H, W), where z component is -1, i.e., rays[:, -1] = -1
"""
# Assume principal point is ((W-1)/2, (H-1)/2).
B, _, H, W = pixels.shape
cs = principal_point(intrinsics, (H, W))
    # Convert to [-(W-1)/2, (W-1)/2] x [-(H-1)/2, (H-1)/2] and bottom left is (0, 0)
uvs = pixels - cs.view(-1, 2, 1, 1)
uvs[:, 1] = -uvs[:, 1] # flip v
# compute rays (u/fx, v/fy, -1)
fxys = focal_length(intrinsics).view(-1, 2, 1, 1)
rays = torch.cat(
(uvs / fxys, -torch.ones((B, 1, H, W), dtype=uvs.dtype, device=_device)), dim=1
)
return rays
def project(points, intrinsics):
"""Project points in camera space to pixel coordinates based on intrinsics.
Args:
points (B, 3, H, W)
intrinsics (B, 4): (fx, fy, cx, cy)
Returns:
pixels (B, 2, H, W)
"""
rays = points / -points[:, -1:]
# rays in pixel unit
fxys = focal_length(intrinsics)
uvs = rays[:, :2] * fxys.view(-1, 2, 1, 1)
B, _, H, W = uvs.shape
cs = principal_point(intrinsics, (H, W))
# to pixels: (i, j) = (u, -v) + (cx, cy)
uvs[:, 1] = -uvs[:, 1] # flip v
pixels = uvs + cs.view(-1, 2, 1, 1)
return pixels
def pixels_to_points(intrinsics, depths, pixels):
"""Convert pixels to 3D points in camera space. (Camera facing -z direction)
Args:
intrinsics:
depths (B, 1, H, W)
pixels (B, 2, H, W)
Returns:
points (B, 3, H, W)
"""
rays = pixels_to_rays(pixels, intrinsics)
points = rays * depths
return points
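# Hedged consistency sketch (added for illustration; not part of the original file):
# unprojecting a pixel grid with `pixels_to_points` and projecting it back with
# `project` should recover the original pixel coordinates. The batch size,
# resolution, intrinsics, and depth value below are arbitrary test values.
def _example_unproject_project_roundtrip():
    B, H, W = 1, 4, 6
    # (fx, fy, cx, cy) with the principal point at the image center.
    intrinsics = torch.tensor(
        [[2.0, 2.0, (W - 1) / 2.0, (H - 1) / 2.0]], device=_device
    )
    depths = torch.full((B, 1, H, W), 3.0, device=_device)
    pixels = pixel_grid(B, (H, W))
    points = pixels_to_points(intrinsics, depths, pixels)
    pixels_back = project(points, intrinsics)
    assert torch.allclose(pixels, pixels_back, atol=1e-4)
    return points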
def reproject_points(points_cam_ref, extrinsics_ref, extrinsics_tgt):
"""Reproject points in reference camera coordinate to target camera coordinate
Args:
points_cam_ref (B, 3, H, W): points in reference camera coordinate.
extrinsics_ref (B, 3, 4): [R, t] of reference camera.
extrinsics_tgt (B, 3, 4): [R, t] of target_camera.
Returns:
points_cam_tgt (B, 3, H, W): points in target camera coordinate.
"""
B, p_dim, H, W = points_cam_ref.shape
assert p_dim == 3, "dimension of point {} != 3".format(p_dim)
# t + R * p where t of (B, 3, 1), R of (B, 3, 3) and p of (B, 3, H*W)
R_ref = extrinsics_ref[..., :p_dim]
t_ref = extrinsics_ref[..., -1:]
points_world = torch.baddbmm(t_ref, R_ref, points_cam_ref.view(B, p_dim, -1))
# Reproject to target:
# R'^T * (p - t') where t' of (B, 3, 1), R' of (B, 3, 3) and p of (B, 3, H*W)
R_tgt = extrinsics_tgt[..., :p_dim]
t_tgt = extrinsics_tgt[..., -1:]
points_cam_tgt = torch.bmm(R_tgt.transpose(1, 2), points_world - t_tgt)
return points_cam_tgt.view(B, p_dim, H, W)
def depth_to_points(depths, intrinsics):
"""
Args:
depths: (B, 1, H, W)
intrinsics: (B, num_params)
"""
B, _, H, W = depths.shape
pixels = pixel_grid(B, (H, W))
points_cam = pixels_to_points(intrinsics, depths, pixels)
return points_cam
def calibrate_scale(extrinsics, intrinsics, depths):
"""Given depths, compute the global scale to adjust the extrinsics.
Given a pair of depths, intrinsics, extrinsics, unproject the depth maps,
rotate these points based on camera rotation and compute the center for each one.
The distance between these centers should be of the same scale as the translation
between the cameras. Therefore, let mu1, mu2 and t1, t2 be the two scene centers
and the two camera projection centers. Then
-scale * (t1 - t2) = mu1 - mu2.
Therefore,
scale = -dt.dot(dmu) / dt.dot(dt), where dt = t1 - t2, dmu = mu1 - mu2.
Args:
intrinsics (2, num_params)
extrinsics (2, 3, 4): each one is [R, t]
depths (2, 1, H, W)
"""
assert (
extrinsics.shape[0] == intrinsics.shape[0]
and intrinsics.shape[0] == depths.shape[0]
)
points_cam = depth_to_points(depths, intrinsics)
B, p_dim, H, W = points_cam.shape
Rs = extrinsics[..., :p_dim]
ts = extrinsics[..., p_dim]
points_rot = torch.bmm(Rs, points_cam.view(B, p_dim, -1))
mus = torch.mean(points_rot, axis=-1)
    # TODO(xuanluo): generalize this to more frames B>2 via variances of the points.
assert B == 2
dmu = mus[0] - mus[1]
dt = ts[0] - ts[1]
t_scale = -dt.dot(dmu) / dt.dot(dt)
return t_scale
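# Hedged usage sketch (added for illustration; not part of the original file):
# calls `calibrate_scale` on two synthetic frames to show the expected tensor
# shapes. With identical dummy depth maps the scene centers coincide, so the
# returned scale is trivially zero; all values are illustration-only assumptions.
def _example_calibrate_scale():
    H, W = 4, 6
    intrinsics = torch.tensor(
        [[2.0, 2.0, (W - 1) / 2.0, (H - 1) / 2.0]] * 2, device=_device
    )
    # Identity rotations with two different camera translations.
    extrinsics = torch.zeros(2, 3, 4, device=_device)
    extrinsics[:, :, :3] = torch.eye(3, device=_device)
    extrinsics[1, :, 3] = torch.tensor([1.0, 0.0, 0.0], device=_device)
    depths = torch.full((2, 1, H, W), 2.0, device=_device)
    return calibrate_scale(extrinsics, intrinsics, depths)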
def warping_field(extrinsics, intrinsics, depths, tgt_ids: List[int]):
""" Generate the warping field to warp the other frame the current frame.
Args:
intrinsics (N, num_params)
extrinsics (N, 3, 4): each one is [R, t]
depths (N, 1, H, W)
tgt_ids (N, 1): warp frame tgt_ids[i] to i
Returns:
uvs (N, 2, H, W): sampling the other frame tgt_ids[i] with uvs[i] produces
the current frame i.
"""
assert (
extrinsics.shape[0] == intrinsics.shape[0]
and intrinsics.shape[0] == depths.shape[0]
)
points_cam = depth_to_points(depths, intrinsics)
extrinsics_tgt = extrinsics[tgt_ids]
points_tgt_cam = reproject_points(points_cam, extrinsics, extrinsics_tgt)
uv_tgt = project(points_tgt_cam, intrinsics[tgt_ids])
return uv_tgt
def sample(data, uv):
"""Sample data (B, C, H, W) by uv (B, 2, H, W) (in pixels). """
H, W = data.shape[2:]
# grid needs to be in [-1, 1] and (B, H, W, 2)
    # NOTE: divide by (W-1, H-1) instead of (W, H) because uv is in pixel units
    # ([0, W-1] x [0, H-1]) and grid_sample maps [-1, 1] to the corner pixel centers.
size = torch.tensor((W - 1, H - 1), dtype=uv.dtype).view(1, -1, 1, 1).to(_device)
grid = (2 * uv / size - 1).permute(0, 2, 3, 1)
return torch.nn.functional.grid_sample(data, grid, padding_mode="border")
def warp_image(images, depths, extrinsics, intrinsics, tgt_ids: List[int]):
""" Warp target images to the reference image based on depths and camera params
Warp images[tgt_ids[i]] to images[i].
Args:
images (N, C, H, W)
depths (N, 1, H, W)
extrinsics (N, 3, 4)
intrinsics (N, 4)
tgt_ids (N, 1)
Returns:
images_warped
"""
uv_tgt = warping_field(extrinsics, intrinsics, depths, tgt_ids)
images_warped_to_ref = sample(images[tgt_ids], uv_tgt)
return images_warped_to_ref
| consistent_depth-main | utils/geometry.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from numpy.linalg import inv
import cv2
from sklearn import linear_model
def resize_small(gt, x, interp=cv2.INTER_NEAREST):
"""
Resize to match the smaller image.
"""
def size(x):
return x.shape[:2][::-1]
size_gt, size_x = size(gt), size(x)
if size_gt == size_x:
return gt, x
if np.prod(size_gt) < np.prod(size_x):
x = cv2.resize(x, size_gt, interpolation=interp)
else:
gt = cv2.resize(gt, size_x, interpolation=interp)
return gt, x
# calibration
def calibrate_scale_shift(gt, x):
ix = np.isfinite(gt) & np.isfinite(x)
gt = gt[ix]
x = x[ix]
x2s = (x * x).flatten().sum()
xs = x.flatten().sum()
os = np.ones_like(x.flatten()).sum()
xgs = (x * gt).flatten().sum()
gs = gt.sum()
A = np.array([
[x2s, xs],
[xs, os]
])
b = np.array(
[xgs, gs]
).T
s, t = inv(A).dot(b)
return np.array([s, t])
def calibrate_scale_shift_RANSAC(
gt, x, max_trials=100000, stop_prob=0.999
):
ix = np.isfinite(gt) & np.isfinite(x)
gt = gt[ix].reshape(-1, 1)
x = x[ix].reshape(-1, 1)
ransac = linear_model.RANSACRegressor(
max_trials=max_trials, stop_probability=stop_prob
)
ransac.fit(x, gt)
s = ransac.estimator_.coef_[0, 0]
t = ransac.estimator_.intercept_[0]
return s, t
def calibrate_scale(gt, x, reduce=np.median):
ix = np.isfinite(gt) & np.isfinite(x)
ratios = gt[ix] / x[ix]
return reduce(ratios)
# conversion
def cvt_by_scale_shift(depth, calib_data):
s, t = calib_data
return depth * s + t
CALIB_METHOD_MAP = {
"scale": calibrate_scale,
"scale-shift": calibrate_scale_shift,
"ransac": calibrate_scale_shift_RANSAC,
}
def calibrate(gt, x, method: str):
return CALIB_METHOD_MAP[method](gt, x)
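# Hedged usage sketch (added for illustration; not part of the original file):
# fits a scale/shift between a ground-truth depth map and a prediction with the
# dispatcher above, then applies it. The synthetic arrays and the exact recovery
# check are illustration-only assumptions.
def _example_calibrate():
    gt = np.linspace(1.0, 5.0, 100).reshape(10, 10)
    pred = 0.5 * gt + 0.2  # prediction off from gt by a known scale and shift
    s, t = calibrate(gt, pred, method="scale-shift")
    aligned = cvt_by_scale_shift(pred, (s, t))
    assert np.allclose(aligned, gt, atol=1e-5)
    return s, t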
| consistent_depth-main | utils/calibrate.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def mkdir_ifnotexists(dir):
if os.path.exists(dir):
return
os.mkdir(dir)
def print_title(text):
print()
print("-" * len(text))
print(text)
print("-" * len(text))
def print_banner(text):
w = 12 + len(text)
print()
print("*" * w)
print(f"{'*' * 4} {text} {'*' * 4}")
print("*" * w)
class SuppressedStdout:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, exception_type, exception_value, traceback):
sys.stdout.close()
sys.stdout = self._original_stdout
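# Hedged usage sketch (added for illustration; not part of the original file):
# anything printed inside the context manager is swallowed, and stdout is
# restored on exit.
def _example_suppressed_stdout():
    with SuppressedStdout():
        print("this line is hidden")
    print("this line is visible again")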
| consistent_depth-main | utils/helpers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from os.path import join as pjoin
from typing import Dict, Tuple
import numpy as np
from . import load_colmap, image_io as tr
from .geometry_np import reproject, project, sample
def store_visible_points_per_image(
points3D: Dict[int, load_colmap.Point3D]
) -> Dict[int, np.ndarray]:
"""
returns dictionary where key is the
image id: int
and the value is
3D points (3, N) that are visible in each image
(Note: currently images do not contain this info, but a list of -1's)
"""
map_img_to_pt3D = {}
for _cur_key, cur_point in points3D.items():
# assert(cur_key == cur_point) # They are the same by design
for img_id in cur_point.image_ids:
if img_id in map_img_to_pt3D:
map_img_to_pt3D[img_id].append(cur_point.xyz)
else:
map_img_to_pt3D[img_id] = [cur_point.xyz]
for img_id, pt_list in map_img_to_pt3D.items():
map_img_to_pt3D[img_id] = load_colmap.convert_points3D(np.array(pt_list).T)
return map_img_to_pt3D
def vote_scale(
scales: np.ndarray, min_percentile_thresh: int = 10, max_percentile_thresh: int = 90
) -> float:
"""
Note if len(scales) is really small, e.g., len(scales) == 2, it will return nan
"""
m = np.percentile(scales, min_percentile_thresh)
M = np.percentile(scales, max_percentile_thresh)
ix = np.logical_and(m <= scales, scales <= M)
scale = np.mean(scales[ix])
return scale
def calibrate_frame_w_sparse_points(
pts3d: np.ndarray, intr: np.ndarray, extr: np.ndarray, inv_depth: np.ndarray
) -> float:
"""
Args:
pts3d (3, N)
intr (4)
extr (3, 4)
depth (H, W)
Returns:
scale: depth * scale = -pts_in_local_camera_coordinate.z
"""
# points 3d in local camera coordinate
# FIXME: deal with the case when a point is behind the camera
pts3d_cam = reproject(pts3d, extr)
pts2d = project(pts3d_cam, intr)
inv_depths, ix = sample(inv_depth, pts2d)
    ds = -pts3d_cam[-1, :][ix]  # Note negative sign: depth corresponds to -z in camera coordinates
scales = ds * inv_depths
return vote_scale(scales)
def calibrate_w_sparse_colmap(
colmap_dir: str, dense_depth_dir: str
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Args:
colmap_dir: sparse colmap directory containing
cameras.bin/txt, points3D.bin/txt, images.bin/txt
dense_depth_dir: folder name for dense depth directory
scales_fn (optional): dump per frame scale
Returns:
Calibrated intrinsics and extrinsics
intrinsics (N, 4)
extrinsics (N, 3, 4)
scales (N)
"""
cameras, images, points3D = load_colmap.read_model(path=colmap_dir, ext=".bin")
# compute intrinsics, extrinsics
depth_names = [
x for x in os.listdir(dense_depth_dir) if os.path.splitext(x)[-1] == ".raw"
]
size = tr.load_raw_float32_image(pjoin(dense_depth_dir, depth_names[0])).shape[:2][
::-1
]
intrinsics, extrinsics = load_colmap.convert_calibration(cameras, images, size)
# TODO: make the following up to compute the scale a single function
map_img_to_pt3D = store_visible_points_per_image(points3D)
ordered_im_ids = load_colmap.ordered_image_ids(images)
scales = np.empty(intrinsics.shape[0])
for i, im_id in enumerate(ordered_im_ids):
if im_id not in map_img_to_pt3D:
scales[i] = np.nan
            print('[WARNING] %s does not have visible feature point' % images[im_id].name)
            continue
im_name = images[im_id].name
depth_fn = pjoin(dense_depth_dir, os.path.splitext(im_name)[0] + ".raw")
inv_depth = tr.load_raw_float32_image(depth_fn)
pts3D = map_img_to_pt3D[im_id]
scale = calibrate_frame_w_sparse_points(
pts3D, intrinsics[i], extrinsics[i], inv_depth
)
scales[i] = scale
mean_scale = scales[~np.isnan(scales)].mean()
extrinsics[..., -1] /= mean_scale
return intrinsics, extrinsics, scales
| consistent_depth-main | utils/calibration.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import cv2
from os.path import join as pjoin
import json
import math
import numpy as np
import torch.utils.data as data
import torch
from typing import Optional
from utils import image_io, frame_sampling as sampling
_dtype = torch.float32
def load_image(
path: str,
channels_first: bool,
check_channels: Optional[int] = None,
post_proc_raw=lambda x: x,
post_proc_other=lambda x: x,
) -> torch.FloatTensor:
if os.path.splitext(path)[-1] == ".raw":
im = image_io.load_raw_float32_image(path)
im = post_proc_raw(im)
else:
im = cv2.imread(path, cv2.IMREAD_UNCHANGED)
im = post_proc_other(im)
im = im.reshape(im.shape[:2] + (-1,))
if check_channels is not None:
assert (
im.shape[-1] == check_channels
), "receive image of shape {} whose #channels != {}".format(
im.shape, check_channels
)
if channels_first:
im = im.transpose((2, 0, 1))
# to torch
return torch.tensor(im, dtype=_dtype)
def load_color(path: str, channels_first: bool) -> torch.FloatTensor:
"""
Returns:
torch.tensor. color in range [0, 1]
"""
im = load_image(
path,
channels_first,
post_proc_raw=lambda im: im[..., [2, 1, 0]] if im.ndim == 3 else im,
post_proc_other=lambda im: im / 255,
)
return im
def load_flow(path: str, channels_first: bool) -> torch.FloatTensor:
"""
Returns:
flow tensor in pixels.
"""
flow = load_image(path, channels_first, check_channels=2)
return flow
def load_mask(path: str, channels_first: bool) -> torch.ByteTensor:
"""
Returns:
mask takes value 0 or 1
"""
mask = load_image(path, channels_first, check_channels=1) > 0
return mask.to(_dtype)
class VideoDataset(data.Dataset):
"""Load 3D video frames and related metadata for optimizing consistency loss.
File organization of the corresponding 3D video dataset should be
color_down/frame_{__ID__:06d}.raw
flow/flow_{__REF_ID__:06d}_{__TGT_ID__:06d}.raw
mask/mask_{__REF_ID__:06d}_{__TGT_ID__:06d}.png
metadata.npz: {'extrinsics': (N, 3, 4), 'intrinsics': (N, 4)}
<flow_list.json>: [[i, j], ...]
"""
def __init__(self, path: str, meta_file: str = None):
"""
Args:
path: folder path of the 3D video
"""
self.color_fmt = pjoin(path, "color_down", "frame_{:06d}.raw")
if not os.path.isfile(self.color_fmt.format(0)):
self.color_fmt = pjoin(path, "color_down", "frame_{:06d}.png")
self.mask_fmt = pjoin(path, "mask", "mask_{:06d}_{:06d}.png")
self.flow_fmt = pjoin(path, "flow", "flow_{:06d}_{:06d}.raw")
if meta_file is not None:
with open(meta_file, "rb") as f:
meta = np.load(f)
self.extrinsics = torch.tensor(meta["extrinsics"], dtype=_dtype)
self.intrinsics = torch.tensor(meta["intrinsics"], dtype=_dtype)
assert (
self.extrinsics.shape[0] == self.intrinsics.shape[0]
), "#extrinsics({}) != #intrinsics({})".format(
self.extrinsics.shape[0], self.intrinsics.shape[0]
)
flow_list_fn = pjoin(path, "flow_list.json")
if os.path.isfile(flow_list_fn):
with open(flow_list_fn, "r") as f:
self.flow_indices = json.load(f)
else:
names = os.listdir(os.path.dirname(self.flow_fmt))
self.flow_indices = [
self.parse_index_pair(name)
for name in names
if os.path.splitext(name)[-1] == os.path.splitext(self.flow_fmt)[-1]
]
self.flow_indices = sampling.to_in_range(self.flow_indices)
self.flow_indices = list(sampling.SamplePairs.to_one_way(self.flow_indices))
def parse_index_pair(self, name):
strs = os.path.splitext(name)[0].split("_")[-2:]
return [int(s) for s in strs]
def __getitem__(self, index: int):
"""Fetch tuples of data. index = i * (i-1) / 2 + j, where i > j for pair (i,j)
        So [-1+sqrt(1+8k)]/2 < i <= [1+sqrt(1+8k)]/2, where k=index. So
            i = floor([1+sqrt(1+8k)]/2)
            j = k - i * (i - 1) / 2.
        The number of image frames fetched, N, is not 1, but is computed
        based on what kind of consistency is to be measured.
For instance, geometry_consistency_loss requires random pairs as samples.
So N = 2.
If with more losses, say triplet one from temporal_consistency_loss. Then
N = 2 + 3.
Returns:
stacked_images (N, C, H, W): image frames
targets: {
'extrinsics': torch.tensor (N, 3, 4), # extrinsics of each frame.
Each (3, 4) = [R, t].
                    point_world = R * point_cam + t
'intrinsics': torch.tensor (N, 4), # (fx, fy, cx, cy) for each frame
'geometry_consistency':
{
'indices': torch.tensor (2),
indices for corresponding pairs
[(ref_index, tgt_index), ...]
'flows': ((2, H, W),) * 2 in pixels.
For k in range(2) (ref or tgt),
pixel p = pixels[indices[b, k]][:, i, j]
correspond to
p + flows[k][b, :, i, j]
in frame indices[b, (k + 1) % 2].
'masks': ((1, H, W),) * 2. Masks of valid flow matches
to compute the consistency in training.
Values are 0 or 1.
}
}
"""
pair = self.flow_indices[index]
indices = torch.tensor(pair)
intrinsics = torch.stack([self.intrinsics[k] for k in pair], dim=0)
extrinsics = torch.stack([self.extrinsics[k] for k in pair], dim=0)
images = torch.stack(
[load_color(self.color_fmt.format(k), channels_first=True) for k in pair],
dim=0,
)
flows = [
load_flow(self.flow_fmt.format(k_ref, k_tgt), channels_first=True)
for k_ref, k_tgt in [pair, pair[::-1]]
]
masks = [
load_mask(self.mask_fmt.format(k_ref, k_tgt), channels_first=True)
for k_ref, k_tgt in [pair, pair[::-1]]
]
metadata = {
"extrinsics": extrinsics,
"intrinsics": intrinsics,
"geometry_consistency": {
"indices": indices,
"flows": flows,
"masks": masks,
},
}
if getattr(self, "scales", None):
if isinstance(self.scales, dict):
metadata["scales"] = torch.stack(
[torch.Tensor([self.scales[k]]) for k in pair], dim=0
)
else:
metadata["scales"] = torch.Tensor(
[self.scales, self.scales]).reshape(2, 1)
return (images, metadata)
def __len__(self):
return len(self.flow_indices)
class VideoFrameDataset(data.Dataset):
"""Load video frames from
color_fmt.format(frame_id)
"""
def __init__(self, color_fmt, frames=None):
"""
Args:
color_fmt: e.g., <video_dir>/frame_{:06d}.raw
"""
self.color_fmt = color_fmt
if frames is None:
files = os.listdir(os.path.dirname(self.color_fmt))
self.frames = range(len(files))
else:
self.frames = frames
def __getitem__(self, index):
"""Fetch image frame.
Returns:
image (C, H, W): image frames
"""
frame_id = self.frames[index]
image = load_color(self.color_fmt.format(frame_id), channels_first=True)
meta = {"frame_id": frame_id}
return image, meta
def __len__(self):
return len(self.frames)
| consistent_depth-main | loaders/video_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from utils.url_helpers import get_model_from_url
from .midas_v2.midas_net import MidasNet
from .depth_model import DepthModel
class MidasV2Model(DepthModel):
# Requirements and default settings
align = 32
learning_rate = 0.0001
lambda_view_baseline = 0.0001
def __init__(self, support_cpu: bool = False, pretrained: bool = True):
super().__init__()
if support_cpu:
# Allow the model to run on CPU when GPU is not available.
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
else:
            # Otherwise, raise an error when a GPU is not available.
self.device = torch.device("cuda")
self.model = MidasNet(non_negative=True)
# Load pretrained checkpoint
if pretrained:
checkpoint = (
"https://github.com/intel-isl/MiDaS/releases/download/v2/model-f46da743.pt"
)
state_dict = torch.hub.load_state_dict_from_url(
checkpoint, progress=True, check_hash=True
)
self.model.load_state_dict(state_dict)
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
self.model = torch.nn.DataParallel(self.model)
self.model.to(self.device)
self.norm_mean = torch.Tensor(
[0.485, 0.456, 0.406]).reshape(1, -1, 1, 1)
self.norm_stdev = torch.Tensor(
[0.229, 0.224, 0.225]).reshape(1, -1, 1, 1)
def estimate_depth(self, images):
# Reshape ...CHW -> XCHW
shape = images.shape
C, H, W = shape[-3:]
input_ = images.reshape(-1, C, H, W).to(self.device)
input_ = (input_ - self.norm_mean.to(self.device)) / \
self.norm_stdev.to(self.device)
output = self.model(input_)
# Reshape X1HW -> BNHW
depth = output.reshape(shape[:-3] + output.shape[-2:])
# Convert from disparity to depth
depth = depth.reciprocal()
return depth
def save(self, file_name):
state_dict = self.model.state_dict()
torch.save(state_dict, file_name)
| consistent_depth-main | monodepth/midas_v2_model.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.autograd as autograd
from utils.helpers import SuppressedStdout
from utils.url_helpers import get_model_from_url
from .mannequin_challenge.models import pix2pix_model
from .mannequin_challenge.options.train_options import TrainOptions
from .depth_model import DepthModel
class MannequinChallengeModel(DepthModel):
# Requirements and default settings
align = 16
learning_rate = 0.0004
lambda_view_baseline = 0.1
def __init__(self):
super().__init__()
parser = TrainOptions()
parser.initialize()
params = parser.parser.parse_args(["--input", "single_view"])
params.isTrain = False
model_file = get_model_from_url(
"https://storage.googleapis.com/mannequinchallenge-data/checkpoints/best_depth_Ours_Bilinear_inc_3_net_G.pth",
"mc.pth"
)
class FixedMcModel(pix2pix_model.Pix2PixModel):
# Override the load function, so we can load the snapshot stored
# in our specific location.
def load_network(self, network, network_label, epoch_label):
return torch.load(model_file)
with SuppressedStdout():
self.model = FixedMcModel(params)
def train(self):
self.model.switch_to_train()
def eval(self):
self.model.switch_to_eval()
def parameters(self):
return self.model.netG.parameters()
def estimate_depth(self, images):
images = autograd.Variable(images.cuda(), requires_grad=False)
# Reshape ...CHW -> XCHW
shape = images.shape
C, H, W = shape[-3:]
images = images.reshape(-1, C, H, W)
self.model.prediction_d, _ = self.model.netG.forward(images)
# Reshape X1HW -> BNHW
out_shape = shape[:-3] + self.model.prediction_d.shape[-2:]
self.model.prediction_d = self.model.prediction_d.reshape(out_shape)
self.model.prediction_d = torch.exp(self.model.prediction_d)
self.model.prediction_d = self.model.prediction_d.squeeze(-3)
return self.model.prediction_d
def save(self, file_name):
state_dict = self.model.netG.state_dict()
torch.save(state_dict, file_name)
| consistent_depth-main | monodepth/mannequin_challenge_model.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from os.path import join as pjoin
import torch
from utils.url_helpers import get_model_from_url
from .depth_model import DepthModel
from .monodepth2.networks.resnet_encoder import ResnetEncoder
from .monodepth2.networks.depth_decoder import DepthDecoder
class Monodepth2Model(DepthModel):
# Requirements and default settings
align = 1
learning_rate = 0.00004
lambda_view_baseline = 1
def __init__(self):
super().__init__()
self.device = torch.device("cuda")
model_url = "https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono+stereo_1024x320.zip"
local_model_dir = get_model_from_url(model_url, "monodepth2_mono+stereo_1024x320/", is_zip=True)
encoder_model_file = pjoin(local_model_dir, "encoder.pth")
depth_model_file = pjoin(local_model_dir, "depth.pth")
self.encoder = ResnetEncoder(18, False)
loaded_dict_enc = torch.load(encoder_model_file, map_location=self.device)
# extract the height and width of image that this model was trained with
self.feed_height = loaded_dict_enc['height']
self.feed_width = loaded_dict_enc['width']
print(f"Model was trained at {self.feed_width} x {self.feed_height}.")
filtered_dict_enc = {
k: v for k, v in loaded_dict_enc.items() if k in self.encoder.state_dict()
}
self.encoder.load_state_dict(filtered_dict_enc)
self.encoder.to(self.device)
self.depth_decoder = DepthDecoder(
num_ch_enc=self.encoder.num_ch_enc, scales=range(4))
loaded_dict = torch.load(depth_model_file, map_location=self.device)
self.depth_decoder.load_state_dict(loaded_dict)
self.depth_decoder.to(self.device)
def train(self):
self.encoder.train()
self.depth_decoder.train()
def eval(self):
self.encoder.eval()
self.depth_decoder.eval()
def parameters(self):
return list(self.encoder.parameters()) + list(self.depth_decoder.parameters())
def estimate_depth(self, images):
# import pdb; pdb.set_trace()
# Reshape ...CHW -> NCHW
shape = images.shape
C, H, W = shape[-3:]
images = images.reshape(-1, C, H, W)
# Estimate depth
feed_size = [self.feed_height, self.feed_width]
images = torch.nn.functional.interpolate(
images, size=feed_size, mode='bicubic', align_corners=False)
features = self.encoder(images)
outputs = self.depth_decoder(features)
disparity = outputs[("disp", 0)]
disparity = torch.nn.functional.interpolate(
disparity, size=[H, W], mode='bicubic', align_corners=False)
depth = disparity.reciprocal()
# Reshape N1HW -> ...1HW
out_shape = shape[:-3] + depth.shape[-2:]
depth = depth.reshape(out_shape)
return depth
def save(self, file_name):
pass
| consistent_depth-main | monodepth/monodepth2_model.py |
| consistent_depth-main | monodepth/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import abstractmethod
import torch
class DepthModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, images, metadata=None):
"""
        Images should be fed in the format (N, C, H, W). Channels are in BGR
        order and values in [0, 1].
        Metadata is not used by the depth model itself; it is only used here
        for value transformations.
metadata["scales"]: (optional, can be None) specifies a post-scale
transformation of the depth values. Format (1, N, 1).
"""
depth = self.estimate_depth(images)
if metadata is not None:
if "scales" in metadata:
factor = metadata["scales"].unsqueeze(3).cuda()
depth = depth * factor
return depth
@abstractmethod
def estimate_depth(self, images, metadata) -> torch.Tensor:
pass
@abstractmethod
def save(self, label):
pass
| consistent_depth-main | monodepth/depth_model.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from .depth_model import DepthModel
from .mannequin_challenge_model import MannequinChallengeModel
from .midas_v2_model import MidasV2Model
from .monodepth2_model import Monodepth2Model
from typing import List
def get_depth_model_list() -> List[str]:
return ["mc", "midas2", "monodepth2"]
def get_depth_model(type: str) -> DepthModel:
if type == "mc":
return MannequinChallengeModel
elif type == "midas2":
return MidasV2Model
elif type == "monodepth2":
return Monodepth2Model
else:
raise ValueError(f"Unsupported model type '{type}'.")
def create_depth_model(type: str) -> DepthModel:
model_class = get_depth_model(type)
return model_class()
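# Hedged usage sketch (added for illustration; not part of the original file):
# instantiating a model from the registry and running it on a dummy batch.
# Constructing the model downloads pretrained weights and, for some models,
# requires a GPU, so this only sketches the intended call pattern.
def _example_create_depth_model():
    import torch  # local import; this module does not import torch itself

    model = create_depth_model("midas2")
    images = torch.rand(1, 3, 384, 384)  # BGR values in [0, 1]
    depth = model(images)
    return depth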
| consistent_depth-main | monodepth/depth_model_registry.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as osp
import setuptools
cur_dir = osp.dirname(osp.realpath(__file__))
requirementPath = osp.join(cur_dir, "requirements.txt")
install_requires = []
with open(requirementPath) as f:
install_requires = f.read().splitlines()
setuptools.setup(
name="imitation_learning",
version="0.1",
author="Andrew Szot",
author_email="",
description="imitation_learning",
url="",
install_requires=install_requires,
packages=setuptools.find_packages(),
)
| bc-irl-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import os.path as osp
from collections import defaultdict
from typing import Dict, Optional
import gym.spaces as spaces
import hydra
import numpy as np
import torch
import torch.nn as nn
from hydra.utils import instantiate as hydra_instantiate
from omegaconf import DictConfig, OmegaConf
from rl_utils.common import (Evaluator, compress_dict, get_size_for_space,
set_seed)
from rl_utils.envs import create_vectorized_envs
from rl_utils.logging import Logger
from imitation_learning.policy_opt.policy import Policy
from imitation_learning.policy_opt.ppo import PPO
from imitation_learning.policy_opt.storage import RolloutStorage
@hydra.main(config_path="config", config_name="default")
def main(cfg) -> Dict[str, float]:
set_seed(cfg.seed)
device = torch.device(cfg.device)
set_env_settings = {
k: hydra_instantiate(v) if isinstance(v, DictConfig) else v
for k, v in cfg.env.env_settings.items()
}
envs = create_vectorized_envs(
cfg.env.env_name,
cfg.num_envs,
seed=cfg.seed,
device=device,
**set_env_settings,
)
steps_per_update = cfg.num_steps * cfg.num_envs
num_updates = int(cfg.num_env_steps) // steps_per_update
cfg.obs_shape = envs.observation_space.shape
cfg.action_dim = get_size_for_space(envs.action_space)
    cfg.action_is_discrete = isinstance(envs.action_space, spaces.Discrete)
cfg.total_num_updates = num_updates
logger: Logger = hydra_instantiate(cfg.logger, full_cfg=cfg)
storage: RolloutStorage = hydra_instantiate(cfg.storage, device=device)
policy: Policy = hydra_instantiate(cfg.policy)
policy = policy.to(device)
updater = hydra_instantiate(cfg.policy_updater, policy=policy, device=device)
evaluator: Evaluator = hydra_instantiate(
cfg.evaluator,
envs=envs,
vid_dir=logger.vid_path,
updater=updater,
logger=logger,
device=device,
)
start_update = 0
if cfg.load_checkpoint is not None:
ckpt = torch.load(cfg.load_checkpoint)
updater.load_state_dict(ckpt["updater"], should_load_opt=cfg.resume_training)
if cfg.load_policy:
policy.load_state_dict(ckpt["policy"])
if cfg.resume_training:
start_update = ckpt["update_i"] + 1
eval_info = {"run_name": logger.run_name}
if cfg.only_eval:
eval_result = evaluator.evaluate(policy, cfg.num_eval_episodes, 0)
logger.collect_infos(eval_result, "eval.", no_rolling_window=True)
eval_info.update(eval_result)
logger.interval_log(0, 0)
logger.close()
return eval_info
obs = envs.reset()
storage.init_storage(obs)
for update_i in range(start_update, num_updates):
is_last_update = update_i == num_updates - 1
for step_idx in range(cfg.num_steps):
with torch.no_grad():
act_data = policy.act(
storage.get_obs(step_idx),
storage.recurrent_hidden_states[step_idx],
storage.masks[step_idx],
)
next_obs, reward, done, info = envs.step(act_data["actions"])
storage.insert(next_obs, reward, done, info, **act_data)
logger.collect_env_step_info(info)
updater.update(policy, storage, logger, envs=envs)
storage.after_update()
if cfg.eval_interval != -1 and (
update_i % cfg.eval_interval == 0 or is_last_update
):
with torch.no_grad():
eval_result = evaluator.evaluate(
policy, cfg.num_eval_episodes, update_i
)
logger.collect_infos(eval_result, "eval.", no_rolling_window=True)
eval_info.update(eval_result)
if cfg.log_interval != -1 and (
update_i % cfg.log_interval == 0 or is_last_update
):
logger.interval_log(update_i, steps_per_update * (update_i + 1))
if cfg.save_interval != -1 and (
(update_i + 1) % cfg.save_interval == 0 or is_last_update
):
save_name = osp.join(logger.save_path, f"ckpt.{update_i}.pth")
torch.save(
{
"policy": policy.state_dict(),
"updater": updater.state_dict(),
"update_i": update_i,
},
save_name,
)
print(f"Saved to {save_name}")
eval_info["last_ckpt"] = save_name
logger.close()
return eval_info
if __name__ == "__main__":
main()
| bc-irl-main | imitation_learning/run.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| bc-irl-main | imitation_learning/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hydra
from omegaconf import OmegaConf
from imitation_learning.run import main
@hydra.main(config_path="config", config_name="default")
def run_and_eval(cfg):
eval_cfg = OmegaConf.merge(cfg, cfg.eval_args)
eval_cfg.load_policy = False
assert eval_cfg.load_checkpoint != "" and eval_cfg.load_checkpoint is not None
print("Evaluating reward function")
main(eval_cfg)
if __name__ == "__main__":
run_and_eval()
| bc-irl-main | imitation_learning/eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from collections import defaultdict
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from torch.nn.utils import spectral_norm
from torch.utils.data import DataLoader
from imitation_learning.common.plotting import plot_actions
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
def wass_grad_pen(
expert_state,
expert_action,
policy_state,
policy_action,
use_actions,
disc,
):
num_dims = len(expert_state.shape) - 1
alpha = torch.rand(expert_state.size(0), 1)
alpha_state = (
alpha.view(-1, *[1 for _ in range(num_dims)])
.expand_as(expert_state)
.to(expert_state.device)
)
mixup_data_state = alpha_state * expert_state + (1 - alpha_state) * policy_state
mixup_data_state.requires_grad = True
inputs = [mixup_data_state]
if use_actions:
alpha_action = alpha.expand_as(expert_action).to(expert_action.device)
mixup_data_action = (
alpha_action * expert_action + (1 - alpha_action) * policy_action
)
mixup_data_action.requires_grad = True
inputs.append(mixup_data_action)
else:
mixup_data_action = []
# disc = disc_fn(cur_obs=mixup_data_state, actions=mixup_data_action)
disc = disc.g(mixup_data_state)
ones = torch.ones(disc.size()).to(disc.device)
grad = torch.autograd.grad(
outputs=disc,
inputs=inputs,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
grad_pen = (grad.norm(2, dim=1) - 1).pow(2).mean()
return grad_pen
class GAIL(nn.Module):
def __init__(
self,
discriminator: DictConfig,
policy_updater: DictConfig,
get_dataset_fn,
batch_size: int,
num_discrim_batches: int,
discrim_opt: DictConfig,
reward_update_freq: int,
device,
policy,
num_envs,
spectral_norm,
grad_pen,
**kwargs,
):
super().__init__()
self.discriminator = instantiate(discriminator).to(device)
self.policy_updater = instantiate(policy_updater, policy=policy)
self._grad_pen = grad_pen
if spectral_norm:
self._apply_spectral_norm()
self.dataset = call(get_dataset_fn)
self.expert_data = DataLoader(self.dataset, batch_size, shuffle=True)
self.discrim_opt = instantiate(
discrim_opt, params=self.discriminator.parameters()
)
self.reward_update_freq = reward_update_freq
self._n_updates = 0
self.batch_size = batch_size
self.num_discrim_batches = num_discrim_batches
self.device = device
self._ep_rewards = torch.zeros(num_envs, device=self.device)
def _apply_spectral_norm(self):
for name, module in self.discriminator.named_modules():
# Only applies the spectral transformation to the high-level
# modules and goes into the sequential modules and applies to
# each element.
if name == "" or "." in name:
continue
if isinstance(module, nn.Sequential):
new_layers = []
for i in range(len(module)):
layer = module[i]
if isinstance(layer, nn.Linear):
layer = spectral_norm(layer)
new_layers.append(layer)
setattr(self.discriminator, name, nn.Sequential(*new_layers))
elif isinstance(module, nn.Linear):
setattr(self.discriminator, name, spectral_norm(module))
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"discrim_opt": self.discrim_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("discrim_opt")
if should_load_opt:
self.discrim_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.discriminator.get_reward(
cur_obs=cur_obs, actions=action, next_obs=next_obs, viz_reward=True
)
def _get_agent_samples(self, rollouts):
num_batches = len(rollouts) // self.batch_size
agent_data = rollouts.data_generator(num_batches, get_next_obs=True)
if self.num_discrim_batches != -1:
agent_data = itertools.islice(agent_data, self.num_discrim_batches)
return agent_data
def _update_discriminator(self, policy, rollouts, logger):
num_batches = len(rollouts) // self.batch_size
agent_data = self._get_agent_samples(rollouts)
for expert_batch, agent_batch in zip(self.expert_data, agent_data):
expert_d = self.discriminator(
cur_obs=expert_batch["observations"],
actions=expert_batch["actions"],
next_obs=expert_batch["next_observations"],
masks=(~expert_batch["terminals"].bool()).float(),
policy=policy,
)
agent_d = self.discriminator(
cur_obs=agent_batch["obs"],
actions=agent_batch["action"],
next_obs=agent_batch["next_obs"],
masks=agent_batch["mask"],
policy=policy,
)
expert_loss = F.binary_cross_entropy_with_logits(
expert_d, torch.ones_like(expert_d, device=self.device)
)
agent_loss = F.binary_cross_entropy_with_logits(
agent_d, torch.zeros_like(agent_d, device=self.device)
)
loss = expert_loss + agent_loss
# disc_fn = partial(self.discriminator, policy=policy)
if self._grad_pen != 0.0:
n_expert = len(expert_batch["observations"])
grad_pen = wass_grad_pen(
expert_batch["observations"],
expert_batch["actions"],
agent_batch["obs"][:n_expert],
agent_batch["action"][:n_expert],
False,
self.discriminator,
)
loss += self._grad_pen * grad_pen
self.discrim_opt.zero_grad()
loss.backward()
self.discrim_opt.step()
logger.collect_info("expert_loss", expert_loss.item())
logger.collect_info("agent_loss", agent_loss.item())
logger.collect_info("discim_loss", loss.item())
def update(self, policy, rollouts, logger, **kwargs):
if (
self.reward_update_freq != -1
and self._n_updates % self.reward_update_freq == 0
):
self._update_discriminator(policy, rollouts, logger)
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
with torch.no_grad():
rollouts.rewards = self.discriminator.get_reward(
cur_obs=obs,
actions=actions,
next_obs=next_obs,
masks=masks,
policy=policy,
)
self._ep_rewards = log_finished_rewards(rollouts, self._ep_rewards, logger)
self.policy_updater.update(policy, rollouts, logger)
self._n_updates += 1
| bc-irl-main | imitation_learning/gail/updater.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
from typing import Tuple
import torch
import torch.nn as nn
from rl_utils.common import make_mlp_layers
class GailRewardType(Enum):
AIRL = auto()
GAIL = auto()
RAW = auto()
class GailDiscriminator(nn.Module):
def __init__(
self,
obs_shape: Tuple[int],
action_dim: int,
reward_hidden_dim: int,
n_hidden_layers: int,
cost_take_dim: int,
use_actions: bool,
reward_type: str,
):
super().__init__()
self.cost_take_dim = cost_take_dim
input_size = obs_shape[0] if cost_take_dim == -1 else cost_take_dim
if use_actions:
input_size += action_dim
self.action_input = use_actions
self.discrim_net = nn.Sequential(
*make_mlp_layers(input_size, 1, reward_hidden_dim, n_hidden_layers)
)
self.reward_type = GailRewardType[reward_type]
def forward(self, cur_obs=None, actions=None, **kwargs):
if self.cost_take_dim != -1:
if cur_obs is not None:
cur_obs = cur_obs[:, :, : self.cost_take_dim]
if self.action_input:
inputs = torch.cat([cur_obs, actions], -1)
else:
inputs = cur_obs
return self.discrim_net(inputs)
def get_reward(self, cur_obs=None, actions=None, **kwargs):
d_val = self.forward(cur_obs, actions)
s = torch.sigmoid(d_val)
eps = 1e-20
if self.reward_type == GailRewardType.AIRL:
reward = (s + eps).log() - (1 - s + eps).log()
elif self.reward_type == GailRewardType.GAIL:
reward = (s + eps).log()
elif self.reward_type == GailRewardType.RAW:
reward = d_val
return reward
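# Hedged usage sketch (added for illustration; not part of the original file):
# shows how the three reward types transform the raw discriminator logit.
# The observation size, hidden size, and batch size are arbitrary test values.
def _example_reward_types():
    obs_shape, action_dim = (4,), 2
    reward = None
    for reward_type in ("AIRL", "GAIL", "RAW"):
        disc = GailDiscriminator(
            obs_shape=obs_shape,
            action_dim=action_dim,
            reward_hidden_dim=32,
            n_hidden_layers=1,
            cost_take_dim=-1,
            use_actions=False,
            reward_type=reward_type,
        )
        reward = disc.get_reward(cur_obs=torch.zeros(8, *obs_shape))
        assert reward.shape == (8, 1)
    return reward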
| bc-irl-main | imitation_learning/gail/discriminator.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from rl_utils.models import (FixedCategorical, FixedNormal, SimpleCNN,
build_rnn_state_encoder)
def init_weights(m, gain=1):
if isinstance(m, nn.Linear):
torch.nn.init.orthogonal_(m.weight, gain=gain)
m.bias.data.fill_(0.0)
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
self.apply(partial(init_weights, gain=0.01))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, std_init, squash_mean):
super().__init__()
if squash_mean:
self.fc_mean = nn.Sequential(
nn.Linear(num_inputs, num_outputs),
nn.Tanh(),
)
else:
self.fc_mean = nn.Linear(num_inputs, num_outputs)
self.logstd = nn.Parameter(torch.full((1, num_outputs), float(std_init)))
self.apply(init_weights)
def forward(self, x):
action_mean = self.fc_mean(x)
action_logstd = self.logstd.expand_as(action_mean)
return FixedNormal(action_mean, action_logstd.exp())
class Policy(nn.Module):
def __init__(
self,
obs_shape,
action_dim,
action_is_discrete,
hidden_size,
recurrent_hidden_size,
is_recurrent,
std_init=0.0,
squash_mean=False,
):
super().__init__()
if isinstance(obs_shape, dict):
is_visual_obs = any([len(v) == 3 for k, v in obs_shape.items()])
else:
is_visual_obs = len(obs_shape) == 3
if is_visual_obs:
self.backbone = SimpleCNN(obs_shape, hidden_size)
input_size = hidden_size
else:
self.backbone = nn.Sequential()
input_size = obs_shape[0]
if is_recurrent:
self.rnn_encoder = build_rnn_state_encoder(
recurrent_hidden_size, recurrent_hidden_size
)
else:
# Pass through
self.rnn_encoder = lambda hidden, hxs, _: (hidden, hxs)
self.actor = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
)
self.critic = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 1),
)
self.apply(partial(init_weights, gain=np.sqrt(2)))
if action_is_discrete:
self.actor_dist = Categorical(hidden_size, action_dim)
else:
self.actor_dist = DiagGaussian(
hidden_size, action_dim, std_init, squash_mean
)
def get_value(self, obs, hxs, masks):
hidden, _ = self.forward(obs, hxs, masks)
return self.critic(hidden)
def evaluate_actions(self, obs, hxs, masks, action):
hidden, hxs = self.forward(obs, hxs, masks)
critic_value = self.critic(hidden)
actor_hidden = self.actor(hidden)
dist = self.actor_dist(actor_hidden)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy()
return {
"log_prob": action_log_probs,
"value": critic_value,
"dist_entropy": dist_entropy,
}
def forward(self, obs, hxs, masks):
hidden = self.backbone(obs)
hidden, hxs = self.rnn_encoder(hidden, hxs, masks)
return hidden, hxs
def get_action_dist(self, obs, hxs, masks):
hidden, hxs = self.forward(obs, hxs, masks)
actor_hidden = self.actor(hidden)
return self.actor_dist(actor_hidden)
def act(self, obs, hxs, masks, is_eval=False):
hidden, hxs = self.forward(obs, hxs, masks)
critic_value = self.critic(hidden)
actor_hidden = self.actor(hidden)
dist = self.actor_dist(actor_hidden)
if is_eval:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy()
return {
"actions": action,
"action_log_probs": action_log_probs,
"value_preds": critic_value,
"hxs": hxs,
"extra": {
"dist_entropy": dist_entropy,
},
}
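# Hedged usage sketch (added for illustration; not part of the original file):
# builds a small continuous-action policy and samples actions for a dummy
# observation batch. All sizes are arbitrary illustration values.
def _example_policy_act():
    policy = Policy(
        obs_shape=(4,),
        action_dim=2,
        action_is_discrete=False,
        hidden_size=32,
        recurrent_hidden_size=32,
        is_recurrent=False,
    )
    obs = torch.zeros(3, 4)
    hxs = torch.zeros(3, 32)
    masks = torch.ones(3, 1)
    act_data = policy.act(obs, hxs, masks)
    assert act_data["actions"].shape == (3, 2)
    return act_data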
| bc-irl-main | imitation_learning/policy_opt/policy.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| bc-irl-main | imitation_learning/policy_opt/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import Dict, Optional
import torch
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage:
def __init__(
self,
num_steps,
num_processes,
obs_shape,
action_dim,
action_is_discrete,
recurrent_hidden_state_size,
device,
fetch_final_obs,
):
super().__init__()
if isinstance(obs_shape, dict):
self.obs_keys = obs_shape
else:
self.obs_keys = {None: obs_shape}
self.obs: Dict[Optional[str], torch.Tensor] = {}
for k, space_shape in self.obs_keys.items():
ob = torch.zeros(num_steps + 1, num_processes, *space_shape)
self.obs[k] = ob
self.rewards = torch.zeros(num_steps, num_processes, 1)
self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)
self.action_log_probs = torch.zeros(num_steps, num_processes, 1)
self.recurrent_hidden_states = torch.zeros(
num_steps + 1, num_processes, recurrent_hidden_state_size
)
self.actions = torch.zeros(num_steps, num_processes, action_dim)
if action_is_discrete:
self.actions = self.actions.long()
self.masks = torch.zeros(num_steps + 1, num_processes, 1)
self.bad_masks = torch.ones(num_steps + 1, num_processes, 1)
if fetch_final_obs:
self.final_obs = torch.zeros(num_steps, num_processes, *space_shape)
else:
self.final_obs = None
self.num_steps = num_steps
self.n_procs = num_processes
self.step = 0
self.to(device)
def compute_masks(self, done, infos):
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if "bad_transition" in info.keys() else [1.0] for info in infos]
)
return masks, bad_masks
def __len__(self):
return self.num_steps * self.n_procs
def init_storage(self, obs):
for k in self.obs_keys:
if k is None:
self.obs[k][0].copy_(obs)
else:
self.obs[k][0].copy_(obs[k])
self.masks = self.masks.zero_()
self.bad_masks = self.bad_masks.zero_()
self.recurrent_hidden_states = self.recurrent_hidden_states.zero_()
def to(self, device):
for k in self.obs_keys:
self.obs[k] = self.obs[k].to(device)
if self.final_obs is not None:
self.final_obs = self.final_obs.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
self.masks = self.masks.to(device)
self.bad_masks = self.bad_masks.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
def insert(
self,
next_obs,
rewards,
done,
infos,
actions,
value_preds,
action_log_probs,
hxs,
**kwargs,
):
masks, bad_masks = self.compute_masks(done, infos)
for k in self.obs_keys:
if k is None:
self.obs[k][self.step + 1].copy_(next_obs)
else:
self.obs[k][self.step + 1].copy_(next_obs[k])
self.actions[self.step].copy_(actions)
self.action_log_probs[self.step].copy_(action_log_probs)
self.value_preds[self.step].copy_(value_preds)
self.rewards[self.step].copy_(rewards)
self.masks[self.step + 1].copy_(masks)
self.bad_masks[self.step + 1].copy_(bad_masks)
self.recurrent_hidden_states[self.step + 1].copy_(hxs)
if self.final_obs is not None:
for env_i, info in enumerate(infos):
if "final_obs" in info:
self.final_obs[self.step, env_i].copy_(info["final_obs"])
self.step = (self.step + 1) % self.num_steps
def get_obs(self, idx):
ret_d = {}
for k in self.obs_keys:
if k is None:
return self.obs[k][idx]
ret_d[k] = self.obs[k][idx]
return ret_d
def after_update(self):
for k in self.obs_keys:
self.obs[k][0].copy_(self.obs[k][-1])
self.masks[0].copy_(self.masks[-1])
self.bad_masks[0].copy_(self.bad_masks[-1])
self.recurrent_hidden_states[0].copy_(self.recurrent_hidden_states[-1])
def data_generator(self, num_mini_batch, get_next_obs=False, **add_data):
if get_next_obs and self.final_obs is None:
raise ValueError(
"Must fetch final observations if getting the next observation"
)
if get_next_obs and len(self.obs_keys) > 1:
raise ValueError("Cannot fetch next obseration with dictionary observation")
num_processes = self.rewards.size(1)
if num_processes < num_mini_batch:
raise ValueError(
f"Number of processes {num_processes} is smaller than num mini batch {num_mini_batch}"
)
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
ret_data = {
"obs": defaultdict(list),
"hxs": [],
"action": [],
"value": [],
"mask": [],
"prev_log_prob": [],
"reward": [],
"add_batch": defaultdict(list),
}
if get_next_obs:
ret_data["next_obs"] = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
for k in self.obs_keys:
ret_data["obs"][k].append(self.obs[k][:-1, ind])
if get_next_obs:
mask = self.masks[1:, ind]
final_obs = self.final_obs[:, ind]
# This code assumes observation dict has only 1 key.
first_key = next(iter(self.obs_keys))
obs = self.obs[first_key][1:, ind]
ret_data["next_obs"].append((mask * obs) + ((1 - mask) * final_obs))
ret_data["hxs"].append(self.recurrent_hidden_states[0:1, ind])
ret_data["action"].append(self.actions[:, ind])
ret_data["value"].append(self.value_preds[:-1, ind])
ret_data["reward"].append(self.rewards[:, ind])
ret_data["mask"].append(self.masks[:-1, ind])
ret_data["prev_log_prob"].append(self.action_log_probs[:, ind])
for k, v in add_data.items():
ret_data["add_batch"][k].append(v[:, ind])
T, N = self.num_steps, num_envs_per_batch
for k, v in ret_data.items():
if k == "hxs":
ret_data[k] = torch.stack(v, 1).view(N, -1)
elif isinstance(v, dict):
for sub_k, sub_v in v.items():
ret_data[k][sub_k] = _flatten_helper(
T, N, torch.stack(sub_v, 1)
)
else:
ret_data[k] = _flatten_helper(T, N, torch.stack(v, 1))
# Pop the add batch keys out a level
add_batch = ret_data.pop("add_batch")
ret_data.update(add_batch)
# No need to return obs dict if there's only one thing in
# dictionary
if len(ret_data["obs"]) == 1:
ret_data["obs"] = next(iter(ret_data["obs"].values()))
yield ret_data
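# Hedged usage sketch (added for illustration; not part of the original file):
# fills a tiny RolloutStorage with dummy transitions and pulls one mini-batch
# from `data_generator`. Every size below is an arbitrary illustration value.
def _example_rollout_storage():
    num_steps, num_envs, obs_dim, action_dim = 4, 2, 3, 1
    storage = RolloutStorage(
        num_steps=num_steps,
        num_processes=num_envs,
        obs_shape=(obs_dim,),
        action_dim=action_dim,
        action_is_discrete=False,
        recurrent_hidden_state_size=8,
        device=torch.device("cpu"),
        fetch_final_obs=False,
    )
    storage.init_storage(torch.zeros(num_envs, obs_dim))
    for _ in range(num_steps):
        storage.insert(
            next_obs=torch.zeros(num_envs, obs_dim),
            rewards=torch.zeros(num_envs, 1),
            done=[False] * num_envs,
            infos=[{} for _ in range(num_envs)],
            actions=torch.zeros(num_envs, action_dim),
            value_preds=torch.zeros(num_envs, 1),
            action_log_probs=torch.zeros(num_envs, 1),
            hxs=torch.zeros(num_envs, 8),
        )
    batch = next(storage.data_generator(num_mini_batch=1))
    assert batch["obs"].shape == (num_steps * num_envs, obs_dim)
    return batch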
| bc-irl-main | imitation_learning/policy_opt/storage.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import torch.nn as nn
from hydra.utils import instantiate as hydra_instantiate
from imitation_learning.policy_opt.policy import Policy
class PPO:
def __init__(
self,
use_gae: bool,
gae_lambda: float,
gamma: float,
use_clipped_value_loss: bool,
clip_param: bool,
value_loss_coef: float,
entropy_coef: float,
max_grad_norm: float,
num_mini_batch: int,
num_epochs: int,
optimizer_params: Dict[str, Any],
num_steps: int,
num_envs: int,
policy: Policy,
**kwargs,
):
super().__init__()
self.use_gae = use_gae
self.gae_lambda = gae_lambda
self.gamma = gamma
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_param = clip_param
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.num_mini_batch = num_mini_batch
self.num_epochs = num_epochs
self.num_steps = num_steps
self.num_envs = num_envs
self.opt: torch.optim.Optimizer = hydra_instantiate(
optimizer_params, params=policy.parameters()
)
self.returns = torch.zeros(self.num_steps + 1, self.num_envs, 1)
def state_dict(self):
return {"opt": self.opt.state_dict()}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("opt")
if should_load_opt:
self.opt.load_state_dict(opt_state)
def update(
self,
policy,
storage,
logger,
**kwargs,
):
with torch.no_grad():
last_value = policy.get_value(
storage.get_obs(-1),
storage.recurrent_hidden_states[-1],
storage.masks[-1],
)
advantages = self.compute_derived(
policy,
storage.rewards,
storage.masks,
storage.bad_masks,
storage.value_preds,
last_value,
)
for _ in range(self.num_epochs):
data_gen = storage.data_generator(
self.num_mini_batch, returns=self.returns[:-1], advantages=advantages
)
for sample in data_gen:
ac_eval = policy.evaluate_actions(
sample["obs"],
sample["hxs"],
sample["mask"],
sample["action"],
)
ratio = torch.exp(ac_eval["log_prob"] - sample["prev_log_prob"])
surr1 = ratio * sample["advantages"]
surr2 = (
torch.clamp(
ratio,
1.0 - self.clip_param,
1.0 + self.clip_param,
)
* sample["advantages"]
)
action_loss = -torch.min(surr1, surr2).mean(0)
if self.use_clipped_value_loss:
value_pred_clipped = sample["value"] + (
ac_eval["value"] - sample["value"]
).clamp(-self.clip_param, self.clip_param)
value_losses = (ac_eval["value"] - sample["returns"]).pow(2)
value_losses_clipped = (value_pred_clipped - sample["returns"]).pow(
2
)
value_loss = (
0.5 * torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = (
0.5 * (sample["returns"] - ac_eval["value"]).pow(2).mean()
)
loss = (
value_loss * self.value_loss_coef
+ action_loss
- ac_eval["dist_entropy"].mean() * self.entropy_coef
)
self.opt.zero_grad()
loss.backward()
if self.max_grad_norm > 0:
nn.utils.clip_grad_norm_(policy.parameters(), self.max_grad_norm)
self.opt.step()
logger.collect_info("value_loss", value_loss.mean().item())
logger.collect_info("action_loss", action_loss.mean().item())
logger.collect_info(
"dist_entropy", ac_eval["dist_entropy"].mean().item()
)
def compute_derived(
self,
policy,
rewards,
masks,
bad_masks,
value_preds,
last_value,
):
if self.use_gae:
value_preds[-1] = last_value
gae = 0
for step in reversed(range(rewards.size(0))):
delta = (
rewards[step]
+ self.gamma * value_preds[step + 1] * masks[step + 1]
- value_preds[step]
)
gae = delta + self.gamma * self.gae_lambda * masks[step + 1] * gae
gae = gae * bad_masks[step + 1]
self.returns[step] = gae + value_preds[step]
else:
self.returns[-1] = last_value
for step in reversed(range(rewards.size(0))):
self.returns[step] = (
self.returns[step + 1] * self.gamma * masks[step + 1]
+ rewards[step]
) * bad_masks[step + 1] + (1 - bad_masks[step + 1]) * value_preds[step]
advantages = self.returns[:-1] - value_preds[:-1]
# Normalize the advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return advantages
| bc-irl-main | imitation_learning/policy_opt/ppo.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from higher.optim import DifferentiableOptimizer
from hydra.utils import instantiate
from omegaconf import DictConfig
class DifferentiablePPO(nn.Module):
def __init__(
self,
use_gae: bool,
gae_lambda: float,
gamma: float,
use_clipped_value_loss: bool,
clip_param: bool,
value_loss_coef: float,
entropy_coef: float,
max_grad_norm: float,
num_mini_batch: int,
num_epochs: int,
):
super().__init__()
self.use_gae = use_gae
self.gae_lambda = gae_lambda
self.gamma = gamma
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_param = clip_param
self.entropy_coef = entropy_coef
self.max_grad_norm = max_grad_norm
self.num_mini_batch = num_mini_batch
self.num_epochs = num_epochs
self.value_loss_coef = value_loss_coef
def update(
self,
policy,
storage,
logger,
optimizer: DifferentiableOptimizer,
rewards,
) -> None:
with torch.no_grad():
last_value = policy.get_value(
storage.get_obs(-1),
storage.recurrent_hidden_states[-1],
storage.masks[-1],
)
advantages, returns = self.compute_derived(
rewards,
storage.masks,
storage.bad_masks,
storage.value_preds.detach(),
last_value,
)
for _ in range(self.num_epochs):
data_gen = storage.data_generator(
self.num_mini_batch, returns=returns[:-1], advantages=advantages
)
for sample in data_gen:
ac_eval = policy.evaluate_actions(
sample["obs"],
sample["hxs"],
sample["mask"],
sample["action"],
)
ratio = torch.exp(ac_eval["log_prob"] - sample["prev_log_prob"])
surr1 = ratio * sample["advantages"]
surr2 = (
torch.clamp(
ratio,
1.0 - self.clip_param,
1.0 + self.clip_param,
)
* sample["advantages"]
)
action_loss = -torch.min(surr1, surr2).mean(0)
value_target = sample["returns"].detach()
if self.use_clipped_value_loss:
value_pred_clipped = sample["value"] + (
ac_eval["value"] - sample["value"]
).clamp(-self.clip_param, self.clip_param)
value_losses = (ac_eval["value"] - value_target).pow(2)
value_losses_clipped = (value_pred_clipped - value_target).pow(2)
value_loss = (
0.5 * torch.max(value_losses, value_losses_clipped).mean()
)
else:
value_loss = 0.5 * (value_target - ac_eval["value"]).pow(2).mean()
loss = (
value_loss * self.value_loss_coef
+ action_loss
- ac_eval["dist_entropy"].mean() * self.entropy_coef
)
# if self.max_grad_norm > 0:
# nn.utils.clip_grad_norm_(policy.parameters(), self.max_grad_norm)
optimizer.step(loss)
logger.collect_info("value_loss", value_loss.mean().item())
logger.collect_info("action_loss", action_loss.mean().item())
logger.collect_info(
"dist_entropy", ac_eval["dist_entropy"].mean().item()
)
def compute_derived(
self,
rewards,
masks,
bad_masks,
value_preds,
last_value,
):
num_steps, num_envs = rewards.shape[:2]
returns = torch.zeros(num_steps + 1, num_envs, 1, device=last_value.device)
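        # masks are 0 at true episode terminations; bad_masks are 0 when an episode was cut off
        # (e.g. by a time limit), in which case the return is reset to the value prediction
        # instead of being bootstrapped (pytorch-a2c-ppo-acktr convention).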
if self.use_gae:
value_preds[-1] = last_value
gae = 0
for step in reversed(range(rewards.size(0))):
delta = (
rewards[step]
+ self.gamma * value_preds[step + 1] * masks[step + 1]
- value_preds[step]
)
gae = delta + self.gamma * self.gae_lambda * masks[step + 1] * gae
gae = gae * bad_masks[step + 1]
returns[step] = gae + value_preds[step]
else:
returns[-1] = last_value
for step in reversed(range(rewards.size(0))):
returns[step] = (
returns[step + 1] * self.gamma * masks[step + 1] + rewards[step]
) * bad_masks[step + 1] + (1 - bad_masks[step + 1]) * value_preds[step]
advantages = returns[:-1] - value_preds[:-1]
# Normalize the advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return advantages, returns
| bc-irl-main | imitation_learning/bc_irl/differentiable_ppo.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| bc-irl-main | imitation_learning/bc_irl/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable
import higher
import torch
import torch.nn as nn
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common import DictDataset
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from imitation_learning.common.plotting import plot_actions
from imitation_learning.common.utils import (extract_transition_batch,
log_finished_rewards)
class BCIRL(nn.Module):
def __init__(
self,
reward: DictConfig,
inner_updater: DictConfig,
get_dataset_fn,
batch_size: int,
inner_opt: DictConfig,
reward_opt: DictConfig,
irl_loss: DictConfig,
plot_interval: int,
norm_expert_actions: bool,
n_inner_iters: int,
num_steps: int,
reward_update_freq: int,
storage_cfg: DictConfig,
device,
total_num_updates: int,
num_envs: int,
use_lr_decay: bool,
policy_init_fn: Callable[[nn.Module, nn.Module], nn.Module],
force_num_env_steps_lr_decay: float = -1.0,
**kwargs,
):
super().__init__()
if inner_updater is not None:
self.inner_updater = instantiate(inner_updater)
self.reward = instantiate(reward).to(device)
self.dataset = call(get_dataset_fn)
self.data_loader = DataLoader(self.dataset, batch_size, shuffle=True)
self.inner_opt = inner_opt
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
self._n_updates = 0
self.use_lr_decay = use_lr_decay
self.policy_init_fn = policy_init_fn
if force_num_env_steps_lr_decay > 0:
use_total_num_updates = force_num_env_steps_lr_decay // (
num_envs * num_steps
)
else:
use_total_num_updates = total_num_updates
self.lr_scheduler = LambdaLR(
optimizer=self.reward_opt,
lr_lambda=lambda x: 1 - (self._n_updates / use_total_num_updates),
)
self.irl_loss = instantiate(irl_loss)
self.data_loader_iter = iter(self.data_loader)
self.plot_interval = plot_interval
self.norm_expert_actions = norm_expert_actions
self.n_inner_iters = n_inner_iters
self.num_steps = num_steps
self.reward_update_freq = reward_update_freq
self.storage_cfg = storage_cfg
self.device = device
self.all_rollouts = [
instantiate(self.storage_cfg, device=self.device)
for _ in range(self.n_inner_iters - 1)
]
self._ep_rewards = torch.zeros(num_envs, device=self.device)
def state_dict(self):
return {
**super().state_dict(),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.reward(cur_obs, action, next_obs)
def _irl_loss_step(self, policy, logger):
expert_batch = next(self.data_loader_iter, None)
if expert_batch is None:
self.data_loader_iter = iter(self.data_loader)
expert_batch = next(self.data_loader_iter, None)
expert_actions = expert_batch["actions"].to(self.device)
expert_obs = expert_batch["observations"].to(self.device)
if self.norm_expert_actions:
# Clip expert actions to be within [-1,1]. Actions have no effect
# out of that range
expert_actions = torch.clamp(expert_actions, -1.0, 1.0)
dist = policy.get_action_dist(expert_obs, None, None)
pred_actions = dist.mean
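        # BC-IRL's outer objective is a behavior-cloning style loss between the adapted
        # policy's mean action on expert states and the expert actions.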
irl_loss_val = self.irl_loss(expert_actions, pred_actions)
irl_loss_val.backward(retain_graph=True)
logger.collect_info("irl_loss", irl_loss_val.item())
if self._n_updates % self.plot_interval == 0:
plot_actions(
pred_actions.detach().cpu(),
expert_actions.detach().cpu(),
self._n_updates,
logger.vid_dir,
)
@property
def inner_lr(self):
return self.inner_opt["lr"]
def update(self, policy, rollouts, logger, envs):
self.reward_opt.zero_grad()
policy = call(self.policy_init_fn, old_policy=policy).to(self.device)
policy_opt = instantiate(
self.inner_opt, lr=self.inner_lr, params=policy.parameters()
)
# Setup Meta loop
with higher.innerloop_ctx(
policy,
policy_opt,
) as (dpolicy, diffopt):
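            # Bi-level BC-IRL update: run n_inner_iters differentiable policy (PPO) updates
            # under the current learned reward, then backpropagate the imitation (IRL) loss on
            # expert actions through those updates to get gradients for the reward parameters.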
for inner_i in range(self.n_inner_iters):
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
rollouts.rewards = self.reward(obs, actions, next_obs)
if inner_i == 0:
self._ep_rewards = log_finished_rewards(
rollouts, self._ep_rewards, logger
)
# Inner loop policy update
self.inner_updater.update(
dpolicy, rollouts, logger, diffopt, rollouts.rewards
)
if inner_i != self.n_inner_iters - 1:
new_rollouts = self.all_rollouts[inner_i - 1]
for k in rollouts.obs_keys:
new_rollouts.obs[k][0].copy_(rollouts.obs[k][-1])
new_rollouts.masks[0].copy_(rollouts.masks[-1])
new_rollouts.bad_masks[0].copy_(rollouts.bad_masks[-1])
new_rollouts.recurrent_hidden_states[0].copy_(
rollouts.recurrent_hidden_states[-1]
)
# Collect the next batch of data.
new_rollouts.after_update()
for step_idx in range(self.num_steps):
with torch.no_grad():
act_data = policy.act(
new_rollouts.get_obs(step_idx),
new_rollouts.recurrent_hidden_states[step_idx],
new_rollouts.masks[step_idx],
)
next_obs, reward, done, info = envs.step(act_data["action"])
new_rollouts.insert(next_obs, reward, done, info, **act_data)
rollouts = new_rollouts
# Compute IRL loss
self._irl_loss_step(dpolicy, logger)
if (
self.reward_update_freq != -1
and self._n_updates % self.reward_update_freq == 0
):
self.reward_opt.step()
if hasattr(self.reward, "log"):
self.reward.log(logger)
policy.load_state_dict(dpolicy.state_dict())
if self.use_lr_decay and self.reward_update_freq != -1:
# Step even if we did not update so we properly decay to 0.
self.lr_scheduler.step()
logger.collect_info("reward_lr", self.lr_scheduler.get_last_lr()[0])
self._n_updates += 1
| bc-irl-main | imitation_learning/bc_irl/updater.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
import torch
import torch.nn as nn
from hydra.utils import instantiate
from rl_utils.common import make_mlp_layers
def full_reset_init(old_policy, policy_cfg, **kwargs):
return instantiate(policy_cfg)
def reg_init(old_policy, **kwargs):
return old_policy
class StructuredReward(nn.Module):
def __init__(self, obs_shape, **kwargs):
super().__init__()
self.center = nn.Parameter(torch.randn(obs_shape[0]))
def forward(self, X):
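        # Reward is the negative mean squared distance to a learned "goal" center.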
return -1.0 * ((X - self.center) ** 2).mean(-1, keepdims=True)
def log(self, logger):
for i, center_val in enumerate(self.center):
logger.collect_info(f"reward_{i}", center_val.item())
class GtReward(nn.Module):
    def __init__(
        self,
        reward_thresh: float,
        slack: float,
    ):
        super().__init__()
        self._reward_thresh = reward_thresh
        self._slack = slack
def forward(self, cur_obs=None, actions=None, next_obs=None):
cur_dist = torch.linalg.norm(cur_obs, dim=-1)
reward = torch.full(cur_dist.shape, -self._slack)
assign = -self._slack * cur_dist
should_give_reward = cur_dist < self._reward_thresh
reward[should_give_reward] = assign[should_give_reward]
return reward
| bc-irl-main | imitation_learning/bc_irl/rewards.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common.net_utils import make_mlp_layers
from torch.utils.data import DataLoader
from imitation_learning.common.plotting import plot_actions
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
from imitation_learning.gail.updater import GAIL
class fIRL(GAIL):
"""
From https://github.com/twni2016/f-IRL/blob/a3f1ec66f29c6d659abba630f70f8ae2e59ebe1e/firl/divs/f_div_disc.py
"""
def __init__(
self,
discriminator: DictConfig,
reward: DictConfig,
policy_updater: DictConfig,
get_dataset_fn,
batch_size: int,
num_discrim_batches: int,
reward_opt: DictConfig,
discrim_opt: DictConfig,
reward_update_freq: int,
importance_sampling: bool,
div_type: str,
device,
policy,
num_envs,
**kwargs,
):
super().__init__(
discriminator,
policy_updater,
get_dataset_fn,
batch_size,
num_discrim_batches,
discrim_opt,
reward_update_freq,
device,
policy,
num_envs,
)
self.reward = instantiate(reward).to(device)
self._div_type = div_type
self._importance_sampling = importance_sampling
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict, should_load_opt)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
        # Intentionally pass cur_obs in as next_obs so the visualized reward is for that state.
return self.reward(next_obs=cur_obs)
def _update_reward(self, policy, rollouts, logger):
agent_data = self._get_agent_samples(rollouts)
for expert_batch, agent_batch in zip(self.expert_data, agent_data):
# Combine experience from both.
with torch.no_grad():
obs = torch.cat(
[
expert_batch["next_observations"],
agent_batch["next_obs"],
],
0,
)
actions = torch.cat([expert_batch["actions"], agent_batch["action"]], 0)
logits = self.discriminator(cur_obs=obs)
                # t1 depends on the chosen f-divergence (fkl / rkl / js).
if self._div_type == "fkl":
t1 = torch.exp(logits)
elif self._div_type == "rkl":
t1 = logits
elif self._div_type == "js":
t1 = F.softplus(logits)
else:
raise ValueError()
t1 = -t1
t2 = self.reward(next_obs=obs)
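            # f-IRL reward gradient: the loss below is the (optionally importance-weighted)
            # covariance between t1 (the f-divergence term from the discriminator) and t2
            # (the learned reward), following the referenced f-IRL implementation.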
if self._importance_sampling:
with torch.no_grad():
traj_reward = t2.detach().clone()
traj_log_prob = policy.evaluate_actions(obs, None, None, actions)[
"log_prob"
]
IS_ratio = F.softmax(traj_reward - traj_log_prob, dim=0)
loss = (IS_ratio * t1 * t2).mean() - (
(IS_ratio * t1).mean() * (IS_ratio * t2).mean()
)
else:
loss = (t1 * t2).mean() - (t1.mean() * t2.mean())
self.reward_opt.zero_grad()
loss.backward()
self.reward_opt.step()
logger.collect_info("reward_loss", loss.item())
def update(self, policy, rollouts, logger, **kwargs):
if (
self.reward_update_freq != -1
and self._n_updates % self.reward_update_freq == 0
):
self._update_discriminator(policy, rollouts, logger)
self._update_reward(policy, rollouts, logger)
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
with torch.no_grad():
rollouts.rewards = self.reward(next_obs=next_obs)
self._ep_rewards = log_finished_rewards(rollouts, self._ep_rewards, logger)
self.policy_updater.update(policy, rollouts, logger)
self._n_updates += 1
| bc-irl-main | imitation_learning/f_irl/updater.py |
| bc-irl-main | imitation_learning/config/logger/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common import DictDataset, make_mlp_layers
from torch.utils.data import DataLoader
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
class MaxEntIRL(nn.Module):
def __init__(
self,
reward: DictConfig,
reward_opt: DictConfig,
get_dataset_fn,
batch_size: int,
num_cost_epochs: int,
device,
policy_updater: DictConfig,
should_update_reward: bool,
policy,
num_envs,
grid_lower=-1.5,
grid_upper=1.5,
grid_density=310,
**kwargs
):
super().__init__()
self.reward = instantiate(reward).to(device)
self.policy_updater = instantiate(policy_updater, policy=policy)
self.demo_obs = call(get_dataset_fn)["next_observations"]
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
self.num_cost_epochs = num_cost_epochs
self._ep_rewards = torch.zeros(num_envs, device=device)
self.should_update_reward = should_update_reward
        # Instead of policy samples we use a grid over the state space, which is feasible in
        # this low-dimensional toy example and gives an (approximately) "exact" MaxEnt
        # partition function estimate.
self.grid_samples = (
torch.stack(
torch.meshgrid(
torch.linspace(grid_lower, grid_upper, grid_density),
torch.linspace(grid_lower, grid_upper, grid_density),
indexing="ij",
),
-1,
)
.to(device)
.view(-1, 2)
)
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.reward(next_obs=next_obs)
def update(self, policy, rollouts, logger, **kwargs):
if self.should_update_reward:
for _ in range(self.num_cost_epochs):
reward_samples = self.reward(next_obs=self.grid_samples)
                # Evaluate the learned reward on the expert demonstration states.
reward_demos = self.reward(next_obs=self.demo_obs)
# optimize reward
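                # MaxEnt IRL objective: maximize E_demo[r(s)] - log Z, with log Z estimated by
                # logsumexp over the grid samples minus log(#samples).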
loss_ME = -(
torch.mean(reward_demos)
- (
torch.logsumexp(reward_samples, dim=0)
- np.log(len(reward_samples))
)
)
self.reward_opt.zero_grad()
loss_ME.backward()
self.reward_opt.step()
logger.collect_info("irl_loss", loss_ME.item())
else:
with torch.no_grad():
_, _, next_obs, _ = extract_transition_batch(rollouts)
rollouts.rewards = self.reward(next_obs=next_obs)
self._ep_rewards = log_finished_rewards(
rollouts, self._ep_rewards, logger
)
self.policy_updater.update(policy, rollouts, logger)
| bc-irl-main | imitation_learning/maxent/updater.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.nn as nn
from rl_utils.common import make_mlp_layers
class AirlDiscriminator(nn.Module):
def __init__(
self,
obs_shape: Tuple[int],
action_dim: int,
reward_hidden_dim: int,
cost_take_dim: int,
n_hidden_layers: int,
use_shaped_reward: bool,
gamma: float,
airl_reward_bonus: float,
):
super().__init__()
self.cost_take_dim = cost_take_dim
state_size = obs_shape[0] if cost_take_dim == -1 else cost_take_dim
self.g = nn.Sequential(
*make_mlp_layers(state_size, 1, reward_hidden_dim, n_hidden_layers)
)
self.h = nn.Sequential(
*make_mlp_layers(state_size, 1, reward_hidden_dim, n_hidden_layers)
)
self.use_shaped_reward = use_shaped_reward
self.gamma = gamma
self.airl_reward_bonus = airl_reward_bonus
def f(self, cur_obs, next_obs, masks, force_no_shaped=False, **kwargs):
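        # AIRL reward decomposition: f(s, s') = g(s) + gamma * h(s') - h(s), where g models the
        # reward and h a shaping potential (Fu et al., 2018).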
rs = self.g(cur_obs)
if self.use_shaped_reward and not force_no_shaped:
vs = self.h(cur_obs)
next_vs = self.h(next_obs)
return rs + (self.gamma * masks * next_vs) - vs
else:
return rs
def forward(self, cur_obs, next_obs, actions, masks, policy, **kwargs):
log_p = self.f(cur_obs, next_obs, masks)
with torch.no_grad():
log_q = policy.evaluate_actions(cur_obs, {}, masks, actions)["log_prob"]
return log_p - log_q
def get_reward(
self,
cur_obs,
next_obs,
masks=None,
actions=None,
policy=None,
viz_reward=False,
**kwargs
):
log_p = self.f(cur_obs, next_obs, masks=masks, force_no_shaped=viz_reward)
if viz_reward:
return log_p
with torch.no_grad():
log_q = policy.evaluate_actions(cur_obs, {}, masks, actions)["log_prob"]
logits = log_p - (self.airl_reward_bonus * log_q)
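        # log(sigmoid(x)) - log(1 - sigmoid(x)) = x, so (up to the eps guard) the reward
        # returned below is just these logits, i.e. log_p - airl_reward_bonus * log_q.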
s = torch.sigmoid(logits)
eps = 1e-20
return (s + eps).log() - (1 - s + eps).log()
| bc-irl-main | imitation_learning/airl/discriminator.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from hydra.utils import call, instantiate
from omegaconf import DictConfig
from rl_utils.common import DictDataset, make_mlp_layers
from torch.utils.data import DataLoader
from imitation_learning.common.utils import (create_next_obs,
extract_transition_batch,
log_finished_rewards)
class GCL(nn.Module):
def __init__(
self,
reward: DictConfig,
reward_opt: DictConfig,
get_dataset_fn,
batch_size: int,
device,
policy_updater: DictConfig,
should_update_reward: bool,
policy,
num_envs,
**kwargs
):
super().__init__()
self.reward = instantiate(reward).to(device)
self.policy_updater = instantiate(policy_updater, policy=policy)
self.batch_size = batch_size
self.dataset = call(get_dataset_fn)
self.expert_data = DataLoader(self.dataset, batch_size, shuffle=True)
self.reward_opt = instantiate(reward_opt, params=self.reward.parameters())
self._ep_rewards = torch.zeros(num_envs, device=device)
self.should_update_reward = should_update_reward
def state_dict(self, **kwargs):
return {
**super().state_dict(**kwargs),
"reward_opt": self.reward_opt.state_dict(),
}
def load_state_dict(self, state_dict, should_load_opt):
opt_state = state_dict.pop("reward_opt")
if should_load_opt:
self.reward_opt.load_state_dict(opt_state)
return super().load_state_dict(state_dict)
def viz_reward(self, cur_obs=None, action=None, next_obs=None) -> torch.Tensor:
return self.reward(next_obs=next_obs)
def update(self, policy, rollouts, logger, **kwargs):
if self.should_update_reward:
obs, actions, next_obs, masks = extract_transition_batch(rollouts)
reward_samples = []
num_batches = len(rollouts) // self.batch_size
agent_data = rollouts.data_generator(num_batches, get_next_obs=True)
for expert_batch, agent_batch in zip(self.expert_data, agent_data):
ac_eval = policy.evaluate_actions(
agent_batch["obs"],
agent_batch["hxs"],
agent_batch["mask"],
agent_batch["action"],
)
reward_demos = self.reward(next_obs=expert_batch["next_observations"])
reward_samples = self.reward(next_obs=agent_batch["next_obs"])
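                # Guided Cost Learning IOC loss: -(E_demo[r] - log Z), where log Z is estimated
                # with importance sampling over agent samples as logsumexp(r - log q(a|s)) - log N.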
loss_IOC = -(
torch.mean(reward_demos)
- (
torch.logsumexp(
reward_samples - ac_eval["log_prob"],
dim=0,
keepdim=True,
)
- torch.log(torch.Tensor([len(reward_samples)]))
)
)
self.reward_opt.zero_grad()
loss_IOC.backward()
self.reward_opt.step()
logger.collect_info("irl_loss", loss_IOC.item())
with torch.no_grad():
_, _, next_obs, _ = extract_transition_batch(rollouts)
rollouts.rewards = self.reward(next_obs=next_obs)
self._ep_rewards = log_finished_rewards(rollouts, self._ep_rewards, logger)
self.policy_updater.update(policy, rollouts, logger)
| bc-irl-main | imitation_learning/gcl/updater.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as osp
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import seaborn as sns
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1 import make_axes_locatable
from rl_utils.common import Evaluator, group_trajectories
from rl_utils.envs.pointmass import PointMassObstacleEnv
class PMDistReward:
def __init__(self, slack, **kwargs):
self.slack = slack
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
prev_dist = torch.linalg.norm(prev_pos, dim=-1)
return ((prev_dist - cur_dist) - self.slack).view(-1, 1)
class PMDistActionPenReward:
def __init__(self, slack, action_pen, **kwargs):
self.slack = slack
self.action_pen = action_pen
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
prev_dist = torch.linalg.norm(prev_pos, dim=-1)
return (
(prev_dist - cur_dist)
- self.slack
- (self.action_pen * torch.linalg.norm(action, dim=-1))
).view(-1, 1)
class PMSparseReward:
def __init__(self, succ_dist, **kwargs):
self._succ_dist = succ_dist
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
reward = torch.full(cur_dist.shape, -0.1)
reward[cur_dist < self._succ_dist] = 1.0
return reward.view(-1, 1)
class PMSparseDenseReward:
def __init__(self, reward_thresh, slack, **kwargs):
self._reward_thresh = reward_thresh
self._slack = slack
def __call__(self, cur_pos, prev_pos, action):
cur_dist = torch.linalg.norm(cur_pos, dim=-1)
reward = torch.full(cur_dist.shape, -self._slack)
assign = -self._slack * cur_dist
should_give_reward = cur_dist < self._reward_thresh
reward[should_give_reward] = assign[should_give_reward]
return reward.view(-1, 1)
def viz_trajs(trajs: torch.Tensor, plt_lim, agent_point_size, fig, ax, with_arrows):
pal = sns.color_palette("rocket", as_cmap=True)
traj_len = trajs.size(1)
assert len(trajs.shape) == 3
assert trajs.shape[-1] == 2
scatter_len = 0.1
for i in range(trajs.size(1)):
ax.scatter(
trajs[:, i, 0],
trajs[:, i, 1],
color=[pal((i + 1) / traj_len) for _ in range(trajs.size(0))],
s=180,
# s=agent_point_size,
cmap=pal,
)
if with_arrows:
for i in range(trajs.size(0)):
traj_x = trajs[i, :, 0]
traj_y = trajs[i, :, 1]
for t in range(trajs.size(1) - 1):
offset = np.array(
[traj_x[t + 1] - traj_x[t], traj_y[t + 1] - traj_y[t]]
)
offset_dist = np.linalg.norm(offset)
point_offset = offset * (scatter_len / offset_dist)
if offset_dist < 0.05:
continue
ax.arrow(
x=traj_x[t] + point_offset[0],
y=traj_y[t] + point_offset[1],
dx=offset[0] - (2 * point_offset[0]),
dy=offset[1] - (2 * point_offset[1]),
length_includes_head=True,
width=0.04,
head_length=0.05,
# color=np.array([236, 240, 241, 200]) / 255.0,
color=np.array([44, 62, 80]) / 255.0,
# color=np.array([0, 0, 0, 255]) / 255.0,
)
ax.set_xlim(-plt_lim, plt_lim)
ax.set_ylim(-plt_lim, plt_lim)
def plot_obstacles(obstacle_transform, obstacle_len, obstacle_width):
points = torch.tensor(
[
[-obstacle_len, -obstacle_width, 1],
[-obstacle_len, obstacle_width, 1],
[obstacle_len, -obstacle_width, 1],
[obstacle_len, obstacle_width, 1],
]
)
    # Apply the homogeneous transform to each corner (transpose so the shapes line up) and
    # return the transformed corner points.
    obstacle_points = (obstacle_transform @ points.T).T
    return obstacle_points
class PointMassVisualizer(Evaluator):
def __init__(
self,
envs,
rnn_hxs_dim,
num_render,
vid_dir,
fps,
save_traj_name,
updater,
agent_point_size,
plt_lim,
num_demo_plot,
plt_density,
plot_il,
plot_expert: bool,
logger,
device,
with_arrows: bool = False,
is_final_render: bool = False,
**kwargs,
):
super().__init__(envs, rnn_hxs_dim, num_render, vid_dir, fps, save_traj_name)
self._agent_point_size = agent_point_size
self._plt_lim = plt_lim
self._plt_density = plt_density
self._plot_il = plot_il
self.logger = logger
self.device = device
self.is_final_render = is_final_render
self.with_arrows = with_arrows
if plot_il and plot_expert:
dones = updater.dataset.get_data("terminals")
grouped_trajs = group_trajectories(dones, **updater.dataset.all_data)
obs_trajs = (
torch.stack([traj["observations"] for traj in grouped_trajs], dim=0)
.detach()
.cpu()
)
add_str = ""
if num_demo_plot < obs_trajs.size(0):
plot_idxs = torch.randint(high=len(obs_trajs), size=(num_demo_plot,))
obs_trajs = obs_trajs[plot_idxs]
add_str = f" (Subsampled {num_demo_plot})"
fig, ax = plt.subplots(figsize=(4, 4))
viz_trajs(
obs_trajs,
plt_lim,
agent_point_size,
fig,
ax,
self.is_final_render,
)
self.plot_obstacle(ax)
ax.set_title(f"Expert Demos{add_str}")
self.save("demos", fig)
plt.clf()
self._updater = updater
def save(self, name, fig):
if self.is_final_render:
full_path = osp.join(self._vid_dir, f"{name}.pdf")
print(f"Saved to {full_path}")
fig.savefig(full_path, bbox_inches="tight", dpi=100)
else:
full_path = osp.join(self._vid_dir, f"{name}.png")
fig.savefig(full_path)
return full_path
def plot_obstacle(self, ax):
if isinstance(self._envs, PointMassObstacleEnv):
for obs, (obs_T, w, l) in zip(
self._envs._params.square_obstacles, self._envs._square_obs_T
):
origin = torch.tensor([-(w / 2), -(l / 2), 1])
origin = obs_T @ origin
color = "orangered"
rect = Rectangle(
origin[:2],
w,
l,
angle=obs.rot_deg,
linewidth=2,
edgecolor=color,
facecolor=color,
)
ax.add_patch(rect)
def plot_reward(self, reward_fn, fig, ax):
with torch.no_grad():
coords = torch.stack(
torch.meshgrid(
torch.linspace(-self._plt_lim, self._plt_lim, self._plt_density),
torch.linspace(-self._plt_lim, self._plt_lim, self._plt_density),
indexing="ij",
),
-1,
).to(self.device)
reward_vals = reward_fn(cur_obs=coords, next_obs=coords).cpu()
max_reward_idx = reward_vals.argmax()
flat_coords = coords.view(-1, 2)
# print("Reward Max XY Coordinate", flat_coords[max_reward_idx])
im_fig = ax.imshow(
reward_vals,
extent=[
-self._plt_lim,
self._plt_lim,
-self._plt_lim,
self._plt_lim,
],
origin="lower",
)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
self.plot_obstacle(ax)
def fmt(x, pos):
return str(x).ljust(5)
fig.colorbar(
im_fig,
cax=cax,
orientation="vertical",
format=ticker.FuncFormatter(fmt),
)
def evaluate(self, policy, num_episodes, eval_i):
fig, ax = plt.subplots(figsize=(4, 4))
if self.is_final_render:
ax.axis("off")
if self._plot_il:
self.plot_reward(
self._updater.viz_reward,
fig,
ax,
)
self.save(f"reward_{eval_i}", fig)
# Intentionally don't clear plot so the evaluation rollouts are
# overlaid on reward.
eval_result = super().evaluate(policy, num_episodes, eval_i)
if len(self.eval_trajs_dones):
grouped_trajs = group_trajectories(
torch.stack(self.eval_trajs_dones, dim=0),
obs=torch.stack(self.eval_trajs_obs, dim=0),
)
obs_trajs = (
torch.stack([traj["obs"] for traj in grouped_trajs], dim=0)
.detach()
.cpu()
)
viz_trajs(
obs_trajs,
self._plt_lim,
self._agent_point_size,
fig,
ax,
self.with_arrows,
)
if not self._plot_il:
self.plot_obstacle(ax)
if not self.is_final_render:
ax.set_title(
f"Evaluation Rollouts (Update {eval_i}, Dist {eval_result['dist_to_goal']:.4f}) "
)
eval_rollouts_path = self.save(f"eval_rollouts_{eval_i}", fig)
plt.clf()
if not self.is_final_render:
self.logger.collect_img("reward", eval_rollouts_path)
return eval_result
| bc-irl-main | imitation_learning/common/pointmass_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
def plot_actions(pred_actions, gt_actions, n_updates, save_dir):
assert pred_actions.shape == gt_actions.shape
action_len, action_dim = pred_actions.shape
for action_dim_i in range(action_dim):
plt.scatter(
np.arange(action_len), pred_actions[:, action_dim_i], label="Predicted"
)
plt.scatter(np.arange(action_len), gt_actions[:, action_dim_i], label="Expert")
plt.legend()
plt.title(f"Action Update Batch Dim {action_dim_i} @ {n_updates}")
plt.savefig(osp.join(save_dir, f"actions_{n_updates}_{action_dim_i}.png"))
plt.clf()
| bc-irl-main | imitation_learning/common/plotting.py |
from enum import Enum, auto
import torch
import torch.nn as nn
from rl_utils.common import make_mlp_layers
class RewardInputType(Enum):
ACTION = auto()
NEXT_STATE = auto()
CUR_NEXT_STATE = auto()
class NeuralReward(nn.Module):
def __init__(
self,
obs_shape,
action_dim,
reward_hidden_dim,
n_hidden_layers,
cost_take_dim=-1,
include_tanh=False,
reward_type=None,
clamp_max=None,
):
super().__init__()
if reward_type is None:
self.reward_type = RewardInputType.NEXT_STATE
else:
self.reward_type = RewardInputType[reward_type]
self.cost_take_dim = cost_take_dim
obs_size = obs_shape[0] if cost_take_dim == -1 else cost_take_dim
if self.reward_type == RewardInputType.ACTION:
input_size = obs_size + action_dim
elif self.reward_type == RewardInputType.NEXT_STATE:
input_size = obs_size
elif self.reward_type == RewardInputType.CUR_NEXT_STATE:
input_size = obs_size + obs_size
net_layers = make_mlp_layers(input_size, 1, reward_hidden_dim, n_hidden_layers)
if include_tanh:
net_layers.append(nn.Tanh())
self.net = nn.Sequential(*net_layers)
self.clamp_max = clamp_max
def forward(self, cur_obs=None, actions=None, next_obs=None):
if self.cost_take_dim != -1:
if cur_obs is not None:
cur_obs = cur_obs[:, :, : self.cost_take_dim]
if next_obs is not None:
next_obs = next_obs[:, :, : self.cost_take_dim]
if self.reward_type == RewardInputType.ACTION:
inputs = [cur_obs, actions]
elif self.reward_type == RewardInputType.NEXT_STATE:
inputs = [next_obs]
elif self.reward_type == RewardInputType.CUR_NEXT_STATE:
inputs = [cur_obs, next_obs]
else:
raise ValueError()
inputs = torch.cat(inputs, dim=-1)
ret = self.net(inputs)
if self.clamp_max is not None:
ret = torch.clamp(ret, min=-self.clamp_max, max=self.clamp_max)
return ret
| bc-irl-main | imitation_learning/common/net.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| bc-irl-main | imitation_learning/common/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Tuple
import torch
from rl_utils.common import DictDataset
def log_finished_rewards(
rollouts,
rolling_ep_rewards: torch.Tensor,
logger,
) -> torch.Tensor:
"""
:param rolling_ep_rewards: tensor of shape (num_envs,)
"""
num_steps, num_envs = rollouts.rewards.shape[:2]
done_episodes_rewards = []
for env_i in range(num_envs):
for step_i in range(num_steps):
rolling_ep_rewards[env_i] += rollouts.rewards[step_i, env_i].item()
if rollouts.masks[step_i + 1, env_i].item() == 0.0:
done_episodes_rewards.append(rolling_ep_rewards[env_i].item())
rolling_ep_rewards[env_i] = 0
logger.collect_info_list("inferred_episode_reward", done_episodes_rewards)
return rolling_ep_rewards
def extract_transition_batch(rollouts):
obs = next(iter(rollouts.obs.values()))
cur_obs = obs[:-1]
masks = rollouts.masks[1:]
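    # Where an episode ended (mask == 0), substitute the stored terminal observation so
    # next_obs holds the true final state rather than the post-reset observation.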
next_obs = (masks * obs[1:]) + ((1 - masks) * rollouts.final_obs)
actions = rollouts.actions
return cur_obs, actions, next_obs, masks
def create_next_obs(dataset: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
obs = dataset["observations"].detach()
final_final_obs = dataset["infos"][-1]["final_obs"]
next_obs = torch.cat([obs[1:], final_final_obs.unsqueeze(0)], 0)
num_eps = 1
for i in range(obs.shape[0] - 1):
cur_info = dataset["infos"][i]
if "final_obs" in cur_info:
num_eps += 1
next_obs[i] = cur_info["final_obs"].detach()
if num_eps != dataset["terminals"].sum():
raise ValueError(
f"Inconsistency in # of episodes {num_eps} vs {dataset['terminals'].sum()}"
)
dataset["next_observations"] = next_obs.detach()
return dataset
def get_dataset_data(dataset_path: str, env_name: str):
return create_next_obs(torch.load(dataset_path))
def get_transition_dataset(dataset_path: str, env_name: str):
dataset = get_dataset_data(dataset_path, env_name)
return DictDataset(
dataset,
[
"observations",
"actions",
"rewards",
"terminals",
"next_observations",
],
)
| bc-irl-main | imitation_learning/common/utils.py |
from setuptools import setup, find_packages
setup(
name='treedlib',
version='0.1.3',
description='Library of tree features.',
packages=find_packages(),
install_requires=[
'lxml',
],
classifiers=[
"License :: OSI Approved :: MIT License",
],
url='https://github.com/HazyResearch/treedlib',
author='Hazy Research',
)
| treedlib-master | setup.py |
from feature_template import *
from itertools import chain
def get_mention_templates(cid, d):
"""
Generate the DDLib mention features as per
http://deepdive.stanford.edu/doc/basics/gen_feats.html
"""
return [
# The set of POS tags comprising the mention
Indicator(Mention(cid), 'pos'),
# The set of NER tags comprising the mention
Indicator(Mention(cid), 'ner'),
# The set of lemmas comprising the mention
Indicator(Mention(cid), 'lemma'),
# The set of words comprising the mention
Indicator(Mention(cid), 'word'),
# TODO: Sum of the lengths of the words comprising the mention
# Whether the first word in the mention starts with a capital letter
RgxIndicator('^[A-Z].*$', 'word', 'STARTS_WITH_CAPITAL', Mention(cid)),
# The lemma n-grams in a window of size 3 *of siblings* to the left and right of the mention
# *Note* that this differs from ddlib in only considering sibling nodes
# (which is the same if the tree is just a chain in original sequence order...)
Indicator(Left(Mention(cid)), 'lemma'),
Indicator(Right(Mention(cid)), 'lemma'),
# Indicator feature of whether a keyword is part of the mention
Indicator(Keyword(d, Mention(cid)), 'word'),
# Indicator features of whether keywords appear in sentence
Indicator(Keyword(d), 'word'),
# Shortest dependency path between mention and keyword
Indicator(Between(Mention(cid), Keyword(d)), 'lemma'),
Indicator(Between(Mention(cid), Keyword(d)), 'dep_label')]
def get_mention_features(cid, d, root):
return chain.from_iterable(t.apply(root) for t in get_mention_templates(cid, d))
def get_relation_templates(cid1, cid2, d):
"""
Generate the DDLib relation features as per
http://deepdive.stanford.edu/doc/basics/gen_feats.html
"""
return [
# The set of POS tags comprising the mention
Indicator(Between(Mention(cid1), Mention(cid2)), 'pos'),
# The set of NER tags comprising the mention
Indicator(Between(Mention(cid1), Mention(cid2)), 'ner'),
# The set of lemmas comprising the mention
Indicator(Between(Mention(cid1), Mention(cid2)), 'lemma'),
# The set of words comprising the mention
Indicator(Between(Mention(cid1), Mention(cid2)), 'word'),
# TODO: Sum of the lengths of the words comprising the mentions
# Whether the first word in the mentions starts with a capital letter
RgxIndicator('^[A-Z].*$', 'word', 'STARTS_WITH_CAPITAL_1', Mention(cid1)),
RgxIndicator('^[A-Z].*$', 'word', 'STARTS_WITH_CAPITAL_2', Mention(cid2)),
# The n-grams of up to size 3 of the lemmas and ners of the nodes in between the mentions
Indicator(Between(Mention(cid1), Mention(cid2), 3), 'lemma'),
Indicator(Between(Mention(cid1), Mention(cid2), 3), 'ner'),
# The lemma and ner n-grams in a window of size 3 *of siblings* to the left and right
# of the mention
# *Note* that this differs from ddlib in only considering sibling nodes
# (which is the same if the tree is just a chain in original sequence order...)
Indicator(Left(Mention(cid1)), 'lemma'),
Indicator(Left(Mention(cid2)), 'lemma'),
Indicator(Right(Mention(cid1)), 'lemma'),
Indicator(Right(Mention(cid2)), 'lemma'),
Indicator(Left(Mention(cid1)), 'ner'),
Indicator(Left(Mention(cid2)), 'ner'),
Indicator(Right(Mention(cid1)), 'ner'),
Indicator(Right(Mention(cid2)), 'ner'),
# Indicator feature of whether a keyword is part of the mention
Indicator(Keyword(d, Mention(cid1)), 'word'),
Indicator(Keyword(d, Mention(cid2)), 'word'),
# Indicator features of whether keywords appear in sentence
Indicator(Keyword(d), 'word'),
# Shortest dependency path between mention and keyword
Indicator(Between(Mention(cid1), Keyword(d)), 'lemma'),
Indicator(Between(Mention(cid1), Keyword(d)), 'dep_label'),
Indicator(Between(Mention(cid2), Keyword(d)), 'lemma'),
Indicator(Between(Mention(cid2), Keyword(d)), 'dep_label')]
def get_relation_features(cid1, cid2, d, root):
return chain.from_iterable(t.apply(root) for t in get_relation_templates(cid1, cid2, d))
| treedlib-master | archive/basic_features.py |
from itertools import chain
import re
# op_0 : X -> p(T)
# op_1 : p(T) -> p(T)
# ind : p(T) -> {0,1}^F
class FeatureTemplate:
"""Base feature template class"""
def __init__(self):
self.label = None
self.xpaths = set(['//node'])
self.subsets = None # subtrees / p(T)
def apply(self, root):
"""
Applies the feature template to the XML tree (the root of which is provided as input)
Generates string feature representations
Relies on a generator subfunction, _get_result_sets, which returns result sets of nodes,
then optionally gets subsets (e.g. for n-grams)
"""
for res in self._get_result_sets(root):
if len(res) > 0:
res_sets = [res] if self.subsets is None else subsets(res, self.subsets)
for res_set in res_sets:
yield self._feat_str(res_set)
def _get_result_sets(self, root):
"""Default result set generator- just applies each xpath to the root node"""
for xpath in self.xpaths:
yield root.xpath(xpath)
def _feat_str(self, res_set):
return '%s[%s]' % (self.label, '_'.join(map(str, res_set)))
def apply_and_print(self, root):
"""Helper function to apply and then print the features one per line"""
for f in self.apply(root):
print(f)
def __repr__(self):
return "<%s, XPaths='%s', subsets=%s>" % (self.label, self.xpaths, self.subsets)
def subsets(x, L):
"""Return all subsets of length 1, 2, ..., min(l, len(x)) from x"""
return chain.from_iterable([x[s:s+l+1] for s in range(len(x)-l)] for l in range(min(len(x),L)))
class Mention(FeatureTemplate):
"""The feature comprising the set of nodes making up the mention"""
def __init__(self, cid, subsets=None):
self.label = 'MENTION'
self.xpaths = set(["//node[@cid='%s']" % str(cid)])
self.subsets = subsets
class Indicator(FeatureTemplate):
"""
Outermost indicator feature, which just maps a specific attribute onto the inputted
feature template and uses that feature template's apply function
"""
def __init__(self, f, attrib):
self.f = f
self.label = '%s-%s' % (attrib.upper(), f.label)
self.xpaths = set('%s/@%s' % (xpath, attrib) for xpath in f.xpaths)
self.subsets = f.subsets
def apply(self, root):
self.f.label = self.label
self.f.xpaths = self.xpaths
return self.f.apply(root)
class Left(FeatureTemplate):
"""
The feature comprising the set of *sibling* nodes to the left of the input feature's nodes
"""
def __init__(self, f, subsets=3):
self.label = 'LEFT-OF-%s' % f.label
self.xpaths = set(xpath + '/preceding-sibling::node' for xpath in f.xpaths)
self.subsets = subsets
def _get_result_sets(self, root):
"""Only take the N nodes to the left, where N is = self.subsets"""
for xpath in self.xpaths:
yield root.xpath(xpath)[::-1][:self.subsets][::-1]
class Right(FeatureTemplate):
"""
The feature comprising the set of *sibling* nodes to the right of the input feature's nodes
"""
def __init__(self, f, subsets=3):
self.label = 'RIGHT-OF-%s' % f.label
self.xpaths = set(xpath + '/following-sibling::node' for xpath in f.xpaths)
self.subsets = subsets
def _get_result_sets(self, root):
"""Only take the N nodes to the right, where N is = self.subsets"""
for xpath in self.xpaths:
yield root.xpath(xpath)[:self.subsets]
class Between(FeatureTemplate):
"""
The set of nodes *between* two node sets
"""
def __init__(self, f1, f2, subsets=None):
self.label = 'BETWEEN-%s-%s' % (f1.label, f2.label)
self.xpaths = set(['/ancestor-or-self::node'])
self.xpaths1 = f1.xpaths
self.xpaths2 = f2.xpaths
self.subsets = subsets
def _get_result_sets(self, root):
"""
Get the path between the two node sets by getting the lowest shared parent,
then concatenating the two ancestor paths at this shared parent
"""
for xpath in self.xpaths:
for xpath1 in self.xpaths1:
for xpath2 in self.xpaths2:
p1 = root.xpath(xpath1 + xpath)
p2 = root.xpath(xpath2 + xpath)
shared = set(p1).intersection(p2)
b1 = []
b2 = []
for node in reversed(p1):
b1.append(node)
if node in shared: break
for node in reversed(p2):
if node in shared: break
b2.append(node)
# Return only the path *between*, filtering out the self nodes
# NOTE: This is different for node vs. edge attributes...
res = b1 + b2[::-1]
if 'dep_label' in xpath:
yield res
else:
yield list(filter(lambda n : (len(p1)==0 or n!=p1[-1]) and (len(p2)==0 or n!=p2[-1]), res))
# TODO: Only handles single-word keywords right now!!!
class Keyword(FeatureTemplate):
"""Searches for matches against a dictionary (list) of keywords"""
def __init__(self, kws, f=None):
self.label = 'KEYWORD' if f is None else 'KEYWORD-IN-%s' % f.label
xs = ['//node'] if f is None else f.xpaths
self.xpaths = set(chain.from_iterable(["%s[@word='%s']" % (x,w) for w in kws] for x in xs))
self.subsets = None
# Should these classes be semantically separated, into NodeSet and FeatureIndicator,
# Where some composition of FeatureIndicator classes are applied to some composition of
# NodeSet classes...?
# TODO: Handle multi-word properly!! Or just scrap this, do rgx in python...
class RgxIndicator(FeatureTemplate):
"""Indicator of whether a regex matches the node set"""
def __init__(self, rgx, attrib, label, f):
self.label = label
self.xpaths = set("%s/@%s[re:test(., '%s')]" % (xpath, attrib, rgx) for xpath in f.xpaths)
self.subsets = None
def apply(self, root):
for xpath in self.xpaths:
if len(root.xpath(xpath, namespaces={'re':"http://exslt.org/regular-expressions"})) > 0:
yield self.label.upper()
| treedlib-master | archive/feature_template.py |
from collections import namedtuple
import re
import sys
def print_gen(gen):
"""Print the results of a generator one-per-line"""
for e in gen:
print(e)
def print_error(err_string):
"""Function to write to stderr"""
sys.stderr.write("ERROR[UDF]: " + str(err_string) + "\n")
BOOL_PARSER = {
't' : True,
'f' : False,
'NULL' : None,
'\\N' : None
}
TYPE_PARSERS = {
'text' : lambda x : str(x.replace('\n', ' ')),
'int' : lambda x : int(x.strip()),
'float' : lambda x : float(x.strip()),
    'boolean' : lambda x : BOOL_PARSER[x.lower().strip()]
}
def parse_ptsv_element(s, t, sep='|^|', sep2='|~|'):
"""
    Parse an element in psql-compatible TSV format, handling Postgres '{...}'-style arrays,
    based on the provided type and the type-parser dictionary
"""
# Interpret null first regardless of type
if len(s) == 0 or s == '\\N':
return None
# Handle lists recursively first
elif '[]' in t:
if re.search(r'^\{|\}$', s):
split = re.split(r'\"?\s*,\s*\"?', re.sub(r'^\{\s*\"?|\"?\s*\}$', '', s))
else:
split = s.split(sep)
return [parse_ptsv_element(ss, t[:-2], sep=sep2) for ss in split]
# Else parse using parser
else:
try:
parser = TYPE_PARSERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
return parser(s)
class Row:
def __str__(self):
return '<Row(' + ', '.join("%s=%s" % x for x in self.__dict__.items()) + ')>'
def __repr__(self):
return str(self)
def _asdict(self):
return self.__dict__
class PTSVParser:
"""
Initialized with a list of duples (field_name, field_type)
Is a factory for simple Row class
Parsed from Postgres-style TSV input lines
"""
def __init__(self, fields):
self.fields = fields
self.n = len(fields)
def parse_line(self, line):
row = Row()
attribs = line.rstrip().split('\t')
if len(attribs) != self.n:
raise ValueError("%s attributes for %s fields:\n%s" % (len(attribs), self.n, line))
for i,attrib in enumerate(attribs):
field_name, field_type = self.fields[i]
setattr(row, field_name, parse_ptsv_element(attrib, field_type))
return row
def parse_stdin(self):
for line in sys.stdin:
yield self.parse_line(line)
def pg_array_escape(tok):
"""
Escape a string that's meant to be in a Postgres array.
We double-quote the string and escape backslashes and double-quotes.
"""
return '"%s"' % str(tok).replace('\\', '\\\\').replace('"', '\\\\"')
def list_to_pg_array(l):
"""Convert a list to a string that PostgreSQL's COPY FROM understands."""
return '{%s}' % ','.join(pg_array_escape(x) for x in l)
def print_tsv(out_record):
"""Print a tuple as output of TSV extractor."""
values = []
for x in out_record:
if isinstance(x, list) or isinstance(x, tuple):
cur_val = list_to_pg_array(x)
elif x is None:
cur_val = r'\N'
else:
cur_val = x
values.append(cur_val)
print('\t'.join(str(x) for x in values))
| treedlib-master | treedlib/util.py |
import os
# Set TREEDLIB_APP env var, for use in libs we load
os.environ["TREEDLIB_LIB"] = os.path.dirname(os.path.realpath(__file__))
# Load treedlib libs
from treedlib.util import *
from treedlib.structs import *
from treedlib.templates import *
from treedlib.features import *
| treedlib-master | treedlib/__init__.py |
from treedlib.templates import *
import lxml.etree as et
def compile_relation_feature_generator(dictionaries=None, opts={}, is_multary=False):
"""
Given optional arguments, returns a generator function which accepts an xml root
and two lists of mention indexes, and will generate relation features for this relation
Optional args are:
* dictionaries: should be a dictionary of lists of phrases, where the key is the dict name
* opts: see defaults above
* is_multary: whether to use multiple mentions or binary mentions
"""
# TODO: put globals into opts
#BASIC_ATTRIBS_REL = ['word', 'lemma', 'pos', 'ner', 'dep_label']
BASIC_ATTRIBS_REL = ['lemma', 'dep_label']
m0 = Mention(0)
m1 = Mention(1)
btwn = Between(m0, m1)
dl = LengthBin(btwn, [3,4,6])
sl = LengthBin(SeqBetween(), [5,8,14])
# Basic relation feature templates
templates = [
# The full dependency path between
#[Indicator(btwn, a) for a in BASIC_ATTRIBS_REL],
#Indicator(btwn, 'dep_label,lemma'),
# The *first element on the* path to the root: ngram lemmas along it
Ngrams(Parents(btwn, 3), 'lemma', (1,3)),
# The ngrams between
#[Combinations(dl, Ngrams(btwn, a, (2,3))) for a in BASIC_ATTRIBS_REL],
#Combinations(dl, Ngrams(btwn, 'dep_label,lemma', (2,3))),
[Ngrams(btwn, a, (1,3)) for a in BASIC_ATTRIBS_REL],
Ngrams(btwn, 'dep_label,lemma', (1,3)),
# The VBs and NNs between
#[Combinations(dl, Ngrams(Filter(btwn, 'pos', p), 'lemma', (1,3))) for p in ['VB', 'NN']],
[Ngrams(Filter(btwn, 'pos', p), 'lemma', (1,3)) for p in ['VB', 'NN']],
# The ngrams on the seq between
Ngrams(SeqBetween(), 'lemma', (1,3)),
# The siblings of each mention
LeftNgrams(LeftSiblings(m0), 'lemma'),
LeftNgrams(LeftSiblings(m1), 'lemma'),
RightNgrams(RightSiblings(m0), 'lemma'),
RightNgrams(RightSiblings(m1), 'lemma'),
# The ngrams on the *word sequence* between
#Combinations(sl, Ngrams(SeqBetween(), 'lemma', (1,3))),
#Combinations(sl, Ngrams(Filter(SeqBetween(), 'pos', 'VB'), 'lemma', (1,2))),
# The length bin features
sl,
dl
]
# Add dictionary features
if dictionaries:
for d_name, d in dictionaries.items():
templates.append(DictionaryIntersect(btwn, d_name, d))
templates.append(DictionaryIntersect(SeqBetween(), d_name, d))
# return generator function
if is_multary:
return Compile(templates).apply_multary_relation
return Compile(templates).apply_relation
"""
For calibrating the bin sizes
"""
get_relation_binning_features = Compile([
Indicator(Between(Mention(0), Mention(1)), 'word'),
Indicator(SeqBetween(), 'word')
]).apply_relation
| treedlib-master | treedlib/features.py |
from itertools import chain
import re
import lxml.etree as et
from collections import defaultdict
# NODESET:
# ===========
class NodeSet:
"""
NodeSet objects are functions f : 2^T -> 2^T
---------------
They are applied compositionally and lazily, by constructing an xpath query
We use these to get the *subtree* or set of nodes that our indicicator features will
operate over
"""
def __init__(self, label='NODESET', xpath='//*', psort=None):
self.label = label
self.xpath = xpath
self.psort = psort # Attribute to sort on post-xpath execution
def __repr__(self):
return '<%s, xpath="%s">' % (self.label, self.xpath)
class Mention(NodeSet):
"""Gets candidate mention nodes"""
def __init__(self, cid=0):
self.label = 'MENTION'
self.xpath = "//*[{%s}]" % str(cid)
class LeftSiblings(NodeSet):
"""Gets preceding siblings"""
def __init__(self, ns, w=1):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'LEFT-OF-%s' % ns.label
self.xpath = '%s[1]/preceding-sibling::*[position() <= %s]' % (ns.xpath, w)
class RightSiblings(NodeSet):
"""Gets following siblings"""
def __init__(self, ns, w=1):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'RIGHT-OF-%s' % ns.label
self.xpath = '%s[1]/following-sibling::*[position() <= %s]' % (ns.xpath, w)
# TODO: These should be "Descendants" / "Ancestors"...
class Children(NodeSet):
"""Gets children of the node set"""
def __init__(self, ns):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'CHILDREN-OF-%s' % ns.label
self.xpath = ns.xpath + '[1]/*'
class Parents(NodeSet):
"""Gets parents of the node set"""
def __init__(self, ns, num_parents=1):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'PARENTS-OF-%s' % ns.label
self.xpath = ns.xpath + '[1]/ancestor::*[position()<%s]' % (num_parents + 1)
class Between(NodeSet):
"""
Gets the nodes between two node sets
Note: this requires some ugly xpath... could change this to non-xpath method
"""
def __init__(self, ns1, ns2):
self.__dict__.update(ns1.__dict__) # inherit *FIRST* child object's attributes
self.label = 'BETWEEN-%s-and-%s' % (ns1.label, ns2.label)
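        # The xpath below first finds the lowest common ancestor of the two node sets
        # (via the intersection-by-count trick on the ancestor-or-self axes), then selects
        # its descendants that lie on the path down to either node set.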
self.xpath = "{0}[1]/ancestor-or-self::*[count(. | {1}[1]/ancestor-or-self::*) = count({1}[1]/ancestor-or-self::*)][1]/descendant-or-self::*[((count(.{0}) = count({0})) or (count(.{1}) = count({1})))]".format(ns1.xpath, ns2.xpath)
class SeqBetween(NodeSet):
"""
Gets the sequence of nodes in between, according to *sentence* (not dep tree) order
"""
def __init__(self, seq_attrib='word_idx'):
# TODO: Extend to take in pair of NodeSets?
self.xpath = '//*'
self.label = 'SEQ-BETWEEN'
self.seq_attrib = seq_attrib # Logic gets pushed to Indicator...
self.psort = seq_attrib # Specify that post-xpath sorting needs to be done
class Filter(NodeSet):
"""
Gets a subset of the nodes filtered by some node attribute
Note the option to do exact match or starts with (could be expanded; useful for POS now...)
"""
def __init__(self, ns, filter_attr, filter_by, starts_with=True):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'FILTER-BY(%s=%s):%s' % (filter_attr, filter_by, ns.label)
temp = "[starts-with(@%s, '%s')]" if starts_with else "[@%s='%s']"
self.xpath = ns.xpath + temp % (filter_attr, filter_by)
# INDICATOR:
# ===========
def compile_dict_sub(brown_clusters_path=None, user_dicts=[]):
"""
Takes in a list of tuples of form (DICT_LABEL, set_of_words)
AND/OR a file path to a tsv file list of (word, brown cluster id) lines
And returns a single dictionary mapping from word -> DICT_LABEL, based on priority ordering
Assume user dicts take priority over brown clusters...
"""
dict_sub = {}
    # User dictionaries
for dict_label, words in user_dicts:
for word in words:
if word not in dict_sub:
dict_sub[word] = dict_label
# Brown clusters
if brown_clusters_path is not None:
with open(brown_clusters_path, 'rb') as f:
for line in f:
word, cluster_id = line.rstrip().split('\t')
dict_sub[word] = 'BC-%s' % cluster_id
return dict_sub
class Indicator:
"""
Indicator objects are functions f : 2^T -> {0,1}^F
---------------
    Indicator objects take a NodeSet, an attribute or attributes, and apply some indicator
function to the specified attributes of the NodeSet
"""
def __init__(self, ns, attribs):
self.ns = ns
self.attribs = attribs
def apply(self, root, cids, cid_attrib='word_idx', feat_label=True, inv_tag=True, stopwords=None, dict_sub={}):
"""
Apply the feature template to the xml tree provided
        A list of lists of candidate mention ids is passed in, as well as a cid_attrib
        These identify the candidate mentions referred to by index in Mention
For example, cids=[[1,2]], cid_attrib='word_idx' will have mention 0 as the set of nodes
that have word index 1 and 2
"""
# Sub in the candidate mention identifiers provided
m = [" or ".join("@%s='%s'" % (cid_attrib, c) for c in cid) for cid in cids]
xpath = self.ns.xpath.format(*m)
# INV tag if binary relation
inv = 'INV_' if inv_tag and len(cids) == 2 and cids[0][0] > cids[1][0] else ''
# Get nodes
nodes = root.xpath(xpath)
# Filter stopwords
if stopwords is not None and len(stopwords) > 0:
nodes = list(filter(lambda n : n.get('word') not in stopwords and n.get('lemma') not in stopwords, nodes))
# Perform seq filter here
if hasattr(self.ns, 'seq_attrib') and self.ns.seq_attrib is not None:
seqa = self.ns.seq_attrib
b = (cids[0][-1], cids[-1][0]) if cids[0][-1] < cids[-1][0] else (cids[-1][-1], cids[0][0])
nodes = list(filter(lambda n : n.get(seqa) is not None and int(n.get(seqa)) > b[0] and int(n.get(seqa)) < b[1], nodes))
# If sort specified, perform here
if hasattr(self.ns, 'psort') and self.ns.psort is not None:
nodes.sort(key=lambda n : int(n.get(self.ns.psort)))
# Specifically handle single attrib or multiple attribs per node here
try:
attribs = re.split(r'\s*,\s*', self.attribs)
res = ['|'.join(str(node.get(a)) for a in attribs) for node in nodes]
label = '%s%s:%s' % (inv, '|'.join(attribs).upper(), self.ns.label)
# Check each result value against a dictionary which maps string -> DICT_NAME,
# and replace with the value "DICT_NAME"
# NOTE: Only apply to word/lemma indicators for now
if len(attribs) == 1 and attribs[0] in ('word', 'lemma') and len(dict_sub) > 0:
res = [dict_sub.get(a, a) for a in res]
except AttributeError:
res = nodes
label = '%s%s' % (inv, self.ns.label)
# Only yield if non-zero result set; process through _get_features fn
if len(res) > 0:
for feat in self._get_features(res):
if feat_label:
yield '%s[%s]' % (label, feat)
else:
yield feat
def _get_features(self, res):
"""
Given a result set of attribute values, return a set of strings representing the features
This should be the default method to replace for Indicator objects
"""
return [' '.join(res)]
def print_apply(self, root, cids, cid_attrib='word_idx', feat_label=True, dict_sub={}, stopwords=None):
for feat in self.apply(root, cids, cid_attrib, feat_label=feat_label, dict_sub=dict_sub, stopwords=stopwords):
print(feat)
def result_set(self, root, cids, cid_attrib='word_idx', feat_label=False, dict_sub={}, stopwords=None):
"""Get results as a set- mostly for use in DSR applications"""
res = set()
for feat in self.apply(root, cids, cid_attrib=cid_attrib, feat_label=feat_label, dict_sub=dict_sub, stopwords=stopwords):
res.add(feat)
return res
def __repr__(self):
return '<%s:%s:%s, xpath="%s">' % (self.__class__.__name__, self.attribs, self.ns.label, self.ns.xpath)
class Ngrams(Indicator):
"""
Return indicator features over the ngrams of a result set
If ng arg is an int, will get ngrams of *exactly* this length
If ng arg is a list/tuple, will get all ngrams of this range, *inclusive*
"""
def __init__(self, ns, attribs, ng):
self.ns = ns
self.attribs = attribs
if (type(ng) == int and ng > 0) or (type(ng) in [list, tuple] and ng[0] > 0):
self.ng = ng
else:
raise ValueError("Improper ngram range: %s" % ng)
def _get_features(self, res):
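        # e.g. with ng=(1, 2) and res=['a', 'b', 'c'] this yields the 1- and 2-grams:
        # 'a', 'b', 'c', 'a b', 'b c'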
if type(self.ng) == int:
r = [self.ng - 1]
else:
r = range(self.ng[0] - 1, min(len(res), self.ng[1]))
return chain.from_iterable([' '.join(res[s:s+l+1]) for s in range(len(res)-l)] for l in r)
class RightNgrams(Indicator):
"""Return all the ngrams which start at position 0"""
def _get_features(self, res):
return [' '.join(res[:l]) for l in range(1, len(res)+1)]
class LeftNgrams(Indicator):
"""Return all the ngrams which start at position 0"""
def _get_features(self, res):
return [' '.join(res[l:]) for l in range(len(res))]
class Regexp(Indicator):
"""
Return indicator features if the regular expression applied to the
concatenation of the result set strings is not None
"""
def __init__(self, ns, attribs, rgx, rgx_label, sep=' '):
self.ns = ns
self.attribs = attribs
self.rgx = rgx
self.rgx_label = rgx_label
self.sep = sep
self.psort = 'word_idx' # Sort by word order...
def _get_features(self, res):
match = re.search(self.rgx, self.sep.join(res))
if match is not None:
yield 'RGX:%s' % self.rgx_label
class LengthBin(Indicator):
"""
Return indicator features for the length (size) of the node set
binned according to provided values
bins should be a list of INTS
"""
def __init__(self, ns, bin_divs):
self.ns = ns
self.bins = []
for i,d in enumerate(bin_divs):
if i == 0:
self.bins.append((0,d-1))
else:
self.bins.append((bin_divs[i-1],d-1))
def _get_features(self, res):
lbin = None
l = len(res)
for b in self.bins:
if l >= b[0] and l <= b[1]:
lbin = b
break
if lbin is None:
lbin = (self.bins[-1][1]+1, 'inf')
yield 'LEN:%s-%s' % lbin
# TODO: Make this way more efficient...?
class DictionaryIntersect(Indicator):
"""
Return an indicator feature for whether the input nodeset intersects with any phrase in
the given dictionary
"""
def __init__(self, ns, d_name, d, d_attrib='word', caseless=True):
self.ns = ns
self.d_name = d_name
self.d_attrib = d_attrib
self.caseless = caseless
# Split the dictionary up by phrase length (i.e. # of tokens)
self.dl = defaultdict(lambda : set())
for phrase in d:
if caseless:
phrase = phrase.lower()
self.dl[len(phrase.split())].add(phrase)
self.dl.update((k, frozenset(v)) for k,v in self.dl.items())
# Get the ngram range for this dictionary
self.ng_range = range(max(1, min(self.dl.keys())), max(self.dl.keys())+1)
def apply(self, root, cids, cid_attrib='word_idx', feat_label=True):
"""
We replace the default apply method because we first need to get the full sequence,
match against ngrams of this, then match via cid_attrib against the input NodeSet.
We do this because we need to catch e.g. when a multi-word phrase in the dictionary
only partially-overlaps with the NodeSet (this should count as a match!)
"""
# First get full sequence
fs = list(map(lambda x : x.get(self.d_attrib), sorted(root.xpath("//*[@word_idx]"), key=lambda x : int(x.get('word_idx')))))
# Next do sequence n-gram matching
dcids = set()
for l in self.ng_range:
for i in range(0, len(fs)-l+1):
phrase = ' '.join(fs[i:i+l]).lower() if self.caseless else ' '.join(fs[i:i+l])
if phrase in self.dl[l]:
dcids.update(range(i, i+l))
# Finally, just look for intersect via XPATH + using the super method
# TODO: How to call parent method here!?
if len(dcids) > 0:
self.ns.xpath += '[' + " or ".join("@word_idx='%s'" % i for i in dcids) + ']'
m = [" or ".join("@%s='%s'" % (cid_attrib, c) for c in cid) for cid in cids]
xpath = self.ns.xpath.format(*m)
if len(root.xpath(xpath)) > 0:
yield "DICTIONARY-MATCH:%s:%s" % (self.d_name, self.ns.label)
# COMBINATOR:
# ===========
class Combinator:
"""
Combinator objects are functions f : {0,1}^F x {0,1}^F -> {0,1}^F
---------------
Combinator objects take two (or more?) Indicator objects and map to feature space
"""
def __init__(self, ind1, ind2):
self.ind1 = ind1
self.ind2 = ind2
def apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
return self.ind1.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords)
def print_apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
return self.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords)
class Combinations(Combinator):
"""Generates all *pairs* of features"""
def apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
for f1 in self.ind1.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords):
for f2 in self.ind2.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords):
yield '%s+%s' % (f1, f2)
# Compile Operator: Compiles a set of feature templates
# =====================================================
class Compile:
"""
Compiles a set of functions f_i : 2^T -> {0,1}^F_i to a single function 2^T -> {0,1}^F
where F <= \sum_i F_i
i.e. we can do filtering and/or merging at this point (?)
"""
def __init__(self, op_list):
self.op_list = op_list
def _iterops(self):
"""Iterate over the operators provided, accepting list of single or list elements"""
for ops in self.op_list:
if type(ops) == list:
for op in ops:
yield op
# Guard against e.g. generators, which are exhausted after one pass through --
# after being applied to a single data item, they would be done!
elif hasattr(ops, '__iter__'):
raise ValueError("Iterables of operators in Compile must be list type.")
else:
yield ops
def apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
# Ensure that root is parsed
if type(root) == str:
root = et.fromstring(root)
# Apply the feature templates
for op in self._iterops():
for f in op.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords):
yield f
def result_set(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
"""Takes the union of the result sets"""
# Ensure that root is parsed
if type(root) == str:
root = et.fromstring(root)
# Apply the feature templates
res = set()
for op in self._iterops():
res.update(op.result_set(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords))
return res
def apply_mention(self, root, mention_idxs, dict_sub={}, stopwords=None):
return self.apply(root, [mention_idxs], dict_sub=dict_sub, stopwords=stopwords)
def apply_relation(self, root, mention1_idxs, mention2_idxs, dict_sub={}, stopwords=None):
return self.apply(root, [mention1_idxs, mention2_idxs], dict_sub=dict_sub, stopwords=stopwords)
def apply_multary_relation(self, root, mentions, dict_sub={}, stopwords=None):
return self.apply(root, mentions, dict_sub=dict_sub, stopwords=stopwords)
def __repr__(self):
return '\n'.join(str(op) for op in self._iterops())
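# Illustrative sketch (not part of the original file): a typical pipeline
# compiles several templates and applies them to a parsed sentence; `ns`,
# `xml_root` and `mention_word_idxs` below are hypothetical placeholders.
#   templates = Compile([Ngrams(ns, ['word'], (1, 2)), LengthBin(ns, [4, 8])])
#   feats = list(templates.apply_mention(xml_root, mention_word_idxs))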
| treedlib-master | treedlib/templates.py |
import json
import os
import re
import lxml.etree as et
import sys
# This should be set by the lib wrapper __init__.py file
APP_HOME = os.environ["TREEDLIB_LIB"]
# Load IPython display functionality libs if possible i.e. if in IPython
try:
from IPython.core.display import display_html, HTML, display_javascript, Javascript
except:
pass
class XMLTree:
"""
A generic tree representation which takes XML as input
Includes subroutines for conversion to JSON & for visualization based on js form
"""
def __init__(self, xml_root, words=None):
"""Calls subroutines to generate JSON form of XML input"""
self.root = xml_root
self.words = words
# create a unique id for e.g. canvas id in notebook
self.id = str(abs(hash(self.to_str())))
def _to_json(self, root):
js = {
'attrib': dict(root.attrib),
'children': []
}
for i,c in enumerate(root):
js['children'].append(self._to_json(c))
return js
def to_json(self):
return self._to_json(self.root)
def to_str(self):
return et.tostring(self.root)
def render_tree(self, highlight=[]):
"""
Renders a d3 visualization of the tree for IPython notebook display
Depends on html/js files in vis/ directory, which is assumed to be in same dir...
"""
# HTML
WORD = '<span class="word-' + self.id + '-%s">%s</span>'
words = ' '.join(WORD % (i,w) for i,w in enumerate(self.words)) if self.words else ''
html = open('%s/vis/tree-chart.html' % APP_HOME).read() % (self.id, self.id, words)
display_html(HTML(data=html))
# JS
JS_LIBS = ["http://d3js.org/d3.v3.min.js"]
js = open('%s/vis/tree-chart.js' % APP_HOME).read() % (self.id, json.dumps(self.to_json()), str(highlight))
display_javascript(Javascript(data=js, lib=JS_LIBS))
def corenlp_to_xmltree(s, prune_root=True):
"""
Transforms an object with CoreNLP dep_path and dep_parent attributes into an XMLTree
Will include elements of any array having the same dimension as dep_* as node attributes
Also adds special word_idx attribute corresponding to original sequence order in sentence
"""
# Convert input object to dictionary
if type(s) != dict:
try:
s = s.__dict__ if hasattr(s, '__dict__') else dict(s)
except:
raise ValueError("Cannot convert input object to dict")
# Use the dep_parents array as a guide: ensure it is present and a list of ints
if not('dep_parents' in s and type(s['dep_parents']) == list):
raise ValueError("Input CoreNLP object must have a 'dep_parents' attribute which is a list")
try:
dep_parents = list(map(int, s['dep_parents']))
except:
raise ValueError("'dep_parents' attribute must be a list of ints")
# Also ensure that we are using CoreNLP-native indexing (root=0, 1-base word indexes)!
b = min(dep_parents)
if b != 0:
dep_parents = list(map(lambda j : j - b, dep_parents))
# Parse recursively
root = corenlp_to_xmltree_sub(s, dep_parents, 0)
# Often the returned tree will have several roots, where one is the actual root
# and the rest are just singletons not included in the dep tree parse...
# We optionally remove these singletons and then collapse the root if only one child is left
if prune_root:
for c in list(root):  # copy the children first so removal doesn't skip siblings
if len(c) == 0:
root.remove(c)
if len(root) == 1:
root = root.findall("./*")[0]
return XMLTree(root, words=s['words'])
def corenlp_to_xmltree_sub(s, dep_parents, rid=0):
i = rid - 1
attrib = {}
N = len(dep_parents)
# Add all attributes that have the same shape as dep_parents
if i >= 0:
for k,v in filter(lambda t : type(t[1]) == list and len(t[1]) == N, s.items()):
if v[i] is not None:
attrib[singular(k)] = ''.join(c for c in str(v[i]) if ord(c) < 128)
# Add word_idx if not present
if 'word_idx' not in attrib:
attrib['word_idx'] = str(i)
# Build tree recursively
root = et.Element('node', attrib=attrib)
for i,d in enumerate(dep_parents):
if d == rid:
root.append(corenlp_to_xmltree_sub(s, dep_parents, i+1))
return root
def singular(s):
"""Get singular form of word s (crudely)"""
return re.sub(r'e?s$', '', s, flags=re.I)
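# Illustrative sketch (not part of the original file): singular() just strips a
# trailing plural marker, e.g. singular('words') -> 'word' and
# singular('lemmas') -> 'lemma'; it is used above to derive per-token XML
# attribute names from the CoreNLP array names.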
def html_table_to_xmltree(html):
"""HTML/XML table to XMLTree object"""
node = et.fromstring(re.sub(r'>\s+<', '><', html.strip()))
xml = html_table_to_xmltree_sub(node)
return XMLTree(xml)
def html_table_to_xmltree_sub(node):
"""
Take the XML/HTML table and convert each word in leaf nodes into its own node
Note: Ideally this text would be run through CoreNLP?
"""
# Split text into Token nodes
# NOTE: very basic token splitting here... (to run through CoreNLP?)
if node.text is not None:
for tok in re.split(r'\s+', node.text):
node.append(et.Element('token', attrib={'word':tok}))
# Recursively append children
for c in node:
node.append(html_table_to_xmltree_sub(c))
return node
| treedlib-master | treedlib/structs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from distutils.core import setup
setup(
name="bela",
version="0.1",
packages=["bela"],
) | BELA-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# HACK: Need to import protobuf before pytorch_lightning to prevent Segmentation Fault: https://github.com/protocolbuffers/protobuf/issues/11934
from google.protobuf import descriptor as _descriptor
| BELA-main | bela/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import hydra
from bela.conf.config import MainConfig
from omegaconf import OmegaConf
from pytorch_lightning.trainer import Trainer
@hydra.main(config_path="conf", config_name="config")
def main(cfg: MainConfig):
print(OmegaConf.to_yaml(cfg))
os.environ["NCCL_NSOCKS_PERTHREAD"] = "4"
os.environ["NCCL_SOCKET_NTHREADS"] = "2"
if cfg.get("debug_mode"):
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
os.environ["PL_SKIP_CPU_COPY_ON_DDP_TEARDOWN"] = "1"
transform = hydra.utils.instantiate(cfg.task.transform)
datamodule = hydra.utils.instantiate(cfg.datamodule, transform=transform)
task = hydra.utils.instantiate(cfg.task, datamodule=datamodule, _recursive_=False)
checkpoint_callback = hydra.utils.instantiate(cfg.checkpoint_callback)
trainer = Trainer(**cfg.trainer, callbacks=[checkpoint_callback])
if cfg.test_only:
ckpt_path = cfg.task.load_from_checkpoint
trainer.test(
model=task,
ckpt_path=ckpt_path,
verbose=True,
datamodule=datamodule,
)
else:
trainer.fit(task, datamodule=datamodule)
print(f"*** Best model path is {checkpoint_callback.best_model_path}")
trainer.test(
model=None,
ckpt_path="best",
verbose=True,
datamodule=datamodule,
)
if __name__ == "__main__":
main()
| BELA-main | bela/main.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import mmap
from typing import List, Optional
import torch
from pytorch_lightning import LightningDataModule
from bela.transforms.joint_el_transform import JointELTransform
logger = logging.getLogger()
def get_seq_lengths(batch: List[List[int]]):
return [len(example) for example in batch]
class EntityCatalogue:
def __init__(self, idx_path):
logger.info(f"Reading entity catalogue index {idx_path}")
self.idx = {}
with open(idx_path, "rt") as fd:
for idx, line in enumerate(fd):
ent_id = line.strip()
self.idx[ent_id] = idx
def __len__(self):
return len(self.idx)
def __getitem__(self, entity_id):
ent_index = self.idx[entity_id]
return ent_index
def __contains__(self, entity_id):
return entity_id in self.idx
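# Illustrative sketch (not part of the original file): the idx file is a plain
# text file with one entity id per line, so a (hypothetical) catalogue with the
# lines "Q42" and "Q1" maps ent_catalogue["Q42"] -> 0 and ent_catalogue["Q1"] -> 1.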
class ElMatchaDataset(torch.utils.data.Dataset):
"""
A memory mapped dataset for EL in Matcha format
Each example in this dataset contains several mentions.
We also filter out mentions that are not present in the entity catalogue
"""
def __init__(
self,
path,
ent_catalogue,
use_raw_text,
use_augmentation=False,
augmentation_frequency=0.1,
):
self.ent_catalogue = ent_catalogue
self.use_raw_text = use_raw_text
self.use_augmentation = use_augmentation
self.augmentation_frequency = augmentation_frequency
logger.info(f"Downloading file {path}")
# TODO: Maybe we should lazily load the file to speed up datamodule instantiation (e.g. in model_eval.py)
self.file = open(path, mode="r")
self.mm = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
self.offsets = []
self.count = 0
logger.info(f"Build mmap index for {path}")
line = self.mm.readline()
offset = 0
while line:
self.offsets.append(offset)
self.count += 1
offset = self.mm.tell()
line = self.mm.readline()
def __len__(self):
return self.count
def _add_char_offsets(self, tokens, gt_entities):
offsets = []
token_lengths = []
current_pos = 0
for token in tokens:
offsets.append(current_pos)
token_lengths.append(len(token))
current_pos += len(token) + 1
updated_gt_entities = []
for gt_entity in gt_entities:
offset, length, entity, ent_type = gt_entity[:4]
char_offset = offsets[offset]
char_length = (
sum(token_lengths[offset + idx] for idx in range(length)) + length - 1
)
updated_gt_entities.append(
(offset, length, entity, ent_type, char_offset, char_length)
)
return updated_gt_entities
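# Illustrative sketch (not part of the original file): for the hypothetical
# tokens ["Real", "Madrid", "won"] and a gt entity (0, 2, "Q1", "wiki"), the
# added char offset is 0 and the char length is 11 == len("Real Madrid"),
# since tokens are assumed to be joined by single spaces.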
def __getitem__(self, index):
offset = self.offsets[index]
self.mm.seek(offset)
line = self.mm.readline()
example = json.loads(line)
gt_entities = []
if self.use_raw_text and "original_text" not in example:
example["gt_entities"] = self._add_char_offsets(
example["text"], example["gt_entities"]
)
example["original_text"] = " ".join(example["text"])
for gt_entity in example["gt_entities"]:
if self.use_raw_text:
_, _, entity, ent_type, offset, length = gt_entity[:6]
else:
offset, length, entity, ent_type = gt_entity[:4]
if ent_type != "wiki":
continue
if entity in self.ent_catalogue:
gt_entities.append((offset, length, self.ent_catalogue[entity]))
gt_entities = sorted(gt_entities)
# blink predicts
blink_predicts = None
blink_scores = None
if "blink_predicts" in example:
blink_predicts = []
blink_scores = []
for predict, scores in zip(
example["blink_predicts"], example["blink_scores"]
):
candidates = []
candidates_scores = []
for candidate, score in zip(predict, scores):
if candidate in self.ent_catalogue:
candidates.append(self.ent_catalogue[candidate])
candidates_scores.append(score)
blink_predicts.append(candidates)
blink_scores.append(candidates_scores)
# MD model predicts
md_pred_offsets = example.get("md_pred_offsets")
md_pred_lengths = example.get("md_pred_lengths")
md_pred_scores = example.get("md_pred_scores")
result = {
"data_example_id": example.get("document_id") or example.get("data_example_id", ""),
"text": example["original_text"] if self.use_raw_text else example["text"],
"gt_entities": gt_entities,
"blink_predicts": blink_predicts,
"blink_scores": blink_scores,
"md_pred_offsets": md_pred_offsets,
"md_pred_lengths": md_pred_lengths,
"md_pred_scores": md_pred_scores,
}
return result
class JointELDataModule(LightningDataModule):
"""
Read data from an EL dataset and prepare mention/entity pair tensors
"""
def __init__(
self,
transform: JointELTransform,
# Dataset args
train_path: str,
val_path: str,
test_path: str,
ent_catalogue_idx_path: str,
batch_size: int = 2,
val_batch_size: Optional[int] = None,
test_batch_size: Optional[int] = None,
drop_last: bool = False, # drop last batch if len(dataset) not multiple of batch_size
num_workers: int = 0, # increasing this bugs out right now
use_raw_text: bool = True,
use_augmentation: bool = False,
augmentation_frequency: float = 0.1,
shuffle: bool = True,
*args,
**kwargs,
):
super().__init__()
self.batch_size = batch_size
self.val_batch_size = val_batch_size or batch_size
self.test_batch_size = test_batch_size or batch_size
self.drop_last = drop_last
self.num_workers = num_workers
self.transform = transform
self.ent_catalogue = EntityCatalogue(ent_catalogue_idx_path)
self.shuffle = shuffle
self.datasets = {
"train": ElMatchaDataset(
train_path,
self.ent_catalogue,
use_raw_text=use_raw_text,
use_augmentation=use_augmentation,
augmentation_frequency=augmentation_frequency,
) if train_path else None,
"valid": ElMatchaDataset(
val_path,
self.ent_catalogue,
use_raw_text=use_raw_text,
) if val_path else None,
"test": ElMatchaDataset(
test_path,
self.ent_catalogue,
use_raw_text=use_raw_text,
) if test_path else None,
}
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["train"],
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_train,
shuffle=self.shuffle,
drop_last=self.drop_last,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["valid"],
shuffle=False,
batch_size=self.val_batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
drop_last=self.drop_last,
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["test"],
shuffle=False,
batch_size=self.test_batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
drop_last=self.drop_last,
)
def collate_eval(self, batch):
return self.collate(batch, False)
def collate_train(self, batch):
return self.collate(batch, True)
def collate(self, batch, is_train):
"""
Input:
batch: List[Example]
Example fields:
- "text": List[str] - post tokens
- "gt_entities": List[Tuple[int, int, int]] - GT entities in text,
offset, length, entity id
- "blink_predicts": List[List[int]] - list of entity ids for each MD prediction
- "blink_scores": List[List[float]] - list of BLINK scores
- "md_pred_offsets": List[int] - mention offsets predicted by MD
- "md_pred_lengths": List[int] - mention lengths
- "md_pred_scores": List[float] - MD scores
"""
data_example_ids = []
texts = []
offsets = []
lengths = []
entities = []
for example in batch:
data_example_ids.append(example["data_example_id"])
texts.append(example["text"])
example_offsets = []
example_lengths = []
example_entities = []
for offset, length, entity_id in example["gt_entities"]:
example_offsets.append(offset)
example_lengths.append(length)
example_entities.append(entity_id)
offsets.append(example_offsets)
lengths.append(example_lengths)
entities.append(example_entities)
model_inputs = self.transform(
{
"texts": texts,
"mention_offsets": offsets,
"mention_lengths": lengths,
"entities": entities,
}
)
collate_output = {
"data_example_ids": data_example_ids,
"input_ids": model_inputs["input_ids"],
"attention_mask": model_inputs["attention_mask"],
"mention_offsets": model_inputs["mention_offsets"],
"mention_lengths": model_inputs["mention_lengths"],
"entities": model_inputs["entities"],
"tokens_mapping": model_inputs["tokens_mapping"],
}
if "sp_tokens_boundaries" in model_inputs:
collate_output["sp_tokens_boundaries"] = model_inputs[
"sp_tokens_boundaries"
]
return collate_output
| BELA-main | bela/datamodule/joint_el_datamodule.py |
| BELA-main | bela/tests/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import torch
from bela.transforms.joint_el_transform import JointELTransform
from bela.datamodule.joint_el_datamodule import JointELDataModule
def assert_equal_tensor_dict(test_case, result, expected):
"""
Compare tensors/values in the dict and assert if they are not equal.
The dict could countain multiple levels of nesting.
"""
for key, value in expected.items():
if isinstance(value, dict):
assert_equal_tensor_dict(test_case, result[key], value)
else:
if isinstance(value, torch.Tensor):
test_case.assertTrue(
torch.equal(result[key], value), f"{key} is not equal"
)
else:
test_case.assertEqual(result[key], value, f"{key} is not equal")
class TestJointELDataModule(unittest.TestCase):
def setUp(self):
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_matcha_joint.jsonl")
self.ent_catalogue_idx_path = os.path.join(self.base_dir, "el_catalogue.idx")
self.transform = JointELTransform()
def test_joint_el_datamodule(self):
dm = JointELDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
use_raw_text=False,
batch_size=2,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 1)
expected_batches = [
{
"input_ids": torch.tensor(
[
[
0,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
9020,
2,
],
[
0,
360,
9020,
70,
10323,
111,
30715,
136,
70,
14098,
117604,
2,
1,
1,
1,
1,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
]
),
"mention_offsets": torch.tensor([[1, 14], [2, 0]]),
"mention_lengths": torch.tensor([[3, 1], [1, 0]]),
"entities": torch.tensor([[1, 0], [0, 0]]),
"tokens_mapping": torch.tensor(
[
[
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 10],
[10, 11],
[11, 12],
[12, 14],
[14, 15],
],
[
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 10],
[10, 11],
[0, 1],
[0, 1],
[0, 1],
],
]
),
}
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
if __name__ == '__main__':
unittest.main()
| BELA-main | bela/tests/test_datamodules.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.models.hf_encoder import HFEncoder
from bela.transforms.joint_el_transform import JointELTransform
class TestHFEncoder(unittest.TestCase):
def test_xlmr_encoder(self):
transform = JointELTransform()
model = HFEncoder(model_path="xlm-roberta-base")
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
output = model(
input_ids=model_inputs["input_ids"],
attention_mask=model_inputs["attention_mask"],
)
if __name__ == '__main__':
unittest.main()
| BELA-main | bela/tests/test_models.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.transforms.joint_el_transform import JointELTransform, JointELXlmrRawTextTransform
class TestJointELXlmrTransforms(unittest.TestCase):
def test_blink_mention_xlmr_transform(self):
transform = JointELTransform()
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
expected_model_inputs = {
"input_ids": torch.tensor(
[
[0, 31384, 8781, 7986, 1672, 5120, 8884, 136, 5755, 2],
[0, 47958, 19715, 711, 2, 1, 1, 1, 1, 1],
[0, 96085, 43340, 1129, 2765, 22, 21, 65526, 2, 1],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
]
),
"mention_offsets": torch.tensor([[5, 8], [2, 0], [1, 7]]),
"mention_lengths": torch.tensor([[2, 1], [1, 0], [2, 1]]),
"entities": torch.tensor([[1, 2], [3, 0], [102041, 267832]]),
"tokens_mapping": torch.tensor(
[
[[1, 2], [2, 3], [3, 4], [4, 5], [
5, 6], [6, 7], [7, 8], [8, 9]],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 5],
[5, 6],
[6, 7],
[7, 8],
[0, 1],
[0, 1],
],
]
),
}
for key, value in expected_model_inputs.items():
self.assertTrue(
torch.all(model_inputs[key].eq(value)), f"{key} not equal")
def test_joint_el_raw_text_xlmr_transform(self):
transform = JointELXlmrRawTextTransform()
model_inputs = transform(
{
"texts": [
"Some simple text about Real Madrid and Barcelona",
"Cristiano Ronaldo juega en la Juventus",
"Hola amigos!",
" Hola amigos! ", # test extra spaces
],
"mention_offsets": [
[23, 39],
[0, 30],
[5],
[10],
],
"mention_lengths": [
[11, 9],
[17, 8],
[6],
[6],
],
"entities": [
[1, 2],
[102041, 267832],
[3],
[3],
],
}
)
expected_model_inputs = {
"input_ids": torch.tensor(
[
[0, 31384, 8781, 7986, 1672, 5120, 8884, 136, 5755, 2],
[0, 96085, 43340, 1129, 2765, 22, 21, 65526, 2, 1],
[0, 47958, 19715, 38, 2, 1, 1, 1, 1, 1],
# Whitespaces are ignored
[0, 47958, 19715, 38, 2, 1, 1, 1, 1, 1],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
]
),
"mention_offsets": torch.tensor([[5, 8], [1, 7], [2, 0], [2, 0]]),
"mention_lengths": torch.tensor([[2, 1], [2, 1], [1, 0], [1, 0]]),
"entities": torch.tensor([[1, 2], [102041, 267832], [3, 0], [3, 0]]),
"tokens_mapping": torch.tensor(
[
[[1, 2], [2, 3], [3, 4], [4, 5], [
5, 6], [6, 7], [7, 8], [8, 9]],
[
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[7, 8],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
]
),
"sp_tokens_boundaries": torch.tensor(
[
[
[0, 4],
[4, 11],
[11, 16],
[16, 22],
[22, 27],
[27, 34],
[34, 38],
[38, 48],
],
[
[0, 9],
[9, 17],
[17, 20],
[20, 23],
[23, 26],
[26, 29],
[29, 38],
[0, 1],
],
[[0, 4], [4, 11], [11, 12], [0, 1],
[0, 1], [0, 1], [0, 1], [0, 1]],
[[0, 4+3], [4+3, 11+5], [11+5, 12+5], [0, 1],
[0, 1], [0, 1], [0, 1], [0, 1]], # Add whitespaces
]
),
}
for key, value in expected_model_inputs.items():
self.assertTrue(torch.all(model_inputs[key].eq(
value)), f"{key} not equal: {model_inputs[key]=} != {value=}")
def test_joint_el_raw_text_xlmr_transform_2(self):
examples = [
{
'original_text': ' La Carta de las Naciones Unidas: tratado fundacional de las Naciones Unidas que establece que las obligaciones con las Naciones Unidas prevalecen sobre todas las demás obligaciones del tratado y es vinculante para todos los miembros de las Naciones Unidas. Tipo de documento: tratado., Fecha de la firma: 26 de junio de 1945., Lugar de la firma: San Francisco, California, Estados Unidos., Entrada en vigor: 24 de octubre de 1945., Firmantes: Ratificado por China, Francia, la Unión Soviética, el Reino Unido, Estados Unidos y por la mayoría de estados signatarios., Artículos: 193., Secciones: 20 (preámbulo y 19 capítulos):, *Preámbulo de la Carta de las Naciones Unidas., *Capítulo I: Propósitos y principios., *Capítulo II: Miembros., *Capítulo III: Órganos, *Capítulo IV: La Asamblea General., *Capítulo V: El Consejo de Seguridad, *Capítulo VI: Solución pacífica de controversias., *Capítulo VII: Acción con respecto a las amenazas a la paz, las rupturas de la paz y los actos de agresión., *Capítulo VIII: Acuerdos Regionales., *Capítulo IX: Cooperación internacional económica y social., *Capítulo X: El Consejo Económico y Social., *Capítulo XI: Declaración sobre los territorios no autónomos., *Capítulo XII: Sistema Internacional de Administración Fiduciaria., *Capítulo XIII: El Consejo de Administración Fiduciaria., *Capítulo XIV: La Corte Internacional de Justicia., *Capítulo XV: La Secretaría., *Capítulo XVI: Disposiciones varias., *Capítulo XVII: Arreglos transitorios de seguridad., *Capítulo XVIII: Enmiendas., *Capítulo XIX: Ratificación y firma. ',
'gt_entities': [
[0, 0, 'Q171328', 'wiki', 4, 28],
[0, 0, 'Q131569', 'wiki', 34, 7],
[0, 0, 'Q1065', 'wiki', 61, 15],
[0, 0, 'Q1065', 'wiki', 120, 15],
[0, 0, 'Q131569', 'wiki', 186, 7],
[0, 0, 'Q1065', 'wiki', 241, 15],
[0, 0, 'Q49848', 'wiki', 267, 9],
[0, 0, 'Q384515', 'wiki', 278, 7],
[0, 0, 'Q205892', 'wiki', 288, 5],
[0, 0, 'Q188675', 'wiki', 300, 5],
[0, 0, 'Q2661', 'wiki', 307, 11],
[0, 0, 'Q5240', 'wiki', 322, 4],
[0, 0, 'Q62', 'wiki', 348, 13],
[0, 0, 'Q99', 'wiki', 363, 10],
[0, 0, 'Q30', 'wiki', 375, 14],
[0, 0, 'Q2955', 'wiki', 410, 13],
[0, 0, 'Q148', 'wiki', 460, 5],
[0, 0, 'Q7275', 'wiki', 547, 7],
[0, 0, 'Q1129448', 'wiki', 601, 9],
[0, 0, 'Q1980247', 'wiki', 616, 9],
[0, 0, 'Q7239343', 'wiki', 630, 44],
[0, 0, 'Q211364', 'wiki', 703, 10],
[0, 0, 'Q160016', 'wiki', 730, 8],
[0, 0, 'Q895526', 'wiki', 756, 7],
[0, 0, 'Q47423', 'wiki', 782, 16],
[0, 0, 'Q37470', 'wiki', 817, 20],
[0, 0, 'Q1255828', 'wiki', 874, 13],
[0, 0, 'Q1728608', 'wiki', 891, 105],
[0, 0, 'Q454', 'wiki', 945, 3]
],
},
]
texts = [example['original_text'] for example in examples]
mention_offsets = [[offset for _, _, _, _, offset,
_ in example['gt_entities']] for example in examples]
mention_lengths = [[length for _, _, _, _, _,
length in example['gt_entities']] for example in examples]
entities = [[0 for _, _, _, _, _, _ in example['gt_entities']]
for example in examples]
batch = {
"texts": texts,
"mention_offsets": mention_offsets,
"mention_lengths": mention_lengths,
"entities": entities,
}
transform = JointELXlmrRawTextTransform()
model_inputs = transform(batch)
expected_mention_offsets = [[2, 9, 14, 25, 37, 48, 55, 57, 60, 64, 66, 70, 78, 81, 83, 91, 104, 125, 141, 146, 151, 174, 183, 194, 205, 216, 230]]
expected_mention_lengths = [[6, 1, 3, 3, 1, 3, 1, 1, 2, 1, 3, 1, 2, 1, 2, 3, 1, 1, 3, 2, 11, 1, 3, 3, 2, 3, 2]]
self.assertEqual(
model_inputs['mention_offsets'].tolist(), expected_mention_offsets)
self.assertEqual(
model_inputs['mention_lengths'].tolist(), expected_mention_lengths)
if __name__ == '__main__':
unittest.main()
| BELA-main | bela/tests/test_transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from functools import lru_cache
from bela.evaluation.model_eval import ModelEval
from bela.transforms.spm_transform import SPMTransform
@lru_cache
def get_sp_transform():
return SPMTransform(max_seq_len=100000)
def get_windows(text, window_length=254, overlap=127):
sp_transform = get_sp_transform()
tokens = sp_transform([text])[0]
tokens = tokens[1:-1]
windows = []
for window_start in range(0, len(tokens), window_length - overlap):
start_pos = tokens[window_start][1]
if window_start + window_length >= len(tokens):
end_pos = tokens[-1][2]
else:
end_pos = tokens[window_start + window_length][2]
windows.append((start_pos, end_pos))
return windows
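# Illustrative sketch (not part of the original file): with window_length=254
# and overlap=127 the windows start every 127 sp tokens (0, 127, 254, ...), and
# each window is returned as character (start_pos, end_pos) boundaries in the
# original text, so consecutive windows overlap by roughly half a window.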
def convert_predictions_to_dict(example_predictions):
if len(example_predictions) > 0:
offsets, lengths, entities, md_scores, el_scores = zip(*example_predictions)
else:
offsets, lengths, entities, md_scores, el_scores = [], [], [], [], []
return {
"offsets": offsets,
"lengths": lengths,
"entities": entities,
"md_scores": md_scores,
"el_scores": el_scores,
}
def group_predictions_by_example(all_predictions, extended_examples):
grouped_predictions = defaultdict(list)
for prediction, extended_example in zip(all_predictions, extended_examples):
window_start = extended_example["window_start"]
prediction = dict(prediction)
prediction["offsets"] = [
offset + window_start for offset in prediction["offsets"]
]
grouped_predictions[extended_example["document_id"]].append((prediction))
predictions = {}
for document_id, example_prediction_list in grouped_predictions.items():
example_predictions = []
for prediction in example_prediction_list:
for offset, length, ent, md_score, el_score in zip(
prediction["offsets"],
prediction["lengths"],
prediction["entities"],
prediction["md_scores"],
prediction["el_scores"],
):
example_predictions.append((offset, length, ent, md_score, el_score))
example_predictions = sorted(example_predictions)
predictions[document_id] = example_predictions
return predictions
def merge_predictions(example_predictions):
filtered_example_predictions = []
current_end = None
current_offset = None
current_length = None
current_ent_id = None
current_md_score = None
current_el_score = None
for offset, length, ent_id, md_score, el_score in example_predictions:
if current_end is None:
current_end = offset + length
current_offset = offset
current_length = length
current_ent_id = ent_id
current_md_score = md_score
current_el_score = el_score
continue
if offset < current_end:
# intersection of two predictions
if md_score > current_md_score:
current_ent_id = ent_id
current_offset = offset
current_length = length
current_md_score = md_score
current_el_score = el_score
else:
filtered_example_predictions.append(
(
current_offset,
current_length,
current_ent_id,
current_md_score,
current_el_score,
)
)
current_ent_id = ent_id
current_offset = offset
current_length = length
current_md_score = md_score
current_el_score = el_score
current_end = offset + length
if current_offset is not None:
filtered_example_predictions.append(
(
current_offset,
current_length,
current_ent_id,
current_md_score,
current_el_score,
)
)
return filtered_example_predictions
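# Illustrative sketch (not part of the original file): overlapping predictions
# are resolved by keeping the span with the higher MD score, e.g.
#   merge_predictions([(0, 5, "Q1", 0.9, 0.8), (3, 4, "Q2", 0.7, 0.9)])
#   -> [(0, 5, "Q1", 0.9, 0.8)]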
def get_predictions_using_windows(model_eval: ModelEval, test_data, batch_size=1024, window_length=254, window_overlap=10, do_merge_predictions=True):
extended_examples = []
for example in test_data:
assert "document_id" in example or "data_example_id" in example
document_id = example.get("document_id") or example["data_example_id"]
text = example["original_text"]
windows = get_windows(text, window_length, window_overlap)
for idx, (start_pos, end_pos) in enumerate(windows):
new_text = text[start_pos:end_pos]
extended_examples.append(
{
"document_id": document_id,
"original_text": new_text,
"gt_entities": example["gt_entities"],
"window_idx": idx,
"window_start": start_pos,
"window_end": end_pos,
}
)
all_predictions = model_eval.get_predictions(
extended_examples, batch_size=batch_size
)
predictions_dict = group_predictions_by_example(all_predictions, extended_examples)
predictions = []
for example in test_data:
assert "document_id" in example or "data_example_id" in example
document_id = example.get("document_id") or example["data_example_id"]
text = example["original_text"]
example_predictions = predictions_dict[document_id]
if do_merge_predictions:
example_predictions = merge_predictions(example_predictions)
example_predictions = convert_predictions_to_dict(example_predictions)
predictions.append(example_predictions)
return predictions
| BELA-main | bela/utils/prediction_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class DummyPathManager:
def get_local_path(self, path, *args, **kwargs):
return path
def open(self, path, *args, **kwargs):
return open(path, *args, **kwargs)
PathManager = DummyPathManager()
| BELA-main | bela/utils/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Any, Dict, Optional, List
@dataclass
class Entity:
entity_id: str # E.g. "Q3312129"
offset: int
length: int
text: str
entity_type: Optional[str] = None # E.g. wiki
md_score: Optional[float] = None
el_score: Optional[float] = None
@property
def mention(self):
return self.text[self.offset : self.offset + self.length]
@property
def extended_mention(self):
"""Mentin in surrounding context (10 chars), with the mention in brackets"""
left_context = self.text[max(0, self.offset - 10) : self.offset]
right_context = self.text[self.offset + self.length : self.offset + self.length + 10]
# Add ... if the context is truncated
if self.offset - 10 > 0:
left_context = "..." + left_context
if self.offset + self.length + 10 < len(self.text):
right_context = right_context + "..."
return f"{left_context}[{self.mention}]{right_context}"
def __repr__(self):
str_repr = f'Entity<mention="{self.extended_mention}", entity_id={self.entity_id}'
if self.md_score is not None and self.el_score is not None:
str_repr += f", md_score={self.md_score:.2f}, el_score={self.el_score:.2f}"
str_repr += ">"
return str_repr
def __eq__(self, other):
return self.offset == other.offset and self.length == other.length and self.entity_id == other.entity_id
class Sample:
text: str
sample_id: Optional[str] = None
ground_truth_entities: Optional[List[Entity]] = None
predicted_entities: Optional[List[Entity]] = None
def __init__(self, text, sample_id=None, ground_truth_entities=None, predicted_entities=None):
self.text = text
self.sample_id = sample_id
self.ground_truth_entities = ground_truth_entities
self.predicted_entities = predicted_entities
if self.ground_truth_entities is not None and self.predicted_entities is not None:
self.compute_scores()
def compute_scores(self):
self.true_positives = [
predicted_entity
for predicted_entity in self.predicted_entities
if predicted_entity in self.ground_truth_entities
]
self.false_positives = [
predicted_entity
for predicted_entity in self.predicted_entities
if predicted_entity not in self.ground_truth_entities
]
self.false_negatives = [
ground_truth_entity
for ground_truth_entity in self.ground_truth_entities
if ground_truth_entity not in self.predicted_entities
]
# Bag of entities
self.ground_truth_entity_ids = set(
[ground_truth_entity.entity_id for ground_truth_entity in self.ground_truth_entities]
)
self.predicted_entity_ids = set(
[predicted_entity.entity_id for predicted_entity in self.predicted_entities]
)
self.true_positives_boe = [
predicted_entity_id
for predicted_entity_id in self.predicted_entity_ids
if predicted_entity_id in self.ground_truth_entity_ids
]
self.false_positives_boe = [
predicted_entity_id
for predicted_entity_id in self.predicted_entity_ids
if predicted_entity_id not in self.ground_truth_entity_ids
]
self.false_negatives_boe = [
ground_truth_entity_id
for ground_truth_entity_id in self.ground_truth_entity_ids
if ground_truth_entity_id not in self.predicted_entity_ids
]
def __repr__(self):
repr_str = f'Sample(text="{self.text[:100]}..."'
if self.ground_truth_entities is not None:
repr_str += f", ground_truth_entities={self.ground_truth_entities[:3]}..."
if self.predicted_entities is not None:
repr_str += f", predicted_entities={self.predicted_entities[:3]}..."
repr_str += ")"
return repr_str
def print(self, max_display_length=1000):
print(f"{self.text[:max_display_length]=}")
if self.ground_truth_entities is not None:
print("***************** Ground truth entities *****************")
print(f"{len(self.ground_truth_entities)=}")
for ground_truth_entity in self.ground_truth_entities:
if ground_truth_entity.offset + ground_truth_entity.length > max_display_length:
continue
print(ground_truth_entity)
if self.predicted_entities is not None:
print("***************** Predicted entities *****************")
print(f"{len(self.predicted_entities)=}")
for predicted_entity in self.predicted_entities:
if predicted_entity.offset + predicted_entity.length > max_display_length:
continue
print(predicted_entity)
def convert_jsonl_data_to_samples(jsonl_data: List[Dict[str, Any]]) -> List[Sample]:
"""Converts the jsonl data to a list of samples."""
samples = []
for example in jsonl_data:
ground_truth_entities = [
Entity(entity_id=entity_id, offset=offset, length=length, text=example["original_text"])
for _, _, entity_id, _, offset, length in example["gt_entities"]
]
sample = Sample(text=example["original_text"], ground_truth_entities=ground_truth_entities)
samples.append(sample)
return samples
def convert_predictions_to_entities(example_predictions: Dict[str, List], text) -> List[Entity]:
"""Converts the predictions of a single example to a list of entities."""
predicted_entities = [
Entity(entity_id=entity_id, offset=offset, length=length, md_score=md_score, el_score=el_score, text=text)
for offset, length, entity_id, md_score, el_score in zip(
example_predictions["offsets"],
example_predictions["lengths"],
example_predictions["entities"],
example_predictions["md_scores"],
example_predictions["el_scores"],
)
]
return predicted_entities
def convert_jsonl_data_and_predictions_to_samples(
jsonl_data: List[Dict[str, Any]], predictions: Dict[str, List], md_threshold, el_threshold
) -> List[Sample]:
samples = convert_jsonl_data_to_samples(jsonl_data)
for sample, example_predictions in zip(samples, predictions):
predicted_entities = convert_predictions_to_entities(example_predictions, sample.text)
predicted_entities = [
entity for entity in predicted_entities if entity.el_score > el_threshold and entity.md_score > md_threshold
]
sample.predicted_entities = predicted_entities
sample.compute_scores()
return samples
| BELA-main | bela/utils/analysis_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch.nn as nn
from transformers import AutoModel, AutoConfig
class HFEncoder(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
projection_dim: Optional[int] = None,
):
super().__init__()
self.transformer = AutoModel.from_pretrained(model_path)
self.embedding_dim = self.transformer.encoder.config.hidden_size
def forward(self, input_ids, attention_mask=None):
output = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
last_layer = output["last_hidden_state"]
sentence_rep = last_layer[:, 0, :]
return sentence_rep, last_layer
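# Illustrative sketch (not part of the original file): the encoder returns the
# first-token (<s>) representation together with the full last hidden layer.
# Shapes below assume xlm-roberta-base (hidden size 768):
#   encoder = HFEncoder("xlm-roberta-base")
#   sentence_rep, last_layer = encoder(input_ids, attention_mask)
#   # sentence_rep: (batch, 768), last_layer: (batch, seq_len, 768)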
| BELA-main | bela/models/hf_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from transformers import AutoTokenizer
class HFTransform(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
max_seq_len: int = 256,
add_special_tokens: bool = True,
):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.sep_token = self.tokenizer.sep_token
self.max_seq_len = max_seq_len
self.add_special_tokens = add_special_tokens
def forward(self, texts):
return self.tokenizer(
texts,
return_tensors=None,
padding=False,
truncation=True,
max_length=self.max_seq_len,
add_special_tokens=self.add_special_tokens,
)["input_ids"]
| BELA-main | bela/transforms/hf_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sentencepiece.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='sentencepiece.proto',
package='sentencepiece',
syntax='proto2',
serialized_options=b'H\003',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13sentencepiece.proto\x12\rsentencepiece\"\xdf\x01\n\x11SentencePieceText\x12\x0c\n\x04text\x18\x01 \x01(\t\x12>\n\x06pieces\x18\x02 \x03(\x0b\x32..sentencepiece.SentencePieceText.SentencePiece\x12\r\n\x05score\x18\x03 \x01(\x02\x1a\x62\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\r\x12\x0f\n\x07surface\x18\x03 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x04 \x01(\r\x12\x0b\n\x03\x65nd\x18\x05 \x01(\r*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"J\n\x16NBestSentencePieceText\x12\x30\n\x06nbests\x18\x01 \x03(\x0b\x32 .sentencepiece.SentencePieceTextB\x02H\x03'
)
_SENTENCEPIECETEXT_SENTENCEPIECE = _descriptor.Descriptor(
name='SentencePiece',
full_name='sentencepiece.SentencePieceText.SentencePiece',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='piece', full_name='sentencepiece.SentencePieceText.SentencePiece.piece', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='sentencepiece.SentencePieceText.SentencePiece.id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='surface', full_name='sentencepiece.SentencePieceText.SentencePiece.surface', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='begin', full_name='sentencepiece.SentencePieceText.SentencePiece.begin', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end', full_name='sentencepiece.SentencePieceText.SentencePiece.end', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(200, 536870912), ],
oneofs=[
],
serialized_start=153,
serialized_end=251,
)
_SENTENCEPIECETEXT = _descriptor.Descriptor(
name='SentencePieceText',
full_name='sentencepiece.SentencePieceText',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='sentencepiece.SentencePieceText.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pieces', full_name='sentencepiece.SentencePieceText.pieces', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='score', full_name='sentencepiece.SentencePieceText.score', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_SENTENCEPIECETEXT_SENTENCEPIECE, ],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(200, 536870912), ],
oneofs=[
],
serialized_start=39,
serialized_end=262,
)
_NBESTSENTENCEPIECETEXT = _descriptor.Descriptor(
name='NBestSentencePieceText',
full_name='sentencepiece.NBestSentencePieceText',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='nbests', full_name='sentencepiece.NBestSentencePieceText.nbests', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=338,
)
_SENTENCEPIECETEXT_SENTENCEPIECE.containing_type = _SENTENCEPIECETEXT
_SENTENCEPIECETEXT.fields_by_name['pieces'].message_type = _SENTENCEPIECETEXT_SENTENCEPIECE
_NBESTSENTENCEPIECETEXT.fields_by_name['nbests'].message_type = _SENTENCEPIECETEXT
DESCRIPTOR.message_types_by_name['SentencePieceText'] = _SENTENCEPIECETEXT
DESCRIPTOR.message_types_by_name['NBestSentencePieceText'] = _NBESTSENTENCEPIECETEXT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SentencePieceText = _reflection.GeneratedProtocolMessageType('SentencePieceText', (_message.Message,), {
'SentencePiece' : _reflection.GeneratedProtocolMessageType('SentencePiece', (_message.Message,), {
'DESCRIPTOR' : _SENTENCEPIECETEXT_SENTENCEPIECE,
'__module__' : 'sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText.SentencePiece)
})
,
'DESCRIPTOR' : _SENTENCEPIECETEXT,
'__module__' : 'sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText)
})
_sym_db.RegisterMessage(SentencePieceText)
_sym_db.RegisterMessage(SentencePieceText.SentencePiece)
NBestSentencePieceText = _reflection.GeneratedProtocolMessageType('NBestSentencePieceText', (_message.Message,), {
'DESCRIPTOR' : _NBESTSENTENCEPIECETEXT,
'__module__' : 'sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.NBestSentencePieceText)
})
_sym_db.RegisterMessage(NBestSentencePieceText)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| BELA-main | bela/transforms/sentencepiece_pb2.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import os
import torch.nn as nn
import sentencepiece as spm
from .sentencepiece_pb2 import SentencePieceText
class SPMTransform(nn.Module):
def __init__(
self,
sp_model_path: Optional[str] = None,
max_seq_len: int = 256,
add_special_tokens: bool = True,
):
super().__init__()
sp_model_path = sp_model_path or os.path.join(os.path.dirname(__file__), "../data/sp_model")
self.processor = spm.SentencePieceProcessor(sp_model_path)
self.sep_token = '</s>'
self.unk_token_id = 3
self.max_seq_len = max_seq_len
self.add_special_tokens = add_special_tokens
def forward(self, texts):
output = []
for text in texts:
spt = SentencePieceText()
spt.ParseFromString(self.processor.encode_as_serialized_proto(text))
current_offset = 0
leading_whitespaces_count = 0
for char in text:
if char.isspace():
leading_whitespaces_count += 1
else:
break
token_ids_with_offsets = []
if self.add_special_tokens:
token_ids_with_offsets.append((0,0,0))
for idx, piece in enumerate(spt.pieces):
if piece.id != 0:
token_id = piece.id + 1
else:
token_id = self.unk_token_id
if idx == 0:
# if we process the first token, add the leading whitespace count to the sp token length
token_ids_with_offsets.append((token_id, current_offset, current_offset + len(piece.surface) + leading_whitespaces_count))
current_offset += len(piece.surface) + leading_whitespaces_count
else:
token_ids_with_offsets.append((token_id, current_offset, current_offset + len(piece.surface)))
current_offset += len(piece.surface)
# take into account special tokens
if idx == self.max_seq_len - 3:
break
if self.add_special_tokens:
token_ids_with_offsets.append((2,current_offset,0))
output.append(token_ids_with_offsets)
return output
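# Illustrative sketch (not part of the original file): each text becomes a list
# of (token_id, char_start, char_end) triples wrapped in BOS (0) and EOS (2)
# entries when add_special_tokens is True. Assuming the model splits
# "Hello world" into the pieces "▁Hello" and "▁world", the output would look
# like [(0, 0, 0), (id_1, 0, 5), (id_2, 5, 11), (2, 11, 0)], where id_1/id_2
# are the sentencepiece ids shifted by +1.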
| BELA-main | bela/transforms/spm_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from bela.transforms.hf_transform import HFTransform
from bela.transforms.spm_transform import SPMTransform
class ReadState(Enum):
ReadAlphaNum = 1
ReadSpace = 2
ReadOther = 3
def insert_spaces(text: str) -> Tuple[str, List[int]]:
"""
The raw string inputs sometimes miss spaces between
text pieces; for example, emoticons can be joined directly to the text:
[smile]Some text.[smile] another text.
This function modifies the text string to separate alphanumeric tokens
from any other tokens to make the model's life easier. The above example
will become:
[smile] Some text . [smile] another text .
"""
out_str: str = ""
insertions: List[int] = []
# In the beginning of the string we assume we just read some space
state: ReadState = ReadState.ReadSpace
for idx, char in enumerate(utf8_chars(text)):
if state == ReadState.ReadSpace:
if unicode_isspace(char):
pass
elif unicode_isalnum(char):
state = ReadState.ReadAlphaNum
else:
state = ReadState.ReadOther
elif state == ReadState.ReadAlphaNum:
if unicode_isspace(char):
state = ReadState.ReadSpace
elif unicode_isalnum(char):
pass
else:
out_str += " "
insertions.append(idx)
state = ReadState.ReadOther
elif state == ReadState.ReadOther:
if unicode_isspace(char):
state = ReadState.ReadSpace
elif unicode_isalnum(char):
out_str += " "
insertions.append(idx)
state = ReadState.ReadAlphaNum
else:
pass
out_str += char
return out_str, insertions
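# Illustrative sketch (not part of the original file): assuming the unicode_is*
# helpers behave like str.isalnum/str.isspace,
#   insert_spaces("ok!go") -> ("ok ! go", [2, 3])
# i.e. the returned list holds the original-string indices in front of which a
# space was inserted.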
def lower_bound(a: List[int], x: int) -> int:
lo: int = 0
hi: int = len(a)
while lo < hi:
mid = (lo + hi) // 2
if a[mid] < x:
lo = mid + 1
else:
hi = mid
return lo
def align_start(start: int, starts: List[int]) -> int:
new_start: int = start
if start not in starts:
if len(starts) > 0:
lb = lower_bound(starts, start)
if lb == len(starts) or starts[lb] != start:
new_start = starts[max(0, lb - 1)]
return new_start
def align_end(end: int, ends: List[int]) -> int:
new_end: int = end
if end not in ends:
if len(ends) > 0:
lb = lower_bound(ends, end)
if lb < len(ends):
new_end = ends[lb]
return new_end
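# Illustrative sketch (not part of the original file): align_start snaps a
# character position to the closest boundary at or before it, and align_end to
# the closest boundary at or after it, e.g. align_start(6, [0, 4, 10]) -> 4
# and align_end(11, [3, 9, 15]) -> 15.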
def pieces_to_texts(
texts_pieces_token_ids: List[List[int]],
texts: List[List[str]],
texts_mention_offsets: List[List[int]],
texts_mention_lengths: List[List[int]],
bos_idx: int,
eos_idx: int,
max_seq_len: int = 256,
):
"""
Function takes an array with SP tokens per word token, plus the original texts,
and converts the yoda-tokenized batch to an SP-tokenized batch. Mention offsets
and lengths are also converted with respect to SP tokens.
Inputs:
1) texts_pieces_token_ids: List with sp tokens per text token
2) texts: original yoda tokenized texts
3) texts_mention_offsets: mention offsets in original texts
4) texts_mention_lengths: mention lengths in original texts
5) bos_idx: tokenizer bos index
6) eos_idx: tokenizer eos index
7) max_seq_len: tokenizer max sequence length
Outputs:
new_texts_token_ids: List[List[int]] - text batch with sp tokens
new_seq_lengths: List[int] - sp tokenized texts lengths
new_mention_offsets: List[List[int]] - converted mention offsets
new_mention_lengths: List[List[int]] - converted mention lengths
"""
new_texts_token_ids: List[List[int]] = []
new_seq_lengths: List[int] = []
new_mention_offsets: List[List[int]] = []
new_mention_lengths: List[List[int]] = []
tokens_mapping: List[List[List[int]]] = [] # bs x idx x 2
pieces_offset = 0
for text, old_mention_offsets, old_mention_lengths in zip(
texts,
texts_mention_offsets,
texts_mention_lengths,
):
mapping: List[Tuple[int, int]] = []
text_token_ids: List[int] = [bos_idx]
mention_offsets: List[int] = []
mention_lengths: List[int] = []
for token_ids in texts_pieces_token_ids[
pieces_offset : pieces_offset + len(text)
]:
token_ids = token_ids[1:-1]
current_pos = len(text_token_ids)
mapping.append((current_pos, current_pos + len(token_ids)))
text_token_ids.extend(token_ids)
text_token_ids = text_token_ids[: max_seq_len - 1]
text_token_ids.append(eos_idx)
for old_offset, old_length in zip(old_mention_offsets, old_mention_lengths):
new_offset = mapping[old_offset][0]
new_end = mapping[old_offset + old_length - 1][1]
new_length = new_end - new_offset
if new_end > max_seq_len - 1:
break
mention_offsets.append(new_offset)
mention_lengths.append(new_length)
new_texts_token_ids.append(text_token_ids)
new_seq_lengths.append(len(text_token_ids))
new_mention_offsets.append(mention_offsets)
new_mention_lengths.append(mention_lengths)
mapping = [[start, end] for start, end in mapping if end < max_seq_len]
tokens_mapping.append(mapping)
pieces_offset += len(text)
return (
new_texts_token_ids,
new_seq_lengths,
new_mention_offsets,
new_mention_lengths,
tokens_mapping,
)
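# Illustrative sketch (hand-traced, with hypothetical SP token ids):
#   texts                  = [["Hello", "world"]]
#   texts_pieces_token_ids = [[0, 35378, 2], [0, 8999, 2]]  # per-word SP ids incl. bos/eos
#   texts_mention_offsets  = [[0]]
#   texts_mention_lengths  = [[2]]                          # the mention spans both words
# With bos_idx=0 and eos_idx=2 the function returns
#   new_texts_token_ids = [[0, 35378, 8999, 2]], new_seq_lengths = [4],
#   new_mention_offsets = [[1]], new_mention_lengths = [[2]],
#   tokens_mapping      = [[[1, 2], [2, 3]]]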
@torch.jit.script
def pad_tokens_mapping(tokens_mapping: List[List[List[int]]]) -> List[List[List[int]]]:
    # Pad (or truncate) every mapping in the batch to the length of the longest
    # mapping, using [0, 1] as the padding span.
    seq_lens: List[int] = []
    for seq in tokens_mapping:
        seq_lens.append(len(seq))
    pad_to_length = max(seq_lens)
    for mapping in tokens_mapping:
        padding = pad_to_length - len(mapping)
        if padding >= 0:
            for _ in range(padding):
                mapping.append([0, 1])
        else:
            for _ in range(-padding):
                mapping.pop()
    return tokens_mapping
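# Illustrative sketch (hand-traced): every mapping is padded with [0, 1] spans up
# to the longest mapping in the batch, e.g.
#   pad_tokens_mapping([[[1, 2]], [[1, 2], [3, 4]]])
#   -> [[[1, 2], [0, 1]], [[1, 2], [3, 4]]]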
@torch.jit.script
def pad_2d(
batch: List[List[int]], seq_lens: List[int], pad_idx: int, max_len: int = -1
) -> List[List[int]]:
pad_to_length = max(seq_lens)
if max_len > 0:
pad_to_length = min(pad_to_length, max_len)
for sentence in batch:
padding = pad_to_length - len(sentence)
if padding >= 0:
for _ in range(padding):
sentence.append(pad_idx)
else:
for _ in range(-padding):
sentence.pop()
return batch
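# Illustrative sketch (hand-traced): sentences are padded with pad_idx up to the
# longest sequence (optionally capped by max_len, which also truncates), e.g.
#   pad_2d([[1, 2], [3]], seq_lens=[2, 1], pad_idx=0)               -> [[1, 2], [3, 0]]
#   pad_2d([[1, 2, 3], [4]], seq_lens=[3, 1], pad_idx=0, max_len=2) -> [[1, 2], [4, 0]]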
class JointELCollate(torch.nn.Module):
def __init__(
self,
pad_idx: int = 1,
token_ids_column: str = "input_ids",
seq_lens_column: str = "seq_lens",
pad_mask_column: str = "attention_mask",
mention_pad_idx: int = 0,
mention_offsets_column: str = "mention_offsets",
mention_lengths_column: str = "mention_lengths",
mentions_seq_lengths_column: str = "mentions_seq_lengths",
entities_column: str = "entities",
tokens_mapping_column: str = "tokens_mapping",
sp_tokens_boundaries_column: str = "sp_tokens_boundaries",
insertions_column: str = "insertions",
):
super().__init__()
self._pad_idx = pad_idx
self.token_ids_column = token_ids_column
self.seq_lens_column = seq_lens_column
self.pad_mask_column = pad_mask_column
self._mention_pad_idx = mention_pad_idx
self.mention_offsets_column = mention_offsets_column
self.mention_lengths_column = mention_lengths_column
self.entities_column = entities_column
self.mentions_seq_lengths_column = mentions_seq_lengths_column
self.tokens_mapping_column = tokens_mapping_column
self.sp_tokens_boundaries_column = sp_tokens_boundaries_column
self.insertions_column = insertions_column
def forward(self, batch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
token_ids = batch[self.token_ids_column]
assert torch.jit.isinstance(token_ids, List[List[int]])
seq_lens = batch[self.seq_lens_column]
assert torch.jit.isinstance(seq_lens, List[int])
tokens_mapping = batch[self.tokens_mapping_column]
assert torch.jit.isinstance(tokens_mapping, List[List[List[int]]])
pad_token_ids = pad_sequence(
[torch.tensor(ids, dtype=torch.long) for ids in token_ids],
batch_first=True,
padding_value=float(self._pad_idx),
)
pad_mask = torch.ne(pad_token_ids, self._pad_idx).to(dtype=torch.long)
model_inputs: Dict[str, torch.Tensor] = {
self.token_ids_column: pad_token_ids,
self.pad_mask_column: pad_mask,
}
model_inputs[self.tokens_mapping_column] = torch.tensor(
pad_tokens_mapping(tokens_mapping),
dtype=torch.long,
)
if self.mention_offsets_column in batch:
mention_offsets = batch[self.mention_offsets_column]
assert torch.jit.isinstance(mention_offsets, List[List[int]])
mention_lengths = batch[self.mention_lengths_column]
assert torch.jit.isinstance(mention_lengths, List[List[int]])
mentions_seq_lengths = batch[self.mentions_seq_lengths_column]
assert torch.jit.isinstance(mentions_seq_lengths, List[int])
entities = batch[self.entities_column]
assert torch.jit.isinstance(entities, List[List[int]])
model_inputs[self.mention_offsets_column] = torch.tensor(
pad_2d(
mention_offsets,
seq_lens=mentions_seq_lengths,
pad_idx=self._mention_pad_idx,
),
dtype=torch.long,
)
model_inputs[self.mention_lengths_column] = torch.tensor(
pad_2d(
mention_lengths,
seq_lens=mentions_seq_lengths,
pad_idx=self._mention_pad_idx,
),
dtype=torch.long,
)
model_inputs[self.entities_column] = torch.tensor(
pad_2d(
entities,
seq_lens=mentions_seq_lengths,
pad_idx=self._mention_pad_idx,
),
dtype=torch.long,
)
if self.sp_tokens_boundaries_column in batch:
sp_tokens_boundaries = batch[self.sp_tokens_boundaries_column]
assert torch.jit.isinstance(sp_tokens_boundaries, List[List[List[int]]])
model_inputs[self.sp_tokens_boundaries_column] = torch.tensor(
pad_tokens_mapping(sp_tokens_boundaries),
dtype=torch.long,
)
if self.insertions_column in batch:
insertions = batch[self.insertions_column]
assert torch.jit.isinstance(insertions, List[List[int]])
insertions_seq_lens: List[int] = []
for seq in insertions:
insertions_seq_lens.append(len(seq))
model_inputs[self.insertions_column] = torch.tensor(
pad_2d(
insertions,
seq_lens=insertions_seq_lens,
pad_idx=-1,
),
dtype=torch.long,
)
return model_inputs
class JointELTransform(HFTransform):
def __init__(
self,
model_path: str = "xlm-roberta-base",
max_seq_len: int = 256,
texts_column: str = "texts",
mention_offsets_column: str = "mention_offsets",
mention_lengths_column: str = "mention_lengths",
mentions_seq_lengths_column: str = "mentions_seq_lengths",
entities_column: str = "entities",
token_ids_column: str = "input_ids",
seq_lens_column: str = "seq_lens",
pad_mask_column: str = "attention_mask",
tokens_mapping_column: str = "tokens_mapping",
):
super().__init__(model_path=model_path)
        if 'xlm' in model_path:
            self.bos_idx = self.tokenizer.bos_token_id
            self.eos_idx = self.tokenizer.eos_token_id
        elif 'bert' in model_path:
            self.bos_idx = self.tokenizer.cls_token_id
            self.eos_idx = self.tokenizer.sep_token_id
        else:
            raise ValueError(
                f"Unsupported model_path '{model_path}': expected an XLM-R or BERT style model"
            )
self.max_seq_len = max_seq_len
self.texts_column = texts_column
self.token_ids_column = token_ids_column
self.seq_lens_column = seq_lens_column
self.pad_mask_column = pad_mask_column
self.mention_offsets_column = mention_offsets_column
self.mention_lengths_column = mention_lengths_column
self.mentions_seq_lengths_column = mentions_seq_lengths_column
self.entities_column = entities_column
self.tokens_mapping_column = tokens_mapping_column
self._collate = JointELCollate(
pad_idx=self.tokenizer.pad_token_id,
token_ids_column=token_ids_column,
seq_lens_column=seq_lens_column,
pad_mask_column=pad_mask_column,
mention_offsets_column=mention_offsets_column,
mention_lengths_column=mention_lengths_column,
mentions_seq_lengths_column=mentions_seq_lengths_column,
entities_column=entities_column,
tokens_mapping_column=tokens_mapping_column,
)
def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
texts = batch[self.texts_column]
torch.jit.isinstance(texts, List[List[str]])
mention_offsets = batch[self.mention_offsets_column]
torch.jit.isinstance(mention_offsets, List[List[int]])
mention_lengths = batch[self.mention_lengths_column]
torch.jit.isinstance(mention_lengths, List[List[int]])
entities = batch[self.entities_column]
torch.jit.isinstance(entities, List[List[int]])
texts_pieces = [token for tokens in texts for token in tokens]
texts_pieces_token_ids: List[List[int]] = super().forward(
texts_pieces
)
(
token_ids,
seq_lens,
mention_offsets,
mention_lengths,
tokens_mapping,
) = pieces_to_texts(
texts_pieces_token_ids,
texts,
mention_offsets,
mention_lengths,
bos_idx=self.bos_idx,
eos_idx=self.eos_idx,
max_seq_len=self.max_seq_len,
)
entities = [
text_entities[: len(text_mention_offsets)]
for text_entities, text_mention_offsets in zip(entities, mention_offsets)
]
mentions_seq_lens: List[int] = [
len(text_mention_offsets) for text_mention_offsets in mention_offsets
]
return {
self.token_ids_column: token_ids,
self.seq_lens_column: seq_lens,
self.mention_offsets_column: mention_offsets,
self.mention_lengths_column: mention_lengths,
self.mentions_seq_lengths_column: mentions_seq_lens,
self.entities_column: entities,
self.tokens_mapping_column: tokens_mapping,
}
def forward(self, batch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
return self._collate(self.transform(batch))
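    # Illustrative usage sketch (values are hypothetical; the actual token ids
    # depend on the downloaded "xlm-roberta-base" tokenizer):
    #   transform = JointELTransform()
    #   batch = {
    #       "texts": [["Hello", "world", "!"]],
    #       "mention_offsets": [[0]],
    #       "mention_lengths": [[2]],
    #       "entities": [[42]],
    #   }
    #   model_inputs = transform(batch)
    #   # -> padded tensors: input_ids, attention_mask, tokens_mapping,
    #   #    mention_offsets, mention_lengths, entities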
class JointELXlmrRawTextTransform(SPMTransform):
def __init__(
self,
sp_model_path: Optional[str] = None,
vocab_path: Optional[str] = None,
max_seq_len: int = 256,
insert_spaces: bool = False,
mention_boundaries_on_word_boundaries: bool = False,
align_mention_offsets_to_word_boundaries: bool = False,
texts_column: str = "texts",
mention_offsets_column: str = "mention_offsets",
mention_lengths_column: str = "mention_lengths",
mentions_seq_lengths_column: str = "mentions_seq_lengths",
entities_column: str = "entities",
token_ids_column: str = "input_ids",
seq_lens_column: str = "seq_lens",
pad_mask_column: str = "attention_mask",
tokens_mapping_column: str = "tokens_mapping",
sp_tokens_boundaries_column: str = "sp_tokens_boundaries",
insertions_column: str = "insertions",
):
super().__init__(
sp_model_path=sp_model_path,
max_seq_len=max_seq_len,
add_special_tokens=False,
)
self.bos_idx = 0
self.eos_idx = 2
self.pad_idx = 1
self.max_seq_len = max_seq_len
self.insert_spaces = insert_spaces
self.mention_boundaries_on_word_boundaries = (
mention_boundaries_on_word_boundaries
)
self.align_mention_offsets_to_word_boundaries = (
align_mention_offsets_to_word_boundaries
)
self.texts_column = texts_column
self.mention_offsets_column = mention_offsets_column
self.mention_lengths_column = mention_lengths_column
self.mentions_seq_lengths_column = mentions_seq_lengths_column
self.entities_column = entities_column
self.token_ids_column = token_ids_column
self.seq_lens_column = seq_lens_column
self.tokens_mapping_column = tokens_mapping_column
self.sp_tokens_boundaries_column = sp_tokens_boundaries_column
self.insertions_column = insertions_column
self._collate = JointELCollate(
pad_idx=self.pad_idx,
token_ids_column=token_ids_column,
seq_lens_column=seq_lens_column,
pad_mask_column=pad_mask_column,
mention_offsets_column=mention_offsets_column,
mention_lengths_column=mention_lengths_column,
mentions_seq_lengths_column=mentions_seq_lengths_column,
entities_column=entities_column,
tokens_mapping_column=tokens_mapping_column,
sp_tokens_boundaries_column=sp_tokens_boundaries_column,
)
def _calculate_alpha_num_boundaries(self, texts: List[str]) -> List[List[List[int]]]:
"""Returns for each text, a list of lists of start and end indices of alpha-numeric substrings (~=words)."""
alpha_num_boundaries: List[List[List[int]]] = []
for text in texts:
example_alpha_num_boundaries: List[List[int]] = []
cur_alpha_num_start: int = -1
for idx, char in enumerate(text):
if char.isalnum():
if cur_alpha_num_start == -1:
cur_alpha_num_start = idx
else:
if cur_alpha_num_start != -1:
example_alpha_num_boundaries.append([cur_alpha_num_start, idx])
cur_alpha_num_start = -1
if cur_alpha_num_start != -1:
example_alpha_num_boundaries.append([cur_alpha_num_start, len(text)])
alpha_num_boundaries.append(example_alpha_num_boundaries)
return alpha_num_boundaries
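    # Illustrative sketch (hand-traced): for the text "Hi, world!" the method
    # returns [[0, 2], [4, 9]], i.e. the end-exclusive character spans of the
    # alphanumeric runs "Hi" and "world".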
def _calculate_token_mapping(
self,
sp_token_ids: List[List[int]],
sp_token_boundaries: List[List[List[int]]],
word_boundaries: List[List[List[int]]],
) -> List[List[List[int]]]:
# Prepare list of possible mention start, ends pairs in terms of SP tokens.
if self.mention_boundaries_on_word_boundaries:
token_mapping: List[List[List[int]]] = []
for ex_word_boundaries, ex_sp_token_boundaries in zip(
word_boundaries, sp_token_boundaries
):
ex_token_mapping: List[List[int]] = []
sp_idx = 0
for start, end in ex_word_boundaries:
while (
sp_idx < len(ex_sp_token_boundaries)
and start >= ex_sp_token_boundaries[sp_idx][1]
):
sp_idx += 1
word_sp_start = sp_idx
word_sp_end = sp_idx
while (
word_sp_end < len(ex_sp_token_boundaries)
and end >= ex_sp_token_boundaries[word_sp_end][1]
):
word_sp_end += 1
# check if end token <= max_seq_len - 2 (take into account EOS and BOS tokens)
if word_sp_end <= self.max_seq_len - 2:
                        # shift word_sp_start and word_sp_end by 1 to account for the BOS token
ex_token_mapping.append([word_sp_start + 1, word_sp_end + 1])
else:
break
token_mapping.append(ex_token_mapping)
return token_mapping
else:
# Consider any SP token could be a start or end of the mention.
return [
[
[start, start + 1]
                    for start in range(  # start ranges from 1 up to at most max_seq_len - 2
1, min(len(example_sp_token_ids) - 2, self.max_seq_len - 2) + 1
)
]
for example_sp_token_ids in sp_token_ids
]
def _convert_mention_offsets(
self,
sp_token_boundaries: List[List[List[int]]],
char_offsets: List[List[int]],
char_lengths: List[List[int]],
) -> Tuple[List[List[int]], List[List[int]]]:
# TODO: Doesn't this do something similar to _calculate_token_mapping?
sp_offsets: List[List[int]] = []
sp_lengths: List[List[int]] = []
for example_char_offsets, example_char_lengths, example_token_boundaries in zip(
char_offsets, char_lengths, sp_token_boundaries
):
example_sp_offsets: List[int] = []
example_sp_lengths: List[int] = []
for offset, length in zip(example_char_offsets, example_char_lengths):
                # TODO: There might be a bug here; we need a test that covers the edge cases
token_idx = 0
                while (  # advance past every token that starts at or before the mention offset
token_idx < len(example_token_boundaries)
and example_token_boundaries[token_idx][0] <= offset
):
token_idx += 1
                if (  # step back so token_idx is the last token starting at or before the offset
token_idx == len(example_token_boundaries)
or example_token_boundaries[token_idx][0] != offset
):
token_idx -= 1
example_sp_offsets.append(token_idx)
token_start_idx = token_idx
                while (  # now advance to the first token that ends at or after the end of the mention
token_idx < len(example_token_boundaries)
and example_token_boundaries[token_idx][1] < offset + length
):
token_idx += 1
example_sp_lengths.append(token_idx - token_start_idx + 1)
# take into account BOS token and shift offsets by 1
# also remove all pairs that go beyond max_seq_length - 1
shifted_example_sp_offsets: List[int] = []
for offset, length in zip(example_sp_offsets, example_sp_lengths):
if 1 + offset + length <= self.max_seq_len - 1:
shifted_example_sp_offsets.append(offset + 1)
else:
                    # this mention goes beyond the length limit, skip it and all following mentions
break
example_sp_offsets = shifted_example_sp_offsets
example_sp_lengths = example_sp_lengths[: len(example_sp_offsets)]
sp_offsets.append(example_sp_offsets)
sp_lengths.append(example_sp_lengths)
return sp_offsets, sp_lengths
def _adjust_mention_offsets_and_lengths(
self,
offsets: List[List[int]],
lengths: List[List[int]],
insertions: List[List[int]],
) -> Tuple[List[List[int]], List[List[int]]]:
new_offsets: List[List[int]] = []
new_lengths: List[List[int]] = []
for example_offsets, example_lengths, example_insertions in zip(
offsets, lengths, insertions
):
new_example_offsets: List[int] = []
new_example_lengths: List[int] = []
            # assume that offsets and lengths are sorted by offset
insertion_idx = 0
current_shift = 0
for offset, length in zip(example_offsets, example_lengths):
while (
insertion_idx < len(example_insertions)
and example_insertions[insertion_idx] <= offset
):
current_shift += 1
insertion_idx += 1
new_offset = offset + current_shift
new_length = length
length_insertion_idx = insertion_idx
while (
length_insertion_idx < len(example_insertions)
and example_insertions[length_insertion_idx] < offset + length
):
new_length += 1
length_insertion_idx += 1
new_example_offsets.append(new_offset)
new_example_lengths.append(new_length)
new_offsets.append(new_example_offsets)
new_lengths.append(new_example_lengths)
return new_offsets, new_lengths
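    # Illustrative sketch (hand-traced): with offsets=[[5]], lengths=[[4]] and
    # insertions=[[2, 7]] (spaces inserted before original positions 2 and 7),
    # the method returns ([[6]], [[5]]): the mention shifts by the one insertion
    # before it and grows by the one insertion inside it.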
def _insert_spaces_to_texts(
self, texts: List[str]
) -> Tuple[List[str], List[List[int]]]:
all_texts: List[str] = []
all_insertions: List[List[int]] = []
for text in texts:
out_text, insertions = insert_spaces(text)
all_texts.append(out_text)
all_insertions.append(insertions)
return all_texts, all_insertions
def _align_mention_offsets_to_word_boundaries(
self,
mention_offsets: List[List[int]],
mention_lengths: List[List[int]],
word_boundaries: List[List[List[int]]],
) -> Tuple[List[List[int]], List[List[int]]]:
"""
In some training examples we can face situations where ground
truth offsets point to the middle of the word, ex:
```
Playlist in "#NuevaPlaylist ➡ Desempo"
mente in "simplemente retirarte"
```
we can align the offsets to the word boundaries, so in the examples
above we will mark `NuevaPlaylist` and `simplemente` as mentions.
"""
new_mention_offsets: List[List[int]] = []
new_mention_lengths: List[List[int]] = []
for ex_mention_offsets, ex_mention_length, ex_word_boundaries in zip(
mention_offsets,
mention_lengths,
word_boundaries,
):
starts: List[int] = []
ends: List[int] = []
for wb in ex_word_boundaries:
starts.append(wb[0])
ends.append(wb[1])
ex_new_mention_offsets: List[int] = []
ex_new_mention_lengths: List[int] = []
for offset, length in zip(ex_mention_offsets, ex_mention_length):
start = align_start(offset, starts)
end = align_end(offset + length, ends)
ex_new_mention_offsets.append(start)
ex_new_mention_lengths.append(end - start)
new_mention_offsets.append(ex_new_mention_offsets)
new_mention_lengths.append(ex_new_mention_lengths)
return new_mention_offsets, new_mention_lengths
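    # Illustrative sketch (hand-traced): with word_boundaries=[[[0, 5], [6, 13]]],
    # mention_offsets=[[8]] and mention_lengths=[[3]] (a span in the middle of the
    # second word), the method returns ([[6]], [[7]]), i.e. the mention is widened
    # to cover the whole word [6, 13).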
def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
texts = batch[self.texts_column]
assert torch.jit.isinstance(texts, List[str])
insertions: List[List[int]] = []
if self.insert_spaces:
texts, insertions = self._insert_spaces_to_texts(texts)
word_boundaries = self._calculate_alpha_num_boundaries(texts)
sp_tokens_with_indices: List[List[Tuple[int, int, int]]] = super().forward(texts)
sp_token_ids: List[List[int]] = [
[sp_token for sp_token, _, _ in tokens] for tokens in sp_tokens_with_indices
]
# append bos and eos tokens
sp_token_ids = [[self.bos_idx] + tokens + [self.eos_idx] for tokens in sp_token_ids]
sp_token_boundaries: List[List[List[int]]] = [
[[start, end] for _, start, end in tokens]
for tokens in sp_tokens_with_indices
]
seq_lens: List[int] = [
len(example_token_ids) for example_token_ids in sp_token_ids
]
tokens_mapping: List[List[List[int]]] = self._calculate_token_mapping(
sp_token_ids,
sp_token_boundaries,
word_boundaries,
)
output: Dict[str, Any] = {
self.token_ids_column: sp_token_ids,
self.seq_lens_column: seq_lens,
self.tokens_mapping_column: tokens_mapping,
self.sp_tokens_boundaries_column: sp_token_boundaries,
}
if self.insert_spaces:
output[self.insertions_column] = insertions
if self.mention_offsets_column in batch:
mention_offsets = batch[self.mention_offsets_column]
assert torch.jit.isinstance(mention_offsets, List[List[int]])
mention_lengths = batch[self.mention_lengths_column]
assert torch.jit.isinstance(mention_lengths, List[List[int]])
entities = batch[self.entities_column]
assert torch.jit.isinstance(entities, List[List[int]])
if self.insert_spaces:
(
mention_offsets,
mention_lengths,
) = self._adjust_mention_offsets_and_lengths(
mention_offsets, mention_lengths, insertions
)
if self.align_mention_offsets_to_word_boundaries:
(
mention_offsets,
mention_lengths,
) = self._align_mention_offsets_to_word_boundaries(
mention_offsets,
mention_lengths,
word_boundaries,
)
sp_offsets, sp_lengths = self._convert_mention_offsets(
sp_token_boundaries,
mention_offsets,
mention_lengths,
)
entities: List[List[int]] = [
example_entities[: len(example_mention_offsets)]
for example_entities, example_mention_offsets in zip(
entities, sp_offsets
)
]
mentions_seq_lens: List[int] = [
len(example_mention_offsets) for example_mention_offsets in sp_offsets
]
output[self.mention_offsets_column] = sp_offsets
output[self.mention_lengths_column] = sp_lengths
output[self.mentions_seq_lengths_column] = mentions_seq_lens
output[self.entities_column] = entities
return output
def forward(self, batch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
return self._collate(self.transform(batch)) | BELA-main | bela/transforms/joint_el_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import faiss
import faiss.contrib.torch_utils
import hydra
import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from bela.conf import (
DataModuleConf,
ModelConf,
OptimConf,
TransformConf,
)
import datetime
import os
import numpy as np
logger = logging.getLogger(__name__)
class ClassificationMetrics(NamedTuple):
f1: float
precision: float
recall: float
support: int
tp: int
fp: int
fn: int
# Bag-Of-Entities metrics: we consider targets and predictions as set
# of entities instead of strong matching positions and entities.
boe_f1: float
boe_precision: float
boe_recall: float
boe_support: int
boe_tp: int
boe_fp: int
boe_fn: int
class ClassificationHead(nn.Module):
def __init__(
self,
ctxt_output_dim=768,
):
super(ClassificationHead, self).__init__()
self.mlp = nn.Sequential(
# [mention, candidate, mention - candidate, mention * candidate, md_score, dis_score]
nn.Linear(4 * ctxt_output_dim + 2, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, 1),
)
def forward(self, mentions_repr, entities_repr, md_scores, dis_scores):
features = [
mentions_repr,
entities_repr,
mentions_repr - entities_repr,
mentions_repr * entities_repr,
md_scores,
dis_scores,
]
features = torch.cat(features, 1)
return self.mlp(features)
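    # Shape sketch (illustrative): with ctxt_output_dim=768 and N mention/candidate
    # pairs, `features` has shape (N, 4 * 768 + 2) = (N, 3074), matching the first
    # nn.Linear above; the head outputs one logit per pair, shape (N, 1).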
class SaliencyClassificationHead(nn.Module):
def __init__(
self,
ctxt_output_dim=768,
):
super(SaliencyClassificationHead, self).__init__()
self.mlp = nn.Sequential(
nn.Linear(9 * ctxt_output_dim + 4, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, ctxt_output_dim),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(ctxt_output_dim, 1),
)
def forward(
self, cls_tokens_repr, mentions_repr, entities_repr, md_scores, dis_scores
):
cls_mention_dot_product = torch.sum(
cls_tokens_repr * mentions_repr, 1
).unsqueeze(-1)
cls_entity_dot_product = torch.sum(
cls_tokens_repr * entities_repr, 1
).unsqueeze(-1)
features = [
cls_tokens_repr,
mentions_repr,
entities_repr,
mentions_repr - entities_repr,
mentions_repr * entities_repr,
cls_tokens_repr - mentions_repr,
cls_tokens_repr * mentions_repr,
cls_tokens_repr - entities_repr,
cls_tokens_repr * entities_repr,
md_scores,
dis_scores,
cls_mention_dot_product,
cls_entity_dot_product,
]
features = torch.cat(features, 1)
return self.mlp(features)
class SpanEncoder(nn.Module):
def __init__(
self,
mention_aggregation="linear",
ctxt_output_dim=768,
cand_output_dim=768,
dropout=0.1,
):
super(SpanEncoder, self).__init__()
if mention_aggregation == "linear":
self.mention_mlp = nn.Linear(ctxt_output_dim * 2, cand_output_dim)
# elif mention_aggregation == "mlp":
# self.mention_mlp = nn.Sequential(
# nn.Linear(ctxt_output_dim, ctxt_output_dim),
# nn.ReLU(),
# nn.Dropout(dropout),
# nn.Linear(ctxt_output_dim, cand_output_dim),
# )
else:
raise NotImplementedError()
def forward(self, text_encodings, mention_offsets, mention_lengths):
idx = (
torch.arange(mention_offsets.shape[0])
.unsqueeze(1)
.repeat(1, mention_offsets.shape[1])
)
mention_starts = text_encodings[idx, mention_offsets]
mention_ends = text_encodings[
idx,
mention_lengths + mention_offsets - 1,
]
mention_emb = torch.cat([mention_starts, mention_ends], dim=2)
mention_encodings = self.mention_mlp(mention_emb)
return mention_encodings
class MentionScoresHead(nn.Module):
def __init__(
self,
encoder_output_dim=768,
max_mention_length=10,
):
super(MentionScoresHead, self).__init__()
self.max_mention_length = max_mention_length
self.bound_classifier = nn.Linear(encoder_output_dim, 3)
def forward(self, text_encodings, mask_ctxt, tokens_mapping):
"""
        Returns scores for *inclusive* mention boundaries.
"""
device = text_encodings.device
# (bs, seqlen, 3)
logits = self.bound_classifier(text_encodings)
# (bs, seqlen, 1); (bs, seqlen, 1); (bs, seqlen, 1)
# start_logprobs, end_logprobs, mention_logprobs = logits.split(1, dim=-1)
start_logprobs = logits[:, :, 0].squeeze(-1)
end_logprobs = logits[:, :, 1].squeeze(-1)
mention_logprobs = logits[:, :, 2].squeeze(-1)
# impossible to choose masked tokens as starts/ends of spans
start_logprobs[mask_ctxt != 1] = float("-inf")
end_logprobs[mask_ctxt != 1] = float("-inf")
mention_logprobs[mask_ctxt != 1] = float("-inf")
# take sum of log softmaxes:
# log p(mention) = log p(start_pos && end_pos) = log p(start_pos) + log p(end_pos)
# DIM: (bs, starts, ends)
mention_scores = start_logprobs.unsqueeze(2) + end_logprobs.unsqueeze(1)
# (bs, starts, ends)
mention_cum_scores = torch.zeros(
mention_scores.size(), dtype=mention_scores.dtype
).to(device)
# add ends
mention_logprobs_end_cumsum = torch.zeros(
mask_ctxt.size(0), dtype=mention_scores.dtype
).to(device)
for i in range(mask_ctxt.size(1)):
mention_logprobs_end_cumsum += mention_logprobs[:, i]
mention_cum_scores[:, :, i] += mention_logprobs_end_cumsum.unsqueeze(-1)
# subtract starts
mention_logprobs_start_cumsum = torch.zeros(
mask_ctxt.size(0), dtype=mention_scores.dtype
).to(device)
for i in range(mask_ctxt.size(1) - 1):
mention_logprobs_start_cumsum += mention_logprobs[:, i]
mention_cum_scores[
:, (i + 1), :
] -= mention_logprobs_start_cumsum.unsqueeze(-1)
# DIM: (bs, starts, ends)
mention_scores += mention_cum_scores
# DIM: (starts, ends, 2) -- tuples of [start_idx, end_idx]
mention_bounds = torch.stack(
[
torch.arange(mention_scores.size(1))
.unsqueeze(-1)
.expand(mention_scores.size(1), mention_scores.size(2)), # start idxs
torch.arange(mention_scores.size(1))
.unsqueeze(0)
.expand(mention_scores.size(1), mention_scores.size(2)), # end idxs
],
dim=-1,
).to(device)
# DIM: (starts, ends)
mention_sizes = (
mention_bounds[:, :, 1] - mention_bounds[:, :, 0] + 1
) # (+1 as ends are inclusive)
# Remove invalids (startpos > endpos, endpos > seqlen) and renormalize
# DIM: (bs, starts, ends)
# valid mention starts mask
select_indices = torch.cat(
[
torch.arange(tokens_mapping.shape[0])
.unsqueeze(1)
.repeat(1, tokens_mapping.shape[1])
.unsqueeze(-1),
tokens_mapping[:, :, 0].unsqueeze(-1).to(torch.device("cpu")),
],
-1,
).flatten(0, 1)
token_starts_mask = torch.zeros(mask_ctxt.size(), dtype=mask_ctxt.dtype)
token_starts_mask[select_indices[:, 0], select_indices[:, 1]] = 1
token_starts_mask[:, 0] = 0
# valid mention ends mask
select_indices = torch.cat(
[
torch.arange(tokens_mapping.shape[0])
.unsqueeze(1)
.repeat(1, tokens_mapping.shape[1])
.unsqueeze(-1),
(tokens_mapping[:, :, 1] - 1).unsqueeze(-1).to(torch.device("cpu")),
],
-1,
).flatten(0, 1)
token_ends_mask = torch.zeros(mask_ctxt.size(), dtype=mask_ctxt.dtype)
token_ends_mask[select_indices[:, 0], select_indices[:, 1]] = 1
token_ends_mask[:, 0] = 0
# valid mention starts*ends mask
valid_starts_ends_mask = torch.bmm(
token_starts_mask.unsqueeze(2), token_ends_mask.unsqueeze(1)
).to(device)
valid_mask = (
(mention_sizes.unsqueeze(0) > 0)
& torch.gt(mask_ctxt.unsqueeze(2), 0)
& torch.gt(valid_starts_ends_mask, 0)
)
# DIM: (bs, starts, ends)
        # invalid spans get logprob = -inf (probability 0)
        mention_scores[~valid_mask] = float("-inf")
# DIM: (bs, starts * ends)
mention_scores = mention_scores.view(mention_scores.size(0), -1)
# DIM: (bs, starts * ends, 2)
mention_bounds = mention_bounds.view(-1, 2)
mention_bounds = mention_bounds.unsqueeze(0).expand(
mention_scores.size(0), mention_scores.size(1), 2
)
if self.max_mention_length is not None:
mention_scores, mention_bounds = self.filter_by_mention_size(
mention_scores,
mention_bounds,
)
return mention_scores, mention_bounds
def batch_reshape_mask_left(
self,
input_t: torch.Tensor,
selected: torch.Tensor,
pad_idx: Union[int, float] = 0,
left_align_mask: Optional[torch.Tensor] = None,
):
"""
        Left-aligns all `selected` values in input_t, which is a batch of examples.
- input_t: >=2D tensor (N, M, *)
- selected: 2D torch.Bool tensor, 2 dims same size as first 2 dims of `input_t` (N, M)
- pad_idx represents the padding to be used in the output
- left_align_mask: if already precomputed, pass the alignment mask in
(mask on the output, corresponding to `selected` on the input)
Example:
input_t = [[1,2,3,4],[5,6,7,8]]
selected = [[0,1,0,1],[1,1,0,1]]
output = [[2,4,0],[5,6,8]]
"""
batch_num_selected = selected.sum(1)
max_num_selected = batch_num_selected.max()
# (bsz, 2)
repeat_freqs = torch.stack(
[batch_num_selected, max_num_selected - batch_num_selected], dim=-1
)
# (bsz x 2,)
repeat_freqs = repeat_freqs.view(-1)
if left_align_mask is None:
# (bsz, 2)
left_align_mask = (
torch.zeros(input_t.size(0), 2).to(input_t.device).to(torch.bool)
)
left_align_mask[:, 0] = 1
# (bsz x 2,): [1,0,1,0,...]
left_align_mask = left_align_mask.view(-1)
            # (bsz x max_num_selected,): [1 x repeat_freqs[0], 0 x (M - repeat_freqs[0]), 1 x repeat_freqs[1], 0 x (M - repeat_freqs[1]), ...]
left_align_mask = left_align_mask.repeat_interleave(repeat_freqs)
# (bsz, max_num_selected)
left_align_mask = left_align_mask.view(-1, max_num_selected)
# reshape to (bsz, max_num_selected, *)
input_reshape = (
torch.empty(left_align_mask.size() + input_t.size()[2:])
.to(input_t.device, input_t.dtype)
.fill_(pad_idx)
)
input_reshape[left_align_mask] = input_t[selected]
# (bsz, max_num_selected, *); (bsz, max_num_selected)
return input_reshape, left_align_mask
def prune_ctxt_mentions(
self,
mention_logits: torch.Tensor,
mention_bounds: torch.Tensor,
num_cand_mentions: int,
threshold: float,
):
"""
Prunes mentions based on mention scores/logits (by either
`threshold` or `num_cand_mentions`, whichever yields less candidates)
Inputs:
mention_logits: torch.FloatTensor (bsz, num_total_mentions)
mention_bounds: torch.IntTensor (bsz, num_total_mentions)
num_cand_mentions: int
threshold: float
Returns:
torch.FloatTensor(bsz, max_num_pred_mentions): top mention scores/logits
torch.IntTensor(bsz, max_num_pred_mentions, 2): top mention boundaries
torch.BoolTensor(bsz, max_num_pred_mentions): mask on top mentions
torch.BoolTensor(bsz, total_possible_mentions): mask for reshaping from total possible mentions -> max # pred mentions
"""
# (bsz, num_cand_mentions); (bsz, num_cand_mentions)
num_cand_mentions = min(num_cand_mentions, mention_logits.shape[1])
top_mention_logits, mention_pos = mention_logits.topk(
num_cand_mentions, sorted=True
)
# (bsz, num_cand_mentions, 2)
# [:,:,0]: index of batch
# [:,:,1]: index into top mention in mention_bounds
mention_pos = torch.stack(
[
torch.arange(mention_pos.size(0))
.to(mention_pos.device)
.unsqueeze(-1)
.expand_as(mention_pos),
mention_pos,
],
dim=-1,
)
# (bsz, num_cand_mentions)
top_mention_pos_mask = torch.sigmoid(top_mention_logits) > threshold
# (total_possible_mentions, 2)
# tuples of [index of batch, index into mention_bounds] of what mentions to include
mention_pos = mention_pos[
top_mention_pos_mask
| (
# 2nd part of OR: if nothing is > threshold, use topK that are > -inf
((top_mention_pos_mask.sum(1) == 0).unsqueeze(-1))
& (top_mention_logits > float("-inf"))
)
]
mention_pos = mention_pos.view(-1, 2)
# (bsz, total_possible_mentions)
# mask of possible logits
mention_pos_mask = torch.zeros(mention_logits.size(), dtype=torch.bool).to(
mention_pos.device
)
mention_pos_mask[mention_pos[:, 0], mention_pos[:, 1]] = 1
# (bsz, max_num_pred_mentions, 2)
chosen_mention_bounds, chosen_mention_mask = self.batch_reshape_mask_left(
mention_bounds, mention_pos_mask, pad_idx=0
)
# (bsz, max_num_pred_mentions)
chosen_mention_logits, _ = self.batch_reshape_mask_left(
mention_logits,
mention_pos_mask,
pad_idx=float("-inf"),
left_align_mask=chosen_mention_mask,
)
return (
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
)
def filter_by_mention_size(
self, mention_scores: torch.Tensor, mention_bounds: torch.Tensor
):
"""
Filter all mentions > maximum mention length
mention_scores: torch.FloatTensor (bsz, num_mentions)
mention_bounds: torch.LongTensor (bsz, num_mentions, 2)
"""
# (bsz, num_mentions)
mention_bounds_mask = (
mention_bounds[:, :, 1] - mention_bounds[:, :, 0] <= self.max_mention_length
)
# (bsz, num_filtered_mentions)
mention_scores = mention_scores[mention_bounds_mask]
mention_scores = mention_scores.view(mention_bounds_mask.size(0), -1)
# (bsz, num_filtered_mentions, 2)
mention_bounds = mention_bounds[mention_bounds_mask]
mention_bounds = mention_bounds.view(mention_bounds_mask.size(0), -1, 2)
return mention_scores, mention_bounds
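    # Illustrative sketch (hand-traced): with max_mention_length=10,
    #   mention_scores = tensor([[0.5, 0.9]])
    #   mention_bounds = tensor([[[3, 4], [0, 12]]])
    # the span [0, 12] is dropped (length > 10), giving
    #   mention_scores -> tensor([[0.5]]), mention_bounds -> tensor([[[3, 4]]])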
class JointELTask(LightningModule):
def __init__(
self,
transform: TransformConf,
model: ModelConf,
datamodule: DataModuleConf,
optim: OptimConf,
embeddings_path: str,
faiss_index_path: Optional[str] = None,
n_retrieve_candidates: int = 10,
        eval_compure_recall_at: Tuple[int, ...] = (1, 10, 100),
warmup_steps: int = 0,
load_from_checkpoint: Optional[str] = None,
only_train_disambiguation: bool = False,
train_el_classifier: bool = True,
train_saliency: bool = True,
md_threshold: float = 0.2,
el_threshold: float = 0.4,
saliency_threshold: float = 0.4,
use_gpu_index: bool = False,
):
super().__init__()
# encoder setup
self.encoder_conf = model
self.optim_conf = optim
self.embeddings_path = embeddings_path
self.faiss_index_path = faiss_index_path
self.n_retrieve_candidates = n_retrieve_candidates
self.eval_compure_recall_at = eval_compure_recall_at
self.warmup_steps = warmup_steps
self.load_from_checkpoint = load_from_checkpoint
self.disambiguation_loss = nn.CrossEntropyLoss()
self.md_loss = nn.BCEWithLogitsLoss()
self.el_loss = nn.BCEWithLogitsLoss()
self.saliency_loss = nn.BCEWithLogitsLoss()
self.only_train_disambiguation = only_train_disambiguation
self.train_el_classifier = train_el_classifier
self.train_saliency = train_saliency
self.md_threshold = md_threshold
self.el_threshold = el_threshold
self.saliency_threshold = saliency_threshold
self.use_gpu_index = use_gpu_index
@staticmethod
def _get_encoder_state(state, encoder_name):
encoder_state = OrderedDict()
for key, value in state["state_dict"].items():
if key.startswith(encoder_name):
encoder_state[key[len(encoder_name) + 1 :]] = value
return encoder_state
def setup_gpu_index(self):
gpu_id = self.local_rank
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = gpu_id
flat_config.useFloat16 = True
res = faiss.StandardGpuResources()
self.faiss_index = faiss.GpuIndexFlatIP(res, self.embedding_dim, flat_config)
self.faiss_index.add(self.embeddings)
def setup(self, stage: str):
if stage == "test":
return
        # reset the call_configure_sharded_model_hook attribute so that we can configure the model
self.call_configure_sharded_model_hook = False
self.embeddings = torch.load(self.embeddings_path)
self.embedding_dim = len(self.embeddings[0])
self.embeddings.requires_grad = False
self.encoder = hydra.utils.instantiate(
self.encoder_conf,
)
self.project_encoder_op = nn.Identity()
if self.encoder.embedding_dim != self.embedding_dim:
self.project_encoder_op = nn.Sequential(
nn.Linear(self.encoder.embedding_dim, self.embedding_dim),
nn.LayerNorm(self.embedding_dim),
)
self.span_encoder = SpanEncoder(
ctxt_output_dim=self.embedding_dim,
cand_output_dim=self.embedding_dim,
)
self.mention_encoder = MentionScoresHead(
encoder_output_dim=self.embedding_dim,
)
self.el_encoder = ClassificationHead(
ctxt_output_dim=self.embedding_dim,
)
if self.train_saliency:
self.saliency_encoder = SaliencyClassificationHead(
ctxt_output_dim=self.embedding_dim,
)
if self.load_from_checkpoint is not None:
logger.info(f"Load encoders state from {self.load_from_checkpoint}")
with open(self.load_from_checkpoint, "rb") as f:
checkpoint = torch.load(f, map_location=torch.device("cpu"))
encoder_state = self._get_encoder_state(checkpoint, "encoder")
self.encoder.load_state_dict(encoder_state)
span_encoder_state = self._get_encoder_state(checkpoint, "span_encoder")
self.span_encoder.load_state_dict(span_encoder_state)
project_encoder_op_state = self._get_encoder_state(
checkpoint, "project_encoder_op"
)
if len(project_encoder_op_state) > 0:
self.project_encoder_op.load_state_dict(project_encoder_op_state)
mention_encoder_state = self._get_encoder_state(
checkpoint, "mention_encoder"
)
if len(mention_encoder_state) > 0:
self.mention_encoder.load_state_dict(mention_encoder_state)
el_encoder_state = self._get_encoder_state(checkpoint, "el_encoder")
if len(el_encoder_state) > 0:
self.el_encoder.load_state_dict(el_encoder_state)
saliency_encoder_state = self._get_encoder_state(
checkpoint, "saliency_encoder"
)
if len(saliency_encoder_state) > 0 and self.train_saliency:
self.saliency_encoder.load_state_dict(saliency_encoder_state)
self.optimizer = hydra.utils.instantiate(self.optim_conf, self.parameters())
if self.use_gpu_index:
logger.info(f"Setup GPU index")
self.setup_gpu_index()
# self.embeddings = None
else:
logger.info(f"Setup CPU index")
assert self.faiss_index_path is not None
self.faiss_index = faiss.read_index(self.faiss_index_path)
def sim_score(self, mentions_repr, entities_repr):
# bs x emb_dim , bs x emb_dim
scores = torch.sum(mentions_repr * entities_repr, 1)
return scores
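    # Illustrative sketch (hand-traced): sim_score is a row-wise dot product, e.g.
    #   sim_score(torch.tensor([[1., 0.], [0., 2.]]),
    #             torch.tensor([[2., 3.], [4., 5.]])) -> tensor([2., 10.])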
def forward(
self,
text_inputs,
attention_mask,
mention_offsets,
mention_lengths,
):
# encode query and contexts
_, last_layer = self.encoder(text_inputs, attention_mask)
text_encodings = last_layer
text_encodings = self.project_encoder_op(text_encodings)
mentions_repr = self.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
return text_encodings, mentions_repr
def configure_optimizers(self):
return self.optimizer
def _disambiguation_training_step(
self, mentions_repr, mention_offsets, mention_lengths, entities_ids
):
device = mentions_repr.get_device()
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
flat_entities_ids = entities_ids[mention_lengths != 0]
if flat_mentions_repr.shape[0] == 0:
return None
# obtain positive entities representations
if self.use_gpu_index:
entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in flat_entities_ids.tolist()
]
).to(device)
else:
entities_repr = self.embeddings[flat_entities_ids.to("cpu")].to(device)
# compute scores for positive entities
pos_scores = self.sim_score(flat_mentions_repr, entities_repr)
# retrieve candidates indices
if self.use_gpu_index:
(
_,
neg_cand_indices,
neg_cand_repr,
) = self.faiss_index.search_and_reconstruct(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
self.n_retrieve_candidates,
)
neg_cand_indices = torch.from_numpy(neg_cand_indices).to(device)
neg_cand_repr = torch.from_numpy(neg_cand_repr).to(device)
else:
(
_,
neg_cand_indices,
) = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
self.n_retrieve_candidates,
)
# get candidates embeddings
neg_cand_repr = (
self.embeddings[neg_cand_indices.flatten()]
.reshape(
neg_cand_indices.shape[0], # bs
neg_cand_indices.shape[1], # n_retrieve_candidates
self.embeddings.shape[1], # emb dim
)
.to(device)
)
neg_cand_indices = torch.from_numpy(neg_cand_indices).to(device)
# compute scores (bs x n_retrieve_candidates)
neg_cand_scores = torch.bmm(
flat_mentions_repr.unsqueeze(1), neg_cand_repr.transpose(1, 2)
).squeeze(1)
# zero score for the positive entities
neg_cand_scores[
neg_cand_indices.eq(
flat_entities_ids.unsqueeze(1).repeat([1, self.n_retrieve_candidates])
)
] = float("-inf")
# append positive scores to neg scores (bs x (1 + n_retrieve_candidates))
scores = torch.hstack([pos_scores.unsqueeze(1), neg_cand_scores])
        # construct targets
targets = torch.tensor([0] * neg_cand_scores.shape[0]).to(device)
loss = self.disambiguation_loss(scores, targets)
return loss
def _md_training_step(
self,
text_encodings,
text_pad_mask,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
):
device = text_encodings.get_device()
mention_logits, mention_bounds = self.mention_encoder(
text_encodings,
text_pad_mask,
tokens_mapping,
)
gold_mention_ends = gold_mention_offsets + gold_mention_lengths - 1
gold_mention_bounds = torch.cat(
[gold_mention_offsets.unsqueeze(-1), gold_mention_ends.unsqueeze(-1)], -1
)
gold_mention_bounds[gold_mention_lengths == 0] = -1
gold_mention_pos_idx = (
(
mention_bounds.unsqueeze(1)
- gold_mention_bounds.unsqueeze(
2
) # (bs, num_mentions, start_pos * end_pos, 2)
)
.abs()
.sum(-1)
== 0
).nonzero()
# (bs, total_possible_spans)
gold_mention_binary = torch.zeros(
mention_logits.size(), dtype=mention_logits.dtype
).to(device)
gold_mention_binary[gold_mention_pos_idx[:, 0], gold_mention_pos_idx[:, 2]] = 1
# prune masked spans
mask = mention_logits != float("-inf")
masked_mention_logits = mention_logits[mask]
masked_gold_mention_binary = gold_mention_binary[mask]
return (
self.md_loss(masked_mention_logits, masked_gold_mention_binary),
mention_logits,
mention_bounds,
)
def _el_training_step(
self,
text_encodings,
mention_logits,
mention_bounds,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
):
"""
Train "rejection" head.
Inputs:
text_encodings: last layer output of text encoder
mention_logits: mention scores produced by mention detection head
mention_bounds: mention bounds (start, end (inclusive)) by MD head
gold_mention_offsets: ground truth mention offsets
gold_mention_lengths: ground truth mention lengths
entities_ids: entity ids for ground truth mentions
tokens_mapping: sentencepiece to text token mapping
Returns:
el_loss: sum of entity linking loss over all predicted mentions
"""
device = text_encodings.get_device()
# get predicted mention_offsets and mention_bounds by MD model
(
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
) = self.mention_encoder.prune_ctxt_mentions(
mention_logits,
mention_bounds,
num_cand_mentions=50,
threshold=self.md_threshold,
)
mention_offsets = chosen_mention_bounds[:, :, 0]
mention_lengths = (
chosen_mention_bounds[:, :, 1] - chosen_mention_bounds[:, :, 0] + 1
)
# get mention representations for predicted mentions
mentions_repr = self.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
mention_lengths[mention_offsets == 0] = 0
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
flat_mentions_scores = torch.sigmoid(
chosen_mention_logits[mention_lengths != 0]
)
flat_mentions_repr = flat_mentions_repr[flat_mentions_scores > 0]
# cand_scores, cand_indices = self.faiss_index.search(
# flat_mentions_repr.detach().cpu().numpy(), 1
# )
# cand_scores = torch.from_numpy(cand_scores)
# cand_indices = torch.from_numpy(cand_indices)
cand_scores, cand_indices = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
1,
)
if self.use_gpu_index:
cand_scores = torch.from_numpy(cand_scores)
cand_indices = torch.from_numpy(cand_indices)
# iterate over predicted and gold mentions to create targets for
# predicted mentions
targets = []
for (
e_mention_offsets,
e_mention_lengths,
e_gold_mention_offsets,
e_gold_mention_lengths,
e_entities,
) in zip(
mention_offsets.detach().cpu().tolist(),
mention_lengths.detach().cpu().tolist(),
gold_mention_offsets.cpu().tolist(),
gold_mention_lengths.cpu().tolist(),
entities_ids.cpu().tolist(),
):
e_gold_targets = {
(offset, length): ent
for offset, length, ent in zip(
e_gold_mention_offsets, e_gold_mention_lengths, e_entities
)
}
e_targets = [
e_gold_targets.get((offset, length), -1)
for offset, length in zip(e_mention_offsets, e_mention_lengths)
]
targets.append(e_targets)
targets = torch.tensor(targets, device=device)
flat_targets = targets[mention_lengths != 0][flat_mentions_scores > 0]
md_scores = flat_mentions_scores[flat_mentions_scores > 0].unsqueeze(-1)
# flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
if self.use_gpu_index:
flat_entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in cand_indices.squeeze(1).tolist()
]
).to(device)
else:
flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
cand_scores = cand_scores.to(device)
cand_indices = cand_indices.to(device)
predictions = self.el_encoder(
flat_mentions_repr, flat_entities_repr, md_scores, cand_scores
).squeeze(1)
binary_targets = (flat_targets == cand_indices.squeeze(1)).double()
el_loss = self.el_loss(predictions, binary_targets)
return el_loss
def training_step(self, batch, batch_idx):
"""
        This receives queries, each with multiple contexts.
"""
text_inputs = batch["input_ids"] # bs x mention_len
text_pad_mask = batch["attention_mask"]
gold_mention_offsets = batch["mention_offsets"] # bs x max_mentions_num
gold_mention_lengths = batch["mention_lengths"] # bs x max_mentions_num
entities_ids = batch["entities"] # bs x max_mentions_num
tokens_mapping = batch["tokens_mapping"] # bs x max_tokens_in_input x 2
# mention representations (bs x max_mentions_num x embedding_dim)
text_encodings, mentions_repr = self(
text_inputs, text_pad_mask, gold_mention_offsets, gold_mention_lengths
)
dis_loss = self._disambiguation_training_step(
mentions_repr,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
)
if dis_loss is not None:
self.log("dis_loss", dis_loss, prog_bar=True)
loss = dis_loss
if not self.only_train_disambiguation:
md_loss, mention_logits, mention_bounds = self._md_training_step(
text_encodings,
text_pad_mask,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
)
self.log("md_loss", md_loss, prog_bar=True)
if loss is not None:
loss += md_loss
else:
loss = md_loss
if self.train_el_classifier:
el_loss = self._el_training_step(
text_encodings,
mention_logits,
mention_bounds,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
)
self.log("el_loss", el_loss, prog_bar=True)
loss += el_loss
self.log("train_loss", loss, prog_bar=True)
assert not torch.isnan(loss)
return loss
def _disambiguation_eval_step(
self,
mentions_repr,
mention_offsets,
mention_lengths,
entities_ids,
):
device = mentions_repr.device
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
flat_entities_ids = entities_ids[mention_lengths != 0]
# obtain positive entities representations
# entities_repr = self.embeddings[flat_entities_ids.to("cpu")].to(device)
if self.use_gpu_index:
entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in flat_entities_ids.tolist()
]
).to(device)
else:
entities_repr = self.embeddings[flat_entities_ids.to("cpu")].to(device)
# compute scores for positive entities
pos_scores = self.sim_score(flat_mentions_repr, entities_repr)
# candidates to retrieve
n_retrieve_candidates = max(self.eval_compure_recall_at)
# retrieve negative candidates ids and scores
neg_cand_scores, neg_cand_indices = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32),
n_retrieve_candidates,
)
neg_cand_scores = torch.from_numpy(neg_cand_scores).to(device)
neg_cand_indices = torch.from_numpy(neg_cand_indices).to(device)
# zero score for the positive entities
neg_cand_scores[
neg_cand_indices.eq(
flat_entities_ids.unsqueeze(1).repeat([1, n_retrieve_candidates])
)
] = float("-inf")
# append positive scores to neg scores
scores = torch.hstack([pos_scores.unsqueeze(1), neg_cand_scores])
        # construct targets
targets = torch.tensor([0] * neg_cand_scores.shape[0]).to(device)
loss = self.disambiguation_loss(scores, targets)
# compute recall at (1, 10, 100)
flat_entities_ids = flat_entities_ids.cpu().tolist()
neg_cand_indices = neg_cand_indices.cpu().tolist()
recalls = []
for k in self.eval_compure_recall_at:
recall = sum(
entity_id in cand_entity_ids[:k]
for entity_id, cand_entity_ids in zip(
flat_entities_ids, neg_cand_indices
)
)
recalls.append(recall)
return (
recalls,
len(flat_entities_ids),
loss,
)
def _joint_eval_step(
self,
text_inputs,
text_pad_mask,
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
tokens_mapping,
):
device = text_inputs.device
# encode query and contexts
_, last_layer = self.encoder(text_inputs)
text_encodings = last_layer
text_encodings = self.project_encoder_op(text_encodings)
mention_logits, mention_bounds = self.mention_encoder(
text_encodings, text_pad_mask, tokens_mapping
)
(
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
) = self.mention_encoder.prune_ctxt_mentions(
mention_logits,
mention_bounds,
num_cand_mentions=50,
threshold=self.md_threshold,
)
mention_offsets = chosen_mention_bounds[:, :, 0]
mention_lengths = (
chosen_mention_bounds[:, :, 1] - chosen_mention_bounds[:, :, 0] + 1
)
mentions_repr = self.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
mention_lengths[mention_offsets == 0] = 0
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
mentions_scores = torch.sigmoid(chosen_mention_logits)
# flat_mentions_repr = flat_mentions_repr[flat_mentions_scores > 0]
# retrieve candidates top-1 ids and scores
cand_scores, cand_indices = self.faiss_index.search(
flat_mentions_repr.detach().cpu().numpy().astype(np.float32), 1
)
if self.train_el_classifier:
# flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
if self.use_gpu_index:
flat_entities_repr = torch.stack(
[
self.faiss_index.reconstruct(flat_id)
for flat_id in cand_indices.squeeze(1).tolist()
]
).to(device)
else:
flat_entities_repr = self.embeddings[cand_indices.squeeze(1)].to(device)
flat_mentions_scores = mentions_scores[mention_lengths != 0].unsqueeze(-1)
cand_scores = torch.from_numpy(cand_scores).to(device)
el_scores = torch.sigmoid(
self.el_encoder(
flat_mentions_repr,
flat_entities_repr,
flat_mentions_scores,
cand_scores,
)
).squeeze(1)
gold_mention_offsets = gold_mention_offsets.cpu().tolist()
gold_mention_lengths = gold_mention_lengths.cpu().tolist()
entities_ids = entities_ids.cpu().tolist()
el_targets = []
for offsets, lengths, example_ent_ids in zip(
gold_mention_offsets,
gold_mention_lengths,
entities_ids,
):
el_targets.append(
{
(offset, length): ent_id
for offset, length, ent_id in zip(offsets, lengths, example_ent_ids)
if length != 0
}
)
mention_offsets = mention_offsets.detach().cpu().tolist()
mention_lengths = mention_lengths.detach().cpu().tolist()
mentions_scores = mentions_scores.detach().cpu().tolist()
el_predictions = []
cand_idx = 0
for offsets, lengths, md_scores in zip(
mention_offsets, mention_lengths, mentions_scores
):
example_predictions = {}
for offset, length, md_score in zip(offsets, lengths, md_scores):
if length != 0:
if md_score >= self.md_threshold:
if (
not self.train_el_classifier
or el_scores[cand_idx] >= self.el_threshold
):
example_predictions[(offset, length)] = cand_indices[
cand_idx
][0]
cand_idx += 1
el_predictions.append(example_predictions)
return el_targets, el_predictions
def _eval_step(self, batch, batch_idx):
text_inputs = batch["input_ids"] # bs x mention_len
text_pad_mask = batch["attention_mask"]
mention_offsets = batch["mention_offsets"] # bs x max_mentions_num
mention_lengths = batch["mention_lengths"] # bs x max_mentions_num
entities_ids = batch["entities"] # bs x max_mentions_num
tokens_mapping = batch["tokens_mapping"]
if self.only_train_disambiguation:
text_encodings, mentions_repr = self(
text_inputs, text_pad_mask, mention_offsets, mention_lengths
)
return self._disambiguation_eval_step(
mentions_repr,
mention_offsets,
mention_lengths,
entities_ids,
)
return self._joint_eval_step(
text_inputs,
text_pad_mask,
mention_offsets,
mention_lengths,
entities_ids,
tokens_mapping,
)
def _compute_disambiguation_metrics(self, outputs, log_prefix):
total_recalls = [0] * len(self.eval_compure_recall_at)
total_ent_count = 0
total_loss = 0
for recalls, count, loss in outputs:
for idx in range(len(total_recalls)):
total_recalls[idx] += recalls[idx]
total_ent_count += count
total_loss += loss
metrics = {
log_prefix + "_ent_count": total_ent_count,
log_prefix + "_loss": total_loss,
}
for idx, recall_at in enumerate(self.eval_compure_recall_at):
metrics[log_prefix + f"_recall_at_{recall_at}"] = (
total_recalls[idx] / total_ent_count
)
return metrics
@staticmethod
def calculate_classification_metrics(targets, predictions):
tp, fp, support = 0, 0, 0
boe_tp, boe_fp, boe_support = 0, 0, 0
for example_targets, example_predictions in zip(targets, predictions):
for pos, ent in example_targets.items():
support += 1
if pos in example_predictions and example_predictions[pos] == ent:
tp += 1
for pos, ent in example_predictions.items():
if pos not in example_targets or example_targets[pos] != ent:
fp += 1
example_targets_set = set(example_targets.values())
example_predictions_set = set(example_predictions.values())
for ent in example_targets_set:
boe_support += 1
if ent in example_predictions_set:
boe_tp += 1
for ent in example_predictions_set:
if ent not in example_targets_set:
boe_fp += 1
def compute_f1_p_r(tp, fp, fn):
precision = tp / (tp + fp) if (tp + fp) > 0 else 0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0
f1 = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else 0
return f1, precision, recall
fn = support - tp
boe_fn = boe_support - boe_tp
f1, precision, recall = compute_f1_p_r(tp, fp, fn)
boe_f1, boe_precision, boe_recall = compute_f1_p_r(boe_tp, boe_fp, boe_fn)
return ClassificationMetrics(
f1=f1,
precision=precision,
recall=recall,
support=support,
tp=tp,
fp=fp,
fn=fn,
boe_f1=boe_f1,
boe_precision=boe_precision,
boe_recall=boe_recall,
boe_support=boe_support,
boe_tp=boe_tp,
boe_fp=boe_fp,
boe_fn=boe_fn,
)
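    # Illustrative sketch (hand-traced): with
    #   targets     = [{(0, 2): 5}]
    #   predictions = [{(0, 2): 5, (4, 1): 7}]
    # we get tp=1, fp=1, fn=0 (precision=0.5, recall=1.0, f1=2/3) and the same
    # counts for the bag-of-entities variant, since the entity sets are {5} and {5, 7}.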
def _compute_el_metrics(self, outputs, log_prefix):
el_targets = []
el_predictions = []
for (
batch_el_targets,
batch_el_predictions,
) in outputs:
el_targets.extend(batch_el_targets)
el_predictions.extend(batch_el_predictions)
el_metrics = self.calculate_classification_metrics(el_targets, el_predictions)
metrics = {
log_prefix + "_f1": el_metrics.f1,
log_prefix + "_precision": el_metrics.precision,
log_prefix + "_recall": el_metrics.recall,
log_prefix + "_support": el_metrics.support,
log_prefix + "_tp": el_metrics.tp,
log_prefix + "_fp": el_metrics.fp,
log_prefix + "_fn": el_metrics.fn,
log_prefix + "_boe_f1": el_metrics.boe_f1,
log_prefix + "_boe_precision": el_metrics.boe_precision,
log_prefix + "_boe_recall": el_metrics.boe_recall,
log_prefix + "_boe_support": el_metrics.boe_support,
log_prefix + "_boe_tp": el_metrics.boe_tp,
log_prefix + "_boe_fp": el_metrics.boe_fp,
log_prefix + "_boe_fn": el_metrics.boe_fn,
}
return metrics
def _eval_epoch_end(self, outputs, log_prefix="valid"):
if self.only_train_disambiguation:
metrics = self._compute_disambiguation_metrics(outputs, log_prefix)
else:
metrics = self._compute_el_metrics(outputs, log_prefix)
print("EVAL:")
print(metrics)
self.log_dict(metrics, on_epoch=True, sync_dist=True)
def validation_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def validation_epoch_end(self, valid_outputs):
self._eval_epoch_end(valid_outputs)
def test_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def test_epoch_end(self, test_outputs):
self._eval_epoch_end(test_outputs, "test")
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""
This hook will be called before loading state_dict from a checkpoint.
setup("fit") will build the model before loading state_dict
Args:
checkpoint: A dictionary with variables from the checkpoint.
"""
self.setup("fit")
| BELA-main | bela/task/joint_el_task.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import yaml
from hydra.experimental import compose, initialize_config_module
import hydra
import torch
from tqdm import tqdm
import json
import faiss
import logging
from typing import Union, List, Dict, Any, Tuple
logger = logging.getLogger(__name__)
def load_file(path: Union[str, Path]) -> List[Dict[str, Any]]:
all_data = []
with open(path, 'rt') as fd:
for line in tqdm(fd):
data = json.loads(line)
all_data.append(data)
return all_data
def convert_sp_to_char_offsets(
text: str,
sp_offsets: List[int],
sp_lengths: List[int],
sp_tokens_boundaries: List[List[int]],
) -> Tuple[List[int], List[int]]:
"""
    Converts sentencepiece offsets and lengths to character-level
    offsets and lengths for the given `text`.
"""
char_offsets: List[int] = []
char_lengths: List[int] = []
text_utf8_chars: List[str] = [char for char in text]
for sp_offset, sp_length in zip(sp_offsets, sp_lengths):
# sp_offsets include the cls_token, while the boundaries don't
if sp_offset == 0:
continue
sp_offset = sp_offset - 1
char_offset = sp_tokens_boundaries[sp_offset][0]
char_end = sp_tokens_boundaries[sp_offset + sp_length - 1][1]
# sp token boundaries include whitespaces, so remove them
while text_utf8_chars[char_offset].isspace():
char_offset += 1
assert char_offset < len(text_utf8_chars)
char_offsets.append(char_offset)
char_lengths.append(char_end - char_offset)
return char_offsets, char_lengths
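# A hedged, self-contained usage sketch of the helper above. The token
# boundaries are made up for illustration; a real sentencepiece model may
# split the text differently.
def _example_convert_sp_to_char_offsets() -> None:
    text = "Paris is nice"
    # One [start, end) character span per sentencepiece token (cls excluded):
    # "Paris" -> [0, 5], " is" -> [5, 8], " nice" -> [8, 13]
    sp_tokens_boundaries = [[0, 5], [5, 8], [8, 13]]
    # sp offsets count the cls token at position 0, so "Paris" is at sp offset
    # 1 and " nice" at sp offset 3; both mentions are one sp token long.
    char_offsets, char_lengths = convert_sp_to_char_offsets(
        text,
        sp_offsets=[1, 3],
        sp_lengths=[1, 1],
        sp_tokens_boundaries=sp_tokens_boundaries,
    )
    assert char_offsets == [0, 9] and char_lengths == [5, 4]
    assert text[9:9 + 4] == "nice"  # leading whitespace was skipped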
class ModelEval:
def __init__(
self,
checkpoint_path,
config_name="joint_el_mel",
embeddings_path=None,
ent_catalogue_idx_path=None
):
self.device = torch.device("cuda:0")
logger.info("Create task")
with initialize_config_module("bela/conf"):
cfg = compose(config_name=config_name)
cfg.task.load_from_checkpoint = checkpoint_path
cfg.task.embeddings_path = embeddings_path or cfg.task.embeddings_path
cfg.datamodule.ent_catalogue_idx_path = ent_catalogue_idx_path or cfg.datamodule.ent_catalogue_idx_path
cfg.datamodule.train_path = None
cfg.datamodule.val_path = None
cfg.datamodule.test_path = None
self.checkpoint_path = checkpoint_path
self.transform = hydra.utils.instantiate(cfg.task.transform)
datamodule = hydra.utils.instantiate(cfg.datamodule, transform=self.transform)
self.task = hydra.utils.instantiate(cfg.task, datamodule=datamodule, _recursive_=False)
self.task.setup("train")
self.task = self.task.eval()
self.task = self.task.to(self.device)
self.embeddings = self.task.embeddings
self.faiss_index = self.task.faiss_index
logger.info("Create ent index")
self.ent_idx = []
for ent in datamodule.ent_catalogue.idx:
self.ent_idx.append(ent)
def create_gpu_index(self, gpu_id=0):
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = gpu_id
flat_config.useFloat16 = True
res = faiss.StandardGpuResources()
self.faiss_index = faiss.GpuIndexFlatIP(res, self.embeddings.shape[1], flat_config)
self.faiss_index.add(self.embeddings)
def lookup(
self,
query: torch.Tensor,
):
scores, indices = self.faiss_index.search(query, k=1)
return scores.squeeze(-1).to(self.device), indices.squeeze(-1).to(self.device)
def process_batch(self, texts):
batch: Dict[str, Any] = {"texts": texts}
model_inputs = self.transform(batch)
token_ids = model_inputs["input_ids"].to(self.device)
text_pad_mask = model_inputs["attention_mask"].to(self.device)
tokens_mapping = model_inputs["tokens_mapping"].to(self.device)
sp_tokens_boundaries = model_inputs["sp_tokens_boundaries"].tolist()
with torch.no_grad():
_, last_layer = self.task.encoder(token_ids)
text_encodings = last_layer
text_encodings = self.task.project_encoder_op(text_encodings)
mention_logits, mention_bounds = self.task.mention_encoder(
text_encodings, text_pad_mask, tokens_mapping
)
(
chosen_mention_logits,
chosen_mention_bounds,
chosen_mention_mask,
mention_pos_mask,
) = self.task.mention_encoder.prune_ctxt_mentions(
mention_logits,
mention_bounds,
num_cand_mentions=50,
threshold=self.task.md_threshold,
)
mention_offsets = chosen_mention_bounds[:, :, 0]
mention_lengths = (
chosen_mention_bounds[:, :, 1] - chosen_mention_bounds[:, :, 0] + 1
)
mention_lengths[mention_offsets == 0] = 0
mentions_repr = self.task.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
# flat mentions and entities indices (mentions_num x embedding_dim)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
mentions_scores = torch.sigmoid(chosen_mention_logits)
# retrieve candidates top-1 ids and scores
cand_scores, cand_indices = self.lookup(
flat_mentions_repr.detach()
)
entities_repr = self.embeddings[cand_indices.to(self.embeddings.device)].to(self.device)
chosen_mention_limits: List[int] = (
chosen_mention_mask.int().sum(-1).detach().cpu().tolist()
)
flat_mentions_scores = mentions_scores[mention_lengths != 0].unsqueeze(-1)
cand_scores = cand_scores.unsqueeze(-1)
el_scores = torch.sigmoid(
self.task.el_encoder(
flat_mentions_repr,
entities_repr,
flat_mentions_scores,
cand_scores,
)
).squeeze(1)
predictions = []
cand_idx = 0
example_idx = 0
for offsets, lengths, md_scores in zip(
mention_offsets, mention_lengths, mentions_scores
):
ex_sp_offsets = []
ex_sp_lengths = []
ex_entities = []
ex_md_scores = []
ex_el_scores = []
for offset, length, md_score in zip(offsets, lengths, md_scores):
if length != 0:
if md_score >= self.task.md_threshold:
ex_sp_offsets.append(offset.detach().cpu().item())
ex_sp_lengths.append(length.detach().cpu().item())
ex_entities.append(self.ent_idx[cand_indices[cand_idx].detach().cpu().item()])
ex_md_scores.append(md_score.item())
ex_el_scores.append(el_scores[cand_idx].item())
cand_idx += 1
char_offsets, char_lengths = convert_sp_to_char_offsets(
texts[example_idx],
ex_sp_offsets,
ex_sp_lengths,
sp_tokens_boundaries[example_idx],
)
predictions.append(
{
"offsets": char_offsets,
"lengths": char_lengths,
"entities": ex_entities,
"md_scores": ex_md_scores,
"el_scores": ex_el_scores,
}
)
example_idx += 1
return predictions
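# Each element of the returned `predictions` list describes one input text,
# e.g. (values purely illustrative, not real model output):
# {
#     "offsets":   [0, 19],          # character offsets of detected mentions
#     "lengths":   [5, 6],           # character lengths of those mentions
#     "entities":  ["Q90", "Q142"],  # top-1 retrieved entity ids
#     "md_scores": [0.93, 0.71],     # mention-detection scores (sigmoid)
#     "el_scores": [0.88, 0.64],     # entity-linking scores (sigmoid)
# }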
def process_disambiguation_batch(self, texts, mention_offsets, mention_lengths, entities):
batch: Dict[str, Any] = {
"texts": texts,
"mention_offsets": mention_offsets,
"mention_lengths": mention_lengths,
"entities": entities,
}
model_inputs = self.transform(batch)
token_ids = model_inputs["input_ids"].to(self.device)
mention_offsets = model_inputs["mention_offsets"]
mention_lengths = model_inputs["mention_lengths"]
tokens_mapping = model_inputs["tokens_mapping"].to(self.device)
sp_tokens_boundaries = model_inputs["sp_tokens_boundaries"].tolist()
with torch.no_grad():
_, last_layer = self.task.encoder(token_ids)
text_encodings = last_layer
text_encodings = self.task.project_encoder_op(text_encodings)
mentions_repr = self.task.span_encoder(
text_encodings, mention_offsets, mention_lengths
)
flat_mentions_repr = mentions_repr[mention_lengths != 0]
# retrieve candidates top-1 ids and scores
cand_scores, cand_indices = self.lookup(
flat_mentions_repr.detach()
)
predictions = []
cand_idx = 0
example_idx = 0
for offsets, lengths in zip(
mention_offsets, mention_lengths,
):
ex_sp_offsets = []
ex_sp_lengths = []
ex_entities = []
ex_dis_scores = []
for offset, length in zip(offsets, lengths):
if length != 0:
ex_sp_offsets.append(offset.detach().cpu().item())
ex_sp_lengths.append(length.detach().cpu().item())
ex_entities.append(self.ent_idx[cand_indices[cand_idx].detach().cpu().item()])
ex_dis_scores.append(cand_scores[cand_idx].detach().cpu().item())
cand_idx += 1
char_offsets, char_lengths = convert_sp_to_char_offsets(
texts[example_idx],
ex_sp_offsets,
ex_sp_lengths,
sp_tokens_boundaries[example_idx],
)
predictions.append({
"offsets": char_offsets,
"lengths": char_lengths,
"entities": ex_entities,
"scores": ex_dis_scores
})
example_idx += 1
return predictions
def get_predictions(self, test_data, batch_size=256):
all_predictions = []
for batch_start in tqdm(range(0, len(test_data), batch_size)):
batch = test_data[batch_start : batch_start + batch_size]
texts = [example['original_text'] for example in batch]
predictions = self.process_batch(texts)
all_predictions.extend(predictions)
return all_predictions
def get_disambiguation_predictions(self, test_data, batch_size=256):
all_predictions = []
for batch_start in tqdm(range(0, len(test_data), batch_size)):
batch = test_data[batch_start : batch_start + batch_size]
texts = [example['original_text'] for example in batch]
mention_offsets = [[offset for _, _, _, _, offset, _ in example['gt_entities']] for example in batch]
mention_lengths = [[length for _, _, _, _, _, length in example['gt_entities']] for example in batch]
entities = [[0 for _, _, _, _, _, _ in example['gt_entities']] for example in batch]
predictions = self.process_disambiguation_batch(texts, mention_offsets, mention_lengths, entities)
all_predictions.extend(predictions)
return all_predictions
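# The unpacking above assumes each `gt_entities` row follows the layout
# (_, _, ent_id, source, offset, length), e.g. [0, 0, "Q90", "wiki", 27, 5];
# only the character offset and length are used here, and the `entities`
# passed to the transform are zero placeholders.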
@staticmethod
def compute_scores(data, predictions, md_threshold=0.2, el_threshold=0.05):
tp, fp, support = 0, 0, 0
tp_boe, fp_boe, support_boe = 0, 0, 0
predictions_per_example = []
for example, example_predictions in zip(data, predictions):
example_targets = {
(offset, length): ent_id
for _, _, ent_id, _, offset, length in example['gt_entities']
}
example_predictions = {
(offset, length): ent_id
for offset, length, ent_id, md_score, el_score in zip(
example_predictions['offsets'],
example_predictions['lengths'],
example_predictions['entities'],
example_predictions['md_scores'],
example_predictions['el_scores'],
)
if (el_score > el_threshold and md_score > md_threshold)
}
predictions_per_example.append((len(example_targets), len(example_predictions)))
for pos, ent in example_targets.items():
support += 1
if pos in example_predictions and example_predictions[pos] == ent:
tp += 1
for pos, ent in example_predictions.items():
if pos not in example_targets or example_targets[pos] != ent:
fp += 1
example_targets_set = set(example_targets.values())
example_predictions_set = set(example_predictions.values())
for ent in example_targets_set:
support_boe += 1
if ent in example_predictions_set:
tp_boe += 1
for ent in example_predictions_set:
if ent not in example_targets_set:
fp_boe += 1
def safe_division(a, b):
if b == 0:
return 0
else:
return a / b
def compute_f1_p_r(tp, fp, fn):
precision = safe_division(tp, (tp + fp))
recall = safe_division(tp, (tp + fn))
f1 = safe_division(2 * tp, (2 * tp + fp + fn))
return f1, precision, recall
fn = support - tp
fn_boe = support_boe - tp_boe
return compute_f1_p_r(tp, fp, fn), compute_f1_p_r(tp_boe, fp_boe, fn_boe) | BELA-main | bela/evaluation/model_eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, Any
# @manual "//github/facebookresearch/hydra:hydra"
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
defaults = [
"_self_",
{"task": "joint_el_task"},
# Model
# {"task/model": "xlmr"},
# Transform
# {"task/transform": "joint_el_xlmr_transform"},
# Optim
# {"task/optim": "adamw"},
# Data
# {"datamodule": "joint_el_datamodule"},
# Trainer
# {"trainer": "gpu_1_host"},
{"checkpoint_callback": "default"},
]
@dataclass
class MainConfig:
defaults: List[Any] = field(default_factory=lambda: defaults)
task: Any = MISSING
datamodule: Any = MISSING
trainer: Any = MISSING
test_only: bool = False
checkpoint_callback: Any = MISSING
cs = ConfigStore.instance()
cs.store(name="config", node=MainConfig)
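# A minimal sketch of how this config node is typically consumed; the exact
# entry point in this repo may differ and `main` below is hypothetical:
#
#   @hydra.main(config_path=None, config_name="config")
#   def main(cfg: MainConfig) -> None:
#       task = hydra.utils.instantiate(cfg.task, _recursive_=False)
#       ...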
| BELA-main | bela/conf/config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from . import config
@dataclass
class TransformConf:
pass
@dataclass
class DataModuleConf:
pass
@dataclass
class OptimConf:
pass
@dataclass
class ModelConf:
pass
| BELA-main | bela/conf/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from itertools import product
from tqdm import tqdm
import numpy as np
from bela.evaluation.model_eval import ModelEval, load_file
from bela.utils.prediction_utils import get_predictions_using_windows
if __name__ == "__main__":
# Finetuned on aida
checkpoint_path = "/checkpoints/movb/bela/aida/lightning_logs/version_0/checkpoints/checkpoint_1.ckpt"
model_eval = ModelEval(checkpoint_path, config_name="joint_el_mel_new_index")
results = {}
print(f"{model_eval.checkpoint_path=}")
for md_threshold, el_threshold in tqdm(list(product(np.arange(0, 0.6, 0.2), repeat=2))):
model_eval.task.md_threshold = md_threshold
model_eval.task.el_threshold = el_threshold
print(f"{model_eval.task.md_threshold=}")
print(f"{model_eval.task.el_threshold=}")
test_data_path = "/fsx/louismartin/bela/retrieved_from_aws_backup/ndecao/TACKBP2015/train_bela_format_all_languages.jsonl"
print(f"Processing {test_data_path}")
test_data = load_file(test_data_path)
predictions = get_predictions_using_windows(model_eval, test_data, window_length=256)
(f1, precision, recall), (f1_boe, precision_boe, recall_boe) = ModelEval.compute_scores(test_data, predictions)
print(f"F1 = {f1:.4f}, precision = {precision:.4f}, recall = {recall:.4f}")
print(f"F1 boe = {f1_boe:.4f}, precision = {precision_boe:.4f}, recall = {recall_boe:.4f}")
results[(md_threshold, el_threshold)] = (f1, precision, recall), (f1_boe, precision_boe, recall_boe)
print(sorted(results.items(), key=lambda x: x[1][0][0], reverse=True))
pickle_path = Path.home() / "tmp/grid_search_thresholds.pkl"
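# `pickle_path` is defined above but never written to in the original script;
# a minimal sketch of persisting the grid-search results would be:
#
#   import pickle
#   pickle_path.parent.mkdir(parents=True, exist_ok=True)
#   with open(pickle_path, "wb") as f:
#       pickle.dump(results, f)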
| BELA-main | scripts/grid_search_thresholds.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
warnings.filterwarnings('ignore')
import yaml
from hydra.experimental import compose, initialize_config_module
import hydra
import torch
from tqdm import tqdm
import json
import faiss
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Optional, List, Dict, Any, Tuple
from bela.transforms.spm_transform import SPMTransform
from bela.evaluation.model_eval import ModelEval, load_file
from bela.utils.prediction_utils import get_predictions_using_windows, get_sp_transform
def evaluate_model_e2e(checkpoint_path, datasets, md_threshold=0.2, el_threshold=0.4, embeddings_path=None, ent_catalogue_idx_path=None):
print(f"Loading model from checkpoint {checkpoint_path}")
model_eval = ModelEval(
checkpoint_path=checkpoint_path,
config_name="joint_el_mel_new",
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path
)
model_eval.task.md_threshold = md_threshold
model_eval.task.el_threshold = el_threshold
for test_data_path in datasets:
print(f"Processing {test_data_path}")
test_data = load_file(test_data_path)
for sample in test_data:
if "data_example_id" in sample:
sample["document_id"] = sample["data_example_id"]
predictions = get_predictions_using_windows(model_eval, test_data, window_length=254, window_overlap=10, do_merge_predictions=True)
(f1, precision, recall), (f1_boe, precision_boe, recall_boe) = ModelEval.compute_scores(test_data, predictions)
print(f"F1 = {f1:.4f}, precision = {precision:.4f}, recall = {recall:.4f}")
def convert_examples_for_disambiguation(test_data, transform, skip_unknown_ent_ids=False, ent_idx=None):
old_max_seq_len = transform.max_seq_len
transform.max_seq_len = 10000
new_examples = []
max_mention_token_pos_in_text = 192
skipped_ent_ids = 0
for example in tqdm(test_data):
text = example['original_text']
outputs = transform(dict(texts=[text]))
sp_token_boundaries = outputs['sp_tokens_boundaries'][0]
for _, _, ent_id, _, offset, length in example['gt_entities']:
if skip_unknown_ent_ids and ent_idx is not None and ent_id not in ent_idx:
skipped_ent_ids += 1
continue
token_pos = 0
while token_pos < len(sp_token_boundaries) and offset >= sp_token_boundaries[token_pos][1]:
token_pos += 1
new_text = text
new_offset = offset
if token_pos > max_mention_token_pos_in_text:
shift = sp_token_boundaries[token_pos-max_mention_token_pos_in_text][0].item()
new_text = new_text[shift:]
new_offset = new_offset - shift
assert text[offset:offset+length] == new_text[new_offset:new_offset+length]
new_example = {
'original_text': new_text,
'gt_entities': [[0,0,ent_id,'wiki',new_offset,length]],
}
new_examples.append(new_example)
transform.max_seq_len = old_max_seq_len
return new_examples, skipped_ent_ids
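# Illustrative effect of the conversion above (offsets are hypothetical): an
# input example with three gt_entities rows becomes three new examples, each
# with a single gt_entities row [0, 0, ent_id, 'wiki', offset, length]; when
# the mention starts beyond ~192 sp tokens, the text is left-truncated and the
# offset shifted so the mention stays within the encoder window.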
def metrics_disambiguation(test_data, predictions):
support = 0
correct = 0
for example_idx, (example, prediction) in tqdm(enumerate(zip(test_data, predictions))):
if len(prediction['entities']) == 0:
continue
target = example['gt_entities'][0][2]
prediction = prediction['entities'][0]
correct += (target == prediction)
support += 1
accuracy = correct/support
return accuracy, support
def evaluate_model_dis(checkpoint_path, datasets, embeddings_path=None, ent_catalogue_idx_path=None):
print(f"Loading model from checkpoint {checkpoint_path}")
model_eval = ModelEval(
checkpoint_path=checkpoint_path,
config_name="joint_el_mel_new",
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path
)
for test_data_path in datasets:
print(f"Processing {test_data_path}")
test_data = load_file(test_data_path)
test_data_for_disambiguation, skipped = convert_examples_for_disambiguation(test_data, model_eval.transform)
predictions = model_eval.get_disambiguation_predictions(test_data_for_disambiguation)
accuracy, support = metrics_disambiguation(test_data_for_disambiguation, predictions)
print(f"Accuracy {accuracy}, support {support}, skipped {skipped}")
embeddings_path = "./models/embeddings.pt"
ent_catalogue_idx_path = "./models/index.txt"
print("End-to-end EL performance on Mewsli-9’-test (under-labeled for end-to-end linking)")
checkpoint_path = "./models/model_mewsli.ckpt"
datasets = [
'./data/mewsli-9-splitted/ar.jsonl_test',
'./data/mewsli-9-splitted/de.jsonl_test',
'./data/mewsli-9-splitted/en.jsonl_test',
'./data/mewsli-9-splitted/es.jsonl_test',
'./data/mewsli-9-splitted/fa.jsonl_test',
'./data/mewsli-9-splitted/ja.jsonl_test',
'./data/mewsli-9-splitted/sr.jsonl_test',
'./data/mewsli-9-splitted/ta.jsonl_test',
'./data/mewsli-9-splitted/tr.jsonl_test',
]
evaluate_model_e2e(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
print("End-to-end EL performance on Mewsli-9’-test (labeled for end-to-end linking)")
checkpoint_path = "./models/model_e2e.ckpt"
datasets = [
'./data/mewsli-9-labelled/ar_labelled.jsonl',
'./data/mewsli-9-labelled/de_labelled.jsonl',
'./data/mewsli-9-labelled/en_labelled.jsonl',
'./data/mewsli-9-labelled/es_labelled.jsonl',
'./data/mewsli-9-labelled/fa_labelled.jsonl',
'./data/mewsli-9-labelled/ja_labelled.jsonl',
'./data/mewsli-9-labelled/sr_labelled.jsonl',
'./data/mewsli-9-labelled/ta_labelled.jsonl',
'./data/mewsli-9-labelled/tr_labelled.jsonl',
]
evaluate_model_e2e(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
print("End-to-end results on AIDA")
checkpoint_path = "./models/model_aida.ckpt"
datasets = [
'./data/aida/aida_testb.jsonl_wikidata',
]
evaluate_model_e2e(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
)
print("ED accuracy on Mewsli-9")
checkpoint_path = "./models/model_wiki.ckpt"
datasets = [
'./data/mewsli-9/ar.jsonl',
'./data/mewsli-9/de.jsonl',
'./data/mewsli-9/en.jsonl',
'./data/mewsli-9/es.jsonl',
'./data/mewsli-9/fa.jsonl',
'./data/mewsli-9/ja.jsonl',
'./data/mewsli-9/sr.jsonl',
'./data/mewsli-9/ta.jsonl',
'./data/mewsli-9/tr.jsonl',
]
evaluate_model_dis(
checkpoint_path=checkpoint_path,
datasets=datasets,
embeddings_path=embeddings_path,
ent_catalogue_idx_path=ent_catalogue_idx_path,
) | BELA-main | scripts/evaluate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import pickle
import re
import pandas
import jsonlines
from mgenre.utils import chunk_it, get_wikidata_ids
from tqdm.auto import tqdm, trange
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
type=str,
default="/checkpoint/ndecao/xlwikifier-wikidata/data",
)
parser.add_argument(
"--output_dir",
type=str,
default="/checkpoint/ndecao/TR2016",
)
parser.add_argument(
"--base_wikidata",
type=str,
default="/checkpoint/ndecao/wikidata",
)
parser.add_argument(
"-d",
"--debug",
help="Print lots of debugging statements",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
"-v",
"--verbose",
help="Be verbose",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args, _ = parser.parse_known_args()
logging.basicConfig(level=args.loglevel)
filename = os.path.join(args.base_wikidata, "lang_title2wikidataID.pkl")
logging.info("Loading {}".format(filename))
with open(filename, "rb") as f:
lang_title2wikidataID = pickle.load(f)
filename = os.path.join(args.base_wikidata, "lang_redirect2title.pkl")
logging.info("Loading {}".format(filename))
with open(filename, "rb") as f:
lang_redirect2title = pickle.load(f)
filename = os.path.join(args.base_wikidata, "label_or_alias2wikidataID.pkl")
logging.info("Loading {}".format(filename))
with open(filename, "rb") as f:
label_or_alias2wikidataID = pickle.load(f)
for lang in os.listdir(args.input_dir):
logging.info("Converting {}".format(lang))
for split in ("test", "train"):
kilt_dataset = []
for filename in tqdm(
set(
".".join(e.split(".")[:-1])
for e in os.listdir(os.path.join(args.input_dir, lang, split))
)
):
with open(
os.path.join(args.input_dir, lang, split, filename + ".txt")
) as f:
doc = f.read()
with open(
os.path.join(args.input_dir, lang, split, filename + ".mentions")
) as f:
mentions = f.readlines()
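# Each ".mentions" line is expected to be tab-separated as
# "<start>\t<end>\t<?>\t<title>\t<is_hard>" (the third field is unused here),
# e.g. "10\t15\t_\tParis\t1" marks a hard mention spanning characters 10-15.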
for i, mention in enumerate(mentions):
start, end, _, title, is_hard = mention.strip().split("\t")
start, end, is_hard = int(start), int(end), bool(int(is_hard))
wikidataIDs = get_wikidata_ids(
title.replace("_", " "),
lang,
lang_title2wikidataID,
lang_redirect2title,
label_or_alias2wikidataID,
)[0]
meta = {
"left_context": doc[:start].strip(),
"mention": doc[start:end].strip(),
"right_context": doc[end:].strip(),
}
item = {
"id": "TR2016-{}-{}-{}".format(lang, filename, i),
"input": (
meta["left_context"]
+ " [START] "
+ meta["mention"]
+ " [END] "
+ meta["right_context"]
),
"output": [{"answer": list(wikidataIDs)}],
"meta": meta,
"is_hard": is_hard,
}
kilt_dataset.append(item)
filename = os.path.join(
args.output_dir, "{}-kilt-{}.jsonl".format(lang, split)
)
logging.info("Saving {}".format(filename))
with jsonlines.open(filename, "w") as f:
f.write_all(kilt_dataset)
kilt_dataset = [e for e in kilt_dataset if e["is_hard"]]
filename = os.path.join(
args.output_dir, "{}-hard.jsonl".format(filename.split(".")[0])
)
logging.info("Saving {}".format(filename))
with jsonlines.open(filename, "w") as f:
f.write_all(kilt_dataset) | BELA-main | preprocessing_scripts/preprocess_TR2016.py |
BELA-main | mblink/__init__.py |