# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import hydra
from mblink.conf.config import MainConfig
from omegaconf import OmegaConf
from pytorch_lightning.trainer import Trainer
@hydra.main(config_path="conf", config_name="config")
def main(cfg: MainConfig):
print(OmegaConf.to_yaml(cfg))
os.environ["NCCL_NSOCKS_PERTHREAD"] = "4"
os.environ["NCCL_SOCKET_NTHREADS"] = "2"
if cfg.get("debug_mode"):
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
os.environ["PL_SKIP_CPU_COPY_ON_DDP_TEARDOWN"] = "1"
task = hydra.utils.instantiate(cfg.task, _recursive_=False)
assert cfg.task.model.model_path == cfg.task.transform.model_path
transform = hydra.utils.instantiate(cfg.task.transform)
datamodule = hydra.utils.instantiate(cfg.datamodule, transform=transform)
checkpoint_callback = hydra.utils.instantiate(cfg.checkpoint_callback)
trainer = Trainer(**cfg.trainer, callbacks=[checkpoint_callback])
if cfg.test_only:
ckpt_path = cfg.task.load_from_checkpoint
trainer.test(
model=task,
ckpt_path=ckpt_path,
verbose=True,
datamodule=datamodule,
)
else:
trainer.fit(task, datamodule=datamodule)
print(f"*** Best model path is {checkpoint_callback.best_model_path}")
trainer.test(
model=None,
ckpt_path="best",
verbose=True,
datamodule=datamodule,
)
if __name__ == "__main__":
main()
| BELA-main | mblink/main.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import mmap
from typing import List
import torch
from pytorch_lightning import LightningDataModule
from mblink.utils.utils import (
EntityCatalogueType,
EntityCatalogue,
ElDatasetType,
MultilangEntityCatalogue,
NegativesStrategy,
order_entities,
)
from mblink.transforms.blink_transform import BlinkTransform
logger = logging.getLogger()
class ElMatchaDataset(torch.utils.data.Dataset):
"""
A memory-mapped dataset for EL in the Matcha format.
Each example in this dataset contains several mentions.
We also filter out mentions that are not present in the entity catalogue.
"""
def __init__(
self, path, ent_catalogue, negatives=False, negatives_strategy="higher"
):
self.ent_catalogue = ent_catalogue
self.negatives = negatives
self.negatives_strategy = NegativesStrategy(negatives_strategy)
self.file = open(path, mode="r")
self.mm = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
self.offsets = []
self.count = 0
logger.info(f"Build mmap index for {path}")
line = self.mm.readline()
offset = 0
while line:
data = json.loads(line)
for gt_ent_idx, gt_entity in enumerate(data["gt_entities"]):
ent = gt_entity[2]
if ent in self.ent_catalogue:
self.offsets.append((offset, gt_ent_idx))
self.count += 1
offset = self.mm.tell()
line = self.mm.readline()
def __len__(self):
return self.count
def __getitem__(self, index):
offset, gt_ent_idx = self.offsets[index]
self.mm.seek(offset)
line = self.mm.readline()
data = json.loads(line)
_, _, gt_entity, _, offset, length = data["gt_entities"][gt_ent_idx]
entity_index, entity_tokens = self.ent_catalogue[gt_entity]
text = data['original_text']
result = {
"context_left": " ".join(text[:offset]),
"mention": " ".join(text[offset : offset + length]),
"context_right": " ".join(text[offset + length :]),
"entity_id": gt_entity,
"entity_index": entity_index,
"entity_tokens": entity_tokens,
}
if self.negatives:
assert "gt_hard_negatives" in data
neg_entities_ids = []
neg_entities_indexes = []
neg_entities_tokens = []
for ent in data["gt_hard_negatives"][gt_ent_idx]:
if (
ent == gt_entity
and self.negatives_strategy == NegativesStrategy.HIGHER
):
break
entity_index, entity_tokens = self.ent_catalogue[ent]
neg_entities_ids.append(ent)
neg_entities_indexes.append(entity_index)
neg_entities_tokens.append(entity_tokens)
result["neg_entities_ids"] = neg_entities_ids
result["neg_entities_indexes"] = neg_entities_indexes
result["neg_entities_tokens"] = neg_entities_tokens
return result
class ElBlinkDataset(torch.utils.data.Dataset):
"""
A memory-mapped dataset for EL in the BLINK format.
Each example in this dataset contains one mention.
We also filter out mentions that are not present in the entity catalogue.
"""
def __init__(
self, path, ent_catalogue, negatives=False, negatives_strategy="higher"
):
self.ent_catalogue = ent_catalogue
self.file = open(path, mode="r")
self.mm = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
self.offsets = []
self.count = 0
logger.info(f"Build mmap index for {path}")
line = self.mm.readline()
offset = 0
while line:
data = json.loads(line)
if data["entity_id"] in self.ent_catalogue:
self.offsets.append(offset)
self.count += 1
offset = self.mm.tell()
line = self.mm.readline()
def __len__(self):
return self.count
def __getitem__(self, index):
offset = self.offsets[index]
self.mm.seek(offset)
line = self.mm.readline()
data = json.loads(line)
entity_id = data["entity_id"]
entity_index, entity_tokens = self.ent_catalogue[entity_id]
return {
"context_left": data["context_left"],
"mention": data["mention"],
"context_right": data["context_right"],
"entity_id": entity_id,
"entity_index": entity_index,
"entity_tokens": entity_tokens,
}
class ElBiEncoderDataModule(LightningDataModule):
"""
Read data from an EL dataset and prepare mention/entity pair tensors
"""
def __init__(
self,
transform: BlinkTransform,
# Dataset args
train_path: str,
val_path: str,
test_path: str,
ent_catalogue_path: str,
ent_catalogue_idx_path: str,
dataset_type: str = "matcha",
ent_catalogue_type: str = "simple",
batch_size: int = 2,
negatives: bool = False,
negatives_strategy: str = "higher",
max_negative_entities_in_batch: int = 0,
drop_last: bool = False, # drop last batch if len(dataset) not multiple of batch_size
num_workers: int = 0, # increasing this bugs out right now
*args,
**kwargs,
):
super().__init__()
self.batch_size = batch_size
self.negatives = negatives
self.max_negative_entities_in_batch = max_negative_entities_in_batch
self.drop_last = drop_last
self.num_workers = num_workers
self.transform = transform
ent_catalogue_type = EntityCatalogueType(ent_catalogue_type)
if ent_catalogue_type == EntityCatalogueType.SIMPLE:
self.ent_catalogue = EntityCatalogue(
ent_catalogue_path, ent_catalogue_idx_path
)
elif ent_catalogue_type == EntityCatalogueType.MULTI:
self.ent_catalogue = MultilangEntityCatalogue(
ent_catalogue_path, ent_catalogue_idx_path
)
else:
raise NotImplementedError(
f"Unknown ent_catalogue_type {ent_catalogue_type}"
)
dataset_type = ElDatasetType(dataset_type)
if dataset_type == ElDatasetType.MATCHA:
dataset_cls = ElMatchaDataset
elif dataset_type == ElDatasetType.BLINK:
dataset_cls = ElBlinkDataset
else:
raise NotImplementedError(f"Unknown dataset_type {dataset_type}")
self.datasets = {
"train": dataset_cls(
train_path,
self.ent_catalogue,
negatives=negatives,
negatives_strategy=negatives_strategy,
),
"valid": dataset_cls(val_path, self.ent_catalogue),
"test": dataset_cls(test_path, self.ent_catalogue),
}
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["train"],
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_train,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["valid"],
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
self.datasets["test"],
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_eval,
)
def collate_eval(self, batch):
return self.collate(batch, False)
def collate_train(self, batch):
return self.collate(batch, True)
def collate(self, batch, is_train):
"""
Prepare mention, entity tokens and target tensors
"""
if self.negatives and is_train:
(
left_context,
mention,
right_context,
_,
entity_ids,
entity_token_ids,
_,
neg_entities_ids,
neg_entities_tokens,
) = zip(*[item.values() for item in batch])
else:
left_context, mention, right_context, _, entity_ids, entity_token_ids = zip(
*[item.values() for item in batch]
)
neg_entities_ids = None
neg_entities_tokens = None
entity_token_ids, entity_ids, targets = order_entities(
entity_token_ids,
entity_ids,
neg_entities_ids,
neg_entities_tokens,
self.max_negative_entities_in_batch,
)
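# Pad the entity side to a fixed size (batch size + max negatives in batch) with
# dummy [BOS, EOS] entries; entity_tensor_mask marks real entities with 1 and
# padding with 0 so the task can drop the padding before scoring.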
pad_length = (
len(batch) + self.max_negative_entities_in_batch - len(entity_token_ids)
)
entity_tensor_mask = [1] * len(entity_token_ids) + [0] * pad_length
entity_token_ids += [
[self.transform.bos_idx, self.transform.eos_idx]
] * pad_length
entity_ids += [0] * pad_length
mention_tensors, entity_tensors = self.transform(
{
"left_context": left_context,
"mention": mention,
"right_context": right_context,
"token_ids": entity_token_ids,
}
)
entity_ids = torch.tensor(entity_ids, dtype=torch.long)
targets = torch.tensor(targets, dtype=torch.long)
entity_tensor_mask = torch.tensor(entity_tensor_mask, dtype=torch.long)
return {
"mentions": mention_tensors,
"entities": entity_tensors,
"entity_ids": entity_ids,
"targets": targets,
"entity_tensor_mask": entity_tensor_mask,
}
| BELA-main | mblink/datamodule/blink_datamodule.py |
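# A minimal usage sketch for the ElBiEncoderDataModule defined above (assumes
# hypothetical local files "train.jsonl", "valid.jsonl", "test.jsonl",
# "catalogue.h5" and "catalogue.idx"; it mirrors the instantiation used in the
# unit tests below rather than the project's Hydra configs):
#
#   from mblink.transforms.blink_transform import BlinkTransform
#   from mblink.datamodule.blink_datamodule import ElBiEncoderDataModule
#
#   transform = BlinkTransform(model_path="bert-large-uncased",
#                              max_mention_len=32, max_entity_len=64)
#   dm = ElBiEncoderDataModule(
#       transform=transform,
#       train_path="train.jsonl",
#       val_path="valid.jsonl",
#       test_path="test.jsonl",
#       ent_catalogue_path="catalogue.h5",
#       ent_catalogue_idx_path="catalogue.idx",
#       batch_size=2,
#   )
#   batch = next(iter(dm.train_dataloader()))
#   # batch is a dict with "mentions", "entities", "entity_ids", "targets",
#   # and "entity_tensor_mask"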
| BELA-main | mblink/tests/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import tempfile
import random
import torch
import h5py
import numpy as np
import torch
from mblink.datamodule.blink_datamodule import (
ElBlinkDataset,
ElMatchaDataset,
ElBiEncoderDataModule,
EntityCatalogue,
MultilangEntityCatalogue,
)
from mblink.transforms.blink_transform import (
BlinkTransform,
)
from mblink.utils.utils import assert_equal_tensor_dict
class TestBiEncoderELDataModule(unittest.TestCase):
def setUp(self):
random.seed(0)
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_matcha.jsonl")
self.ent_catalogue_path = os.path.join(self.base_dir, "el_catalogue.h5")
self.ent_catalogue_idx_path = os.path.join(self.base_dir, "el_catalogue.idx")
self.transform = BlinkTransform(
model_path="bert-large-uncased",
max_mention_len=12,
max_entity_len=64,
add_eos_bos_to_entity=True,
)
self.tokens = {
"London": [
2414,
2003,
1996,
3007,
1998,
2922,
2103,
1997,
2563,
1998,
1996,
2142,
2983,
],
"Chelsea F.C.": [
9295,
2374,
2252,
2003,
2019,
2394,
2658,
2374,
2252,
2241,
1999,
21703,
1010,
2414,
1012,
],
}
def test_ent_catalogue(self):
ent_catalogue = EntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
self.assertIn("London", ent_catalogue)
self.assertIn("Chelsea F.C.", ent_catalogue)
self.assertNotIn("Moscow", ent_catalogue)
idx, data = ent_catalogue["London"]
self.assertEqual(idx, 0)
self.assertSequenceEqual(data, self.tokens["London"])
idx, data = ent_catalogue["Chelsea F.C."]
self.assertEqual(idx, 1)
self.assertSequenceEqual(data, self.tokens["Chelsea F.C."])
def test_el_matcha_dataset(self):
ent_catalogue = EntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
ds = ElMatchaDataset(path=self.data_path, ent_catalogue=ent_catalogue)
self.assertEqual(len(ds), 3)
self.assertEqual(
ds[0],
{
"context_left": "",
"mention": "Chelsea Football Club",
"context_right": "is an English professional football club based in Fulham London",
"entity_id": "Chelsea F.C.",
"entity_index": 1,
"entity_tokens": self.tokens["Chelsea F.C."],
},
)
self.assertEqual(
ds[1],
{
"context_left": "Chelsea Football Club is an English professional football club based in Fulham",
"mention": "London",
"context_right": "",
"entity_id": "London",
"entity_index": 0,
"entity_tokens": self.tokens["London"],
},
)
self.assertEqual(
ds[2],
{
"context_left": "In",
"mention": "London",
"context_right": "the capital of England and the United Kingdom",
"entity_id": "London",
"entity_index": 0,
"entity_tokens": self.tokens["London"],
},
)
def test_el_bi_encoder_data_module(self):
dm = ElBiEncoderDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
batch_size=2,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 2)
expected_batches = [
{
"mentions": {
"input_ids": torch.tensor(
[
[
101,
1,
9295,
2374,
2252,
2,
2003,
2019,
2394,
2658,
2374,
102,
],
[
101,
2394,
2658,
2374,
2252,
2241,
1999,
21703,
1,
2414,
2,
102,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
101,
9295,
2374,
2252,
2003,
2019,
2394,
2658,
2374,
2252,
2241,
1999,
21703,
1010,
2414,
1012,
102,
],
[
101,
2414,
2003,
1996,
3007,
1998,
2922,
2103,
1997,
2563,
1998,
1996,
2142,
2983,
102,
0,
0,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
]
),
},
"entity_ids": torch.tensor([1, 0]),
"targets": torch.tensor([0, 1]),
"entity_tensor_mask": torch.tensor([1, 1]),
},
{
"mentions": {
"input_ids": torch.tensor(
[
[
101,
1999,
1,
2414,
2,
1996,
3007,
1997,
2563,
1998,
1996,
102,
]
]
),
"attention_mask": torch.tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
101,
2414,
2003,
1996,
3007,
1998,
2922,
2103,
1997,
2563,
1998,
1996,
2142,
2983,
102,
]
]
),
"attention_mask": torch.tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
),
},
"entity_ids": torch.tensor([0]),
"targets": torch.tensor([0]),
"entity_tensor_mask": torch.tensor([1]),
},
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
class TestBiEncoderELDataModuleWithXlmrTransform(unittest.TestCase):
def setUp(self):
random.seed(0)
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_matcha.jsonl")
self.ent_catalogue_path = os.path.join(self.base_dir, "el_xlmr_catalogue.h5")
self.ent_catalogue_idx_path = os.path.join(self.base_dir, "el_catalogue.idx")
self.transform = BlinkTransform(
model_path="xlm-roberta-base",
mention_start_token=-2,
mention_end_token=-3,
max_mention_len=12,
max_entity_len=32,
)
def test_el_bi_encoder_data_module_with_xlmr_transform(self):
dm = ElBiEncoderDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
batch_size=2,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 2)
expected_batches = [
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
250000,
44517,
98809,
7687,
249999,
83,
142,
14941,
23182,
101740,
2,
],
[
0,
23182,
101740,
11938,
35509,
23,
88437,
3915,
250000,
9020,
249999,
2,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
44517,
563,
5,
441,
5,
250000,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
4,
9020,
5,
215624,
297,
23,
66007,
4,
70,
11938,
98438,
2,
],
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
],
]
),
"attention_mask": torch.tensor(
[
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
],
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
],
]
),
},
"entity_ids": torch.tensor([1, 0]),
"targets": torch.tensor([0, 1]),
"entity_tensor_mask": torch.tensor([1, 1]),
},
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
360,
250000,
9020,
249999,
70,
10323,
111,
30715,
136,
70,
2,
]
]
),
"attention_mask": torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
]
]
),
"attention_mask": torch.tensor(
[
[
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
]
]
),
},
"entity_ids": torch.tensor([0]),
"targets": torch.tensor([0]),
"entity_tensor_mask": torch.tensor([1]),
},
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
def test_el_bi_encoder_data_module_with_hard_negatives_with_xlmr_transform(self):
dm = ElBiEncoderDataModule(
transform=self.transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
batch_size=2,
negatives=True,
max_negative_entities_in_batch=5,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 2)
expected_batches = [
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
250000,
44517,
98809,
7687,
249999,
83,
142,
14941,
23182,
101740,
2,
],
[
0,
23182,
101740,
11938,
35509,
23,
88437,
3915,
250000,
9020,
249999,
2,
],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
44517,
563,
5,
441,
5,
250000,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
4,
9020,
5,
215624,
297,
23,
66007,
4,
70,
11938,
98438,
2,
],
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
],
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
]
),
"attention_mask": torch.tensor(
[
[1] * 32,
[1] * 32,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
]
),
},
"entity_ids": torch.tensor([1, 0, 0, 0, 0, 0, 0]),
"targets": torch.tensor([0, 1]),
"entity_tensor_mask": torch.tensor([1, 1, 0, 0, 0, 0, 0]),
},
{
"mentions": {
"input_ids": torch.tensor(
[
[
0,
360,
250000,
9020,
249999,
70,
10323,
111,
30715,
136,
70,
2,
]
]
),
"attention_mask": torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]),
},
"entities": {
"input_ids": torch.tensor(
[
[
0,
9020,
250000,
9020,
83,
70,
10323,
136,
142105,
26349,
111,
30715,
136,
70,
14098,
117604,
5,
581,
26349,
9157,
7,
98,
70,
32547,
99321,
90,
23,
70,
127067,
9,
13,
2,
],
[
0,
44517,
563,
5,
441,
5,
250000,
44517,
98809,
7687,
83,
142,
14941,
23182,
101740,
11938,
35509,
23,
88437,
3915,
4,
9020,
5,
215624,
297,
23,
66007,
4,
70,
11938,
98438,
2,
],
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
[0, 2] + [1] * 30,
]
),
"attention_mask": torch.tensor(
[
[1] * 32,
[1] * 32,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
[1, 1] + [0] * 30,
]
),
},
"entity_ids": torch.tensor([0, 1, 0, 0, 0, 0]),
"targets": torch.tensor([0]),
"entity_tensor_mask": torch.tensor([1, 1, 0, 0, 0, 0]),
},
]
for result, expected in zip(batches, expected_batches):
assert_equal_tensor_dict(self, result, expected)
class TestMultilangELDataModule(unittest.TestCase):
def setUp(self):
random.seed(0)
torch.manual_seed(0)
self.base_dir = os.path.join(os.path.dirname(__file__), "data")
self.data_path = os.path.join(self.base_dir, "el_blink.jsonl")
self.ent_catalogue_idx_path = os.path.join(
self.base_dir, "el_multi_catalogue.idx"
)
fid, self.ent_catalogue_path = tempfile.mkstemp()
os.close(fid)
self._create_ent_data(self.ent_catalogue_path)
def tearDown(self):
if os.path.isfile(self.ent_catalogue_path):
os.remove(self.ent_catalogue_path)
@staticmethod
def _create_ent_data(file_name):
with h5py.File(file_name, "w") as fd:
fd["en"] = np.array(
[
[3, 101, 25550, 102, 0, 0],
[3, 101, 16765, 102, 0, 0],
[5, 101, 12109, 10104, 14822, 102],
[3, 101, 10829, 102, 0, 0],
]
)
fd["pt"] = np.array(
[
[3, 101, 12264, 102, 0, 0],
[5, 101, 14734, 47630, 27171, 102],
]
)
fd["ru"] = np.array([[5, 101, 59049, 118, 11323, 102]])
def test_multilang_ent_catalogue(self):
ent_catalogue = MultilangEntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
self.assertIn("Q5146", ent_catalogue)
self.assertIn("Q155", ent_catalogue)
self.assertIn("Q8678", ent_catalogue)
self.assertIn("Q84", ent_catalogue)
self.assertNotIn("London", ent_catalogue)
idx0, data = ent_catalogue["Q5146"]
self.assertSequenceEqual(data, [101, 14734, 47630, 27171, 102])
idx1, data = ent_catalogue["Q155"]
self.assertSequenceEqual(data, [101, 16765, 102])
idx2, data = ent_catalogue["Q8678"]
self.assertSequenceEqual(data, [101, 59049, 118, 11323, 102])
idx3, data = ent_catalogue["Q84"]
self.assertSequenceEqual(data, [101, 10829, 102])
# assert all keys have unique idx numbers
self.assertEqual(sorted([idx0, idx1, idx2, idx3]), [0, 1, 2, 3])
def test_el_blink_dataset(self):
ent_catalogue = MultilangEntityCatalogue(
self.ent_catalogue_path,
self.ent_catalogue_idx_path,
)
ds = ElBlinkDataset(path=self.data_path, ent_catalogue=ent_catalogue)
self.assertEqual(len(ds), 7)
self.assertEqual(
ds[0],
{
"context_left": "Guanabara K\u00f6rfezi (",
"mention": "Portekizce",
"context_right": ": ve de Rio de Janeiro eyaletinde.",
"entity_id": "Q5146",
"entity_index": ent_catalogue["Q5146"][0],
"entity_tokens": ent_catalogue["Q5146"][1],
},
)
self.assertEqual(
ds[6],
{
"context_left": "Serpenti Galerisi (\u0130ngilizce: Serpentine Gallery),",
"mention": "Londra",
"context_right": "\u015fehrindeki Hyde Park\u2019\u0131n bir par\u00e7as\u0131 olan Kensington Gardens.",
"entity_id": "Q84",
"entity_index": ent_catalogue["Q84"][0],
"entity_tokens": ent_catalogue["Q84"][1],
},
)
def test_el_multilang_datamodule(self):
transform = BlinkTransform(
model_path="xlm-roberta-base",
mention_start_token=-2,
mention_end_token=-3,
max_mention_len=12,
max_entity_len=32,
)
dm = ElBiEncoderDataModule(
transform=transform,
train_path=self.data_path,
val_path=self.data_path,
test_path=self.data_path,
ent_catalogue_path=self.ent_catalogue_path,
ent_catalogue_idx_path=self.ent_catalogue_idx_path,
dataset_type="blink",
ent_catalogue_type="multi",
batch_size=2,
mention_start_token=1,
mention_end_token=2,
ent_sep_token=3,
mention_context_length=12,
separate_segments=True,
)
batches = list(dm.train_dataloader())
self.assertEqual(len(batches), 4)
if __name__ == '__main__':
unittest.main()
| BELA-main | mblink/tests/test_datamodules.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.models.hf_encoder import HFEncoder
from bela.transforms.joint_el_transform import JointELTransform
class TestHFEncoder(unittest.TestCase):
def test_xlmr_encoder(self):
transform = JointELTransform()
model = HFEncoder(model_path="xlm-roberta-base")
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
output = model(
input_ids=model_inputs["input_ids"],
attention_mask=model_inputs["attention_mask"],
)
if __name__ == '__main__':
unittest.main()
| BELA-main | mblink/tests/test_models.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.transforms.joint_el_transform import JointELTransform
class TestJointELXlmrTransforms(unittest.TestCase):
def test_blink_mention_xlmr_transform(self):
transform = JointELTransform()
model_inputs = transform(
{
"texts": [
[
"Some",
"simple",
"text",
"about",
"Real",
"Madrid",
"and",
"Barcelona",
],
["Hola", "amigos", "!"],
["Cristiano", "Ronaldo", "juega", "en", "la", "Juventus"],
],
"mention_offsets": [
[4, 7],
[1],
[0, 5],
],
"mention_lengths": [
[2, 1],
[1],
[2, 1],
],
"entities": [
[1, 2],
[3],
[102041, 267832],
],
}
)
expected_model_inputs = {
"input_ids": torch.tensor(
[
[0, 31384, 8781, 7986, 1672, 5120, 8884, 136, 5755, 2],
[0, 47958, 19715, 711, 2, 1, 1, 1, 1, 1],
[0, 96085, 43340, 1129, 2765, 22, 21, 65526, 2, 1],
]
),
"attention_mask": torch.tensor(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
]
),
"mention_offsets": torch.tensor([[5, 8], [2, 0], [1, 7]]),
"mention_lengths": torch.tensor([[2, 1], [1, 0], [2, 1]]),
"entities": torch.tensor([[1, 2], [3, 0], [102041, 267832]]),
"tokens_mapping": torch.tensor(
[
[[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9]],
[
[1, 2],
[2, 3],
[3, 4],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
],
[
[1, 2],
[2, 3],
[3, 5],
[5, 6],
[6, 7],
[7, 8],
[0, 1],
[0, 1],
],
]
),
}
for key, value in expected_model_inputs.items():
self.assertTrue(
torch.all(model_inputs[key].eq(value)), f"{key} not equal"
)
if __name__ == '__main__':
unittest.main()
| BELA-main | mblink/tests/test_transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from enum import Enum
from typing import List
import torch
import h5py
logger = logging.getLogger()
class EntityCatalogueType(Enum):
SIMPLE = "simple"
MULTI = "multi"
class ElDatasetType(Enum):
BLINK = "blink"
MATCHA = "matcha"
class NegativesStrategy(Enum):
HIGHER = "higher"
ALL = "all"
def assert_equal_tensor_dict(test_case, result, expected):
"""
Compare tensors/values in the dict and assert if they are not equal.
The dict may contain multiple levels of nesting.
"""
for key, value in expected.items():
if isinstance(value, dict):
assert_equal_tensor_dict(test_case, result[key], value)
else:
if isinstance(value, torch.Tensor):
test_case.assertTrue(
torch.equal(result[key], value), f"{key} is not equal"
)
else:
test_case.assertEqual(result[key], value, f"{key} is not equal")
def get_seq_lengths(batch: List[List[int]]):
return [len(example) for example in batch]
class EntityCatalogue:
def __init__(self, local_path, idx_path):
self.data_file = h5py.File(local_path, "r")
self.data = self.data_file["data"]
logger.info(f"Reading entity catalogue index {idx_path}")
self.idx = {}
with open(idx_path, "rt") as fd:
for idx, line in enumerate(fd):
ent_id = line.strip()
self.idx[ent_id] = idx
def __len__(self):
return len(self.idx)
def __getitem__(self, entity_id):
ent_index = self.idx[entity_id]
value = self.data[ent_index].tolist()
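# The first element of each catalogue row stores the number of valid tokens;
# the rest of the row is padding, so keep only value[1 : value[0] + 1].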
value = value[1 : value[0] + 1]
return ent_index, value
def __contains__(self, entity_id):
return entity_id in self.idx
class MultilangEntityCatalogue:
"""
Entity catalogue where each entity id has descriptions in several languages.
The index is a JSON file whose keys are entity ids. Each value is a dict mapping a
language id to a triplet (title, count, index): title is the Wikipedia title of the
entity in that language, count is the number of mentions of the entity in that
language, and index is the position of the entity tokens in the tokens array.
Index example:
{
...
"Q17": {
"en": ["Japan", 230, 10],
"ru": ["Япония", 111, 55]
}
...
}
The tokens file is an h5py file whose dataset keys are language ids and whose
stored arrays are entity tokens.
"""
def __init__(self, local_path, idx_path):
self.data = h5py.File(local_path, "r")
logger.info(f"Reading entity catalogue index {idx_path}")
with open(idx_path, "rt") as fd:
self.idx = json.load(fd)
# assign unique index number to each entity
for idx, ent_value in enumerate(self.idx.values()):
ent_value["idx"] = idx
def __len__(self):
return len(self.idx)
def __getitem__(self, entity_id):
ent_lang_map = self.idx[entity_id]
# now choose language with most mentions
selected_lang = None
max_count = -1
for lang, val in ent_lang_map.items():
if lang == "idx":
continue
_, count, _ = val
if count > max_count:
max_count = count
selected_lang = lang
assert selected_lang is not None
ent_index = ent_lang_map[selected_lang][2]
value = self.data[selected_lang][ent_index].tolist()
value = value[1 : value[0] + 1]
return ent_lang_map["idx"], value
def __contains__(self, entity_id):
return entity_id in self.idx
def order_entities(
entities_data,
entity_ids,
neg_entities_ids=None,
neg_entities_tokens=None,
max_negative_entities_in_batch=None,
):
"""
This function removes duplicated entities in the entities batch and
constructs the targets.
In the bi-encoder model we train on in-batch random and hard negatives. In this
case each mention should have exactly one positive entity in the entities batch.
But two or more mentions in the batch may refer to the same entity (so that
entity would appear in the batch two or more times), and then the target class
could not be defined unambiguously and the loss could not be computed correctly.
To resolve this problem we filter the entities and keep only one example of
each in the batch.
Returns:
filteres_entities - filtered entity tokens
filtered_entity_ids - filtered entity ids
targets - array where the i-th element is the position, in the entity
embedding matrix, of the entity embedding of the i-th mention.
"""
ent_indexes_map = {}
targets = []
filteres_entities = []
filtered_entity_ids = []
for ent_id, ent_data in zip(entity_ids, entities_data):
if ent_id in ent_indexes_map:
targets.append(ent_indexes_map[ent_id])
else:
ent_idx = len(ent_indexes_map)
targets.append(ent_idx)
ent_indexes_map[ent_id] = ent_idx
filteres_entities.append(ent_data)
filtered_entity_ids.append(ent_id)
# Append `max_negative_entities_in_batch` entities to the end of batch
neg_entities_ids = neg_entities_ids or []
neg_entities_tokens = neg_entities_tokens or []
neg_filteres_entities = []
neg_filtered_entity_ids = []
for item_neg_entities_ids, item_neg_entities_tokens in zip(
neg_entities_ids,
neg_entities_tokens,
):
for neg_entity_id, neg_entity_tokens in zip(
item_neg_entities_ids, item_neg_entities_tokens
):
if neg_entity_id not in ent_indexes_map:
ent_idx = len(ent_indexes_map)
ent_indexes_map[neg_entity_id] = ent_idx
neg_filteres_entities.append(neg_entity_tokens)
neg_filtered_entity_ids.append(neg_entity_id)
if max_negative_entities_in_batch is not None:
neg_filteres_entities = neg_filteres_entities[:max_negative_entities_in_batch]
neg_filtered_entity_ids = neg_filtered_entity_ids[
:max_negative_entities_in_batch
]
filteres_entities.extend(neg_filteres_entities)
filtered_entity_ids.extend(neg_filtered_entity_ids)
return filteres_entities, filtered_entity_ids, targets
| BELA-main | mblink/utils/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from transformers import AutoModel
from torch import nn
class HFEncoder(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
projection_dim: Optional[int] = None,
output_dropout: Optional[float] = 0.0,
):
super().__init__()
self.transformer = AutoModel.from_pretrained(model_path)
self.embedding_dim = self.transformer.encoder.config.hidden_size
self.project = nn.Identity() # to make torchscript happy
if projection_dim:
self.project = nn.Sequential(
nn.Linear(self.embedding_dim, projection_dim), nn.LayerNorm(projection_dim)
)
self.output_dropout = nn.Dropout(output_dropout)
def forward(self, input_ids, attention_mask=None):
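# Take the first token ([CLS]/<s>) of the last hidden layer as the sequence
# representation, optionally project it, and also return the full last layer.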
output = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
last_layer = output["last_hidden_state"]
sentence_rep = self.project(last_layer[:, 0, :])
return self.output_dropout(sentence_rep), last_layer
| BELA-main | mblink/models/hf_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from transformers import AutoTokenizer
class HFTransform(nn.Module):
def __init__(
self,
model_path: str = "xlm-roberta-base",
max_seq_len: int = 256,
add_special_tokens: bool = True,
return_offsets_mapping: bool = True,
):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.sep_token = self.tokenizer.sep_token
self.max_seq_len = max_seq_len
self.add_special_tokens = add_special_tokens
self.return_offsets_mapping = return_offsets_mapping
self.pad_token_id = self.tokenizer.pad_token_id
def forward(self, texts):
return self.tokenizer(
texts,
return_tensors=None,
padding=False,
truncation=True,
max_length=self.max_seq_len,
add_special_tokens=self.add_special_tokens,
return_offsets_mapping=self.return_offsets_mapping,
)["input_ids"]
| BELA-main | mblink/transforms/hf_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from mblink.transforms.hf_transform import HFTransform
from mblink.utils.utils import (
EntityCatalogueType,
EntityCatalogue,
MultilangEntityCatalogue,
NegativesStrategy,
order_entities,
)
@torch.jit.script
def pad_2d(
batch: List[List[int]], seq_lens: List[int], pad_idx: int, max_len: int = -1
) -> List[List[int]]:
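# Pad every sequence in place with pad_idx up to the length of the longest
# sequence in the batch (optionally capped at max_len); longer sequences are
# truncated to that length.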
pad_to_length = max(seq_lens)
if max_len > 0:
pad_to_length = min(pad_to_length, max_len)
for sentence in batch:
padding = pad_to_length - len(sentence)
if padding >= 0:
for _ in range(padding):
sentence.append(pad_idx)
else:
for _ in range(-padding):
sentence.pop()
return batch
def prepare_mention(
context_left: List[int],
mention_tokens: List[int],
context_right: List[int],
max_mention_length: int,
mention_start_token: int,
mention_end_token: int,
bos_idx: int,
eos_idx: int,
):
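# Strip the special tokens the tokenizer added to each piece, wrap the (possibly
# truncated) mention in its start/end marker tokens, then split the remaining
# token budget between left and right context, donating unused quota from the
# shorter side to the longer one before re-adding BOS/EOS.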
context_left: List[int] = context_left[1:-1]
mention_tokens: List[int] = mention_tokens[1:-1]
context_right: List[int] = context_right[1:-1]
mention_tokens = mention_tokens[: max_mention_length - 4]
mention_tokens = [mention_start_token] + mention_tokens + [mention_end_token]
left_quota = (max_mention_length - len(mention_tokens)) // 2 - 1
right_quota = max_mention_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
empty_tokens: List[int] = []
context_left = empty_tokens if left_quota == 0 else context_left[-left_quota:]
context_right = context_right[:right_quota]
context_left = [bos_idx] + context_left
context_right = context_right + [eos_idx]
context_tokens = context_left + mention_tokens + context_right
return context_tokens
# class BlinkMentionRobertaTransform(HFTransform):
# def __init__(
# self,
# mention_start_token: int = -2,
# mention_end_token: int = -3,
# model_path: Optional[str] = None,
# max_seq_len: int = 64,
# ):
# super().__init__(
# model_path=model_path,
# max_seq_len=max_seq_len,
# )
# vocab_length = len(self.tokenizer.vocab)
# self.bos_idx = self.tokenizer.bos_token_id
# self.eos_idx = self.tokenizer.eos_token_id
# self.mention_start_token = (vocab_length + mention_start_token) % vocab_length
# self.mention_end_token = (vocab_length + mention_end_token) % vocab_length
# self.max_mention_length = max_seq_len
# def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
# left_context = batch["left_context"]
# torch.jit.isinstance(left_context, List[str])
# right_context = batch["right_context"]
# torch.jit.isinstance(right_context, List[str])
# mention = batch["mention"]
# torch.jit.isinstance(mention, List[str])
# left_token_ids: List[List[int]] = self.tokenizer(left_context)["input_ids"]
# mention_token_ids: List[List[int]] = self.tokenizer(mention)["input_ids"]
# right_token_ids: List[List[int]] = self.tokenizer(right_context)["input_ids"]
# token_ids: List[List[int]] = []
# attention_masks: List[List[int]] = []
# seq_lens: List[int] = []
# for lc_token_ids, m_token_ids, rc_token_ids, in zip(
# left_token_ids,
# mention_token_ids,
# right_token_ids,
# ):
# sentence_token_ids = prepare_mention(
# lc_token_ids,
# m_token_ids,
# rc_token_ids,
# self.max_mention_length,
# self.mention_start_token,
# self.mention_end_token,
# self.bos_idx,
# self.eos_idx,
# )
# token_ids.append(sentence_token_ids)
# attention_mask = [1] * len(sentence_token_ids)
# attention_masks.append(attention_mask)
# seq_lens.append(len(sentence_token_ids))
# attention_masks = pad_2d(
# attention_masks,
# seq_lens,
# pad_idx = 0,
# )
# return {
# "input_ids": token_ids,
# "attention_mask": attention_masks,
# }
# class BlinkEntityPretokenizedTransform(HFTransform):
# def __init__(
# self,
# model_path: Optional[str] = None,
# max_seq_len: int = 64,
# ):
# super().__init__(
# model_path=model_path,
# max_seq_len=max_seq_len,
# )
# self.max_entity_length = max_seq_len
# def transform(self, batch: Dict[str, Any]) -> Dict[str, Any]:
# token_ids = batch["token_ids"]
# torch.jit.isinstance(token_ids, List[List[int]])
# result_token_ids: List[List[int]] = []
# seq_lens: List[int] = []
# attention_masks: List[List[int]] = []
# for token_ids_per_sequence in token_ids:
# if len(token_ids_per_sequence) > self.max_entity_length:
# eos_token = token_ids_per_sequence[-1]
# token_ids_per_sequence = token_ids_per_sequence[
# : self.max_entity_length
# ]
# token_ids_per_sequence[-1] = eos_token
# result_token_ids.append(token_ids_per_sequence)
# seq_len = len(token_ids_per_sequence)
# attention_mask = [1] * len(token_ids_per_sequence)
# attention_masks.append(attention_mask)
# seq_lens.append(seq_len)
# attention_masks = pad_2d(
# attention_masks,
# seq_lens,
# pad_idx = 0,
# )
# return {
# "input_ids": result_token_ids,
# "attention_mask": attention_masks,
# }
# class BlinkTransform(nn.Module):
# def __init__(
# self,
# model_path: Optional[str] = None,
# mention_start_token: int = -2,
# mention_end_token: int = -3,
# max_mention_len: int = 64,
# max_entity_len: int = 64,
# ):
# super().__init__()
# self.mention_transform = BlinkMentionRobertaTransform(
# mention_start_token=mention_start_token,
# mention_end_token=mention_end_token,
# model_path=model_path,
# max_seq_len=max_mention_len,
# )
# self.entity_transform = BlinkEntityPretokenizedTransform(
# model_path=model_path,
# max_seq_len=max_entity_len,
# )
# def forward(
# self, batch: Dict[str, Any]
# ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
# return self.mention_transform(batch), self.entity_transform(batch)
# @property
# def bos_idx(self):
# return self.mention_transform.bos_idx
# @property
# def eos_idx(self):
# return self.mention_transform.eos_idx
class BlinkTransform(HFTransform):
def __init__(
self,
model_path: str = "bert-base-uncased",
mention_start_token: int = 1,
mention_end_token: int = 2,
max_mention_len: int = 32,
max_entity_len: int = 64,
add_eos_bos_to_entity: bool = False,
):
super().__init__(
model_path=model_path,
)
vocab_length = self.tokenizer.vocab_size
self.mention_start_token = (vocab_length + mention_start_token) % vocab_length
self.mention_end_token = (vocab_length + mention_end_token) % vocab_length
self.max_mention_len = max_mention_len
self.max_entity_len = max_entity_len
self.add_eos_bos_to_entity = add_eos_bos_to_entity
def _transform_mention(
self,
left_context: List[str],
mention: List[str],
right_context: List[str],
) -> List[List[int]]:
token_ids: List[List[int]] = []
for sentence_lc, sentence_mention, sentence_rc, in zip(
left_context,
mention,
right_context,
):
lc_token_ids = self.tokenizer.encode(sentence_lc)
mention_token_ids = self.tokenizer.encode(sentence_mention)
rc_token_ids = self.tokenizer.encode(sentence_rc)
sentence_token_ids = prepare_mention(
lc_token_ids,
mention_token_ids,
rc_token_ids,
self.max_mention_len,
self.mention_start_token,
self.mention_end_token,
self.tokenizer.cls_token_id,
self.tokenizer.sep_token_id,
)
token_ids.append(sentence_token_ids)
return token_ids
def _transform_entity(
self,
entity_token_ids: List[List[int]],
) -> List[List[int]]:
result_token_ids: List[List[int]] = []
for token_ids in entity_token_ids:
if self.add_eos_bos_to_entity:
token_ids = [self.bos_idx] + token_ids + [self.eos_idx]
if len(token_ids) > self.max_entity_len:
token_ids = token_ids[: self.max_entity_len]
token_ids[-1] = self.eos_idx
result_token_ids.append(token_ids)
return result_token_ids
def _to_tensor(self, token_ids, attention_mask_pad_idx=0):
seq_lens = [len(seq) for seq in token_ids]
input_ids = pad_2d(
token_ids,
seq_lens,
pad_idx = self.pad_token_id,
)
attention_mask = [[1]*seq_len for seq_len in seq_lens]
attention_mask = pad_2d(
attention_mask,
seq_lens,
pad_idx = attention_mask_pad_idx,
)
return {
'input_ids': torch.tensor(input_ids),
'attention_mask': torch.tensor(attention_mask),
}
def forward(
self, batch: Dict[str, Any]
) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
left_context = batch["left_context"]
torch.jit.isinstance(left_context, List[str])
mention = batch["mention"]
torch.jit.isinstance(mention, List[str])
right_context = batch["right_context"]
torch.jit.isinstance(right_context, List[str])
entity_token_ids = batch["token_ids"]
torch.jit.isinstance(entity_token_ids, List[List[int]])
mention_token_ids = self._transform_mention(
left_context,
mention,
right_context,
)
mention_tensors = self._to_tensor(
mention_token_ids,
)
entity_token_ids = self._transform_entity(entity_token_ids)
entity_tensors = self._to_tensor(
entity_token_ids,
)
return (mention_tensors, entity_tensors)
@property
def bos_idx(self):
return self.tokenizer.cls_token_id
@property
def eos_idx(self):
return self.tokenizer.sep_token_id
| BELA-main | mblink/transforms/blink_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Optional
from pytorch_lightning.strategies import DDPShardedStrategy, DDPStrategy
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
fp16_compress_hook,
)
import hydra
import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from mblink.conf import (
DataModuleConf,
ModelConf,
OptimConf,
TransformConf,
)
logger = logging.getLogger(__name__)
class InBatchTripletLoss(nn.Module):
# Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
# Blog post: https://omoindrot.github.io/triplet-loss
def __init__(self, margin: float = 1.0):
super().__init__()
self.margin = margin
def forward(self, scores: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Build the triplet loss over a matrix of computed scores
For each mention, the score of the correct entity should be greater than the
scores of all other entities in the batch by at least the margin.
Args:
scores: n_mentions x n_entities matrix of similarity scores between mentions and entities
targets: vector of indices of correct entity for each mention (n_mentions)
"""
one_hot_targets = torch.zeros_like(scores, dtype=torch.bool)  # keep the mask on the same device as scores
one_hot_targets[torch.arange(targets.shape[0]), targets] = True
pos_scores = scores[one_hot_targets].unsqueeze(1).repeat(1, scores.shape[1] - 1)
neg_scores = scores[~one_hot_targets].reshape(
scores.shape[0], scores.shape[1] - 1
)
loss = torch.relu(self.margin + neg_scores - pos_scores).mean()
return loss
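# A tiny worked example (illustrative, not from the original file): with
# scores = torch.tensor([[5., 1.], [0., 4.]]) and targets = torch.tensor([0, 1]),
# each positive score beats the in-batch negative by more than margin=1.0,
# so InBatchTripletLoss()(scores, targets) evaluates to 0.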
class InBatchMarginLoss(nn.Module):
"""
Pushes positive scores above the margin and negative scores below 0.
The loss is calculated as max(0, margin - positive scores) +
max(0, negative scores).
"""
def __init__(self, margin: float = 100.0, pos_weight=1.0, use_mean=True):
super().__init__()
self.margin = margin
self.pos_weight = pos_weight
self.reduce_op = torch.mean if use_mean else torch.sum
def forward(self, scores: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
one_hot_targets = torch.zeros_like(scores, dtype=torch.bool)  # keep the mask on the same device as scores
one_hot_targets[torch.arange(targets.shape[0]), targets] = True
pos_scores = scores[one_hot_targets]
neg_scores = scores[~one_hot_targets]
pos_weight = self.pos_weight
if pos_weight is None:
pos_weight = scores.shape[1] - 1
loss = self.reduce_op(
pos_weight * torch.relu(self.margin - pos_scores)
) + self.reduce_op(torch.relu(neg_scores))
return loss
class CombinedLoss(nn.Module):
def __init__(self, first: nn.Module, second: nn.Module, second_weight=1.0):
super().__init__()
self.first = first
self.second = second
self.second_weight = second_weight
def forward(self, scores: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
return self.first(scores, targets) + self.second_weight * self.second(
scores, targets
)
class ElBiEncoderTask(LightningModule):
def __init__(
self,
transform: TransformConf,
model: ModelConf,
datamodule: DataModuleConf,
optim: OptimConf,
in_batch_eval: bool = True, # use only in batch contexts for validation
warmup_steps: int = 0,
filter_entities: bool = True,
loss: str = "cross_entropy",
triplet_loss_margin: float = 1.0,
margin_loss_margin: float = 100.0,
margin_loss_pos_weight: Optional[float] = None,
margin_loss_weight: float = 1.0,
margin_loss_mean: bool = True,
load_from_checkpoint: Optional[str] = None,
fp16_grads: bool = False,
):
super().__init__()
# encoder setup
self.mention_encoder_conf = model
self.entity_encoder_conf = model
self.optim_conf = optim
self.in_batch_eval = in_batch_eval
self.warmup_steps = warmup_steps
self.filter_entities = filter_entities
self.load_from_checkpoint = load_from_checkpoint
self.fp16_grads = fp16_grads
if loss == "cross_entropy":
self.loss = nn.CrossEntropyLoss()
elif loss == "triplet":
self.loss = InBatchTripletLoss(margin=triplet_loss_margin)
elif loss == "margin":
self.loss = CombinedLoss(
nn.CrossEntropyLoss(),
InBatchMarginLoss(
margin=margin_loss_margin,
pos_weight=margin_loss_pos_weight,
use_mean=margin_loss_mean,
),
margin_loss_weight,
)
else:
raise ValueError(f"Unsupported loss {loss}")
@staticmethod
def _get_encoder_state(state, encoder_name):
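# Extract one encoder's sub-state-dict from a full Lightning checkpoint by
# keeping the keys that start with `encoder_name` and stripping that prefix.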
encoder_state = OrderedDict()
for key, value in state["state_dict"].items():
if key.startswith(encoder_name):
encoder_state[key[len(encoder_name) + 1 :]] = value
return encoder_state
def setup(self, stage: str):
if stage == "test":
return
# reset the call_configure_sharded_model_hook attribute so that we can configure the model
self.call_configure_sharded_model_hook = False
self.mention_encoder = hydra.utils.instantiate(
self.mention_encoder_conf,
_recursive_=False,
)
self.entity_encoder = hydra.utils.instantiate(
self.entity_encoder_conf,
_recursive_=False,
)
if self.load_from_checkpoint is not None:
logger.info(f"Load encoders state from {self.load_from_checkpoint}")
with open(self.load_from_checkpoint, "rb") as f:
checkpoint = torch.load(f, map_location=torch.device("cpu"))
entity_encoder_state = self._get_encoder_state(checkpoint, "entity_encoder")
self.entity_encoder.load_state_dict(entity_encoder_state)
mention_encoder_state = self._get_encoder_state(
checkpoint, "mention_encoder"
)
self.mention_encoder.load_state_dict(mention_encoder_state)
self.optimizer = hydra.utils.instantiate(
self.optim_conf, self.parameters(), _recursive_=False
)
def on_pretrain_routine_start(self):
if self.fp16_grads:
self.trainer.strategy._model.register_comm_hook(None, fp16_compress_hook)
def sim_score(self, mentions_repr, entities_repr):
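# Dot-product similarity: (n_mentions x d) @ (d x n_entities) -> n_mentions x n_entities score matrix.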
scores = torch.matmul(mentions_repr, torch.transpose(entities_repr, 0, 1))
return scores
def forward(
self,
mentions_ids,
entities_ids,
):
# encode query and contexts
mentions_repr, _ = self.mention_encoder(mentions_ids) # bs x d
entities_repr, _ = self.entity_encoder(entities_ids) # bs x d
return mentions_repr, entities_repr
def configure_optimizers(self):
return self.optimizer
def training_step(self, batch, batch_idx):
"""
This receives queries, each with multiple contexts.
"""
mentions = batch["mentions"] # bs x mention_len
entities = batch["entities"] # bs x entity len
entity_ids = batch["entity_ids"] # bs
targets = batch["targets"] # bs
mask = batch["entity_tensor_mask"] # bs
mentions_repr, entities_repr = self(mentions['input_ids'], entities['input_ids'])
if isinstance(self.trainer.strategy, (DDPStrategy, DDPShardedStrategy)):
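# Under (sharded) DDP, gather detached mention/entity representations from all
# workers to enlarge the pool of in-batch entities. The local, non-detached
# tensors are added first so gradients still flow through this worker's own
# representations and they are never filtered out as duplicates.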
mentions_to_send = mentions_repr.detach()
entities_to_send = entities_repr.detach()
all_mentions_repr = self.all_gather(mentions_to_send) # num_workers x bs
all_entities_repr = self.all_gather(entities_to_send)
all_targets = self.all_gather(targets)
# we are not filtering duplicated entities now
all_entity_ids = self.all_gather(entity_ids)
all_mask = self.all_gather(mask)
# offset = 0
all_mentions_list = []
all_entities_list = []
all_entity_ids_list = []
all_targets_list = []
# Add current device representations first.
# It is needed so that we do not filter out the representations
# calculated on this device.
all_mentions_list.append(mentions_repr)
entities_repr = entities_repr[mask.bool()]
all_entities_list.append(entities_repr)
all_entity_ids_list.append(entity_ids[mask.bool()].tolist())
all_targets_list.append(targets)
# offset += entities_repr.size(0)
for i in range(all_targets.size(0)):
if i != self.global_rank:
all_mentions_list.append(all_mentions_repr[i])
all_entities_list.append(all_entities_repr[i][all_mask[i].bool()])
all_entity_ids_list.append(
all_entity_ids[i][all_mask[i].bool()].tolist()
)
# all_targets[i] += offset
all_targets_list.append(all_targets[i])
# offset += all_entities_repr[i].size(0)
mentions_repr = torch.cat(all_mentions_list, dim=0) # total_ctx x dim
# entities_repr = torch.cat(all_entities_list, dim=0) # total_query x dim
# targets = torch.cat(all_targets_list, dim=0)
if self.filter_entities:
entities_repr, targets = self._filter_entities_and_targets(
all_entities_list,
all_entity_ids_list,
all_targets_list,
)
else:
entities_repr = torch.cat(all_entities_list, dim=0)
targets = torch.cat(all_targets_list, dim=0)
# entity_ids = torch.flatten(entity_ids)
else:
entities_repr = entities_repr[mask.bool()]
scores = self.sim_score(mentions_repr, entities_repr)
loss = self.loss(scores, targets)
self.log("train_loss", loss, prog_bar=True)
return loss
def _filter_entities_and_targets(
self, all_entities_list, all_entity_ids_list, all_targets_list
):
filtered_entities_repr = []
filtered_targets = []
ent_indexes_map = {}
for entities_repr, entity_ids, targets, in zip(
all_entities_list,
all_entity_ids_list,
all_targets_list,
):
for entity_repr, ent_id in zip(entities_repr, entity_ids):
if ent_id not in ent_indexes_map:
ent_idx = len(ent_indexes_map)
ent_indexes_map[ent_id] = ent_idx
filtered_entities_repr.append(entity_repr)
for target in targets.tolist():
filtered_targets.append(ent_indexes_map[entity_ids[target]])
filtered_entities_repr = torch.stack(filtered_entities_repr, dim=0)
filtered_targets = torch.tensor(
filtered_targets,
dtype=torch.long,
device=filtered_entities_repr.get_device(),
)
return filtered_entities_repr, filtered_targets
def _eval_step(self, batch, batch_idx):
mentions = batch["mentions"] # bs x mention_len
entities = batch["entities"] # bs x entity len
entity_ids = batch["entity_ids"] # bs
targets = batch["targets"] # bs
mask = batch["entity_tensor_mask"] # bs
mentions_repr, entities_repr = self(mentions['input_ids'], entities['input_ids'])
entities_repr = entities_repr[mask.bool()]
scores = self.sim_score(mentions_repr, entities_repr) # bs x ctx_cnt
loss = self.loss(scores, targets)
return (
self.compute_rank_metrics(scores, targets),
mentions_repr,
entities_repr,
targets,
entity_ids,
loss,
)
def compute_rank_metrics(self, scores, target_labels):
# Compute total un_normalized avg_ranks, mrr
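# For each mention, find the position of the gold entity in the scores sorted
# in descending order; accumulate the rank and the reciprocal rank (they are
# averaged later in _eval_epoch_end).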
values, indices = torch.sort(scores, dim=1, descending=True)
rank = 0
mrr = 0.0
for i, idx in enumerate(target_labels):
gold_idx = (indices[i] == idx).nonzero()
rank += gold_idx.item() + 1
mrr += 1 / (gold_idx.item() + 1)
return rank, mrr
def _eval_epoch_end(self, outputs, log_prefix="valid"):
total_avg_rank, total_ent_count, total_count = 0, 0, 0
total_mrr = 0
total_loss = 0
if self.in_batch_eval:
for metrics, mentions_repr, entities_repr, _, _, loss in outputs:
rank, mrr = metrics
total_avg_rank += rank
total_mrr += mrr
total_ent_count += entities_repr.size(0)
total_count += mentions_repr.size(0)
total_loss += loss
total_ent_count = total_ent_count / len(outputs)
else:
# TODO: collect entity representations over all batches
raise NotImplementedError("Only in-batch eval implemented!")
metrics = {
log_prefix + "_avg_rank": total_avg_rank / total_count,
log_prefix + "_mrr": total_mrr / total_count,
log_prefix + "_ent_count": total_ent_count,
log_prefix + "_loss": total_loss,
}
self.log_dict(metrics, on_epoch=True, sync_dist=True)
def validation_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def validation_epoch_end(self, valid_outputs):
self._eval_epoch_end(valid_outputs)
def test_step(self, batch, batch_idx):
return self._eval_step(batch, batch_idx)
def test_epoch_end(self, test_outputs):
self._eval_epoch_end(test_outputs, "test")
| BELA-main | mblink/task/blink_task.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, Any
# @manual "//github/facebookresearch/hydra:hydra"
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
defaults = [
"_self_",
{"task": "blink_task"},
{"checkpoint_callback": "default"},
]
@dataclass
class MainConfig:
defaults: List[Any] = field(default_factory=lambda: defaults)
task: Any = MISSING
datamodule: Any = MISSING
trainer: Any = MISSING
test_only: bool = False
checkpoint_callback: Any = MISSING
cs = ConfigStore.instance()
cs.store(name="config", node=MainConfig)
| BELA-main | mblink/conf/config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
@dataclass
class TransformConf:
pass
@dataclass
class DataModuleConf:
pass
@dataclass
class OptimConf:
pass
@dataclass
class ModelConf:
pass
| BELA-main | mblink/conf/__init__.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
import os, sys
from syntactic_testsets.utils import load_vocab
def lstm_probs(output, gold, w2idx):
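# For every test sentence, pick from the LM's softmax output the probabilities of
# the correct form and of the alternative (wrong-number) form; the two forms are
# read from columns 7 and 8 (0-based slice 6:8) of the tab-separated gold file.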
data = []
for scores, g in zip(output, gold):
scores = scores.split()
form, form_alt = g.split("\t")[6:8]
prob_correct = float(scores[w2idx[form]])
prob_wrong = float(scores[w2idx[form_alt]])
data.append(prob_correct)
data.append(prob_wrong)
return data
lang = sys.argv[1]
model = sys.argv[2]
path_repo = "../data"
path_test_data = path_repo + "/agreement/" + lang + "/generated"
path_output = path_repo + "/agreement/" + lang + "/generated.output_"
path_lm_data = path_repo + "/lm/" + lang
gold = open(path_test_data + ".gold").readlines()
sents = open(path_test_data + ".text").readlines()
data = pd.read_csv(path_test_data + ".tab",sep="\t")
vocab = load_vocab(path_lm_data + "/vocab.txt")
# getting softmax outputs and the probabilities for pairs of test forms
#print("Assembling probabilities for the choice forms")
outputs = {}
probs = pd.DataFrame([])
if os.path.isfile(path_output + model):
#print(model)
output = open(path_output + model).readlines()
#print(len(output))
data[model] = lstm_probs(output, gold, vocab)
### If you want to save table with target singular and plural form probabilities uncomment these lines and change the path ###
#path_result = path_repo + "/results/" + lang + "/" + model + ".tab"
#print("The target singular and plural form probabilities are saved in", path_result)
#data.to_csv(path_result, sep="\t", index=False)
#### Computing accuracy for the model (and frequency baseline) ####
if "freq" in data:
models = [model, "freq"]
else:
models = [model]
fields = ["pattern","constr_id","sent_id","n_attr","punct","len_prefix","len_context","sent","correct_number","type"]
wide_data = data[fields + ["class"] + models].pivot_table(columns=("class"), values=models, index=fields)
for model in models:
correct = wide_data.loc[:, (model, "correct")]
wrong = wide_data.loc[:, (model, "wrong")]
wide_data[(model, "acc")] = (correct > wrong)*100
t = wide_data.reset_index()
a = t.groupby("type").agg({(m,"acc"):"mean" for m in models})
print("Accuracy overall\n", a)
a = pd.concat([t[t.type=="original"].groupby("pattern").agg({(m, "acc"): "mean" for m in models}).rename(columns={'acc': 'orig'}),
t[t.type=="generated"].groupby("pattern").agg({(m, "acc"): "mean" for m in models}).rename(columns={'acc': 'gen'})], axis=1)
print()
print("Accuracy by pattern\n", a)
| colorlessgreenRNNs-main | src/results.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
lm_parser = argparse.ArgumentParser(add_help=False)
lm_parser.add_argument('--data', type=str,
help='location of the data corpus')
lm_parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
lm_parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
lm_parser.add_argument('--nhid', type=int, default=200,
help='number of hidden units per layer')
lm_parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
lm_parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
lm_parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
lm_parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
lm_parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
lm_parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
lm_parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
lm_parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
lm_parser.add_argument('--seed', type=int, default=1111,
help='random seed')
lm_parser.add_argument('--cuda', action='store_true',
help='use CUDA')
lm_parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
lm_parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
lm_parser.add_argument('--log', type=str, default='log.txt',
help='path to logging file')
| colorlessgreenRNNs-main | src/language_models/lm_argparser.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| colorlessgreenRNNs-main | src/language_models/__init__.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.utils.data.dataloader
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder.
ntoken: vocab size
    ninp: embedding size
"""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
#print(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (weight.new(self.nlayers, bsz, self.nhid).zero_(),
weight.new(self.nlayers, bsz, self.nhid).zero_())
else:
return weight.new(self.nlayers, bsz, self.nhid).zero_()
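# Minimal usage sketch (added for illustration; the sizes below are arbitrary
# assumptions, not values used anywhere in the original repository): instantiate a
# small LSTM LM and run one forward pass on random token indices.
if __name__ == "__main__":
    import torch

    ntoken, ninp, nhid, nlayers, bsz, seq_len = 100, 16, 32, 2, 4, 5
    demo = RNNModel('LSTM', ntoken, ninp, nhid, nlayers, dropout=0.2)
    hidden = demo.init_hidden(bsz)
    tokens = torch.randint(0, ntoken, (seq_len, bsz))   # input is seq_length x batch_size
    logits, hidden = demo(tokens, hidden)               # logits: seq_length x batch_size x ntoken
    print(logits.shape)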
| colorlessgreenRNNs-main | src/language_models/model.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
def repackage_hidden(h):
"""Detaches hidden states from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def get_batch(source, i, seq_length):
seq_len = min(seq_length, len(source) - 1 - i)
data = source[i:i+seq_len]
# predict the sequences shifted by one word
target = source[i+1:i+1+seq_len].view(-1)
return data, target
def batchify(data, bsz, cuda):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
if cuda:
data = data.cuda()
return data
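# Minimal usage sketch (added for illustration): batchify reshapes a flat stream of
# token ids into (ntokens // bsz) rows of bsz columns, get_batch slices a BPTT window
# plus next-word targets, and repackage_hidden would be applied to the RNN state
# between windows to truncate backpropagation. The toy id stream is an assumption;
# in the real scripts it comes from dictionary_corpus.tokenize.
if __name__ == "__main__":
    ids = torch.arange(0, 103)             # stand-in for a tokenized corpus
    data = batchify(ids, 4, cuda=False)    # shape: 25 x 4
    x, y = get_batch(data, 0, 10)          # x: 10 x 4 inputs, y: 40 shifted targets
    print(data.shape, x.shape, y.shape)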
| colorlessgreenRNNs-main | src/language_models/utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import argparse
from utils import batchify, get_batch, repackage_hidden
import torch
import torch.nn as nn
from dictionary_corpus import Dictionary, Corpus, tokenize
parser = argparse.ArgumentParser(description='Evaluate perplexity of the dataset, ignoring the <unk> words')
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--test', type=str, default=None,
help='Indicate your test file if different from data/test.txt')
parser.add_argument('--checkpoint', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
args = parser.parse_args()
def evaluate(data_source):
model.eval()
total_loss = 0
total_len = 0
ntokens = len(dictionary)
hidden = model.init_hidden(eval_batch_size)
unk_idx = dictionary.word2idx["<unk>"]
if args.cuda:
out_type = torch.cuda.LongTensor()
else:
out_type = torch.LongTensor()
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args.bptt)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
subset = targets != unk_idx
targets = targets[subset]
output_flat = output_flat[torch.arange(0, output_flat.size(0), out=out_type)[subset]]
total_len += targets.size(0)
total_loss += targets.size(0) * nn.CrossEntropyLoss()(output_flat, targets).item()
hidden = repackage_hidden(hidden)
return total_loss / total_len
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
eval_batch_size = 32
if args.test:
dictionary = Dictionary(args.data)
test = tokenize(dictionary, args.test)
print("Size, OOV", test.size(0), sum(test == dictionary.word2idx["<unk>"]))
test_data = batchify(test, eval_batch_size, args.cuda)
ntokens = len(dictionary)
else:
corpus = Corpus(args.data)
print("Size, OOV", corpus.test.size(0), sum(corpus.test == corpus.dictionary.word2idx["<unk>"]))
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
dictionary = corpus.dictionary
# Load the best saved model.
with open(args.checkpoint, 'rb') as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location=lambda storage, loc: storage)
print("Evaluation on non-unk tokens")
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('Test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
| colorlessgreenRNNs-main | src/language_models/evaluate_test_perplexity.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dictionary_corpus import Corpus, Dictionary, tokenize
from utils import batchify
import lm_argparser
parser = argparse.ArgumentParser(parents=[lm_argparser.lm_parser],
description="Training and testing ngram LSTM model")
parser.add_argument('--train', action='store_true', default=False,
help='enable training regime')
parser.add_argument('--test', action='store_true', default=False,
help='enable testing regime')
parser.add_argument('--test_path', type=str,
help='path to test file, gold file and vocab file output')
parser.add_argument('--suffix', type=str,
help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
logging.FileHandler(args.log)])
logging.info(args)
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder.
ntoken: vocab size
    ninp: embedding size
"""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
#print("hidden", hidden, hidden[0].size())
# take last output of the sequence
output = output[-1]
#print(output)
#decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
#print(output.size())
decoded = self.decoder(output.view(-1, output.size(1)))
#print(output.view(output.size(0)*output.size(1), output.size(2)))
#print(decoded)
return decoded
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (weight.new(self.nlayers, bsz, self.nhid).zero_(),
weight.new(self.nlayers, bsz, self.nhid).zero_())
else:
return weight.new(self.nlayers, bsz, self.nhid).zero_()
def get_batch(source, i, seq_length):
seq_len = min(seq_length, len(source) - 1 - i)
#print("Sequence length", seq_len)
#print(source)
data = source[i:i+seq_len]
#print(data)
    #> predict only the single word that follows the input sequence
target = source[i+seq_len].view(-1)
#print(target)
return data, target
def create_target_mask(test_file, gold_file, index_col):
sents = open(test_file, "r", encoding="utf8").readlines()
golds = open(gold_file, "r", encoding="utf8").readlines()
    #TODO optimize by initializing np.array of needed size and doing indexing
targets = []
for sent, gold in zip(sents, golds):
# constr_id, sent_id, word_id, pos, morph
target_idx = int(gold.split()[index_col])
len_s = len(sent.split(" "))
t_s = [0] * len_s
t_s[target_idx] = 1
#print(sent.split(" ")[target_idx])
targets.extend(t_s)
return np.array(targets)
def evaluate_perplexity(data_source, exclude_oov=False):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
len_data = 0
unk_idx = corpus.dictionary.word2idx["<unk>"]
if args.cuda:
torch_range = torch.cuda.LongTensor()
else:
torch_range = torch.LongTensor()
with torch.no_grad():
for i in range(0, data_source.size(0) - 1):
hidden = model.init_hidden(eval_batch_size)
data, targets = get_batch(data_source, i, args.bptt)
#> output has size seq_length x batch_size x vocab_size
output = model(data, hidden)
output_flat = output.view(-1, ntokens)
# excluding OOV
if exclude_oov:
subset = targets != unk_idx
subset = subset.data
targets = targets[subset]
output_flat = output_flat[torch.arange(0, output_flat.size(0), out=torch_range)[subset]]
            total_loss += targets.size(0) * nn.CrossEntropyLoss()(output_flat, targets).item()
            len_data += targets.size(0)
    return total_loss / len_data
def evaluate_on_mask(data_source, mask):
model.eval()
idx2word = dictionary.idx2word
for i in range(0, data_source.size(0) - 1):
hidden = model.init_hidden(eval_batch_size)
        data, targets = get_batch(data_source, i, args.bptt)
        _, targets_mask = get_batch(mask, i, args.bptt)
#print(targets_mask.size())
#> output has size seq_length x batch_size x vocab_size
output = model(data, hidden)
output_flat = output.view(-1, ntokens)
        log_probs = F.log_softmax(output_flat, dim=1)
# print("Log probs size", log_probs.size())
# print("Target size", targets.size())
log_probs_np = log_probs.data.cpu().numpy()
subset = targets_mask.data.numpy().astype(bool)
for scores, correct_label in zip(log_probs_np[subset], targets.data.cpu().numpy()[subset]):
print(idx2word[correct_label], scores[correct_label])
f_output.write("\t".join(str(s) for s in scores) + "\n")
#return total_loss[0] /len(data_source)
###############################################################################
# Training code
###############################################################################
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
criterion = nn.CrossEntropyLoss()
for batch, i in enumerate(range(0, train_data.size(0) - 1)):
#> i is the starting index of the batch
#> batch is the number of the batch
#> data is a tensor of size seq_length x batch_size, where each element is an index from input vocabulary
#> targets is a vector of length seq_length x batch_size
data, targets = get_batch(train_data, i, args.bptt)
hidden = model.init_hidden(args.batch_size)
model.zero_grad()
output = model(data, hidden)
#> output.view(-1, ntokens) transforms a tensor to a longer tensor of size
#> (seq_length x batch_size) x output_vocab_size
#> which matches targets in length
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        for p in model.parameters():
            p.data.add_(p.grad.data, alpha=-lr)
        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
#logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
# 'loss {:5.2f} | ppl {:8.2f}'.format(
# epoch, batch, len(train_data) // args.bptt, lr,
# elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f}'.format(epoch, batch, len(train_data), lr,
elapsed * 1000 / args.log_interval, cur_loss))
total_loss = 0
start_time = time.time()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
if args.train:
logging.info("Loading data")
corpus = Corpus(args.data)
# logging.info(corpus.train)
ntokens = len(corpus.dictionary)
logging.info("Vocab size %d", ntokens)
logging.info("Batchying..")
eval_batch_size = 256
train_data = batchify(corpus.train, args.batch_size, args.cuda)
# logging.info("Train data size", train_data.size())
val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
logging.info("Building the model")
# model = torch.nn.DataParallel(model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied),
# dim=1)
model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
# Loop over epochs.
lr = args.lr
best_val_loss = None
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate_perplexity(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
    with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on valid data with OOV excluded
test_loss = evaluate_perplexity(val_data, exclude_oov=True)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
logging.info('=' * 89)
#####################################
# Testing #
#####################################
if args.test:
dictionary = Dictionary(args.data)
    with open(args.save, 'rb') as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
model.cuda()
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location=lambda storage, loc: storage)
model.cpu()
model.eval()
eval_batch_size = 1
ntokens = len(dictionary)
#print("Vocab size", ntokens)
#print("TESTING")
# depends on generation script (constantly modified) - the column where the target word index is written
index_col = 3
mask = create_target_mask(args.test_path + ".text", args.test_path + ".gold", index_col)
mask_data = batchify(torch.LongTensor(mask), eval_batch_size, False)
test_data = batchify(tokenize(dictionary, args.test_path + ".text"), eval_batch_size, args.cuda)
f_output = open(args.test_path + ".output_" + args.suffix, 'w')
evaluate_on_mask(test_data, mask_data)
f_output.close()
| colorlessgreenRNNs-main | src/language_models/ngram_lstm.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import dictionary_corpus
from utils import repackage_hidden, batchify, get_batch
import numpy as np
parser = argparse.ArgumentParser(description='Mask-based evaluation: extracts softmax vectors for specified words')
parser.add_argument('--data', type=str,
help='location of the data corpus for LM training')
parser.add_argument('--checkpoint', type=str,
help='model checkpoint to use')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--path', type=str, help='path to test file (text) gold file (indices of words to evaluate)')
parser.add_argument('--suffix', type=str, help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
def evaluate(data_source, mask):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, seq_len):
# keep continuous hidden state across all sentences in the input file
data, targets = get_batch(data_source, i, seq_len)
_, targets_mask = get_batch(mask, i, seq_len)
output, hidden = model(data, hidden)
output_flat = output.view(-1, vocab_size)
total_loss += len(data) * nn.CrossEntropyLoss()(output_flat, targets)
output_candidates_probs(output_flat, targets, targets_mask)
hidden = repackage_hidden(hidden)
return total_loss.item() / (len(data_source) - 1)
def output_candidates_probs(output_flat, targets, mask):
log_probs = F.log_softmax(output_flat, dim=1)
log_probs_np = log_probs.cpu().numpy()
subset = mask.cpu().numpy().astype(bool)
for scores, correct_label in zip(log_probs_np[subset], targets.cpu().numpy()[subset]):
#print(idx2word[correct_label], scores[correct_label])
f_output.write("\t".join(str(s) for s in scores) + "\n")
def create_target_mask(test_file, gold_file, index_col):
sents = open(test_file, "r").readlines()
golds = open(gold_file, "r").readlines()
    #TODO optimize by initializing np.array of needed size and doing indexing
targets = []
for sent, gold in zip(sents, golds):
# constr_id, sent_id, word_id, pos, morph
target_idx = int(gold.split()[index_col])
len_s = len(sent.split(" "))
t_s = [0] * len_s
t_s[target_idx] = 1
#print(sent.split(" ")[target_idx])
targets.extend(t_s)
return np.array(targets)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
with open(args.checkpoint, 'rb') as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location = lambda storage, loc: storage)
model.eval()
if args.cuda:
model.cuda()
else:
model.cpu()
eval_batch_size = 1
seq_len = 20
dictionary = dictionary_corpus.Dictionary(args.data)
vocab_size = len(dictionary)
#print("Vocab size", vocab_size)
print("Computing probabilities for target words")
# assuming the mask file contains one number per line indicating the index of the target word
index_col = 0
mask = create_target_mask(args.path + ".text", args.path + ".eval", index_col)
mask_data = batchify(torch.LongTensor(mask), eval_batch_size, args.cuda)
test_data = batchify(dictionary_corpus.tokenize(dictionary, args.path + ".text"), eval_batch_size, args.cuda)
f_output = open(args.path + ".output_" + args.suffix, 'w')
evaluate(test_data, mask_data)
print("Probabilities saved to", args.path + ".output_" + args.suffix)
f_output.close()
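# Example invocation (added for illustration; paths and the suffix are placeholders).
# The script reads <path>.text and <path>.eval (one target-word index per line) and
# writes one row of log-softmax scores per target to <path>.output_<suffix>, which
# results.py then consumes:
#
#   python evaluate_target_word.py --data data/lm/English --checkpoint model.pt \
#       --path data/agreement/English/generated --suffix lstm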
| colorlessgreenRNNs-main | src/language_models/evaluate_target_word.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import torch
import torch.nn as nn
from dictionary_corpus import Corpus
import model
from lm_argparser import lm_parser
from utils import repackage_hidden, get_batch, batchify
parser = argparse.ArgumentParser(parents=[lm_parser],
description="Basic training and evaluation for RNN LM")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
logging.FileHandler(args.log)])
logging.info(args)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
logging.info("Loading data")
start = time.time()
corpus = Corpus(args.data)
logging.info("( %.2f )" % (time.time() - start))
ntokens = len(corpus.dictionary)
logging.info("Vocab size %d", ntokens)
logging.info("Batchying..")
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size, args.cuda)
val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
criterion = nn.CrossEntropyLoss()
###############################################################################
# Build the model
###############################################################################
logging.info("Building the model")
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
###############################################################################
# Training code
###############################################################################
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, args.bptt)
#> output has size seq_length x batch_size x vocab_size
output, hidden = model(data, hidden)
#> output_flat has size num_targets x vocab_size (batches are stacked together)
#> ! important, otherwise softmax computation (e.g. with F.softmax()) is incorrect
output_flat = output.view(-1, ntokens)
#output_candidates_info(output_flat.data, targets.data)
total_loss += len(data) * nn.CrossEntropyLoss()(output_flat, targets).item()
hidden = repackage_hidden(hidden)
return total_loss / (len(data_source) - 1)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i, args.bptt)
# truncated BPP
hidden = repackage_hidden(hidden)
model.zero_grad()
output, hidden = model(data, hidden)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
for p in model.parameters():
            p.data.add_(p.grad.data, alpha=-lr)
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs + 1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = evaluate(test_data)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
logging.info('=' * 89)
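# Example training invocation (added for illustration; the hyper-parameter values are
# placeholders and --data must point at a directory containing train.txt, valid.txt
# and test.txt):
#
#   python main.py --data data/lm/English --model LSTM --emsize 200 --nhid 200 \
#       --nlayers 2 --batch_size 20 --bptt 35 --cuda --save model.pt --log train.log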
| colorlessgreenRNNs-main | src/language_models/main.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
from collections import defaultdict
import logging
class Dictionary(object):
def __init__(self, path):
self.word2idx = {}
self.idx2word = []
self.word2freq = defaultdict(int)
vocab_path = os.path.join(path, 'vocab.txt')
try:
vocab = open(vocab_path, encoding="utf8").read()
self.word2idx = {w: i for i, w in enumerate(vocab.split())}
self.idx2word = [w for w in vocab.split()]
self.vocab_file_exists = True
except FileNotFoundError:
logging.info("Vocab file not found, creating new vocab file.")
self.create_vocab(os.path.join(path, 'train.txt'))
open(vocab_path,"w").write("\n".join([w for w in self.idx2word]))
def add_word(self, word):
self.word2freq[word] += 1
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
#return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
def create_vocab(self, path):
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split()
for word in words:
self.add_word(word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary(path)
self.train = tokenize(self.dictionary, os.path.join(path, 'train.txt'))
self.valid = tokenize(self.dictionary, os.path.join(path, 'valid.txt'))
self.test = tokenize(self.dictionary, os.path.join(path, 'test.txt'))
def tokenize(dictionary, path):
"""Tokenizes a text file for training or testing to a sequence of indices format
We assume that training and test data has <eos> symbols """
assert os.path.exists(path)
with open(path, 'r', encoding="utf8") as f:
ntokens = 0
for line in f:
words = line.split()
ntokens += len(words)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
ids = torch.LongTensor(ntokens)
token = 0
for line in f:
words = line.split()
for word in words:
if word in dictionary.word2idx:
ids[token] = dictionary.word2idx[word]
else:
ids[token] = dictionary.word2idx["<unk>"]
token += 1
return ids
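# Minimal usage sketch (added for illustration; "data/lm/English" and its files are
# placeholder paths, not shipped with the repository). Dictionary() loads vocab.txt if
# present, otherwise builds it from train.txt; tokenize() maps OOV words to <unk>,
# which is assumed to be in the vocabulary.
if __name__ == "__main__":
    dictionary = Dictionary("data/lm/English")
    ids = tokenize(dictionary, "data/lm/English/valid.txt")
    print(len(dictionary), "types,", ids.size(0), "tokens")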
| colorlessgreenRNNs-main | src/language_models/dictionary_corpus.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import subprocess
def query_KenLM(lm_file, file_name, kenlm_path="/private/home/gulordava/kenlm/build/bin/"):
"""
:param lm_file: language model
:param file_name: file with (partial) sentences to test
:return: a list of probabilities of the last word of each sentence
"""
command = kenlm_path + "query " + lm_file + ' < ' + file_name + " -n"
KenLM_query = subprocess.getstatusoutput(command)[1]
lines = KenLM_query.split("\n")
skip = ["This binary file contains probing hash tables.",
"Loading the LM will be faster if you build a binary file."]
if any(s in lines[0] for s in skip):
lines = lines[1:]
result_probs = []
for line in lines:
# last ngram is Total + OOV
try:
result_probs.append(float(line.split('\t')[-2].split(" ")[2]))
except (IndexError, ValueError) as e:
print(line)
    return result_probs, lines
 | colorlessgreenRNNs-main | src/syntactic_testsets/evaluate_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import tree_module as tm
import argparse
import itertools
from collections import defaultdict
import numpy as np
from generate_utils import is_good_form
from utils import load_vocab, ltm_to_word, read_paradigms
def safe_log(x):
np.seterr(divide='ignore', invalid='ignore')
return np.where(x > 0.0001, np.log(x), 0.0)
def cond_entropy(xy):
# normalise
xy = xy / np.sum(xy)
x_ = np.sum(xy, axis=1)
y_ = np.sum(xy, axis=0)
x_y = xy / y_
# print(x_y)
y_x = xy / x_.reshape(x_.shape[0], 1)
# print(y_x)
# Entropies: H(x|y) H(y|x) H(x) H(y)
return np.sum(-xy * safe_log(x_y)), np.sum(-xy * safe_log(y_x)), np.sum(-x_ * safe_log(x_)), np.sum(
-y_ * safe_log(y_))
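# Worked example (comment only, added for clarity): for a perfectly predictive count
# table xy = [[5, 0], [0, 5]] the rows and columns determine each other, so
# cond_entropy(np.array([[5., 0.], [0., 5.]])) returns approximately
# (0.0, 0.0, 0.693, 0.693), i.e. H(X|Y) = H(Y|X) = 0 and H(X) = H(Y) = ln 2.
# find_good_patterns below relies on exactly this: a context is kept when H(left|right)
# is ~0, meaning the right-hand feature value fully determines the left-hand one.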
def pos_structure(nodes, arc):
""" Get a sequence of pos tags for nodes which are direct children of the arc head or the arc child
nodes - the list of nodes of the context Y, between the head and the child (X, Z) of the arc
"""
return tuple([n.pos for n in nodes if n.head_id in [arc.head.index, arc.child.index]])
def inside(tree, a):
if a.child.index < a.head.index:
nodes = tree.nodes[a.child.index: a.head.index - 1]
l = a.child
r = a.head
else:
nodes = tree.nodes[a.head.index: a.child.index - 1]
l = a.head
r = a.child
return nodes, l, r
def features(morph, feature_list):
#Definite=Def|Gender=Masc|Number=Sing|PronType=Art Tense=Past|VerbForm=Part
if not feature_list:
return morph
all_feats = morph.split("|")
feat_values = tuple(f for f in all_feats if f.split("=")[0] in feature_list)
return "|".join(feat_values)
def morph_contexts_frequencies(trees, feature_list):
"""
Collect frequencies for X Y Z tuples, where Y is a context defined by its surface structure
and X and Z are connected by a dependency
:param trees: dependency trees
:return: two dictionaries for left and right dependencies
"""
d_left = defaultdict(lambda: defaultdict(int))
d_right = defaultdict(lambda: defaultdict(int))
for t in trees:
for a in t.arcs:
if 3 < a.length() < 15 and t.is_projective_arc(a):
# print("\n".join(str(n) for n in t.nodes))
nodes, l, r = inside(t, a)
substring = (l.pos,) + pos_structure(nodes, a) + (r.pos,)
# print(substring)
if substring:
if features(l.morph, feature_list) == "" or features(r.morph, feature_list) == "":
continue
#substring = substring + (a.dep_label,)
if a.dir == tm.Arc.LEFT:
d_left[substring][(features(l.morph, feature_list), features(r.morph, feature_list))] += 1
if a.dir == tm.Arc.RIGHT:
d_right[substring][(features(l.morph, feature_list), features(r.morph, feature_list))] += 1
return d_left, d_right
def find_good_patterns(context_dict, freq_threshold):
"""
:param context_dict: is a dictionary of type { Y context : {(X, Z) : count} }
for X Y Z sequences where X and Z could be of any type (tags, morph)
:param freq_threshold: for filtering out too infrequent patterns
:return: list of patterns - tuples (context, left1, left2) == (Y, X1, X2)
(where X1 and X2 occur with different Zs)
"""
patterns = []
for context in context_dict:
left_right_pairs = context_dict[context].keys()
if len(left_right_pairs) == 0:
continue
left, right = zip(*left_right_pairs)
left_v = set(left)
d = context_dict[context]
if len(left_v) < 2:
continue
for l1, l2 in itertools.combinations(left_v, 2):
right_v = [r for (l, r) in left_right_pairs if l in (l1, l2)]
if len(right_v) < 2:
continue
a = np.zeros((2, len(right_v)))
for i, x in enumerate((l1, l2)):
for j, y in enumerate(right_v):
a[(i, j)] = d[(x, y)]
l_r, r_l, l_e, r_e = cond_entropy(a)
mi = l_e - l_r
count_l1 = 0
count_l2 = 0
for l, r in d:
if l == l1:
count_l1 += d[(l, r)]
if l == l2:
count_l2 += d[(l, r)]
#print(l_r, r_l, l_e, r_e, mi)
if l_r < 0.001 and count_l1 > freq_threshold and count_l2 > freq_threshold:
patterns.append((context, l1, l2))
print(context, l_r, mi)
print(l1, l2, count_l1, count_l2)
#for l, r in d:
# if l in (l1, l2) and d[(l, r)] > 0 :
# print(l, r, d[(l, r)])
return patterns
def grep_morph_pattern(trees, context, l_values, dep_dir, feature_list=None):
"""
:param context: Y
:param l_values: l_values are relevant X values
:param dep_dir:
:return: generator of (context-type, l, r, tree, Y nodes) tuples
"""
if feature_list is None:
feature_list = ['Number']
for t in trees:
for a in t.arcs:
if 3 < a.length() < 15 and t.is_projective_arc(a):
if a.child.pos == "PUNCT" or a.head.pos == "PUNCT":
continue
#print("\n".join(str(n) for n in t.nodes))
nodes, l, r = inside(t, a)
if a.dir != dep_dir:
continue
if not any(m in l.morph for m in l_values):
#print(features(l.morph), l_values)
continue
if features(r.morph, feature_list) != features(l.morph, feature_list):
continue
substring = (l.pos,) + pos_structure(nodes, a) + (r.pos,)
if substring == context:
#print(substring, context)
yield context, l, r, t, nodes
def main():
parser = argparse.ArgumentParser(
description='Extracting dependency-based long-distance agreement patterns')
parser.add_argument('--treebank', type=str, required=True,
help='Path of the input treebank file (in a column format)')
parser.add_argument('--output', type=str, required=True,
help="Path for the output files")
parser.add_argument('--features', type=str, default="Number",
help="A list of morphological features which will be used, in Number|Case|Gender format")
parser.add_argument('--freq', type=int, default=5, help="minimal frequency")
parser.add_argument('--vocab', type=str, required=False, help="LM vocab - to compute which sentences have OOV")
parser.add_argument('--paradigms', type=str, required=False, help="File with morphological paradigms - to compute"
"which sentences have both target pairs")
args = parser.parse_args()
if args.vocab:
vocab = load_vocab(args.vocab)
else:
vocab = []
print("Loading trees")
trees = tm.load_trees_from_conll(args.treebank)
# needed for original UD treebanks (e.g. Italian) which contain spans, e.g. 10-12
# annotating mutlimorphemic words as several nodes in the tree
for t in trees:
t.remerge_segmented_morphemes()
if args.features:
args.features = args.features.split("|")
print("Features", args.features)
print("Extracting contexts")
context_left_deps, context_right_deps = morph_contexts_frequencies(trees, args.features)
# filtering very infrequent cases
filter_threshold = 1
context_left_deps = defaultdict(lambda: defaultdict(int), {c: defaultdict(int,
{lr: freq for lr, freq in d.items() if freq > filter_threshold}) for c, d in context_left_deps.items()})
context_right_deps = defaultdict(lambda: defaultdict(int), {c: defaultdict(int,
{lr: freq for lr, freq in d.items() if freq > filter_threshold}) for c, d in context_right_deps.items()})
print("Finding good patterns")
good_patterns_left = find_good_patterns(context_left_deps, args.freq)
good_patterns_right = find_good_patterns(context_right_deps, args.freq)
f_out = open(args.output + "/patterns.txt", "w")
print("Saving patterns and sentences matching them")
ltm_paradigms = ltm_to_word(read_paradigms(args.paradigms))
for p in good_patterns_left:
f_out.write("L\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
print("L\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
f_out_grep = open(args.output + "/L_" + "_".join(x for x in p[0]), "w")
for context, l, r, t, nodes in grep_morph_pattern(trees, p[0], p[1:], tm.Arc.LEFT, args.features):
#print(l.morph + " " + r.morph + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word)
in_vocab = all([n.word in vocab for n in nodes + [l, r]])
in_paradigms = is_good_form(r.word, r.word, r.morph, r.lemma, r.pos, vocab, ltm_paradigms)
f_out_grep.write(features(l.morph, args.features) + " " + features(r.morph, args.features) +
"\t" + str(in_vocab) + str(in_paradigms) + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word + "\n")
f_out_grep.close()
for p in good_patterns_right:
f_out.write("R\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
print("R\t" + "_".join(x for x in p[0]) + "\t" + "\t".join(p[1:]) + "\n")
f_out_grep = open(args.output + "/R_" + "_".join(x for x in p[0]), "w")
for context, l, r, t, nodes in grep_morph_pattern(trees, p[0], p[1:], tm.Arc.RIGHT, args.features):
#print(l.morph + " " + r.morph + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word)
in_vocab = all([n.word in vocab for n in nodes + [l, r]])
in_paradigms = is_good_form(r.word, r.word, r.morph, r.lemma, r.pos, vocab, ltm_paradigms)
f_out_grep.write(features(l.morph, args.features)+ " " + features(r.morph, args.features) +
"\t" + str(in_vocab) + str(in_paradigms) + "\t" + l.word + " " + " ".join([n.word for n in nodes]) + " " + r.word + "\n")
f_out_grep.close()
f_out.close()
if __name__ == "__main__":
    main()
 | colorlessgreenRNNs-main | src/syntactic_testsets/extract_dependency_patterns.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Classes Node, Arc, DependencyTree providing functionality for syntactic dependency trees
"""
from __future__ import print_function, division
import re
from queue import Queue
import conll_utils as conll_utils
class Node(object):
def __init__(self, index=None, word="", lemma="", head_id=None, pos="", dep_label="", morph="_",
size=None, dep_label_new=None):
"""
:param index: int
:param word: str
:param head_id: int
:param pos: str
:param dep_label: str
"""
self.index = index
self.word = word
self.lemma = lemma
self.head_id = head_id
self.pos = pos
self.dep_label = dep_label
self.morph = morph
if dep_label_new is None:
self.dep_label_new = dep_label
else:
self.dep_label_new = dep_label_new
# to assign after tree creation
self.size = size
self.dir = None
def __str__(self):
return "\t".join([str(self.index), self.word, self.pos, self.morph, str(self.head_id), str(self.dep_label)])
def __repr__(self):
return "\t".join([str(v) for (a, v) in self.__dict__.items() if v])
@classmethod
def from_str(cls, string):
index, word, pos, head_id, dep_label = [None if x == "None" else x for x in string.split("\t")]
        return Node(index=index, word=word, head_id=head_id, pos=pos, dep_label=dep_label)
def __eq__(self, other):
return other is not None and \
self.index == other.index and \
self.word == other.word and \
self.head_id == other.head_id and \
self.pos == other.pos and \
self.dep_label == other.dep_label
def __hash__(self):
return hash(tuple(self.__dict__.values()))
def is_root(self):
generic_root = DependencyTree.generic_root(conll_utils.UD_CONLL_CONFIG)
if self.word == generic_root.word and self.pos == generic_root.pos:
return True
return False
class Arc(object):
LEFT = "L"
RIGHT = "R"
def __init__(self, head, direction, child):
self.head = head
self.dir = direction
self.child = child
self.dep_label = child.dep_label
def __str__(self):
return str(self.head) + " " + self.dir + " " + str(self.child)
def __repr__(self):
return str(self)
@classmethod
def from_str(cls, string):
head_str, dir, child_str = string.split(" ")
return Arc(Node.from_str(head_str), dir, Node.from_str(child_str))
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __hash__(self):
return hash(tuple(self.__dict__.values()))
def length(self):
# arcs to ROOT node have length 0
if self.head.is_root():
return 0
else:
return abs(self.child.index - self.head.index)
class DependencyTree(object):
def __init__(self, nodes, arcs, config, fused_nodes):
self.nodes = nodes
self.arcs = arcs
self.assign_sizes_to_nodes()
self.config = config
# for UD annotation to be able to recover original sentence (without split morphemes)
self.fused_nodes = fused_nodes
def __str__(self):
return "\n".join([str(n) for n in self.nodes])
def __repr__(self):
return str(self)
def children(self, head):
children = []
for arc in self.arcs:
if arc.head == head:
children.append(arc.child)
return children
def assign_sizes_to_nodes(self):
for node in self.nodes:
node.size = len(self.children(node)) + 1
def reindex(self, nodes, conll_config):
""" After reordering 'nodes' list reflects the final order of nodes, however the indices of node objects
do not correspond to this order. This function fixes it. """
new_positions = {}
new_nodes = [] # in order
for i in range(len(nodes)):
new_positions[nodes[i].index] = i
for i in range(len(nodes)):
new_nodes.append(nodes[i])
if nodes[i].head_id == conll_config.ROOT_INDEX:
nodes[i].index = i + conll_config.OFFSET
else:
nodes[i].index = i + conll_config.OFFSET
nodes[i].head_id = new_positions[nodes[i].head_id] + conll_config.OFFSET
self.nodes = new_nodes
def remove_node(self, node_x):
assert len(self.children(node_x)) == 0
self.nodes.remove(node_x)
for node in self.nodes:
if node.head_id > node_x.index:
node.head_id = node.head_id - 1
if node.index > node_x.index:
node.index = node.index - 1
for i in range(len(self.fused_nodes)):
start, end, token = self.fused_nodes[i]
if start > node_x.index:
start = start - 1
if end > node_x.index:
end = end - 1
self.fused_nodes[i] = (start, end, token)
def subtree(self, head):
elements = set()
queue = Queue()
queue.put(head)
#head_ = Node(head.index, head.word, head.pos + "X")
elements.add(head)
visited = set()
while not queue.empty():
next_node = queue.get()
if next_node in visited:
continue
visited.add(next_node)
for child in self.children(next_node):
elements.add(child)
queue.put(child)
return sorted(elements, key=lambda element: int(element.index))
def is_projective_arc(self, arc):
st = self.subtree(arc.head)
# all nodes in subtree of the arc head
st_idx = [node.index for node in st]
# span between the child and the head
indexes = range(arc.child.index + 1, arc.head.index) if arc.child.index < arc.head.index else range(
arc.head.index + 1, arc.child.index)
# each node/word between child and head should be part of the subtree
# if not, than the child-head arc is crossed by some other arc and is non-projective
for i in indexes:
if i not in st_idx:
return False
return True
def is_projective(self):
return all(self.is_projective_arc(arc) for arc in self.arcs)
def length(self):
return sum(arc.length() for arc in self.arcs)
def average_branching_factor(self):
heads = [node.head_id for node in self.nodes]
return len(self.nodes)/len(set(heads))
def root(self):
return DependencyTree.generic_root(self.config)
def remerge_segmented_morphemes(self):
"""
UD format only: Remove segmented words and morphemes and substitute them by the original word form
- all children of the segments are attached to the merged word form
- word form features are assigned heuristically (should work for Italian, not sure about other languages)
- pos tag and morphology (zero?) comes from the first morpheme
:return:
"""
for start, end, token in self.fused_nodes:
# assert start + 1 == end, t
self.nodes[start - 1].word = token
for i in range(end - start):
# print(i)
if len(self.children(self.nodes[start])) != 0:
for c in self.children(self.nodes[start]):
c.head_id = self.nodes[start - 1].index
self.arcs.remove(Arc(child=c, head=self.nodes[start], direction=c.dir))
self.arcs.append(Arc(child=c, head=self.nodes[start - 1], direction=c.dir))
assert len(self.children(self.nodes[start])) == 0, (self, start, end, token, i, self.arcs)
self.remove_node(self.nodes[start])
# print(t)
# print(t)
self.fused_nodes = []
@classmethod
def generic_root(cls, conll_config):
return Node(conll_config.ROOT_INDEX, "ROOT", "ROOT", 0, "ROOT", size=0)
@classmethod
def from_sentence(cls, sentence, conll_config):
nodes = []
fused_nodes = []
for i in range(len(sentence)):
row = sentence[i]
if conll_config.MORPH is not None:
morph = row[conll_config.MORPH]
else:
morph = "_"
# saving original word segments separated in UD (e.g. Italian darglielo -> dare + gli + lo)
if conll_config == conll_utils.UD_CONLL_CONFIG:
if re.match(r"[0-9]+-[0-9]+", row[0]):
fused_nodes.append((int(row[0].split("-")[0]), int(row[0].split("-")[1]), row[1]))
continue
# empty elements (e.g. copula in Russian)
if re.match(r"[0-9]+\.[0-9]+", row[0]):
continue
if conll_config.INDEX is not None:
nodes.append(
Node(int(row[conll_config.INDEX]),
row[conll_config.WORD],
row[conll_config.LEMMA],
int(row[conll_config.HEAD_INDEX]),
pos=row[conll_config.POS],
dep_label=row[conll_config.DEP_LABEL],
morph=morph))
else:
nodes.append(Node(i,
row[conll_config.WORD],
row[conll_config.LEMMA],
int(row[conll_config.HEAD_INDEX]),
pos=row[conll_config.POS],
dep_label=row[conll_config.DEP_LABEL],
morph=morph))
arcs = []
for node in nodes:
head_index = int(node.head_id)
head_element = nodes[head_index - conll_config.OFFSET]
if head_index == conll_config.ROOT_INDEX:
arcs.append(Arc(cls.generic_root(conll_config), Arc.LEFT, node))
elif head_index < int(node.index):
arcs.append(Arc(head_element, Arc.RIGHT, node))
node.dir = Arc.RIGHT
else:
arcs.append(Arc(head_element, Arc.LEFT, node))
node.dir = Arc.LEFT
return cls(nodes, arcs, conll_config, fused_nodes)
def pprint(self, conll_config, lower_case=False):
# TODO: change the indices of heads in accordance with the config
s = ""
for node in self.nodes:
row = ["_"] * conll_config.NCOLS
if conll_config.INDEX is not None:
row[conll_config.INDEX] = str(node.index)
if node.word:
if lower_case:
row[conll_config.WORD] = node.word.lower()
else:
row[conll_config.WORD] = node.word
if node.pos:
row[conll_config.POS] = node.pos
if node.morph:
row[conll_config.MORPH] = node.morph
if node.lemma:
row[conll_config.LEMMA] = node.lemma
row[conll_config.HEAD_INDEX] = str(node.head_id)
if node.dep_label:
row[conll_config.DEP_LABEL] = node.dep_label
s = s + "\t".join(row) + "\n"
return s #.encode("utf-8")
def load_trees_from_conll(file_name, config=None):
sentences = conll_utils.read_sentences_from_columns(open(file_name))
# config for the default cases, to facilitate handling of multiple formats at the same time
# for guaranteed performance, config should be supplied
if config is None:
if len(sentences[0][0]) == conll_utils.ZGEN_CONLL_CONFIG.NCOLS:
config = conll_utils.ZGEN_CONLL_CONFIG
elif len(sentences[0][0]) == conll_utils.UD_CONLL_CONFIG.NCOLS:
config = conll_utils.UD_CONLL_CONFIG
else:
print("Unrecognised format of ", file_name)
return None
trees = []
for s in sentences:
trees.append(DependencyTree.from_sentence(s, config))
return trees
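# Minimal usage sketch (added for illustration; "treebank.conllu" is a placeholder
# path): load UD trees, merge fused surface tokens, and print the mid-range projective
# dependencies of the kind the pattern-extraction script operates on.
if __name__ == "__main__":
    trees = load_trees_from_conll("treebank.conllu", conll_utils.UD_CONLL_CONFIG)
    for t in trees:
        t.remerge_segmented_morphemes()
        for a in t.arcs:
            if 3 < a.length() < 15 and t.is_projective_arc(a):
                print(a.head.word, a.dir, a.child.word, a.dep_label)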
| colorlessgreenRNNs-main | src/syntactic_testsets/tree_module.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from utils import read_paradigms, load_vocab, extract_sent_features, transform_gold, vocab_freqs
import pandas as pd
lang = sys.argv[1]
path_test_data = "/private/home/gulordava/colorlessgreen/data/agreement/" + lang + "/generated"
path_lm_data = "/private/home/gulordava/colorlessgreen/data/lm/" + lang
if lang == "English":
path_paradigms = "/private/home/gulordava/edouard_data/enwiki/paradigms_UD.txt"
if lang == "Italian":
path_paradigms = "/private/home/gulordava/edouard_data/itwiki/paradigms_UD.txt"
if lang == "Italian_srnn":
path_paradigms = "/private/home/gulordava/edouard_data/itwiki/paradigms_UD.txt"
if lang == "Russian":
path_paradigms = "/private/home/gulordava/edouard_data/ruwiki/paradigms_UD.txt"
if lang == "Hebrew":
path_paradigms = "/private/home/gulordava/edouard_data/hewiki/p2"
gold = open(path_test_data + ".gold").readlines()
sents = open(path_test_data + ".text").readlines()
paradigms = read_paradigms(path_paradigms)
output = []
vocab = load_vocab(path_lm_data + "/vocab.txt")
data = transform_gold(gold)
data = pd.DataFrame(data, columns=["pattern_id", "constr_id", "sent_id", "correct_number", "form", "class"])
data.loc[data.sent_id == 0, "type"] = "original"
data.loc[data.sent_id > 0, "type"] = "generated"
# getting simpler pattern labels
patterns = {p: "__".join(p.split("!")[:2]) for p in set(data.pattern_id)}
data["pattern"] = data["pattern_id"].map(patterns)
df_sents = extract_sent_features(sents, gold, vocab, paradigms)
full_df = data.merge(df_sents, on=["pattern_id", "constr_id", "sent_id"])
freq_dict = vocab_freqs(path_lm_data + "/train.txt", vocab)
full_df["freq"] = full_df["form"].map(freq_dict)
fields = ["pattern", "constr_id", "sent_id", "correct_number", "form", "class", "type", "prefix", "n_attr",
"punct", "freq", "len_context", "len_prefix", "sent"]
full_df[fields].to_csv(path_test_data + ".tab", sep="\t", index=False)
 | colorlessgreenRNNs-main | src/syntactic_testsets/_create_datatable.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| colorlessgreenRNNs-main | src/syntactic_testsets/__init__.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
#!/usr/bin/env python
import sys
import re
from collections import namedtuple
ConllConfig = namedtuple('CONLL_config',
['INDEX', 'WORD', 'POS', 'LEMMA', 'MORPH',
'HEAD_INDEX', 'DEP_LABEL',
                          'OFFSET', 'ROOT_INDEX', 'NCOLS'])
UD_CONLL_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=2, POS=3, MORPH=5,
HEAD_INDEX=6, DEP_LABEL=7, OFFSET=1, ROOT_INDEX=0, NCOLS=10)
UD_CONLL_FINE_POS_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=2, POS=4, MORPH=5,
HEAD_INDEX=6, DEP_LABEL=7, OFFSET=1, ROOT_INDEX=0, NCOLS=10)
CONLL09_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=2, POS=4, MORPH=6, #TODO check morph column id
HEAD_INDEX=8, DEP_LABEL=10, OFFSET=1, ROOT_INDEX=0, NCOLS=12)
ZGEN_CONLL_CONFIG = ConllConfig(INDEX=None, WORD=0, LEMMA=0, POS=1, MORPH=None,
HEAD_INDEX=2, DEP_LABEL=3, OFFSET=0, ROOT_INDEX=-1, NCOLS=4)
ARCS_CONLL_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=1, POS=2, MORPH=None,
HEAD_INDEX=3, DEP_LABEL=6, OFFSET=1, ROOT_INDEX=0, NCOLS=7)
DEP_LABEL_TYPES = {
"core": "ccomp csubj csubjpass dobj iobj nsubj nsubjpass xcomp".split(),
"non_core": """acl discourse nmod advcl dislocated nummod advmod expl parataxis amod foreign remnant appos
goeswith reparandum compound list root -NONE- conj mwe vocative dep name""".split(),
"func": "aux auxpass case cc cop det mark neg".split(),
"other": "punct".split()}
def get_config(name):
if name == "UD":
return UD_CONLL_CONFIG
elif name == "ZGEN":
return ZGEN_CONLL_CONFIG
elif name == "CONLL09":
return CONLL09_CONFIG
elif name == "UD_fine_pos":
return UD_CONLL_FINE_POS_CONFIG
def read_blankline_block(stream):
s = ''
list = []
while True:
line = stream.readline()
# End of file:
if not line:
list.append(s)
return list
# Blank line:
elif line and not line.strip():
list.append(s)
s = ''
# Other line:
# in Google UD some lines can be commented and some can have multiword expressions/fused morphemes introduced by "^11-12 sss"
# and not re.match("[0-9]+-[0-9]+",line) and not line.startswith("<")
elif not line.startswith("#"): # and "_\t_\t_\t_\t_\t" in line):
# and not line.startswith("<"):
s += line
def read_sentences_from_columns(stream):
# grids are sentences in column format
grids = []
for block in read_blankline_block(stream):
block = block.strip()
if not block: continue
grid = [line.split('\t') for line in block.split('\n')]
appendFlag = True
# Check that the grid is consistent.
for row in grid:
if len(row) != len(grid[0]):
print(grid)
#raise ValueError('Inconsistent number of columns:\n%s'% block)
                sys.stderr.write('Inconsistent number of columns:\n%s\n' % block)
appendFlag = False
break
if appendFlag:
grids.append(grid)
return grids
def output_conll(sentences, prefix):
f_gold = open(prefix + "_conll.gold", "w")
f_guess = open(prefix + "_conll.guess", "w")
for sentence in sentences:
for (num, word, pos, correct_dep, guess_dep) in sentence:
f_gold.write("\t".join([num, word, word, pos, pos, "_", correct_dep, "_", "_", "_"]) + "\n")
f_guess.write("\t".join([num, word, word, pos, pos, "_", guess_dep, "_", "_", "_"]) + "\n")
f_gold.write("\n")
f_guess.write("\n")
def pprint(column_sentence):
for row in column_sentence:
print("\t".join([word for word in row]))
print("")
def write_conll(sentences, file_out):
for sentence in sentences:
# print "\n".join("\t".join(word for word in row)for row in sentence)
file_out.write("\n".join("\t".join(word for word in row) for row in sentence))
file_out.write("\n\n")
def pseudo_rand_split(sentences):
i = 0
train = []
test = []
for sentence in sentences:
i += 1
if i < 10:
train.append(sentence)
else:
test.append(sentence)
i = 0
return train, test
'''
def main():
s = conll_utils()
s.read()
s.output_short_sentences()
#s.print_dep_length()
class conll_utils(object):
# num_sents[len(extract_arcs(tree))] += 1
# print "\n".join("%d\t%f\t%f" % (size, counts_real[size]/float(num_sents[size]), counts_rand[size]/float(num_sents[size])) for size in counts_real.keys())
#print dep_length(extract_arcs(tree))
def output_short_sentences(self):
sentences = read_sentences_from_columns(open(self.input))
for sentence in sentences:
if len(sentence) == 10: # and len(sentence) > 8:
pprint(sentence)
def read(self):
#sys.stderr.write("Main..\n")
self.sentences = read_sentences_from_columns(open(self.input))
#print self.sentences
""" self.correct_trees = []
self.guess_trees = []
for sentence in self.sentences:
self.correct_trees.append([row[:4] for row in sentence])
self.guess_trees.append([row[:3] + [row[4]] for row in sentence]) """
def __init__(self):
optparser = optparse.OptionParser()
optparser.add_option("-n", "--num_training_iterations", dest="iterations", default=5, type="int", help="Number of training iterations")
optparser.add_option("-N", "--num_training_sentences", dest="num_sents", default=1000, type="int", help="Number of training sentences to use")
optparser.add_option("-t", "--threshold", dest="threshold", default=0.5, type="float", help="Score threshold for alignment")
optparser.add_option("-d", "--display_count", dest="display_count", default=5, type="int", help="Number of alignments to display")
optparser.add_option("-i", "--input", dest="input", default="test", help="Input file name")
optparser.add_option("-e", "--evaluation", dest="evaluation", default="undirected", help="Type of dependency evaluation")
(opts, args) = optparser.parse_args()
self.input = opts.input
self.evaluation = opts.evaluation
return
def accuracy():
sum = 0
length = 0
print len(self.sentences)
if (self.evaluation == "directed"):
for sentence in self.sentences:
sum += correct_dir(sentence)
length += len(sentence)
#undirected
else:
for (correct_tree, guess_tree) in zip(self.correct_trees, self.guess_trees):
sum += correct_undir(correct_tree, guess_tree)
print "\n".join(str(row) for row in zip(correct_tree, guess_tree))
print correct_undir(correct_tree, guess_tree)
length += len(correct_tree)
print sum / float(length)
for c1, c2 in zip(collect_statistics(self.correct_trees), collect_statistics(self.guess_trees)):
print ' '.join(str(i) for i in c1[0]) + "\t" + str(c1[1])
#output_conll(self.sentences, self.input)
if __name__ == "__main__":
main()
'''
| colorlessgreenRNNs-main | src/syntactic_testsets/conll_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
from collections import defaultdict
import string
def read_paradigms(path):
""" reads morphological paradigms from a file with token, lemma, tag, morph, freq fields
returns a simple dict: token -> list of all its analyses and their frequencies """
d = defaultdict(list)
with open(path, "r") as f:
for line in f:
token, lemma, tag, morph, freq = line.split("\t")
s_m = morph.split("|")
s_m.sort()
morph = "|".join(s_m)
d[token].append((lemma, tag, morph, int(freq)))
return d
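# Illustrative (hypothetical) input line for read_paradigms, tab-separated:
#   dogs<TAB>dog<TAB>NOUN<TAB>Number=Plur<TAB>42
# which yields d["dogs"] == [("dog", "NOUN", "Number=Plur", 42)].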
def load_vocab(vocab_file):
f_vocab = open(vocab_file, "r")
vocab = {w: i for i, w in enumerate(f_vocab.read().split())}
f_vocab.close()
return vocab
def ltm_to_word(paradigms):
""" converts standard paradigms dict (token -> list of analyses) to a dict (l_emma, t_ag, m_orph -> word)
(where word in the most frequent form, e.g. between capitalized and non-capitalized Fanno and fanno) """
#paradigms = read_paradigms("/private/home/gulordava/edouard_data/itwiki//paradigms_UD.txt")
paradigms_lemmas = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for w in paradigms:
for lemma, tag, morph, freq in paradigms[w]:
paradigms_lemmas[(lemma, tag)][morph][w] = int(freq)
best_paradigms_lemmas = defaultdict(lambda: defaultdict(lambda: defaultdict(str)))
for l, t in paradigms_lemmas:
for m in paradigms_lemmas[(l, t)]:
word = sorted(paradigms_lemmas[(l, t)][m].items(), key=lambda x: -x[1])[0][0]
best_paradigms_lemmas[l][t][m] = word
return best_paradigms_lemmas
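# Sketch of the resulting nested dict (hypothetical values):
#   ltm = ltm_to_word(read_paradigms(path))
#   ltm["dog"]["NOUN"]["Number=Plur"]  ->  "dogs"  (the most frequent surface form)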
def vocab_freqs(train_data_file, vocab):
train_data = open(train_data_file).readlines()
freq_dict = {}
for w in vocab:
freq_dict[w] = 0
for line in train_data:
for w in line.split():
if w in vocab:
freq_dict[w] += 1
return freq_dict
"""
def number_agreement_data(sents, gold, ltm_paradigms, vocab):
data = []
sentence_parts = []
for sent, g in zip(sents, gold):
pattern_id, constr_id, sent_id, idx, gold_pos, gold_morph, _, _, _ = g.split()
if "Number=Plur" in gold_morph:
correct_number = "plur"
elif "Number=Sing" in gold_morph:
correct_number = "sing"
else:
continue
sent_part = sent.split()[:int(idx)]
# print(sent_part, gold_pos, gold_morph)
for lemma, form, form_alt in choose_random_forms(ltm_paradigms, vocab, gold_pos, gold_morph):
sentence_parts.append(" ".join(sent_part) + " " + form + "\n")
sentence_parts.append(" ".join(sent_part) + " " + form_alt + "\n")
data.append((pattern_id, int(constr_id), int(sent_id),
lemma, correct_number, form, "correct"))
data.append((pattern_id, int(constr_id), int(sent_id),
lemma, correct_number, form_alt, "wrong"))
return data, sentence_parts
"""
def plurality(morph):
if "Number=Plur" in morph:
return "plur"
elif "Number=Sing" in morph:
return "sing"
else:
return "none"
def transform_gold(gold):
data = []
for g in gold:
pattern_id, constr_id, sent_id, r_idx, r_pos, r_morph, form, form_alt, lemma, l_idx, l_pos, prefix = g.split(
"\t")
correct_number = plurality(r_morph)
data.append((pattern_id, int(constr_id), int(sent_id), correct_number, form, "correct"))
data.append((pattern_id, int(constr_id), int(sent_id), correct_number, form_alt, "wrong"))
return data
def is_attr(word, pos, number, paradigms):
""" verify whether a word is attractor, that is of tag *pos* and of the number opposite of *number* """
if not paradigms[word]:
return False
max_freq = max([p[3] for p in paradigms[word]])
for lemma, tag, morph, freq in paradigms[word]:
# a word can have different tags (be ambiguous)
# we filter out tags which are very infrequent (including wrong tags for functional words)
if freq < max_freq / 10:
continue
if tag == pos and plurality(morph) != "none" and plurality(morph) != number:
return True
return False
def extract_sent_features(sents, gold, vocab, paradigms):
""" Extracting some features of the construction and the sentence for data analysis """
paradigms_word_tag = defaultdict(list)
for w in paradigms:
for lemma, tag, morph, freq in paradigms[w]:
paradigms_word_tag[w].append(tag)
df_sents = []
constr_id_unk = []
n_attractors = []
punct = []
for s, g in zip(sents, gold):
pattern_id, constr_id, sent_id, r_idx, r_pos, r_morph, form, form_alt, lemma, l_idx, l_pos, prefix = g.split("\t")
sent_id = int(sent_id)
r_idx = int(r_idx)
l_idx = int(l_idx)
s_lm = " ".join([w if w in vocab else "<unk>" for w in s.split()[:r_idx]])
n_unk = len([w for w in s.split()[:r_idx] if w not in vocab ])
if sent_id == 0:
constr_id_unk.append((pattern_id, int(constr_id), n_unk))
number = plurality(r_morph)
#print(r_morph, number)
attrs = [w for w in s.split()[l_idx + 1:r_idx] if is_attr(w, l_pos, number, paradigms)]
n_attractors.append((pattern_id, int(constr_id), len(attrs)))
#punct.append((pattern_id, int(constr_id), "PUNCT" in pos_seq))
punct.append((pattern_id, int(constr_id), any(p in prefix.split() for p in string.punctuation)))
#print(s_lm)
#print(attrs)
n_unk = s_lm.count("<unk>")
len_prefix = len(s_lm.split())
len_context = r_idx - l_idx
df_sents.append((pattern_id, int(constr_id), int(sent_id), s.strip(), s_lm, n_unk, len_context, len_prefix))
df_sents = pd.DataFrame(df_sents, columns = ["pattern_id","constr_id", "sent_id", "sent", "prefix", "n_unk","len_context","len_prefix"])
#print(constr_id_unk)
unk = pd.DataFrame(constr_id_unk, columns=["pattern_id", "constr_id", "n_unk_original"])
attr = pd.DataFrame(n_attractors, columns=["pattern_id","constr_id","n_attr"])
punct = pd.DataFrame(punct, columns=["pattern_id","constr_id","punct"])
df_sents = df_sents.merge(unk, on=["pattern_id", "constr_id"])
df_sents = df_sents.merge(attr, on=["pattern_id", "constr_id"])
df_sents = df_sents.merge(punct, on=["pattern_id", "constr_id"])
    return df_sents
| colorlessgreenRNNs-main | src/syntactic_testsets/utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import random
import pandas as pd
import tree_module as tm
from extract_dependency_patterns import grep_morph_pattern
from generate_utils import is_good_form, get_alt_form, match_features, alt_numeral_morph
from utils import read_paradigms, load_vocab, ltm_to_word, extract_sent_features, transform_gold, vocab_freqs
def generate_morph_pattern_test(trees, pattern, paradigms, vocab, n_sentences=10):
arc_dir, context = pattern.split("\t")[:2]
context = tuple(context.split("_"))
l_values = pattern.split("\t")[2:]
pattern_id = pattern.replace("\t", "!")
ltm_paradigms = ltm_to_word(paradigms)
output = []
constr_id = 0
n_vocab_unk = 0
n_paradigms_unk = 0
# 'nodes' constitute Y, without X or Z included
for context, l, r, t, nodes in grep_morph_pattern(trees, context, l_values, arc_dir):
#pos_constr = "_".join(n.pos for n in t.nodes[l.index - 1: r.index])
# filter model sentences with unk and the choice word not in vocab
if not all([n.word in vocab for n in nodes + [l, r]]):
n_vocab_unk += 1
continue
if not is_good_form(r.word, r.word, r.morph, r.lemma, r.pos, vocab, ltm_paradigms):
n_paradigms_unk += 1
continue
prefix = " ".join(n.word for n in t.nodes[:r.index])
for i in range(n_sentences):
# sent_id = 0 - original sentence with good lexical items, other sentences are generated
if i == 0:
new_context = " ".join(n.word for n in t.nodes)
form = r.word
form_alt = get_alt_form(r.lemma,r.pos,r.morph,ltm_paradigms)
lemma = r.lemma
else:
new_context = generate_context(t.nodes, paradigms, vocab)
random_forms = choose_random_forms(ltm_paradigms,vocab, r.pos,r.morph, n_samples=1, gold_word=r.word)
if len(random_forms) > 0:
lemma, form, form_alt = random_forms[0]
else:
# in rare cases, there is no (form, form_alt) both in vocab
# original form and its alternation are not found because e.g. one or the other is not in paradigms
# (they should anyway be in the vocabulary)
lemma, form = r.lemma, r.word
form_alt = get_alt_form(r.lemma, r.pos, r.morph, ltm_paradigms)
# constr_id sent_id Z_index Z_pos Z_gold_morph
gold_str = "\t".join([pattern_id, str(constr_id), str(i),
str(r.index - 1), r.pos, r.morph, form, form_alt, lemma,
str(l.index - 1), l.pos, prefix]) + "\n"
output.append((new_context + " <eos>\n", gold_str))
constr_id += 1
print("Problematic sentences vocab/paradigms", n_vocab_unk, n_paradigms_unk)
return output
def is_content_word(pos):
return pos in ["ADJ", "NOUN", "VERB", "PROPN", "NUM", "ADV"]
def generate_context(nodes, paradigms, vocab):
output = []
for i in range(len(nodes)):
substitutes = []
n = nodes[i]
# substituting content words
if is_content_word(n.pos):
for word in paradigms:
if word == n.word:
continue
# matching capitalization and vowel
if not match_features(word, n.word):
continue
tag_set = set([p[1] for p in paradigms[word]])
# use words with unambiguous POS
if len(tag_set) == 1 and tag_set.pop() == n.pos:
for _, _, morph, freq in paradigms[word]:
if n.morph == morph and int(freq) > 1 and word in vocab:
substitutes.append(word)
if len(substitutes) == 0:
output.append(n.word)
else:
output.append(random.choice(substitutes))
else:
output.append(n.word)
return " ".join(output)
def choose_random_forms(ltm_paradigms, vocab, gold_pos, morph, n_samples=10, gold_word=None):
candidates = set()
#lemma_tag_pairs = ltm_paradigms.keys()
#test_lemmas = [l for l, t in lemma_tag_pairs]
for lemma in ltm_paradigms:
poses = list(ltm_paradigms[lemma].keys())
if len(set(poses)) == 1 and poses.pop() == gold_pos:
form = ltm_paradigms[lemma][gold_pos][morph]
_, morph_alt = alt_numeral_morph(morph)
form_alt = ltm_paradigms[lemma][gold_pos][morph_alt]
if not is_good_form(gold_word, form, morph, lemma, gold_pos, vocab, ltm_paradigms):
continue
candidates.add((lemma, form, form_alt))
    # random.sample requires a sequence (sampling directly from a set was removed in Python 3.11)
    candidates = list(candidates)
    if len(candidates) > n_samples:
        return random.sample(candidates, n_samples)
    else:
        return random.sample(candidates, len(candidates))
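# Hypothetical call, assuming morph strings as they appear in the paradigms file:
#   choose_random_forms(ltm_paradigms, vocab, "VERB", "Mood=Ind|Number=Sing|Tense=Pres|VerbForm=Fin")
# returns up to n_samples (lemma, form, form_alt) triples, where form_alt is the
# opposite-number form of the same lemma.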
def main():
parser = argparse.ArgumentParser(description='Generating sentences based on patterns')
parser.add_argument('--treebank', type=str, required=True,
help='input file (in a CONLL column format)')
parser.add_argument('--paradigms', type=str, required=True, help="the dictionary of tokens and their morphological annotations")
parser.add_argument('--vocab', type=str, required=True,help='(LM) Vocabulary to generate words from')
parser.add_argument('--patterns', type=str, required=True)
parser.add_argument('--output', type=str, required=True, help="prefix for generated text and annotation data")
parser.add_argument('--lm_data', type=str, required=False, help="path to LM data to estimate word frequencies")
args = parser.parse_args()
trees = tm.load_trees_from_conll(args.treebank)
for t in trees:
t.remerge_segmented_morphemes()
paradigms = read_paradigms(args.paradigms)
f_text = open(args.output + ".text", "w")
f_gold = open(args.output + ".gold", "w")
f_eval = open(args.output + ".eval", "w")
output = []
vocab = load_vocab(args.vocab)
for line in open(args.patterns, "r"):
print("Generating sentences with pattern", line.strip())
#l_values = ('Gender=Fem|Number=Sing','Gender=Masc|Number=Plur')
data = generate_morph_pattern_test(trees, line.strip(), paradigms, vocab)
output.extend(data)
print("Generated", len(data), "sentences")
random.shuffle(output)
sents, golds = zip(*output)
f_text.writelines(sents)
f_gold.writelines(golds)
# save the index of the target word to evaluate
f_eval.writelines([g.split("\t")[3] + "\n" for g in golds])
##############################################################
# Make a readable data table with fields useful for analysis #
##############################################################
data = transform_gold(golds)
data = pd.DataFrame(data, columns=["pattern_id", "constr_id", "sent_id", "correct_number", "form", "class"])
data.loc[data.sent_id == 0, "type"] = "original"
data.loc[data.sent_id > 0, "type"] = "generated"
# getting simpler pattern labels
patterns = {p: "__".join(p.split("!")[:2]) for p in set(data.pattern_id)}
data["pattern"] = data["pattern_id"].map(patterns)
df_sents = extract_sent_features(sents, golds, vocab, paradigms)
full_df = data.merge(df_sents, on=["pattern_id", "constr_id", "sent_id"])
if args.lm_data:
freq_dict = vocab_freqs(args.lm_data + "/train.txt", vocab)
full_df["freq"] = full_df["form"].map(freq_dict)
fields = ["pattern", "constr_id", "sent_id", "correct_number", "form", "class", "type", "prefix", "n_attr",
"punct","freq", "len_context", "len_prefix", "sent"]
else:
fields = ["pattern", "constr_id", "sent_id", "correct_number", "form", "class", "type", "prefix", "n_attr",
"punct","len_context", "len_prefix", "sent"]
full_df[fields].to_csv(args.output + ".tab", sep="\t", index=False)
if __name__ == "__main__":
    main()
| colorlessgreenRNNs-main | src/syntactic_testsets/generate_nonsense.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def is_vowel(c):
return c in ["a","o","u","e","i","A","O","U","E","I","è"]
def alt_numeral_morph(morph):
if "Number=Plur" in morph:
morph_alt = morph.replace("Plur", "Sing")
return "plur", morph_alt
elif "Number=Sing" in morph:
morph_alt = morph.replace("Sing", "Plur")
return "sing", morph_alt
def is_good_form(gold_form, new_form, gold_morph, lemma, pos, vocab, ltm_paradigms):
_, alt_morph = alt_numeral_morph(gold_morph)
if not new_form in vocab:
return False
alt_form = ltm_paradigms[lemma][pos][alt_morph]
if not alt_form in vocab:
return False
if gold_form is None:
print(gold_form, gold_morph)
return True
if not match_features(new_form, gold_form):
return False
if not match_features(alt_form, gold_form):
return False
return True
def get_alt_form(lemma, pos, morph, ltm_paradigms):
_, alt_morph = alt_numeral_morph(morph)
return ltm_paradigms[lemma][pos][alt_morph]
def match_features(w1, w2):
return w1[0].isupper() == w2[0].isupper() and is_vowel(w1[0]) == is_vowel(w2[0])
| colorlessgreenRNNs-main | src/syntactic_testsets/generate_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from collections import defaultdict
from data import data_utils
parser = argparse.ArgumentParser(description='Reading and processing a large gzip file')
parser.add_argument('--input', type=str, required=True,
help='Input path (in a column CONLL UD format)')
parser.add_argument('--output', type=str, required=True, help="Output file name")
parser.add_argument('--nwords', type=int, default='100000000', required=False,
help='How many words to process')
parser.add_argument('--min_freq', type=int, default='5', required=False,
help='Minimal frequency of paradigm to be included in the dictionary')
args = parser.parse_args()
nwords = 0
paradigms = defaultdict(int)
for line in data_utils.read(args.input):
if line.strip() == "" or len(line.split("\t")) < 2:
continue
else:
fields = line.split("\t")
if fields[1].isalpha():
paradigms[(fields[1], fields[2], fields[3], fields[5])] += 1
nwords += 1
if nwords > args.nwords:
break
with open(args.output, 'w') as f:
for p in paradigms:
if paradigms[p] > args.min_freq:
f.write("\t".join(el for el in p) + "\t" + str(paradigms[p]) + "\n")
f.close()
| colorlessgreenRNNs-main | src/data/collect_paradigms.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
file_name = sys.argv[1]
for l in open(file_name):
fields = l.strip().split("\t")
if len(fields) == 10:
morph = fields[5]
# annotate non-singular verbs in present as Plural
if "Tense=Pres" in morph and "VerbForm=Fin" in morph and "Number=Sing" not in morph:
morph = morph + "|Number=Plur"
s_m = morph.split("|")
s_m.sort()
morph = "|".join(s_m)
elif "Number=Sing" in morph:
feats = morph.split("|")
# remove Person=3 annotation (since we don't have it for non-singular cases)
feats = [f for f in feats if "Person=3" not in f]
morph = "|".join(feats)
print("\t".join(fields[:5] + [morph, ] + fields[6:]))
else:
print(l.strip())
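# Illustrative effect on a hypothetical input line: a present-tense finite verb with
# morph "Mood=Ind|Tense=Pres|VerbForm=Fin" (no Number=Sing) is rewritten as
# "Mood=Ind|Number=Plur|Tense=Pres|VerbForm=Fin" after the features are re-sorted.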
| colorlessgreenRNNs-main | src/data/preprocess_EnglishUD_morph.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
from collections import defaultdict
from random import shuffle
from data import data_utils
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, help='Input file path')
parser.add_argument('--output', type=str, help='Output file path')
parser.add_argument('--output_dir', type=str, help='Output path for training/valid/test sets')
parser.add_argument('--vocab', type=int, default=10000, help="The size of vocabulary, default = 10K")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
def create_vocab(path, vocab_size):
counter = defaultdict(int)
for line in data_utils.read(path):
for word in line.replace("\n"," <eos>").split():
counter[word] += 1
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))[:vocab_size]
words = [w for (w, v) in count_pairs]
print(len(counter), count_pairs[vocab_size - 1])
w2idx = dict(zip(words, range(len(words))))
idx2w = dict(zip(range(len(words)), words))
return w2idx, idx2w
def convert_text(input_path, output_path, vocab):
with open(output_path, 'w') as output:
for line in data_utils.read(input_path):
words = [filter_word(word, vocab) for word in line.replace("\n", " <eos>").split()]
output.write(" ".join(words) + "\n")
output.close()
def convert_line(line, vocab):
return [filter_word(word, vocab) for word in line.replace("\n", " <eos>").split()]
def word_to_idx(word, vocab):
if word in vocab:
return vocab[word]
else:
return vocab["<unk>"]
def filter_word(word, vocab):
if word in vocab:
return word
else:
return "<unk>"
def create_corpus(input_path, output_path, vocab):
""" Split data to create training, validation and test corpus """
nlines = 0
f_train = open(output_path + "/train.txt", 'w')
f_valid = open(output_path + "/valid.txt", 'w')
f_test = open(output_path + "/test.txt", 'w')
train = []
for line in data_utils.read(input_path):
if nlines % 10 == 0:
f_valid.write(" ".join(convert_line(line, vocab)) + "\n")
elif nlines % 10 == 1:
f_test.write(" ".join(convert_line(line, vocab)) + "\n")
else:
train.append(" ".join(convert_line(line, vocab)) + "\n")
nlines += 1
shuffle(train)
f_train.writelines(train)
f_train.close()
f_valid.close()
f_test.close()
w2idx, idx2w = create_vocab(args.input, args.vocab)
#convert_text(args.input, args.output, w2idx)
create_corpus(args.input, args.output_dir, w2idx)
| colorlessgreenRNNs-main | src/data/data_vocab_prep.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import gzip
import logging
def read_gzip_stream(path):
with gzip.open(path, 'rt', encoding="UTF-8") as f:
for line in f:
yield line
def read_text_stream(path):
with open(path, 'r', encoding="UTF-8") as f:
for line in f:
yield line
def read(path):
if path.endswith(".gz"):
logging.info("Reading GZIP file")
return read_gzip_stream(path)
else:
return read_text_stream(path)
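# Minimal usage sketch (hypothetical path): .gz inputs are streamed through gzip,
# everything else is read as plain UTF-8 text.
#   for line in read("corpus.txt.gz"):
#       ...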
| colorlessgreenRNNs-main | src/data/data_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import conll_utils
import tree_module as tm
def remove_segmented_morphemes_hebrew(t):
for start, end, token in t.fused_nodes:
# assert start + 1 == end, t
# don't need to change anything
if all(not n.word.startswith("_") and not n.word.endswith("_") for n in t.nodes[start - 1:end]):
# print(start, end, token)
continue
tokens_separated = ""
for n in t.nodes[start - 1:end]:
if not n.word.startswith("_") and not n.word.endswith("_"):
start = start + 1
tokens_separated = tokens_separated + n.word
else:
break
# print("tokens sep", tokens_separated)
head = None
for n in t.nodes[start - 1:end]:
# print(start-1, end-1)
# print(n.head_id)
if n.head_id > end or n.head_id < start:
# in two sentences two parts of a word had two different heads
# in 20 cases several parts of a word had the same head - annotated with 'fixed' dependency
# assert head is None, (t, t.fused_nodes, start, end, t.nodes[start])
# if head is not None and head.head_id == n.head_id:
# print("fixed")
head = n
assert head is not None, (t, t.fused_nodes, start, end, t.nodes[start])
# print(start - 1, end)
# print("head", head)
merged_part = token[len(tokens_separated):]
# print("merged part", )
if merged_part == "":
start = start - 1
else:
t.nodes[start - 1].word = token[len(tokens_separated):]
t.nodes[start - 1].lemma = head.lemma
t.nodes[start - 1].pos = head.pos
t.nodes[start - 1].morph = head.morph
t.nodes[start - 1].dep_label = head.dep_label
# print(t.nodes[start - 1].head_id)
for i in range(end - start):
if t.nodes[start].dep_label == "nmod:poss":
t.nodes[start - 1].morph = t.nodes[start - 1].morph + "|Poss=Yes"
# print(i)
if len(t.children(t.nodes[start])) != 0:
for c in t.children(t.nodes[start]):
c.head_id = t.nodes[start - 1].index
t.arcs.remove(tm.Arc(child=c, head=t.nodes[start], direction=c.dir))
t.arcs.append(tm.Arc(child=c, head=t.nodes[start - 1], direction=c.dir))
assert len(t.children(t.nodes[start])) == 0, (t, start, end, token, i, t.arcs)
t.remove_node(t.nodes[start])
# print(t)
# important, after removal of other nodes so that their dependencies get attached first to the right head
t.nodes[start - 1].head_id = head.head_id
t.fused_nodes = []
path = "/private/home/gulordava/edouard_data/hewiki/hebrew.conllu"
trees = tm.load_trees_from_conll(path)
for i, t in enumerate(trees):
# in place
remove_segmented_morphemes_hebrew(t)
f_trees_new = open(path + "_new", "w")
for t in trees:
f_trees_new.write(t.pprint(conll_utils.UD_CONLL_CONFIG) + "\n")
    #print(t.pprint(conll_utils.UD_CONLL_CONFIG))
| colorlessgreenRNNs-main | src/data/hebrew/preprocess_HebrewUD_morph.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
file_name = sys.argv[1]
for l in open(file_name):
fields = l.strip().split("\t")
if len(fields) == 10:
morph = fields[5]
fine_tag = fields[4]
if "NN+POS+PRP" in fine_tag:
morph = morph + "|Poss=Yes"
print("\t".join(fields[:5] + [morph,] + fields[6:]))
else:
print(l.strip())
| colorlessgreenRNNs-main | src/data/hebrew/add_poss_wiki_annotation.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
file_name = sys.argv[1]
for l in open(file_name):
fields = l.strip().split("\t")
if len(fields) == 10:
morph = fields[5]
feats = morph.split("|")
feats = [f for f in feats if "HebBi" not in f and "HebCo" not in f and "Voice" not in f]
morph = "|".join(feats)
print("\t".join(fields[:5] + [morph,] + fields[6:]))
else:
print(l.strip())
| colorlessgreenRNNs-main | src/data/hebrew/remove_binyanim.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import logging
from dataclasses import dataclass, field
from math import sqrt
from typing import List, Optional, Union
import torch
import torch.nn as nn
logger: logging.Logger = logging.getLogger(__name__)
@dataclass
class MtlConfigs:
mtl_model: str = "att_sp" # consider using enum
num_task_experts: int = 1
num_shared_experts: int = 1
expert_out_dims: List[List[int]] = field(default_factory=list)
self_exp_res_connect: bool = False
expert_archs: Optional[List[List[int]]] = None
gate_archs: Optional[List[List[int]]] = None
num_experts: Optional[int] = None
@dataclass(frozen=True)
class ArchInputs:
num_task: int = 3
task_mlp: List[int] = field(default_factory=list)
mtl_configs: Optional[MtlConfigs] = field(default=None)
# Parameters related to activation function
activation_type: str = "RELU"
class AdaTTSp(nn.Module):
"""
paper title: "AdaTT: Adaptive Task-to-Task Fusion Network for Multitask Learning in Recommendations"
paper link: https://doi.org/10.1145/3580305.3599769
Call Args:
inputs: inputs is a tensor of dimension
[batch_size, self.num_tasks, self.input_dim].
Experts in the same module share the same input.
outputs dimensions: [B, T, D_out]
Example::
AdaTTSp(
input_dim=256,
expert_out_dims=[[128, 128]],
num_tasks=8,
num_task_experts=2,
self_exp_res_connect=True,
)
"""
def __init__(
self,
input_dim: int,
expert_out_dims: List[List[int]],
num_tasks: int,
num_task_experts: int,
self_exp_res_connect: bool = True,
activation: str = "RELU",
) -> None:
super().__init__()
if len(expert_out_dims) == 0:
            logger.warning(
                "AdaTTSp is a no-op! expert_out_dims, whose length is the number of "
                "extraction layers, should contain at least one entry."
            )
return
self.num_extraction_layers: int = len(expert_out_dims)
self.num_tasks = num_tasks
self.num_task_experts = num_task_experts
self.total_experts_per_layer: int = num_task_experts * num_tasks
self.self_exp_res_connect = self_exp_res_connect
self.experts = torch.nn.ModuleList()
self.gate_weights = torch.nn.ModuleList()
self_exp_weight_list = []
layer_input_dim = input_dim
for expert_out_dim in expert_out_dims:
self.experts.append(
torch.nn.ModuleList(
[
MLP(layer_input_dim, expert_out_dim, activation)
for i in range(self.total_experts_per_layer)
]
)
)
self.gate_weights.append(
torch.nn.ModuleList(
[
torch.nn.Sequential(
torch.nn.Linear(
layer_input_dim, self.total_experts_per_layer
),
torch.nn.Softmax(dim=-1),
)
for _ in range(num_tasks)
]
)
) # self.gate_weights is of shape L X T, after we loop over all layers.
if self_exp_res_connect and num_task_experts > 1:
params = torch.empty(num_tasks, num_task_experts)
scale = sqrt(1.0 / num_task_experts)
torch.nn.init.uniform_(params, a=-scale, b=scale)
self_exp_weight_list.append(torch.nn.Parameter(params))
layer_input_dim = expert_out_dim[-1]
self.self_exp_weights = nn.ParameterList(self_exp_weight_list)
def forward(
self,
inputs: torch.Tensor,
) -> torch.Tensor:
for layer_i in range(self.num_extraction_layers):
# all task expert outputs.
experts_out = torch.stack(
[
expert(inputs[:, expert_i // self.num_task_experts, :])
for expert_i, expert in enumerate(self.experts[layer_i])
],
dim=1,
) # [B * E (total experts) * D_out]
gates = torch.stack(
[
gate_weight(
inputs[:, task_i, :]
) # W ([B, D]) * S ([D, E]) -> G, dim is [B, E]
for task_i, gate_weight in enumerate(self.gate_weights[layer_i])
],
dim=1,
) # [B, T, E]
fused_experts_out = torch.bmm(
gates,
experts_out,
) # [B, T, E] X [B * E (total experts) * D_out] -> [B, T, D_out]
if self.self_exp_res_connect:
if self.num_task_experts > 1:
# residual from the linear combination of tasks' own experts.
self_exp_weighted = torch.einsum(
"te,bted->btd",
self.self_exp_weights[layer_i],
experts_out.view(
experts_out.size(0),
self.num_tasks,
self.num_task_experts,
-1,
), # [B * E (total experts) * D_out] -> [B * T * E_task * D_out]
) # bmm: [T * E_task] X [B * T * E_task * D_out] -> [B, T, D_out]
fused_experts_out = (
fused_experts_out + self_exp_weighted
) # [B, T, D_out]
else:
fused_experts_out = fused_experts_out + experts_out
inputs = fused_experts_out
return inputs
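# Hypothetical shape check for AdaTTSp, mirroring the docstring example:
#   module = AdaTTSp(input_dim=256, expert_out_dims=[[128, 128]], num_tasks=8,
#                    num_task_experts=2, self_exp_res_connect=True)
#   out = module(torch.randn(4, 8, 256))  # -> torch.Size([4, 8, 128])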
class AdaTTWSharedExps(nn.Module):
"""
paper title: "AdaTT: Adaptive Task-to-Task Fusion Network for Multitask Learning in Recommendations"
paper link: https://doi.org/10.1145/3580305.3599769
Call Args:
inputs: inputs is a tensor of dimension
[batch_size, self.num_tasks, self.input_dim].
Experts in the same module share the same input.
outputs dimensions: [B, T, D_out]
Example::
AdaTTWSharedExps(
input_dim=256,
expert_out_dims=[[128, 128]],
num_tasks=8,
num_shared_experts=1,
num_task_experts=2,
self_exp_res_connect=True,
)
"""
def __init__(
self,
input_dim: int,
expert_out_dims: List[List[int]],
num_tasks: int,
num_shared_experts: int,
num_task_experts: Optional[int] = None,
num_task_expert_list: Optional[List[int]] = None,
# Set num_task_expert_list for experimenting with a flexible number of
# experts for different task_specific units.
self_exp_res_connect: bool = True,
activation: str = "RELU",
) -> None:
super().__init__()
if len(expert_out_dims) == 0:
            logger.warning(
                "AdaTTWSharedExps is a no-op! expert_out_dims, whose length is the number of "
                "extraction layers, should contain at least one entry."
            )
return
self.num_extraction_layers: int = len(expert_out_dims)
self.num_tasks = num_tasks
assert (num_task_experts is None) ^ (num_task_expert_list is None)
if num_task_experts is not None:
self.num_expert_list = [num_task_experts for _ in range(num_tasks)]
else:
# num_expert_list is guaranteed to be not None here.
# pyre-ignore
self.num_expert_list: List[int] = num_task_expert_list
self.num_expert_list.append(num_shared_experts)
self.total_experts_per_layer: int = sum(self.num_expert_list)
self.self_exp_res_connect = self_exp_res_connect
self.experts = torch.nn.ModuleList()
self.gate_weights = torch.nn.ModuleList()
layer_input_dim = input_dim
for layer_i, expert_out_dim in enumerate(expert_out_dims):
self.experts.append(
torch.nn.ModuleList(
[
MLP(layer_input_dim, expert_out_dim, activation)
for i in range(self.total_experts_per_layer)
]
)
)
num_full_active_modules = (
num_tasks
if layer_i == self.num_extraction_layers - 1
else num_tasks + 1
)
self.gate_weights.append(
torch.nn.ModuleList(
[
torch.nn.Sequential(
torch.nn.Linear(
layer_input_dim, self.total_experts_per_layer
),
torch.nn.Softmax(dim=-1),
)
for _ in range(num_full_active_modules)
]
)
) # self.gate_weights is a 2d module list of shape L X T (+ 1), after we loop over all layers.
layer_input_dim = expert_out_dim[-1]
self_exp_weight_list = []
if self_exp_res_connect:
# If any tasks have number of experts not equal to 1, we learn linear combinations of native experts.
if any(num_experts != 1 for num_experts in self.num_expert_list):
for i in range(num_tasks + 1):
num_full_active_layer = (
self.num_extraction_layers - 1
if i == num_tasks
else self.num_extraction_layers
)
params = torch.empty(
num_full_active_layer,
self.num_expert_list[i],
)
scale = sqrt(1.0 / self.num_expert_list[i])
torch.nn.init.uniform_(params, a=-scale, b=scale)
self_exp_weight_list.append(torch.nn.Parameter(params))
self.self_exp_weights = nn.ParameterList(self_exp_weight_list)
self.expert_input_idx: List[int] = []
for i in range(num_tasks + 1):
self.expert_input_idx.extend([i for _ in range(self.num_expert_list[i])])
def forward(
self,
inputs: torch.Tensor,
) -> torch.Tensor:
for layer_i in range(self.num_extraction_layers):
num_full_active_modules = (
self.num_tasks
if layer_i == self.num_extraction_layers - 1
else self.num_tasks + 1
)
# all task expert outputs.
experts_out = torch.stack(
[
expert(inputs[:, self.expert_input_idx[expert_i], :])
for expert_i, expert in enumerate(self.experts[layer_i])
],
dim=1,
) # [B * E (total experts) * D_out]
# gate weights for fusing all experts.
gates = torch.stack(
[
gate_weight(inputs[:, i, :]) # [B, D] * [D, E] -> [B, E]
for i, gate_weight in enumerate(self.gate_weights[layer_i])
],
dim=1,
) # [B, T (+ 1), E]
# add all expert gate weights with native expert weights.
if self.self_exp_res_connect:
prev_idx = 0
use_unit_naive_weights = all(
num_expert == 1 for num_expert in self.num_expert_list
)
for module_i in range(num_full_active_modules):
next_idx = self.num_expert_list[module_i] + prev_idx
if use_unit_naive_weights:
                        gates[:, module_i, prev_idx:next_idx] += torch.ones(
                            1, self.num_expert_list[module_i], device=gates.device
                        )
else:
gates[:, module_i, prev_idx:next_idx] += self.self_exp_weights[
module_i
][layer_i].unsqueeze(0)
prev_idx = next_idx
fused_experts_out = torch.bmm(
gates,
experts_out,
) # [B, T (+ 1), E (total)] X [B * E (total) * D_out] -> [B, T (+ 1), D_out]
inputs = fused_experts_out
return inputs
class MLP(nn.Module):
"""
Args:
input_dim (int):
mlp_arch (List[int]):
activation (str):
Call Args:
input (torch.Tensor): tensor of shape (B, I)
Returns:
output (torch.Tensor): MLP result
Example::
mlp = MLP(100, [100])
"""
def __init__(
self,
input_dim: int,
mlp_arch: List[int],
activation: str = "RELU",
bias: bool = True,
) -> None:
super().__init__()
mlp_net = []
for mlp_dim in mlp_arch:
mlp_net.append(
nn.Linear(in_features=input_dim, out_features=mlp_dim, bias=bias)
)
if activation == "RELU":
mlp_net.append(nn.ReLU())
else:
raise ValueError("only RELU is included currently")
input_dim = mlp_dim
self.mlp_net = nn.Sequential(*mlp_net)
def forward(
self,
input: torch.Tensor,
) -> torch.Tensor:
return self.mlp_net(input)
class SharedBottom(nn.Module):
def __init__(
self, input_dim: int, hidden_dims: List[int], num_tasks: int, activation: str
) -> None:
super().__init__()
self.bottom_projection = MLP(input_dim, hidden_dims, activation)
self.num_tasks: int = num_tasks
def forward(
self,
input: torch.Tensor,
) -> torch.Tensor:
# input dim [T, D_in]
# output dim [B, T, D_out]
return self.bottom_projection(input).unsqueeze(1).expand(-1, self.num_tasks, -1)
class CrossStitch(torch.nn.Module):
"""
cross-stitch
paper title: "Cross-stitch Networks for Multi-task Learning".
paper link: https://openaccess.thecvf.com/content_cvpr_2016/papers/Misra_Cross-Stitch_Networks_for_CVPR_2016_paper.pdf
"""
def __init__(
self,
input_dim: int,
expert_archs: List[List[int]],
num_tasks: int,
activation: str = "RELU",
) -> None:
super().__init__()
self.num_layers: int = len(expert_archs)
self.num_tasks = num_tasks
self.experts = torch.nn.ModuleList()
self.stitchs = torch.nn.ModuleList()
expert_input_dim = input_dim
for layer_ind in range(self.num_layers):
self.experts.append(
torch.nn.ModuleList(
[
MLP(
expert_input_dim,
expert_archs[layer_ind],
activation,
)
for _ in range(self.num_tasks)
]
)
)
self.stitchs.append(
torch.nn.Linear(
self.num_tasks,
self.num_tasks,
bias=False,
)
)
expert_input_dim = expert_archs[layer_ind][-1]
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""
input dim [B, T, D_in]
output dim [B, T, D_out]
"""
x = input
for layer_ind in range(self.num_layers):
expert_out = torch.stack(
[
expert(x[:, expert_ind, :]) # [B, D_out]
for expert_ind, expert in enumerate(self.experts[layer_ind])
],
dim=1,
) # [B, T, D_out]
stitch_out = self.stitchs[layer_ind](expert_out.transpose(1, 2)).transpose(
1, 2
) # [B, T, D_out]
x = stitch_out
return x
class MLMMoE(torch.nn.Module):
"""
Multi-level Multi-gate Mixture of Experts
This code implements a multi-level extension of the MMoE model, as described in the
paper titled "Modeling Task Relationships in Multi-task Learning with Multi-gate
Mixture-of-Experts".
Paper link: https://dl.acm.org/doi/10.1145/3219819.3220007
To run the original MMoE, use only one fusion level. For example, set expert_archs as
[[96, 48]].
To configure multiple fusion levels, set expert_archs as something like [[96], [48]].
"""
def __init__(
self,
input_dim: int,
expert_archs: List[List[int]],
gate_archs: List[List[int]],
num_tasks: int,
num_experts: int,
activation: str = "RELU",
) -> None:
super().__init__()
self.num_layers: int = len(expert_archs)
self.num_tasks: int = num_tasks
self.num_experts = num_experts
self.experts = torch.nn.ModuleList()
self.gates = torch.nn.ModuleList()
expert_input_dim = input_dim
for layer_ind in range(self.num_layers):
self.experts.append(
torch.nn.ModuleList(
[
MLP(
expert_input_dim,
expert_archs[layer_ind],
activation,
)
for _ in range(self.num_experts)
]
)
)
self.gates.append(
torch.nn.ModuleList(
[
torch.nn.Sequential(
MLP(
input_dim,
gate_archs[layer_ind],
activation,
),
torch.nn.Linear(
gate_archs[layer_ind][-1]
if gate_archs[layer_ind]
else input_dim,
self.num_experts,
),
torch.nn.Softmax(dim=-1),
)
for _ in range(
self.num_experts
if layer_ind < self.num_layers - 1
else self.num_tasks
)
]
)
)
expert_input_dim = expert_archs[layer_ind][-1]
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""
input dim [B, D_in]
output dim [B, T, D_out]
"""
x = input.unsqueeze(1).expand([-1, self.num_experts, -1]) # [B, E, D_in]
for layer_ind in range(self.num_layers):
expert_out = torch.stack(
[
expert(x[:, expert_ind, :]) # [B, D_out]
for expert_ind, expert in enumerate(self.experts[layer_ind])
],
dim=1,
) # [B, E, D_out]
gate_out = torch.stack(
[
gate(input) # [B, E]
for gate_ind, gate in enumerate(self.gates[layer_ind])
],
dim=1,
) # [B, T, E]
gated_out = torch.matmul(gate_out, expert_out) # [B, T, D_out]
x = gated_out
return x
class PLE(nn.Module):
"""
PLE module is based on the paper "Progressive Layered Extraction (PLE): A
Novel Multi-Task Learning (MTL) Model for Personalized Recommendations".
Paper link: https://doi.org/10.1145/3383313.3412236
    PLE aims to address negative transfer and the seesaw phenomenon in multi-task
    learning. PLE distinguishes shared and task-specific experts explicitly and
    adopts a progressive routing mechanism to extract and separate deeper
    semantic knowledge gradually. When there is only one extraction layer, PLE
falls back to CGC.
Args:
input_dim: input embedding dimension
expert_out_dims (List[List[int]]): dimension of an expert's output at
each layer. This list's length equals the number of extraction
layers
num_tasks: number of tasks
num_task_experts: number of experts for each task module at each layer.
* If the number of experts is the same for all tasks, use an
integer here.
* If the number of experts is different for different tasks, use a
list of integers here.
num_shared_experts: number of experts for shared module at each layer
Call Args:
inputs: inputs is a tensor of dimension [batch_size, self.num_tasks + 1,
self.input_dim]. Task specific module inputs are placed first, followed
by shared module input. (Experts in the same module share the same input)
Returns:
        output: output of extraction layer to be fed into task-specific tower
networks. It's a list of tensors, each of which is for one task.
Example::
PLE(
input_dim=256,
expert_out_dims=[[128]],
num_tasks=8,
num_task_experts=2,
num_shared_experts=2,
)
"""
def __init__(
self,
input_dim: int,
expert_out_dims: List[List[int]],
num_tasks: int,
num_task_experts: Union[int, List[int]],
num_shared_experts: int,
activation: str = "RELU",
) -> None:
super().__init__()
if len(expert_out_dims) == 0:
raise ValueError("Expert out dims cannot be empty list")
self.num_extraction_layers: int = len(expert_out_dims)
self.num_tasks = num_tasks
self.num_task_experts = num_task_experts
if type(num_task_experts) is int:
self.total_experts_per_layer: int = (
num_task_experts * num_tasks + num_shared_experts
)
else:
self.total_experts_per_layer: int = (
sum(num_task_experts) + num_shared_experts
)
assert len(num_task_experts) == num_tasks
self.num_shared_experts = num_shared_experts
self.experts = nn.ModuleList()
expert_input_dim = input_dim
for expert_out_dim in expert_out_dims:
self.experts.append(
nn.ModuleList(
[
MLP(expert_input_dim, expert_out_dim, activation)
for i in range(self.total_experts_per_layer)
]
)
)
expert_input_dim = expert_out_dim[-1]
self.gate_weights = nn.ModuleList()
selector_dim = input_dim
for i in range(self.num_extraction_layers):
expert_out_dim = expert_out_dims[i]
# task specific gates.
if type(num_task_experts) is int:
gate_weights_in_layer = nn.ModuleList(
[
nn.Sequential(
nn.Linear(
selector_dim, num_task_experts + num_shared_experts
),
nn.Softmax(dim=-1),
)
for i in range(num_tasks)
]
)
else:
gate_weights_in_layer = nn.ModuleList(
[
nn.Sequential(
nn.Linear(
selector_dim, num_task_experts[i] + num_shared_experts
),
nn.Softmax(dim=-1),
)
for i in range(num_tasks)
]
)
# Shared module gates. Note last layer has only task specific module gates for task towers later.
if i != self.num_extraction_layers - 1:
gate_weights_in_layer.append(
nn.Sequential(
nn.Linear(selector_dim, self.total_experts_per_layer),
nn.Softmax(dim=-1),
)
)
self.gate_weights.append(gate_weights_in_layer)
selector_dim = expert_out_dim[-1]
if type(self.num_task_experts) is list:
experts_idx_2_task_idx = []
for i in range(num_tasks):
# pyre-ignore
experts_idx_2_task_idx += [i] * self.num_task_experts[i]
experts_idx_2_task_idx += [num_tasks] * num_shared_experts
self.experts_idx_2_task_idx: List[int] = experts_idx_2_task_idx
def forward(
self,
inputs: torch.Tensor,
) -> torch.Tensor:
for layer_i in range(self.num_extraction_layers):
# all task specific and shared experts' outputs.
# Note first num_task_experts * num_tasks experts are task specific,
# last num_shared_experts experts are shared.
if type(self.num_task_experts) is int:
experts_out = torch.stack(
[
self.experts[layer_i][expert_i](
inputs[
:,
# pyre-ignore
min(expert_i // self.num_task_experts, self.num_tasks),
:,
]
)
for expert_i in range(self.total_experts_per_layer)
],
dim=1,
) # [B * E (num experts) * D_out]
else:
experts_out = torch.stack(
[
self.experts[layer_i][expert_i](
inputs[
:,
self.experts_idx_2_task_idx[expert_i],
:,
]
)
for expert_i in range(self.total_experts_per_layer)
],
dim=1,
) # [B * E (num experts) * D_out]
gates_out = []
# Loop over all the gates in the layer. Note for the last layer,
# there is no shared gating network.
prev_idx = 0
for gate_i in range(len(self.gate_weights[layer_i])):
# This is for shared gating network, which uses all the experts.
if gate_i == self.num_tasks:
selected_matrix = experts_out # S_share
# This is for task gating network, which only uses shared and its own experts.
else:
if type(self.num_task_experts) is int:
task_experts_out = experts_out[
:,
# pyre-ignore
(gate_i * self.num_task_experts) : (gate_i + 1)
# pyre-ignore
* self.num_task_experts,
:,
] # task specific experts
else:
# pyre-ignore
next_idx = prev_idx + self.num_task_experts[gate_i]
task_experts_out = experts_out[
:,
prev_idx:next_idx,
:,
] # task specific experts
prev_idx = next_idx
shared_experts_out = experts_out[
:,
-self.num_shared_experts :,
:,
] # shared experts
selected_matrix = torch.concat(
[task_experts_out, shared_experts_out], dim=1
) # S_k with dimension of [B * E_selected * D_out]
gates_out.append(
torch.bmm(
self.gate_weights[layer_i][gate_i](
inputs[:, gate_i, :]
).unsqueeze(dim=1),
selected_matrix,
)
# W * S -> G
# [B, 1, E_selected] X [B * E_selected * D_out] -> [B, 1, D_out]
)
inputs = torch.cat(gates_out, dim=1) # [B, T, D_out]
return inputs
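# Hypothetical shape check for PLE (task-specific module inputs first, shared input last):
#   ple = PLE(input_dim=256, expert_out_dims=[[128]], num_tasks=8,
#             num_task_experts=2, num_shared_experts=2)
#   out = ple(torch.randn(4, 8 + 1, 256))  # -> torch.Size([4, 8, 128])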
class CentralTaskArch(nn.Module):
def __init__(
self,
mtl_configs: MtlConfigs,
opts: ArchInputs,
input_dim: int,
) -> None:
super().__init__()
self.opts = opts
assert len(mtl_configs.expert_out_dims) > 0, "expert_out_dims is empty."
self.num_tasks: int = opts.num_task
self.mtl_model: str = mtl_configs.mtl_model
logger.info(f"mtl_model is {mtl_configs.mtl_model}")
expert_out_dims: List[List[int]] = mtl_configs.expert_out_dims
# AdaTT-sp
# consider consolidating the implementation of att_sp and att_g.
if mtl_configs.mtl_model == "att_sp":
self.mtl_arch: nn.Module = AdaTTSp(
input_dim=input_dim,
expert_out_dims=expert_out_dims,
num_tasks=self.num_tasks,
num_task_experts=mtl_configs.num_task_experts,
self_exp_res_connect=mtl_configs.self_exp_res_connect,
activation=opts.activation_type,
)
# AdaTT-general
elif mtl_configs.mtl_model == "att_g":
self.mtl_arch: nn.Module = AdaTTWSharedExps(
input_dim=input_dim,
expert_out_dims=expert_out_dims,
num_tasks=self.num_tasks,
num_task_experts=mtl_configs.num_task_experts,
num_shared_experts=mtl_configs.num_shared_experts,
self_exp_res_connect=mtl_configs.self_exp_res_connect,
activation=opts.activation_type,
)
# PLE
elif mtl_configs.mtl_model == "ple":
self.mtl_arch: nn.Module = PLE(
input_dim=input_dim,
expert_out_dims=expert_out_dims,
num_tasks=self.num_tasks,
num_task_experts=mtl_configs.num_task_experts,
num_shared_experts=mtl_configs.num_shared_experts,
activation=opts.activation_type,
)
# cross-stitch
elif mtl_configs.mtl_model == "cross_st":
self.mtl_arch: nn.Module = CrossStitch(
input_dim=input_dim,
expert_archs=mtl_configs.expert_out_dims,
num_tasks=self.num_tasks,
activation=opts.activation_type,
)
# multi-layer MMoE or MMoE
elif mtl_configs.mtl_model == "mmoe":
self.mtl_arch: nn.Module = MLMMoE(
input_dim=input_dim,
expert_archs=mtl_configs.expert_out_dims,
gate_archs=[[] for i in range(len(mtl_configs.expert_out_dims))],
num_tasks=self.num_tasks,
num_experts=mtl_configs.num_shared_experts,
activation=opts.activation_type,
)
# shared bottom
elif mtl_configs.mtl_model == "share_bottom":
self.mtl_arch: nn.Module = SharedBottom(
input_dim,
[dim for dims in expert_out_dims for dim in dims],
self.num_tasks,
opts.activation_type,
)
else:
raise ValueError("invalid model type")
task_modules_input_dim = expert_out_dims[-1][-1]
self.task_modules: nn.ModuleList = nn.ModuleList(
[
nn.Sequential(
MLP(
task_modules_input_dim, self.opts.task_mlp, opts.activation_type
),
torch.nn.Linear(self.opts.task_mlp[-1], 1),
)
for i in range(self.num_tasks)
]
)
def forward(
self,
task_arch_input: torch.Tensor,
) -> List[torch.Tensor]:
if self.mtl_model in ["att_sp", "cross_st"]:
task_arch_input = task_arch_input.unsqueeze(1).expand(
-1, self.num_tasks, -1
)
elif self.mtl_model in ["att_g", "ple"]:
task_arch_input = task_arch_input.unsqueeze(1).expand(
-1, self.num_tasks + 1, -1
)
task_specific_outputs = self.mtl_arch(task_arch_input)
task_arch_output = [
task_module(task_specific_outputs[:, i, :])
for i, task_module in enumerate(self.task_modules)
]
        return task_arch_output
| AdaTT-main | mtl_lib.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import argparse
import numpy as np
class BisonEval:
def __init__(self, anno, pred):
if pred.getBisonIds() != anno.getBisonIds():
            print('[Warning] The prediction does not ' +
                  'cover the entire set of bison data. ' +
                  'The evaluation is running on the {} '.format(
                      len(pred.getBisonIds())) +
                  'examples from the prediction file.')
self.params = {'bison_ids': pred.getBisonIds()}
self.anno = anno
self.pred = pred
def evaluate(self):
accuracy = []
for bison_id in self.params['bison_ids']:
accuracy.append(self.anno[bison_id]['true_image_id'] ==
self.pred[bison_id])
mean_accuracy = np.mean(accuracy)
print("[Result] Mean BISON accuracy on {}: {:.2f}%".format(
self.anno.dataset, mean_accuracy * 100)
)
return mean_accuracy
class Annotation:
def __init__(self, anno_filepath):
assert os.path.exists(anno_filepath), 'Annotation file does not exist'
with open(anno_filepath) as fd:
anno_results = json.load(fd)
self._data = {res['bison_id']: res for res in anno_results['data']}
self.dataset = "{}.{}".format(anno_results['info']['source'],
anno_results['info']['split'])
def getBisonIds(self):
return self._data.keys()
def __getitem__(self, key):
return self._data[key]
class Prediction:
def __init__(self, pred_filepath):
assert os.path.exists(pred_filepath), 'Prediction file does not exist'
with open(pred_filepath) as fd:
pred_results = json.load(fd)
self._data = {result['bison_id']: result['predicted_image_id']
for result in pred_results}
def getBisonIds(self):
return self._data.keys()
def __getitem__(self, key):
return self._data[key]
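# For reference (inferred from the readers above, not an official schema): the
# annotation file is a JSON object with an 'info' block plus a 'data' list whose
# entries carry 'bison_id' and 'true_image_id'; the prediction file is a JSON list
# of {"bison_id": ..., "predicted_image_id": ...} records.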
def _command_line_parser():
parser = argparse.ArgumentParser()
default_anno = './annotations/bison_annotations.cocoval2014.json'
default_pred = './predictions/fake_predictions.cocoval2014.json'
parser.add_argument('--anno_path', default=default_anno,
help='Path to the annotation file')
parser.add_argument('--pred_path', default=default_pred,
help='Path to the prediction file')
return parser
def main(args):
anno = Annotation(args.anno_path)
pred = Prediction(args.pred_path)
bison = BisonEval(anno, pred)
bison.evaluate()
if __name__ == '__main__':
parser = _command_line_parser()
args = parser.parse_args()
main(args)
| binary-image-selection-main | bison_eval.py |
"""Load data, create a model, (optionally train it), and evaluate it
Example:
```
python run.py --task WiC --n_epochs 1 --counter_unit epochs --evaluation_freq 0.25 --checkpointing 1 --logging 1 --lr 1e-5
```
"""
import argparse
import json
import logging
import os
import sys
from functools import partial
import superglue_tasks
from dataloaders import get_dataloaders
from snorkel.model.utils import set_seed
from snorkel.mtl.trainer import Trainer
from snorkel.mtl.model import MultitaskModel
from snorkel.mtl.loggers import TensorBoardWriter
from snorkel.mtl.snorkel_config import default_config
from snorkel.slicing.apply import PandasSFApplier
from snorkel.slicing.utils import add_slice_labels, convert_to_slice_tasks
from superglue_slices import slice_func_dict
from utils import (
str2list,
str2bool,
write_to_file,
add_flags_from_config,
task_dataset_to_dataframe,
)
logging.basicConfig(level=logging.INFO)
def add_application_args(parser):
parser.add_argument("--task", type=str2list, required=True, help="GLUE tasks")
parser.add_argument(
"--log_root",
type=str,
default="logs",
help="Path to root of the logs directory",
)
parser.add_argument(
"--run_name",
type=str,
help="Name of the current run (can include subdirectories)",
)
parser.add_argument(
"--data_dir", type=str, default="data", help="The path to GLUE dataset"
)
parser.add_argument(
"--bert_model",
type=str,
default="bert-large-cased",
help="Which pretrained BERT model to use",
)
parser.add_argument("--batch_size", type=int, default=16, help="batch size")
parser.add_argument(
"--seed", type=int, default=None, help="Random seed for training"
)
parser.add_argument(
"--max_data_samples", type=int, default=None, help="Maximum data samples to use"
)
parser.add_argument(
"--max_sequence_length", type=int, default=256, help="Maximum sentence length"
)
parser.add_argument(
"--last_hidden_dropout_prob",
type=float,
default=0.0,
help="Dropout on last layer of bert.",
)
parser.add_argument(
"--train", type=str2bool, default=True, help="Whether to train the model"
)
parser.add_argument(
"--slice_dict",
type=str,
default=None,
help="Json string dict mapping task_name to list of slicing functions to utilize."
# Example usage: --slice_dict '{"WiC": ["slice_verb"]}'
)
def get_parser():
# Parse cmdline args and setup environment
parser = argparse.ArgumentParser(
"SuperGLUE Runner", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_application_args(parser)
return parser
def main(args):
config = vars(args)
# Set random seed for reproducibility
if config["seed"]:
seed = config["seed"]
logging.info(f"Setting seed: {seed}")
set_seed(seed)
# Full log path gets created in LogWriter
log_writer = TensorBoardWriter(log_root=args.log_root, run_name=args.run_name)
config["log_dir"] = log_writer.log_dir
# Save command line argument into file
cmd_msg = " ".join(sys.argv)
logging.info(f"COMMAND: {cmd_msg}")
log_writer.write_text(cmd_msg, "cmd.txt")
# Save config into file
logging.info(f"CONFIG: {config}")
log_writer.write_config(config)
# Construct dataloaders and tasks and load slices
dataloaders = []
tasks = []
task_names = args.task
for task_name in task_names:
task_dataloaders = get_dataloaders(
data_dir=args.data_dir,
task_name=task_name,
splits=["train", "valid", "test"],
max_sequence_length=args.max_sequence_length,
max_data_samples=args.max_data_samples,
tokenizer_name=args.bert_model,
batch_size=args.batch_size,
)
dataloaders.extend(task_dataloaders)
task = superglue_tasks.task_funcs[task_name](
args.bert_model, last_hidden_dropout_prob=args.last_hidden_dropout_prob
)
tasks.append(task)
if args.slice_dict:
slice_dict = json.loads(str(args.slice_dict))
# Ensure this is a mapping str to list
for k, v in slice_dict.items():
assert isinstance(k, str)
assert isinstance(v, list)
slice_tasks = []
for task in tasks:
# Update slicing tasks
slice_names = slice_dict[task.name]
slice_tasks.extend(convert_to_slice_tasks(task, slice_names))
slicing_functions = [
slice_func_dict[task_name][slice_name] for slice_name in slice_names
]
applier = PandasSFApplier(slicing_functions)
# Update slicing dataloaders
for dl in dataloaders:
df = task_dataset_to_dataframe(dl.dataset)
S_matrix = applier.apply(df)
add_slice_labels(dl, task, S_matrix, slice_names)
tasks = slice_tasks
    # Build model
    model = MultitaskModel(name="SuperGLUE", tasks=tasks)
# Load pretrained model if necessary
if config["model_path"]:
model.load(config["model_path"])
# Training
if args.train:
trainer = Trainer(**config)
trainer.train_model(model, dataloaders)
scores = model.score(dataloaders)
# Save metrics into file
logging.info(f"Metrics: {scores}")
log_writer.write_json(scores, "metrics.json")
# Save best metrics into file
if args.train and trainer.config["checkpointing"]:
logging.info(
f"Best metrics: " f"{trainer.log_manager.checkpointer.best_metric_dict}"
)
log_writer.write_json(
trainer.log_manager.checkpointer.best_metric_dict, "best_metrics.json"
)
if __name__ == "__main__":
parser = get_parser()
add_flags_from_config(parser, default_config)
args = parser.parse_args()
main(args)
| snorkel-superglue-master | run.py |
import logging
import os
import superglue_parsers
from task_config import SuperGLUE_TASK_SPLIT_MAPPING
from tokenizer import get_tokenizer
from pytorch_pretrained_bert import BertTokenizer
from snorkel.mtl.data import MultitaskDataLoader
logger = logging.getLogger(__name__)
def get_jsonl_path(data_dir: str, task_name: str, split: str):
return os.path.join(
data_dir, task_name, SuperGLUE_TASK_SPLIT_MAPPING[task_name][split]
)
def get_dataset(
data_dir: str,
task_name: str,
split: str,
tokenizer: BertTokenizer,
max_data_samples: int,
max_sequence_length: int,
):
jsonl_path = get_jsonl_path(data_dir, task_name, split)
return superglue_parsers.parser[task_name](
jsonl_path, tokenizer, max_data_samples, max_sequence_length
)
def get_dataloaders(
data_dir,
task_name="MultiRC",
splits=["train", "valid", "test"],
max_data_samples=None,
max_sequence_length=256,
tokenizer_name="bert-base-uncased",
batch_size=16,
):
"""Load data and return dataloaders"""
dataloaders = []
tokenizer = get_tokenizer(tokenizer_name)
for split in splits:
dataset = get_dataset(
data_dir, task_name, split, tokenizer, max_data_samples, max_sequence_length
)
dataloader = MultitaskDataLoader(
task_to_label_dict={task_name: "labels"},
dataset=dataset,
split=split,
batch_size=batch_size,
shuffle=(split == "train"),
)
dataloaders.append(dataloader)
logger.info(f"Loaded {split} for {task_name} with {len(dataset)} samples.")
return dataloaders
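# Example usage (a sketch; assumes the SuperGLUE data has been downloaded into
# "data/" with download_superglue_data.py and that the default BERT weights can
# be fetched):
#
#   train_dl, valid_dl = get_dataloaders(
#       data_dir="data",
#       task_name="RTE",
#       splits=["train", "valid"],
#       max_data_samples=100,
#       batch_size=8,
#   )
#   print(len(train_dl.dataset), len(valid_dl.dataset))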
| snorkel-superglue-master | dataloaders.py |
SuperGLUE_TASK_NAMES = ["CB", "COPA", "MultiRC", "RTE", "WiC", "WSC"]
SuperGLUE_TASK_SPLIT_MAPPING = {
"CB": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"COPA": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"MultiRC": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"RTE": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"WiC": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"WSC": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"SWAG": {"train": "train.csv", "valid": "val.csv", "test": "test.csv"},
}
SuperGLUE_LABEL_MAPPING = {
"CB": {"entailment": 1, "contradiction": 2, "neutral": 3},
"COPA": {0: 1, 1: 2},
"RTE": {"entailment": 1, "not_entailment": 2},
"WiC": {True: 1, False: 2},
"WSC": {True: 1, False: 2},
"MultiRC": {True: 1, False: 2},
"SWAG": {0: 1, 1: 2, 2: 3, 3: 4},
}
SuperGLUE_LABEL_INVERSE = {}
for task, mapping in SuperGLUE_LABEL_MAPPING.items():
SuperGLUE_LABEL_INVERSE[task] = {v: k for k, v in mapping.items()}
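# For example, given the mapping above, SuperGLUE_LABEL_INVERSE["RTE"] is
# {1: "entailment", 2: "not_entailment"}, which is convenient for turning
# 1-indexed model predictions back into the original string labels.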
SuperGLUE_TASK_METRIC_MAPPING = {
"CB": ["accuracy"],
"COPA": ["accuracy"],
"MultiRC": ["f1"],
"RTE": ["accuracy"],
"WiC": ["accuracy"],
"WSC": ["accuracy"],
"SWAG": ["accuracy"],
}
| snorkel-superglue-master | task_config.py |
import logging
from pytorch_pretrained_bert import BertTokenizer
logger = logging.getLogger(__name__)
def get_tokenizer(tokenizer_name):
logger.info(f"Loading Tokenizer {tokenizer_name}")
    if tokenizer_name.startswith("bert"):
        do_lower_case = "uncased" in tokenizer_name
        tokenizer = BertTokenizer.from_pretrained(
            tokenizer_name, do_lower_case=do_lower_case
        )
        return tokenizer
    # Fail loudly instead of falling through with an undefined tokenizer
    raise ValueError(f"Unsupported tokenizer: {tokenizer_name}")
| snorkel-superglue-master | tokenizer.py |
from setuptools import find_packages, setup
with open("README.md") as read_file:
long_description = read_file.read()
with open("requirements.txt") as f:
requirements = f.read().splitlines()
setup(
name="snorkel-superglue",
version="0.1.0",
url="https://github.com/HazyResearch/snorkel-superglue",
description="Applying snorkel (stanford.snorkel.edu) to superglue",
long_description_content_type="text/markdown",
long_description=long_description,
license="Apache License 2.0",
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
keywords="machine-learning ai information-extraction weak-supervision",
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
],
project_urls={
"Homepage": "https://hazyresearch.github.io/snorkel-superglue/",
"Source": "https://github.com/HazyResearch/snorkel-superglue/",
"Bug Reports": "https://github.com/HazyResearch/snorkel-superglue/issues",
},
)
| snorkel-superglue-master | setup.py |
import argparse
import logging
import os
import pandas as pd
from snorkel.mtl.data import MultitaskDataset
def str2list(v, dim=","):
return [t.strip() for t in v.split(dim)]
def str2bool(v):
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
def write_to_file(path, file_name, value):
    if not isinstance(value, str):
        value = str(value)
    with open(os.path.join(path, file_name), "w") as fout:
        fout.write(value + "\n")
def add_flags_from_config(parser, config_dict):
"""
Adds a flag (and default value) to an ArgumentParser for each parameter in a config
"""
def OrNone(default):
def func(x):
# Convert "none" to proper None object
if x.lower() == "none":
return None
# If default is None (and x is not None), return x without conversion as str
elif default is None:
return str(x)
# Otherwise, default has non-None type; convert x to that type
else:
return type(default)(x)
return func
for param in config_dict:
default = config_dict[param]
try:
if isinstance(default, dict):
parser = add_flags_from_config(parser, default)
elif isinstance(default, bool):
parser.add_argument(f"--{param}", type=str2bool, default=default)
elif isinstance(default, list):
if len(default) > 0:
# pass a list as argument
parser.add_argument(
f"--{param}",
action="append",
type=type(default[0]),
default=default,
)
else:
parser.add_argument(f"--{param}", action="append", default=default)
else:
parser.add_argument(f"--{param}", type=OrNone(default), default=default)
except argparse.ArgumentError:
logging.warning(
f"Could not add flag for param {param} because it was already present."
)
return parser
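# Example (a sketch): with parser = argparse.ArgumentParser() and
# config_dict = {"lr": 0.01, "train": True, "seed": None}, add_flags_from_config
# registers --lr (parsed via float), --train (parsed via str2bool) and --seed
# (str-or-None), so
#   args = parser.parse_args(["--lr", "0.001", "--train", "false"])
# yields args.lr == 0.001 and args.train is False.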
def task_dataset_to_dataframe(dataset: MultitaskDataset) -> pd.DataFrame:
data_dict = dataset.X_dict
data_dict["labels"] = dataset.Y_dict["labels"]
return pd.DataFrame(data_dict)
| snorkel-superglue-master | utils.py |
""" Script for downloading all SuperGLUE data.
For licence information, see the original dataset information links
available from: https://super.gluebenchmark.com/
Example usage:
python download_superglue_data.py --data_dir data --tasks all
"""
import argparse
import os
import shutil
import sys
import tempfile
import urllib.request
import zipfile
TASKS = ["CB", "COPA", "MultiRC", "RTE", "WiC", "WSC", "diagnostic"]
TASK2PATH = {
"CB": "https://dl.fbaipublicfiles.com/glue/superglue/data/CB.zip",
"COPA": "https://dl.fbaipublicfiles.com/glue/superglue/data/COPA.zip",
"MultiRC": "https://dl.fbaipublicfiles.com/glue/superglue/data/MultiRC.zip",
"RTE": "https://dl.fbaipublicfiles.com/glue/superglue/data/RTE.zip",
"WiC": "https://dl.fbaipublicfiles.com/glue/superglue/data/WiC.zip",
"WSC": "https://dl.fbaipublicfiles.com/glue/superglue/data/WSC.zip",
"diagnostic": "https://www.dropbox.com/s/ju7d95ifb072q9f/diagnostic-full.tsv?dl=1",
}
def download_and_extract(task, data_dir):
print("Downloading and extracting %s..." % task)
if not os.path.isdir(os.path.join(data_dir, task)):
os.mkdir(os.path.join(data_dir, task))
data_file = os.path.join(data_dir, task, "%s.zip" % task)
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(os.path.join(data_dir, task))
os.remove(data_file)
print("\tCompleted!")
def download_diagnostic(data_dir):
print("Downloading and extracting diagnostic...")
if not os.path.isdir(os.path.join(data_dir, "RTE")):
os.mkdir(os.path.join(data_dir, "RTE"))
data_file = os.path.join(data_dir, "RTE", "diagnostic-full.tsv")
urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
print("\tCompleted!")
return
def get_tasks(task_names):
task_names = task_names.split(",")
if "all" in task_names:
tasks = TASKS
else:
tasks = []
for task_name in task_names:
assert task_name in TASKS, "Task %s not found!" % task_name
tasks.append(task_name)
if "RTE" in tasks and "diagnostic" not in tasks:
tasks.append("diagnostic")
return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument(
"-d",
"--data_dir",
help="directory to save data to",
type=str,
default="../superglue_data",
)
parser.add_argument(
"-t",
"--tasks",
help="tasks to download data for as a comma separated string",
type=str,
default="all",
)
args = parser.parse_args(arguments)
if not os.path.exists(args.data_dir):
os.mkdir(args.data_dir)
tasks = get_tasks(args.tasks)
for task in tasks:
if task == "diagnostic":
download_diagnostic(args.data_dir)
else:
download_and_extract(task, args.data_dir)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| snorkel-superglue-master | download_superglue_data.py |
import torch
from torch import nn
class ChoiceModule(nn.Module):
def __init__(self, n_choices=2):
super().__init__()
self.n_choices = n_choices
def forward(self, immediate_ouput_dict):
logits = []
for i in range(self.n_choices):
            logits.append(immediate_ouput_dict[f"choice{i}rep"][0])
logits = torch.cat(logits, dim=1)
return logits
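# Input sketch (assuming two choices): the task flow is expected to have placed
# entries named "choice0rep" and "choice1rep" in the intermediate output dict,
# each whose first element is a [batch_size, 1] score; forward() concatenates
# them into [batch_size, n_choices] logits.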
| snorkel-superglue-master | superglue_modules/copa_module.py |
import os
import torch
from pytorch_pretrained_bert.modeling import BertModel
from torch import nn
class BertModule(nn.Module):
def __init__(self, bert_model_name, cache_dir="./cache/"):
super().__init__()
# Create cache directory if not exists
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.bert_model = BertModel.from_pretrained(
bert_model_name, cache_dir=cache_dir
)
def forward(self, token_ids, token_type_ids=None, attention_mask=None):
encoded_layers, pooled_output = self.bert_model(
token_ids, token_type_ids, attention_mask
)
return encoded_layers, pooled_output
class BertLastCLSModule(nn.Module):
def __init__(self, dropout_prob=0.0):
super().__init__()
self.dropout = nn.Dropout(dropout_prob)
def forward(self, input):
last_hidden = input[-1][:, 0, :]
out = self.dropout(last_hidden)
return out
class BertContactLastCLSWithTwoTokensModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, idx1, idx2):
last_layer = input[-1]
last_cls = last_layer[:, 0, :]
idx1 = idx1.unsqueeze(-1).unsqueeze(-1).expand([-1, -1, last_layer.size(-1)])
idx2 = idx2.unsqueeze(-1).unsqueeze(-1).expand([-1, -1, last_layer.size(-1)])
token1_emb = last_layer.gather(dim=1, index=idx1).squeeze(dim=1)
token2_emb = last_layer.gather(dim=1, index=idx2).squeeze(dim=1)
output = torch.cat([last_cls, token1_emb, token2_emb], dim=-1)
return output
| snorkel-superglue-master | superglue_modules/bert_module.py |
 | snorkel-superglue-master | superglue_modules/__init__.py |
from torch import nn
class RegressionModule(nn.Module):
def __init__(self, feature_dim):
super().__init__()
self.linear = nn.Linear(feature_dim, 1)
def forward(self, feature):
return self.linear.forward(feature)
| snorkel-superglue-master | superglue_modules/regression_module.py |
import torch
from torch import nn
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor
class SpanClassifierModule(nn.Module):
def _make_span_extractor(self):
return SelfAttentiveSpanExtractor(self.proj_dim)
def _make_cnn_layer(self, d_inp):
"""
Make a CNN layer as a projection of local context.
CNN maps [batch_size, max_len, d_inp]
to [batch_size, max_len, proj_dim] with no change in length.
"""
k = 1 + 2 * self.cnn_context
padding = self.cnn_context
return nn.Conv1d(
d_inp,
self.proj_dim,
kernel_size=k,
stride=1,
padding=padding,
dilation=1,
groups=1,
bias=True,
)
def __init__(
self,
d_inp=1024,
proj_dim=512,
num_spans=2,
cnn_context=0,
n_classes=2,
dropout=0.1,
):
super().__init__()
self.cnn_context = cnn_context
self.num_spans = num_spans
self.proj_dim = proj_dim
self.dropout = nn.Dropout(dropout)
self.projs = torch.nn.ModuleList()
for i in range(num_spans):
# create a word-level pooling layer operator
proj = self._make_cnn_layer(d_inp)
self.projs.append(proj)
self.span_extractors = torch.nn.ModuleList()
# Lee's self-pooling operator (https://arxiv.org/abs/1812.10860)
for i in range(num_spans):
span_extractor = self._make_span_extractor()
self.span_extractors.append(span_extractor)
# Classifier gets concatenated projections of spans.
clf_input_dim = self.span_extractors[1].get_output_dim() * num_spans
self.classifier = nn.Linear(clf_input_dim, n_classes)
def forward(self, feature, span1_idxs, span2_idxs, mask):
# Apply projection CNN layer for each span of the input sentence
sent_embs_t = self.dropout(feature[-1]).transpose(1, 2) # needed for CNN layer
se_projs = []
for i in range(self.num_spans):
se_proj = self.projs[i](sent_embs_t).transpose(2, 1).contiguous()
se_projs.append(se_proj)
span_embs = None
_kw = dict(sequence_mask=mask.unsqueeze(2).long())
span_idxs = [span1_idxs.unsqueeze(1), span2_idxs.unsqueeze(1)]
for i in range(self.num_spans):
# spans are [batch_size, num_targets, span_modules]
span_emb = self.span_extractors[i](se_projs[i], span_idxs[i], **_kw)
if span_embs is None:
span_embs = span_emb
else:
span_embs = torch.cat([span_embs, span_emb], dim=2)
# [batch_size, num_targets, n_classes]
logits = self.classifier(span_embs).squeeze(1)
return logits
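# Shape sketch (assuming the defaults d_inp=1024, proj_dim=512, num_spans=2):
# feature[-1] is [batch, seq_len, 1024]; each CNN projection maps it to
# [batch, seq_len, 512]; the span extractors pool each span into
# [batch, 1, 512]; concatenating the two spans gives [batch, 1, 1024], and the
# final linear layer produces [batch, n_classes] logits after the squeeze.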
| snorkel-superglue-master | superglue_modules/wsc_module.py |
from torch import nn
class ClassificationModule(nn.Module):
def __init__(self, feature_dim, class_cardinality):
super().__init__()
self.linear = nn.Linear(feature_dim, class_cardinality)
def forward(self, feature):
return self.linear.forward(feature)
| snorkel-superglue-master | superglue_modules/classification_module.py |
from snorkel.slicing.sf import slicing_function
from .general_sfs import slice_func_dict as general_slice_func_dict
@slicing_function()
def slice_temporal_preposition(example):
temporal_prepositions = ["after", "before", "past"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in temporal_prepositions])
@slicing_function()
def slice_possessive_preposition(example):
possessive_prepositions = ["inside of", "with", "within"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in possessive_prepositions])
@slicing_function()
def slice_is_comparative(example):
comparative_words = ["more", "less", "better", "worse", "bigger", "smaller"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in comparative_words])
@slicing_function()
def slice_is_quantification(example):
quantification_words = ["all", "some", "none"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in quantification_words])
@slicing_function()
def slice_short_hypothesis(example, thresh=5):
return len(example.sentence2.split()) < thresh
@slicing_function()
def slice_long_hypothesis(example, thresh=15):
return len(example.sentence2.split()) > thresh
@slicing_function()
def slice_short_premise(example, thresh=10):
return len(example.sentence1.split()) < thresh
@slicing_function()
def slice_long_premise(example, thresh=100):
return len(example.sentence1.split()) > thresh
slices = [
slice_temporal_preposition,
slice_possessive_preposition,
slice_is_comparative,
slice_is_quantification,
slice_short_hypothesis,
slice_long_hypothesis,
slice_short_premise,
slice_long_premise,
]
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/CB_sfs.py |
from .general_sfs import slice_func_dict as general_slice_func_dict
slices = []
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/COPA_sfs.py |
from .general_sfs import slice_func_dict as general_slice_func_dict
slices = []
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/WSC_sfs.py |
from . import general_sfs, RTE_sfs, WiC_sfs, CB_sfs, COPA_sfs, MultiRC_sfs, WSC_sfs
slice_func_dict = {
"CB": CB_sfs.slice_func_dict,
"COPA": COPA_sfs.slice_func_dict,
"MultiRC": MultiRC_sfs.slice_func_dict,
"RTE": RTE_sfs.slice_func_dict,
"WiC": WiC_sfs.slice_func_dict,
"WSC": WSC_sfs.slice_func_dict,
}
| snorkel-superglue-master | superglue_slices/__init__.py |
from snorkel.slicing.sf import slicing_function
from .general_sfs import slice_func_dict as general_slice_func_dict
@slicing_function()
def slice_temporal_preposition(example):
temporal_prepositions = ["after", "before", "past"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in temporal_prepositions])
@slicing_function()
def slice_possessive_preposition(example):
possessive_prepositions = ["inside of", "with", "within"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in possessive_prepositions])
@slicing_function()
def slice_is_comparative(example):
comparative_words = ["more", "less", "better", "worse", "bigger", "smaller"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in comparative_words])
@slicing_function()
def slice_is_quantification(example):
quantification_words = ["all", "some", "none"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in quantification_words])
@slicing_function()
def slice_short_hypothesis(example, thresh=5):
return len(example.sentence2.split()) < thresh
@slicing_function()
def slice_long_hypothesis(example, thresh=15):
return len(example.sentence2.split()) > thresh
@slicing_function()
def slice_short_premise(example, thresh=10):
return len(example.sentence1.split()) < thresh
@slicing_function()
def slice_long_premise(example, thresh=100):
return len(example.sentence1.split()) > thresh
slices = [
slice_temporal_preposition,
slice_possessive_preposition,
slice_is_comparative,
slice_is_quantification,
slice_short_hypothesis,
slice_long_hypothesis,
slice_short_premise,
slice_long_premise,
]
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict)
| snorkel-superglue-master | superglue_slices/RTE_sfs.py |
from .general_sfs import slice_func_dict as general_slice_func_dict
slices = []
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/MultiRC_sfs.py |
from snorkel.slicing.sf import slicing_function
from .general_sfs import slice_func_dict as general_slice_func_dict
@slicing_function()
def slice_verb(example):
"""Is the target word a verb?"""
return example.pos == "V"
@slicing_function()
def slice_noun(example):
"""Is the target word a noun?"""
return example.pos == "N"
@slicing_function()
def slice_trigram(example):
"""Does the target word share a trigram between sentences?"""
def get_ngrams(tokens, window=1):
num_ngrams = len(tokens) - window + 1
for i in range(num_ngrams):
yield tokens[i : i + window]
trigrams = []
for sent, sent_idx in [
(example.sentence1, example.sentence1_idx),
(example.sentence2, example.sentence2_idx),
]:
tokens = sent.split()
trigrams.append(
[
" ".join(ngram).lower()
for ngram in get_ngrams(tokens[sent_idx - 2 : sent_idx + 2], window=3)
if len(ngram) == 3
]
)
return len(set(trigrams[0]).intersection(set(trigrams[1]))) > 0
@slicing_function()
def slice_mismatch_verb(example):
"""Is the target word a verb with different forms between sentences?"""
form1 = example.sentence1.split()[example.sentence1_idx]
form2 = example.sentence2.split()[example.sentence2_idx]
return (form1 != form2) and example.pos == "V"
@slicing_function()
def slice_mismatch_noun(example):
"""Is the target word a noun with different forms between sentences?"""
form1 = example.sentence1.split()[example.sentence1_idx]
form2 = example.sentence2.split()[example.sentence2_idx]
return (form1 != form2) and example.pos == "N"
slices = [
slice_verb,
slice_noun,
slice_trigram,
slice_mismatch_verb,
slice_mismatch_noun,
]
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict)
| snorkel-superglue-master | superglue_slices/WiC_sfs.py |
from snorkel.slicing.sf import slicing_function
@slicing_function()
def slice_temporal_preposition(example):
temporal_prepositions = ["after", "before", "past"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in temporal_prepositions])
@slicing_function()
def slice_possessive_preposition(example):
possessive_prepositions = ["inside of", "with", "within"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in possessive_prepositions])
@slicing_function()
def slice_is_comparative(example):
comparative_words = ["more", "less", "better", "worse", "bigger", "smaller"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in comparative_words])
@slicing_function()
def slice_is_quantification(example):
quantification_words = ["all", "some", "none"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in quantification_words])
@slicing_function()
def slice_short_hypothesis(example, thresh=5):
return len(example.sentence2.split()) < thresh
@slicing_function()
def slice_long_hypothesis(example, thresh=15):
return len(example.sentence2.split()) > thresh
@slicing_function()
def slice_short_premise(example, thresh=10):
return len(example.sentence1.split()) < thresh
@slicing_function()
def slice_long_premise(example, thresh=100):
return len(example.sentence1.split()) > thresh
@slicing_function()
def slice_where(example):
sentences = example.sentence1 + example.sentence2
return "where" in sentences
@slicing_function()
def slice_who(example):
sentences = example.sentence1 + example.sentence2
return "who" in sentences
@slicing_function()
def slice_what(example):
sentences = example.sentence1 + example.sentence2
return "what" in sentences
@slicing_function()
def slice_when(example):
sentences = example.sentence1 + example.sentence2
return "when" in sentences
@slicing_function()
def slice_and(example):
sentences = example.sentence1 + example.sentence2
return "and" in sentences
@slicing_function()
def slice_but(example):
sentences = example.sentence1 + example.sentence2
return "but" in sentences
@slicing_function()
def slice_or(example):
sentences = example.sentence1 + example.sentence2
return "or" in sentences
@slicing_function()
def slice_multiple_articles(example):
sentences = example.sentence1 + example.sentence2
multiple_indefinite = (
sum([int(x == "a") for x in sentences.split()]) > 1
or sum([int(x == "an") for x in sentences.split()]) > 1
)
multiple_definite = sum([int(x == "the") for x in sentences.split()]) > 1
return multiple_indefinite or multiple_definite
slices = [
slice_temporal_preposition,
slice_possessive_preposition,
slice_is_comparative,
slice_is_quantification,
slice_short_hypothesis,
slice_long_hypothesis,
slice_short_premise,
slice_long_premise,
slice_where,
slice_who,
slice_what,
slice_when,
slice_and,
slice_or,
slice_but,
slice_multiple_articles,
]
slice_func_dict = {slice.name: slice for slice in slices}
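# Example application (a sketch mirroring how run.py uses these SFs): given a
# pandas DataFrame df with "sentence1" and "sentence2" columns, building
# PandasSFApplier([slice_short_premise, slice_long_premise]) and calling
# .apply(df) yields a slice-membership matrix with one column per slicing
# function, which run.py then passes to add_slice_labels.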
| snorkel-superglue-master | superglue_slices/general_sfs.py |
import sys
from functools import partial
from superglue_modules.bert_module import (
BertContactLastCLSWithTwoTokensModule,
BertModule,
)
from superglue_modules.wsc_module import SpanClassifierModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "WSC"
def build_task(bert_model_name, last_hidden_dropout_prob=None):
if last_hidden_dropout_prob:
raise NotImplementedError(f"TODO: last_hidden_dropout_prob for {TASK_NAME}")
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
f"{TASK_NAME}_pred_head": SpanClassifierModule(
d_inp=bert_output_dim, proj_dim=bert_output_dim // 2
),
}
),
task_flow=[
Operation(
name=f"{TASK_NAME}_bert_module",
module_name="bert_module",
inputs=[
("_input_", "token_ids"),
("_input_", "token_segments"),
("_input_", "token_masks"),
],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[
(f"{TASK_NAME}_bert_module", 0),
("_input_", "token1_idx"),
("_input_", "token2_idx"),
("_input_", "token_masks"),
],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
| snorkel-superglue-master | superglue_tasks/wsc.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from superglue_modules.copa_module import ChoiceModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "SWAG"
def build_task(bert_model_name, last_hidden_dropout_prob=0.0):
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
"bert_last_cls": BertLastCLSModule(
dropout_prob=last_hidden_dropout_prob
),
"linear_module": nn.Linear(bert_output_dim, 1),
f"{TASK_NAME}_pred_head": ChoiceModule(task_cardinality),
}
),
task_flow=[
Operation(
name="choice0",
module_name="bert_module",
inputs=[("_input_", "token1_ids")],
),
Operation(
name="choice1",
module_name="bert_module",
inputs=[("_input_", "token2_ids")],
),
Operation(
name="choice2",
module_name="bert_module",
inputs=[("_input_", "token3_ids")],
),
Operation(
name="choice3",
module_name="bert_module",
inputs=[("_input_", "token4_ids")],
),
Operation(
name="choice0_bert_last_cls",
module_name="bert_last_cls",
inputs=[("choice0", 0)],
),
Operation(
name="choice1_bert_last_cls",
module_name="bert_last_cls",
inputs=[("choice1", 0)],
),
Operation(
name="choice2_bert_last_cls",
module_name="bert_last_cls",
inputs=[("choice2", 0)],
),
Operation(
name="choice3_bert_last_cls",
module_name="bert_last_cls",
inputs=[("choice3", 0)],
),
Operation(
name="choice0rep",
module_name="linear_module",
inputs=[("choice0_bert_last_cls", 0)],
),
Operation(
name="choice1rep",
module_name="linear_module",
inputs=[("choice1_bert_last_cls", 0)],
),
Operation(
name="choice2rep",
module_name="linear_module",
inputs=[("choice2_bert_last_cls", 0)],
),
Operation(
name="choice3rep",
module_name="linear_module",
inputs=[("choice3_bert_last_cls", 0)],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
| snorkel-superglue-master | superglue_tasks/swag.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.model.metrics import metric_score
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "CB"
# custom_metric_funcs #################
def macro_f1(golds, preds, probs):
return metric_score(golds, preds, probs, metric="f1")
def accuracy_macro_f1(golds, preds, probs):
f1 = macro_f1(golds, preds, probs)
accuracy = metric_score(golds, preds, probs, metric="accuracy")
return (f1 + accuracy) / 2
#########################################
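# Worked example (hedged): if a CB evaluation gives accuracy 0.80 and macro F1
# 0.70, accuracy_macro_f1 reports (0.80 + 0.70) / 2 = 0.75, i.e. the averaged
# accuracy/F1 number commonly reported for CB.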
def build_task(bert_model_name, last_hidden_dropout_prob=0.0):
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {"macro_f1": macro_f1, "accuracy_macro_f1": accuracy_macro_f1}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
f"{TASK_NAME}_feature": BertLastCLSModule(
dropout_prob=last_hidden_dropout_prob
),
f"{TASK_NAME}_pred_head": nn.Linear(bert_output_dim, task_cardinality),
}
),
task_flow=[
Operation(
name=f"{TASK_NAME}_bert_module",
module_name="bert_module",
inputs=[
("_input_", "token_ids"),
("_input_", "token_segments"),
("_input_", "token_masks"),
],
),
Operation(
name=f"{TASK_NAME}_feature",
module_name=f"{TASK_NAME}_feature",
inputs=[(f"{TASK_NAME}_bert_module", 0)],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[(f"{TASK_NAME}_feature", 0)],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
| snorkel-superglue-master | superglue_tasks/cb.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from superglue_modules.copa_module import ChoiceModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "COPA"
def build_task(bert_model_name, last_hidden_dropout_prob=0.0):
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
f"{TASK_NAME}_feature": BertLastCLSModule(
dropout_prob=last_hidden_dropout_prob
),
"linear_module": nn.Linear(bert_output_dim, 1),
f"{TASK_NAME}_pred_head": ChoiceModule(task_cardinality),
}
),
task_flow=[
Operation(
name="choice0",
module_name="bert_module",
inputs=[("_input_", "token1_ids")],
),
Operation(
name="choice1",
module_name="bert_module",
inputs=[("_input_", "token2_ids")],
),
Operation(
name="choice0_bert_last_cls",
module_name=f"{TASK_NAME}_feature",
inputs=[("choice0", 0)],
),
Operation(
name="choice1_bert_last_cls",
module_name=f"{TASK_NAME}_feature",
inputs=[("choice1", 0)],
),
Operation(
name="choice0rep",
module_name="linear_module",
inputs=[("choice0_bert_last_cls", 0)],
),
Operation(
name="choice1rep",
module_name="linear_module",
inputs=[("choice1_bert_last_cls", 0)],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
| snorkel-superglue-master | superglue_tasks/copa.py |
from . import cb, copa, multirc, rte, wic, wsc, swag
task_funcs = {
"CB": cb.build_task,
"COPA": copa.build_task,
"MultiRC": multirc.build_task,
"RTE": rte.build_task,
"WiC": wic.build_task,
"WSC": wsc.build_task,
"SWAG": swag.build_task,
}
| snorkel-superglue-master | superglue_tasks/__init__.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "RTE"
def build_task(bert_model_name, last_hidden_dropout_prob=0.0):
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
f"{TASK_NAME}_feature": BertLastCLSModule(
dropout_prob=last_hidden_dropout_prob
),
f"{TASK_NAME}_pred_head": nn.Linear(bert_output_dim, task_cardinality),
}
),
task_flow=[
Operation(
name=f"{TASK_NAME}_bert_module",
module_name="bert_module",
inputs=[
("_input_", "token_ids"),
("_input_", "token_segments"),
("_input_", "token_masks"),
],
),
Operation(
name=f"{TASK_NAME}_feature",
module_name=f"{TASK_NAME}_feature",
inputs=[(f"{TASK_NAME}_bert_module", 0)],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[(f"{TASK_NAME}_feature", 0)],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
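# Data-flow sketch for this task (names as defined above): the "_input_" token
# tensors feed bert_module; its first output (the per-layer encodings) goes
# through RTE_feature, which keeps the last layer's [CLS] vector with dropout,
# and then through the linear RTE_pred_head, whose logits are consumed by the
# ce_loss/output helpers in superglue_tasks/utils.py.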
| snorkel-superglue-master | superglue_tasks/rte.py |
import sys
from functools import partial
from torch import nn
from snorkel.model.metrics import metric_score
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Operation, Task
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "MultiRC"
def build_task(bert_model_name, last_hidden_dropout_prob=0.0):
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
"bert_last_CLS": BertLastCLSModule(
dropout_prob=last_hidden_dropout_prob
),
f"{TASK_NAME}_pred_head": nn.Linear(bert_output_dim, task_cardinality),
}
),
task_flow=[
Operation(
name=f"{TASK_NAME}_bert_module",
module_name="bert_module",
inputs=[
("_input_", "token_ids"),
("_input_", "token_segments"),
("_input_", "token_masks"),
],
),
Operation(
name=f"{TASK_NAME}_bert_last_CLS",
module_name="bert_last_CLS",
inputs=[(f"{TASK_NAME}_bert_module", 0)],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[(f"{TASK_NAME}_bert_last_CLS", 0)],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
| snorkel-superglue-master | superglue_tasks/multirc.py |
import torch.nn.functional as F
def ce_loss(module_name, immediate_ouput_dict, Y, active):
return F.cross_entropy(
immediate_ouput_dict[module_name][0][active], (Y.view(-1) - 1)[active]
)
def output(module_name, immediate_ouput_dict):
return F.softmax(immediate_ouput_dict[module_name][0], dim=1)
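# Note on the "- 1" in ce_loss above: gold labels follow SuperGLUE_LABEL_MAPPING
# and are 1-indexed (e.g. RTE uses 1/2), while F.cross_entropy expects 0-indexed
# targets, so a gold label of 2 is scored against logit index 1; the `active`
# mask restricts the loss to the currently active examples.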
| snorkel-superglue-master | superglue_tasks/utils.py |
import sys
from functools import partial
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from superglue_modules.bert_module import (
BertContactLastCLSWithTwoTokensModule,
BertModule,
)
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from . import utils
sys.path.append("..") # Adds higher directory to python modules path.
TASK_NAME = "WiC"
def build_task(bert_model_name, last_hidden_dropout_prob=None):
if last_hidden_dropout_prob:
raise NotImplementedError(f"TODO: last_hidden_dropout_prob for {TASK_NAME}")
bert_module = BertModule(bert_model_name)
bert_output_dim = 768 if "base" in bert_model_name else 1024
task_cardinality = (
len(SuperGLUE_LABEL_MAPPING[TASK_NAME].keys())
if SuperGLUE_LABEL_MAPPING[TASK_NAME] is not None
else 1
)
metrics = (
SuperGLUE_TASK_METRIC_MAPPING[TASK_NAME]
if TASK_NAME in SuperGLUE_TASK_METRIC_MAPPING
else []
)
custom_metric_funcs = {}
loss_fn = partial(utils.ce_loss, f"{TASK_NAME}_pred_head")
output_fn = partial(utils.output, f"{TASK_NAME}_pred_head")
task = Task(
name=TASK_NAME,
module_pool=nn.ModuleDict(
{
"bert_module": bert_module,
f"{TASK_NAME}_feature": BertContactLastCLSWithTwoTokensModule(),
f"{TASK_NAME}_pred_head": nn.Linear(
bert_output_dim * 3, task_cardinality
),
}
),
task_flow=[
Operation(
name=f"{TASK_NAME}_bert_module",
module_name="bert_module",
inputs=[
("_input_", "token_ids"),
("_input_", "token_segments"),
("_input_", "token_masks"),
],
),
Operation(
name=f"{TASK_NAME}_feature",
module_name=f"{TASK_NAME}_feature",
inputs=[
(f"{TASK_NAME}_bert_module", 0),
("_input_", "token1_idx"),
("_input_", "token2_idx"),
],
),
Operation(
name=f"{TASK_NAME}_pred_head",
module_name=f"{TASK_NAME}_pred_head",
inputs=[(f"{TASK_NAME}_feature", 0)],
),
],
loss_func=loss_fn,
output_func=output_fn,
scorer=Scorer(metrics=metrics, custom_metric_funcs=custom_metric_funcs),
)
return task
| snorkel-superglue-master | superglue_tasks/wic.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WSC"
def get_char_index(text, span_text, span_index):
tokens = text.replace("\n", " ").lower().split(" ")
span_tokens = span_text.replace("\n", " ").lower().split(" ")
# Token exact match
if tokens[span_index : span_index + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(span_text)
return st, ed
if span_index < len(tokens):
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_text.startswith(char_in_span):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
# ed = st + len(char_in_span)
ed = st + len(char_in_text)
return st, ed
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_span.startswith(char_in_text):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(char_in_text)
return st, ed
# Index out of range
if span_index >= len(tokens):
span_index -= 10
# Token fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx : idx + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(span_text)
return st, ed
# Token best fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx] == span_tokens[0]:
for length in range(1, len(span_tokens)):
if tokens[idx : idx + length] != span_tokens[:length]:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(" ".join(span_tokens[: length - 1]))
return st, ed
return None
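# Worked example (illustrative only): for text "Mark told Pete many lies about
# himself", span_text "Pete" and span_index 2, the token slice matches exactly,
# so the function returns (10, 14), i.e. the character span of "Pete" in the
# original text; the fuzzy-matching branches handle cases where whitespace
# tokenization and the provided indices disagree.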
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence text
sentences = []
# span1
span1s = []
# span2
span2s = []
# span1 idx
span1_idxs = []
# span2 idx
span2_idxs = []
# label
labels = []
token1_idxs = []
token2_idxs = []
bert_tokens = []
bert_token_ids = []
bert_token_masks = []
bert_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
text = row["text"]
span1_text = row["target"]["span1_text"]
span2_text = row["target"]["span2_text"]
span1_index = row["target"]["span1_index"]
span2_index = row["target"]["span2_index"]
label = row["label"] if "label" in row else True
span1_char_index = get_char_index(text, span1_text, span1_index)
span2_char_index = get_char_index(text, span2_text, span2_index)
        assert span1_char_index is not None, f"Check example {index} in {jsonl_path}"
        assert span2_char_index is not None, f"Check example {index} in {jsonl_path}"
# Tokenize sentences
bert_tokens_sub1 = tokenizer.tokenize(
text[: min(span1_char_index[0], span2_char_index[0])]
)
if span1_char_index[0] < span2_char_index[0]:
bert_tokens_sub2 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
token1_idx = [
len(bert_tokens_sub1) + 1,
len(bert_tokens_sub1) + len(bert_tokens_sub2),
]
else:
bert_tokens_sub2 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
token2_idx = [
len(bert_tokens_sub1) + 1,
len(bert_tokens_sub1) + len(bert_tokens_sub2),
]
sub3_st = (
span1_char_index[1]
if span1_char_index[0] < span2_char_index[0]
else span2_char_index[1]
)
sub3_ed = (
span1_char_index[0]
if span1_char_index[0] > span2_char_index[0]
else span2_char_index[0]
)
bert_tokens_sub3 = tokenizer.tokenize(text[sub3_st:sub3_ed])
if span1_char_index[0] < span2_char_index[0]:
bert_tokens_sub4 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
cur_len = (
len(bert_tokens_sub1) + len(bert_tokens_sub2) + len(bert_tokens_sub3)
)
token2_idx = [cur_len + 1, cur_len + len(bert_tokens_sub4)]
else:
bert_tokens_sub4 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
cur_len = (
len(bert_tokens_sub1) + len(bert_tokens_sub2) + len(bert_tokens_sub3)
)
token1_idx = [cur_len + 1, cur_len + len(bert_tokens_sub4)]
if span1_char_index[0] < span2_char_index[0]:
bert_tokens_sub5 = tokenizer.tokenize(text[span2_char_index[1] :])
else:
bert_tokens_sub5 = tokenizer.tokenize(text[span1_char_index[1] :])
tokens = (
["[CLS]"]
+ bert_tokens_sub1
+ bert_tokens_sub2
+ bert_tokens_sub3
+ bert_tokens_sub4
+ bert_tokens_sub5
+ ["[SEP]"]
)
if len(tokens) > max_len:
max_len = len(tokens)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_segments = [0] * len(token_ids)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
token1_idxs.append(token1_idx)
token2_idxs.append(token2_idx)
sentences.append(text)
span1s.append(span1_text)
span2s.append(span2_text)
span1_idxs.append(span1_index)
span2_idxs.append(span2_index)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
bert_tokens.append(tokens)
bert_token_ids.append(torch.LongTensor(token_ids))
bert_token_masks.append(torch.LongTensor(token_masks))
bert_token_segments.append(torch.LongTensor(token_segments))
token1_idxs = torch.from_numpy(np.array(token1_idxs))
token2_idxs = torch.from_numpy(np.array(token2_idxs))
labels = torch.from_numpy(np.array(labels))
    logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence": sentences,
"span1": span1s,
"span2": span2s,
"span1_idx": span1_idxs,
"span2_idx": span2_idxs,
"token1_idx": token1_idxs,
"token2_idx": token2_idxs,
"tokens": bert_tokens,
"token_ids": bert_token_ids,
"token_masks": bert_token_masks,
"token_segments": bert_token_segments,
},
Y_dict={"labels": labels},
)
| snorkel-superglue-master | superglue_parsers/wsc.py |
import json
import logging
import sys
import pandas as pd
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "SWAG"
def parse(csv_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {csv_path}.")
rows = pd.read_csv(csv_path)
# for i in range(2):
# logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence1
sent1s = []
# sentence2
sent2s = []
# choice1
choice1s = []
# choice2
choice2s = []
# choice3
choice3s = []
# choice4
choice4s = []
labels = []
bert_token1_ids = []
bert_token2_ids = []
bert_token3_ids = []
bert_token4_ids = []
# Check the maximum token length
max_len = -1
for ex_idx, ex in rows.iterrows():
sent1 = ex["sent1"]
sent2 = ex["sent2"]
choice1 = ex["ending0"]
choice2 = ex["ending1"]
choice3 = ex["ending2"]
choice4 = ex["ending3"]
label = ex["label"] if "label" in ex else 0
sent1s.append(sent1)
sent2s.append(sent2)
choice1s.append(choice1)
choice2s.append(choice2)
choice3s.append(choice3)
choice4s.append(choice4)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
# Tokenize sentences
sent1_tokens = tokenizer.tokenize(sent1)
sent2_tokens = tokenizer.tokenize(sent2)
choice1_tokens = tokenizer.tokenize(choice1)
choice2_tokens = tokenizer.tokenize(choice2)
choice3_tokens = tokenizer.tokenize(choice3)
choice4_tokens = tokenizer.tokenize(choice4)
# Convert to BERT manner
bert_token1 = (
["[CLS]"]
+ sent1_tokens
+ ["[SEP]"]
+ sent2_tokens
+ choice1_tokens
+ ["[SEP]"]
)
bert_token2 = (
["[CLS]"]
+ sent1_tokens
+ ["[SEP]"]
+ sent2_tokens
+ choice2_tokens
+ ["[SEP]"]
)
bert_token3 = (
["[CLS]"]
+ sent1_tokens
+ ["[SEP]"]
+ sent2_tokens
+ choice3_tokens
+ ["[SEP]"]
)
bert_token4 = (
["[CLS]"]
+ sent1_tokens
+ ["[SEP]"]
+ sent2_tokens
+ choice4_tokens
+ ["[SEP]"]
)
token1_ids = tokenizer.convert_tokens_to_ids(bert_token1)
token2_ids = tokenizer.convert_tokens_to_ids(bert_token2)
token3_ids = tokenizer.convert_tokens_to_ids(bert_token3)
token4_ids = tokenizer.convert_tokens_to_ids(bert_token4)
if len(token1_ids) > max_len:
max_len = len(token1_ids)
if len(token2_ids) > max_len:
max_len = len(token2_ids)
if len(token3_ids) > max_len:
max_len = len(token3_ids)
if len(token4_ids) > max_len:
max_len = len(token4_ids)
bert_token1_ids.append(torch.LongTensor(token1_ids))
bert_token2_ids.append(torch.LongTensor(token2_ids))
bert_token3_ids.append(torch.LongTensor(token3_ids))
bert_token4_ids.append(torch.LongTensor(token4_ids))
labels = torch.from_numpy(np.array(labels))
    logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sent1": sent1s,
"sent2": sent2s,
"choice1": choice1s,
"choice2": choice2s,
"choice3": choice3s,
"choice4": choice4s,
"token1_ids": bert_token1_ids,
"token2_ids": bert_token2_ids,
"token3_ids": bert_token3_ids,
"token4_ids": bert_token4_ids,
},
Y_dict={"labels": labels},
)
| snorkel-superglue-master | superglue_parsers/swag.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "CB"
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence1 text
sentence1s = []
# sentence2 text
sentence2s = []
# label
labels = []
bert_token_ids = []
bert_token_masks = []
bert_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
sentence1 = row["premise"]
sentence2 = row["hypothesis"]
label = row["label"] if "label" in row else "entailment"
sentence1s.append(sentence1)
sentence2s.append(sentence2)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
# Tokenize sentences
sent1_tokens = tokenizer.tokenize(sentence1)
sent2_tokens = tokenizer.tokenize(sentence2)
if len(sent1_tokens) + len(sent2_tokens) > max_len:
max_len = len(sent1_tokens) + len(sent2_tokens)
while True:
total_length = len(sent1_tokens) + len(sent2_tokens)
# Account for [CLS], [SEP], [SEP] with "- 3"
if total_length <= max_sequence_length - 3:
break
if len(sent1_tokens) > len(sent2_tokens):
sent1_tokens.pop()
else:
sent2_tokens.pop()
# Convert to BERT manner
tokens = ["[CLS]"] + sent1_tokens + ["[SEP]"]
token_segments = [0] * len(tokens)
tokens += sent2_tokens + ["[SEP]"]
token_segments += [1] * (len(sent2_tokens) + 1)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
bert_token_ids.append(torch.LongTensor(token_ids))
bert_token_masks.append(torch.LongTensor(token_masks))
bert_token_segments.append(torch.LongTensor(token_segments))
labels = torch.from_numpy(np.array(labels))
    logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence1": sentence1s,
"sentence2": sentence2s,
"token_ids": bert_token_ids,
"token_masks": bert_token_masks,
"token_segments": bert_token_segments,
},
Y_dict={"labels": labels},
)
| snorkel-superglue-master | superglue_parsers/cb.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "COPA"
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence1
sent1s = []
# sentence2
sent2s = []
# choice1
choice1s = []
# choice2
choice2s = []
labels = []
bert_token1_ids = []
bert_token2_ids = []
# Check the maximum token length
max_len = -1
for sample in rows:
index = sample["idx"]
sent1 = sample["premise"]
sent2 = sample["question"]
sent2 = (
"What was the cause of this?"
if sent2 == "cause"
else "What happened as a result?"
)
choice1 = sample["choice1"]
choice2 = sample["choice2"]
label = sample["label"] if "label" in sample else True
sent1s.append(sent1)
sent2s.append(sent2)
choice1s.append(choice1)
choice2s.append(choice2)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
# Tokenize sentences
sent1_tokens = tokenizer.tokenize(sent1)
sent2_tokens = tokenizer.tokenize(sent2)
# Tokenize choices
choice1_tokens = tokenizer.tokenize(choice1)
choice2_tokens = tokenizer.tokenize(choice2)
# Convert to BERT manner
tokens1 = (
["[CLS]"]
+ sent1_tokens
+ ["[SEP]"]
+ sent2_tokens
+ ["[SEP]"]
+ choice1_tokens
+ ["[SEP]"]
)
tokens2 = (
["[CLS]"]
+ sent1_tokens
+ ["[SEP]"]
+ sent2_tokens
+ ["[SEP]"]
+ choice2_tokens
+ ["[SEP]"]
)
token1_ids = tokenizer.convert_tokens_to_ids(tokens1)
token2_ids = tokenizer.convert_tokens_to_ids(tokens2)
if len(token1_ids) > max_len:
max_len = len(token1_ids)
if len(token2_ids) > max_len:
max_len = len(token2_ids)
bert_token1_ids.append(torch.LongTensor(token1_ids))
bert_token2_ids.append(torch.LongTensor(token2_ids))
labels = torch.from_numpy(np.array(labels))
    logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence1": sent1s,
"sentence2": sent2s,
"choice1": choice1s,
"choice2": choice2s,
"token1_ids": bert_token1_ids,
"token2_ids": bert_token2_ids,
},
Y_dict={"labels": labels},
)
| snorkel-superglue-master | superglue_parsers/copa.py |
from . import cb, copa, multirc, rte, wic, wsc, swag
parser = {
"MultiRC": multirc.parse,
"WiC": wic.parse,
"CB": cb.parse,
"COPA": copa.parse,
"RTE": rte.parse,
"WSC": wsc.parse,
"SWAG": swag.parse,
}
| snorkel-superglue-master | superglue_parsers/__init__.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "RTE"
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence1 text
sentence1s = []
# sentence2 text
sentence2s = []
# label
labels = []
bert_token_ids = []
bert_token_masks = []
bert_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
sentence1 = row["premise"]
sentence2 = row["hypothesis"]
label = row["label"] if "label" in row else "entailment"
sentence1s.append(sentence1)
sentence2s.append(sentence2)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
# Tokenize sentences
sent1_tokens = tokenizer.tokenize(sentence1)
sent2_tokens = tokenizer.tokenize(sentence2)
if len(sent1_tokens) + len(sent2_tokens) > max_len:
max_len = len(sent1_tokens) + len(sent2_tokens)
while True:
total_length = len(sent1_tokens) + len(sent2_tokens)
# Account for [CLS], [SEP], [SEP] with "- 3"
if total_length <= max_sequence_length - 3:
break
if len(sent1_tokens) > len(sent2_tokens):
sent1_tokens.pop()
else:
sent2_tokens.pop()
# Convert to BERT manner
tokens = ["[CLS]"] + sent1_tokens + ["[SEP]"]
token_segments = [0] * len(tokens)
tokens += sent2_tokens + ["[SEP]"]
token_segments += [1] * (len(sent2_tokens) + 1)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
bert_token_ids.append(torch.LongTensor(token_ids))
bert_token_masks.append(torch.LongTensor(token_masks))
bert_token_segments.append(torch.LongTensor(token_segments))
labels = torch.from_numpy(np.array(labels))
    logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence1": sentence1s,
"sentence2": sentence2s,
"token_ids": bert_token_ids,
"token_masks": bert_token_masks,
"token_segments": bert_token_segments,
},
Y_dict={"labels": labels},
)
| snorkel-superglue-master | superglue_parsers/rte.py |
import json
import logging
import re
import sys
import numpy as np
import torch
from snorkel.mtl.data import MultitaskDataset
from task_config import SuperGLUE_LABEL_MAPPING
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "MultiRC"
def get_rows(jsonl_path, max_data_samples):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
new_rows = []
for row in rows:
# each example has a paragraph field -> (text, questions)
# text is the paragraph, which requires some preprocessing
# questions is a list of questions,
# has fields (question, sentences_used, answers)
pid = row["idx"]
para_sent_list = re.sub(
"<b>Sent .{1,2}: </b>", "", row["paragraph"]["text"]
).split("<br>")
para = " ".join(para_sent_list)
for ques in row["paragraph"]["questions"]:
qid = ques["idx"]
question = ques["question"]
sent_used = ques["sentences_used"]
for ans in ques["answers"]:
new_row = {}
aid = ans["idx"]
answer = ans["text"]
new_row["pid"] = pid
new_row["qid"] = qid
new_row["aid"] = aid
new_row["paragraph"] = para
new_row["question"] = question
new_row["answer"] = answer
new_row["paragraph_sent_list"] = para_sent_list
new_row["sent_used"] = sent_used
new_row["label"] = ans["isAnswer"] if "isAnswer" in ans else False
new_rows.append(new_row)
for i in range(2):
logger.info(f"Sample {i}: {new_rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
new_rows = new_rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
return new_rows
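# Flattening sketch: a paragraph with 2 questions and 3 candidate answers per
# question yields 6 rows, one per (paragraph, question, answer) triple, each
# labeled by "isAnswer"; pid/qid/aid record where every row came from so that
# per-question results can be regrouped later.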
def parse_from_rows(rows, tokenizer, max_sequence_length):
# paragraph ids
pids = []
# question ids
qids = []
# answer ids
aids = []
# paragraph text
paras = []
# question text
questions = []
# answer text
answers = []
# labels
labels = []
bert_token_ids = []
bert_token_masks = []
bert_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
pid = row["pid"]
qid = row["qid"]
aid = row["aid"]
para_token = tokenizer.tokenize(row["paragraph"])[: max_sequence_length - 2]
question_token = tokenizer.tokenize(row["question"])[: max_sequence_length - 2]
answer_token = tokenizer.tokenize(row["answer"])[: max_sequence_length - 2]
# Generate tokens
tokens = (
["[CLS]"]
+ para_token
+ ["[SEP]"]
+ question_token
+ answer_token
+ ["[SEP]"]
)
        # Segment ids are all 0 here (paragraph, question, and answer share one segment)
token_segments = [0] * (len(para_token) + 2) + [0] * (
len(question_token) + len(answer_token) + 1
)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_masks = [1] * len(token_ids)
if len(tokens) > max_len:
max_len = len(tokens)
# Add to list
paras.append(row["paragraph"])
questions.append(row["question"])
answers.append(row["answer"])
label = row["label"]
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
pids.append(pid)
qids.append(qid)
aids.append(aid)
bert_token_ids.append(torch.LongTensor(token_ids))
bert_token_masks.append(torch.LongTensor(token_masks))
bert_token_segments.append(torch.LongTensor(token_segments))
labels = torch.from_numpy(np.array(labels))
logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"pid": pids,
"qid": qids,
"aid": aids,
"para": paras,
"question": questions,
"answer": answers,
"token_ids": bert_token_ids,
"token_masks": bert_token_masks,
"token_segments": bert_token_segments,
},
Y_dict={"labels": labels},
)
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
rows = get_rows(jsonl_path, max_data_samples)
return parse_from_rows(rows, tokenizer, max_sequence_length)
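# Minimal usage sketch (not part of the original file). The tokenizer here is
# an assumption: any BERT-style tokenizer exposing tokenize() and
# convert_tokens_to_ids() works, e.g. from the transformers package, although
# the original project may use a different BERT wrapper.
#
#   from transformers import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   dataset = parse("MultiRC/val.jsonl", tokenizer,
#                   max_data_samples=None, max_sequence_length=256)
#   print(len(dataset), dataset.X_dict.keys())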
| snorkel-superglue-master | superglue_parsers/multirc.py |
import json
import logging
import sys
import numpy as np
import torch
sys.path.append("..")  # Adds higher directory to python modules path.
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
logger = logging.getLogger(__name__)
TASK_NAME = "WiC"
def get_rows(jsonl_path, max_data_samples):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
for row in rows:
row["sentence1_idx"] = int(row["sentence1_idx"])
row["sentence2_idx"] = int(row["sentence2_idx"])
row["label"] = row["label"] if "label" in row else True
return rows
def parse_from_rows(rows, tokenizer, max_sequence_length):
# sentence1 text
sentence1s = []
# sentence2 text
sentence2s = []
# sentence1 idx
sentence1_idxs = []
# sentence2 idx
sentence2_idxs = []
# word in common
words = []
# pos tag
poses = []
# label
labels = []
token1_idxs = []
token2_idxs = []
bert_token_ids = []
bert_token_masks = []
bert_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
sentence1 = row["sentence1"]
sentence2 = row["sentence2"]
word = row["word"]
pos = row["pos"]
sentence1_idx = row["sentence1_idx"]
sentence2_idx = row["sentence2_idx"]
label = row["label"]
sentence1s.append(sentence1)
sentence2s.append(sentence2)
sentence1_idxs.append(sentence1_idx)
sentence2_idxs.append(sentence2_idx)
words.append(word)
poses.append(pos)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
# Tokenize sentences
sent1_tokens = tokenizer.tokenize(sentence1)
sent2_tokens = tokenizer.tokenize(sentence2)
word_tokens_in_sent1 = tokenizer.tokenize(sentence1.split()[sentence1_idx])
word_tokens_in_sent2 = tokenizer.tokenize(sentence2.split()[sentence2_idx])
while True:
total_length = len(sent1_tokens) + len(sent2_tokens)
if total_length > max_len:
max_len = total_length
# Account for [CLS], [SEP], [SEP] with "- 3"
if total_length <= max_sequence_length - 3:
break
if len(sent1_tokens) > len(sent2_tokens):
sent1_tokens.pop()
else:
sent2_tokens.pop()
for idx in range(sentence1_idx - 1, len(sent1_tokens)):
if (
sent1_tokens[idx : idx + len(word_tokens_in_sent1)]
== word_tokens_in_sent1
):
token1_idxs.append(idx + 1) # Add [CLS]
break
for idx in range(sentence2_idx - 1, len(sent2_tokens)):
if (
sent2_tokens[idx : idx + len(word_tokens_in_sent2)]
== word_tokens_in_sent2
):
token2_idxs.append(
idx + len(sent1_tokens) + 2
) # Add the length of the first sentence and [CLS] + [SEP]
break
# Convert to BERT manner
tokens = ["[CLS]"] + sent1_tokens + ["[SEP]"]
token_segments = [0] * len(tokens)
tokens += sent2_tokens + ["[SEP]"]
token_segments += [1] * (len(sent2_tokens) + 1)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
bert_token_ids.append(torch.LongTensor(token_ids))
bert_token_masks.append(torch.LongTensor(token_masks))
bert_token_segments.append(torch.LongTensor(token_segments))
token1_idxs = torch.from_numpy(np.array(token1_idxs))
token2_idxs = torch.from_numpy(np.array(token2_idxs))
labels = torch.from_numpy(np.array(labels))
logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence1": sentence1s,
"sentence2": sentence2s,
"word": words,
"pos": poses,
"sentence1_idx": sentence1_idxs,
"sentence2_idx": sentence2_idxs,
"token1_idx": token1_idxs,
"token2_idx": token2_idxs,
"token_ids": bert_token_ids,
"token_masks": bert_token_masks,
"token_segments": bert_token_segments,
},
Y_dict={"labels": labels},
)
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
rows = get_rows(jsonl_path, max_data_samples)
return parse_from_rows(rows, tokenizer, max_sequence_length)
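# Index bookkeeping sketch (illustrative numbers): if sentence1 tokenizes to 8
# word pieces and the target word is matched at word-piece position 2, then
# token1_idx = 2 + 1 = 3 because of the leading [CLS]; a match at word-piece
# position 4 in sentence2 gives token2_idx = 4 + 8 + 2 = 14, accounting for
# [CLS], all of sentence1, and the first [SEP].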
| snorkel-superglue-master | superglue_parsers/wic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
import torch
from lib.ddp_trainer import SegmentationTrainer
from lib.distributed import multi_proc_run
def single_proc_run(config):
if not torch.cuda.is_available():
raise Exception('No GPUs FOUND.')
torch.manual_seed(config.misc.seed)
torch.cuda.manual_seed(config.misc.seed)
trainer = SegmentationTrainer(config)
if config.train.is_train:
trainer.train()
else:
trainer.test()
@hydra.main(config_path='config', config_name='default.yaml')
def main(config):
# Convert to dict
if config.misc.num_gpus > 1:
multi_proc_run(config.misc.num_gpus, fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
if __name__ == '__main__':
__spec__ = None
os.environ['MKL_THREADING_LAYER'] = 'GNU'
main()
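# Launch sketch (the override keys are the config fields referenced above,
# i.e. hydra overrides for config/default.yaml; exact group names are otherwise
# an assumption):
#   python ddp_main.py misc.num_gpus=8 train.is_train=True
#   python ddp_main.py misc.num_gpus=1 train.is_train=False   # evaluation only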
| ContrastiveSceneContexts-main | downstream/semseg/ddp_main.py |
import random
import logging
import numpy as np
import scipy
import scipy.ndimage
import scipy.interpolate
import torch
# A sparse tensor consists of coordinates and associated features.
# You must apply augmentation to both.
# In 2D, flip, shear, scale, and rotation of images are coordinate transformation
# color jitter, hue, etc., are feature transformations
##############################
# Feature transformations
##############################
class ChromaticTranslation(object):
"""Add random color to the image, input must be an array in [0,255] or a PIL image"""
def __init__(self, trans_range_ratio=1e-1):
"""
trans_range_ratio: ratio of translation i.e. 255 * 2 * ratio * rand(-0.5, 0.5)
"""
self.trans_range_ratio = trans_range_ratio
def __call__(self, coords, feats, labels):
if random.random() < 0.95:
tr = (np.random.rand(1, 3) - 0.5) * 255 * 2 * self.trans_range_ratio
feats[:, :3] = np.clip(tr + feats[:, :3], 0, 255)
return coords, feats, labels
class ChromaticAutoContrast(object):
def __init__(self, randomize_blend_factor=True, blend_factor=0.5):
self.randomize_blend_factor = randomize_blend_factor
self.blend_factor = blend_factor
def __call__(self, coords, feats, labels):
if random.random() < 0.2:
# mean = np.mean(feats, 0, keepdims=True)
# std = np.std(feats, 0, keepdims=True)
# lo = mean - std
# hi = mean + std
lo = feats[:, :3].min(0, keepdims=True)
hi = feats[:, :3].max(0, keepdims=True)
assert hi.max() > 1, f"invalid color value. Color is supposed to be [0-255]"
scale = 255 / (hi - lo)
contrast_feats = (feats[:, :3] - lo) * scale
blend_factor = random.random() if self.randomize_blend_factor else self.blend_factor
      feats[:, :3] = (1 - blend_factor) * feats[:, :3] + blend_factor * contrast_feats
return coords, feats, labels
class ChromaticJitter(object):
def __init__(self, std=0.01):
self.std = std
def __call__(self, coords, feats, labels):
if random.random() < 0.95:
noise = np.random.randn(feats.shape[0], 3)
noise *= self.std * 255
feats[:, :3] = np.clip(noise + feats[:, :3], 0, 255)
return coords, feats, labels
class HueSaturationTranslation(object):
@staticmethod
def rgb_to_hsv(rgb):
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv
@staticmethod
def hsv_to_rgb(hsv):
# Translated from source of colorsys.hsv_to_rgb
# h,s should be a numpy arrays with values between 0.0 and 1.0
# v should be a numpy array with values between 0.0 and 255.0
# hsv_to_rgb returns an array of uints between 0 and 255.
rgb = np.empty_like(hsv)
rgb[..., 3:] = hsv[..., 3:]
h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
i = (h * 6.0).astype('uint8')
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
return rgb.astype('uint8')
def __init__(self, hue_max, saturation_max):
self.hue_max = hue_max
self.saturation_max = saturation_max
def __call__(self, coords, feats, labels):
# Assume feat[:, :3] is rgb
hsv = HueSaturationTranslation.rgb_to_hsv(feats[:, :3])
hue_val = (random.random() - 0.5) * 2 * self.hue_max
sat_ratio = 1 + (random.random() - 0.5) * 2 * self.saturation_max
hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1)
hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1)
feats[:, :3] = np.clip(HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255)
return coords, feats, labels
##############################
# Coordinate transformations
##############################
class RandomDropout(object):
def __init__(self, dropout_ratio=0.2, dropout_application_ratio=0.5):
"""
    dropout_ratio: fraction of points to drop when dropout is applied
    dropout_application_ratio: probability of applying the dropout at all
"""
self.dropout_ratio = dropout_ratio
self.dropout_application_ratio = dropout_application_ratio
def __call__(self, coords, feats, labels):
    if random.random() < self.dropout_application_ratio:
N = len(coords)
inds = np.random.choice(N, int(N * (1 - self.dropout_ratio)), replace=False)
return coords[inds], feats[inds], labels[inds]
return coords, feats, labels
class RandomHorizontalFlip(object):
def __init__(self, upright_axis, is_temporal):
"""
upright_axis: axis index among x,y,z, i.e. 2 for z
"""
self.is_temporal = is_temporal
self.D = 4 if is_temporal else 3
self.upright_axis = {'x': 0, 'y': 1, 'z': 2}[upright_axis.lower()]
# Use the rest of axes for flipping.
self.horz_axes = set(range(self.D)) - set([self.upright_axis])
def __call__(self, coords, feats, labels):
if random.random() < 0.95:
for curr_ax in self.horz_axes:
if random.random() < 0.5:
coord_max = np.max(coords[:, curr_ax])
coords[:, curr_ax] = coord_max - coords[:, curr_ax]
return coords, feats, labels
class ElasticDistortion:
def __init__(self, distortion_params):
self.distortion_params = distortion_params
def elastic_distortion(self, coords, feats, labels, granularity, magnitude):
"""Apply elastic distortion on sparse coordinate space.
pointcloud: numpy array of (number of points, at least 3 spatial dims)
granularity: size of the noise grid (in same scale[m/cm] as the voxel grid)
magnitude: noise multiplier
"""
blurx = np.ones((3, 1, 1, 1)).astype('float32') / 3
blury = np.ones((1, 3, 1, 1)).astype('float32') / 3
blurz = np.ones((1, 1, 3, 1)).astype('float32') / 3
coords_min = coords.min(0)
# Create Gaussian noise tensor of the size given by granularity.
noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3
noise = np.random.randn(*noise_dim, 3).astype(np.float32)
# Smoothing.
for _ in range(2):
noise = scipy.ndimage.filters.convolve(noise, blurx, mode='constant', cval=0)
noise = scipy.ndimage.filters.convolve(noise, blury, mode='constant', cval=0)
noise = scipy.ndimage.filters.convolve(noise, blurz, mode='constant', cval=0)
# Trilinear interpolate noise filters for each spatial dimensions.
ax = [
np.linspace(d_min, d_max, d)
for d_min, d_max, d in zip(coords_min - granularity, coords_min + granularity *
(noise_dim - 2), noise_dim)
]
interp = scipy.interpolate.RegularGridInterpolator(ax, noise, bounds_error=0, fill_value=0)
coords += interp(coords) * magnitude
return coords, feats, labels
def __call__(self, coords, feats, labels):
if self.distortion_params is not None:
if random.random() < 0.95:
for granularity, magnitude in self.distortion_params:
coords, feats, labels = self.elastic_distortion(coords, feats, labels, granularity,
magnitude)
return coords, feats, labels
class Compose(object):
"""Composes several transforms together."""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, *args):
for t in self.transforms:
args = t(*args)
return args
class cfl_collate_fn_factory:
"""Generates collate function for coords, feats, labels.
Args:
limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch
size so that the number of input coordinates is below limit_numpoints.
"""
def __init__(self, limit_numpoints):
self.limit_numpoints = limit_numpoints
def __call__(self, list_data):
coords, feats, labels = list(zip(*list_data))
coords_batch, feats_batch, labels_batch = [], [], []
batch_id = 0
batch_num_points = 0
for batch_id, _ in enumerate(coords):
num_points = coords[batch_id].shape[0]
batch_num_points += num_points
if self.limit_numpoints and batch_num_points > self.limit_numpoints:
num_full_points = sum(len(c) for c in coords)
num_full_batch_size = len(coords)
logging.warning(
f'\t\tCannot fit {num_full_points} points into {self.limit_numpoints} points '
            f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size}, keeping {batch_num_points - num_points} points.'
)
break
# coords_batch.append(
# torch.cat((torch.from_numpy(
# coords[batch_id]).int(), torch.ones(num_points, 1).int() * batch_id), 1))
coords_batch.append(
torch.cat((torch.ones(num_points, 1).int() * batch_id, torch.from_numpy(
coords[batch_id]).int()), 1))
feats_batch.append(torch.from_numpy(feats[batch_id]))
labels_batch.append(torch.from_numpy(labels[batch_id]).int())
batch_id += 1
# Concatenate all lists
coords_batch = torch.cat(coords_batch, 0).int()
feats_batch = torch.cat(feats_batch, 0).float()
labels_batch = torch.cat(labels_batch, 0).int()
return coords_batch, feats_batch, labels_batch
class cflt_collate_fn_factory:
"""Generates collate function for coords, feats, labels, point_clouds, transformations.
Args:
limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch
size so that the number of input coordinates is below limit_numpoints.
"""
def __init__(self, limit_numpoints):
self.limit_numpoints = limit_numpoints
def __call__(self, list_data):
coords, feats, labels, transformations = list(zip(*list_data))
cfl_collate_fn = cfl_collate_fn_factory(limit_numpoints=self.limit_numpoints)
coords_batch, feats_batch, labels_batch = cfl_collate_fn(list(zip(coords, feats, labels)))
    num_truncated_batch = coords_batch[:, 0].max().item() + 1  # batch index is stored in column 0
batch_id = 0
transformations_batch = []
for transformation in transformations:
if batch_id >= num_truncated_batch:
break
transformations_batch.append(torch.from_numpy(transformation).float())
batch_id += 1
transformations_batch = torch.stack(transformations_batch, 0)
return coords_batch, feats_batch, labels_batch, transformations_batch
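if __name__ == '__main__':
  # Minimal self-test sketch (not part of the original file): run the feature
  # transforms above on a synthetic point cloud and batch it with the collate
  # function. Shapes and value ranges are assumptions: coords (N, 3) float,
  # feats (N, 3) RGB in [0, 255], labels (N,) int.
  coords = np.random.rand(1000, 3) * 10
  feats = np.random.rand(1000, 3) * 255
  labels = np.zeros(1000, dtype=np.int32)
  aug = Compose([
      ChromaticAutoContrast(),
      ChromaticTranslation(0.1),
      ChromaticJitter(0.05),
      RandomDropout(0.2),
  ])
  coords, feats, labels = aug(coords, feats, labels)
  collate = cfl_collate_fn_factory(limit_numpoints=0)
  coords_b, feats_b, labels_b = collate([(coords, feats, labels), (coords, feats, labels)])
  # The batch index is prepended as column 0 of the coordinate tensor.
  print(coords_b.shape, feats_b.shape, labels_b.shape)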
| ContrastiveSceneContexts-main | downstream/semseg/datasets/transforms.py |
#from lib.datasets import synthia
#from lib.datasets import shapenet
from datasets import stanford
from datasets import scannet
DATASETS = []
def add_datasets(module):
DATASETS.extend([getattr(module, a) for a in dir(module) if 'Dataset' in a])
add_datasets(stanford)
#add_datasets(synthia)
add_datasets(scannet)
#add_datasets(shapenet)
def load_dataset(name):
'''Creates and returns an instance of the datasets given its name.
'''
# Find the model class from its name
mdict = {dataset.__name__: dataset for dataset in DATASETS}
if name not in mdict:
    print('Invalid dataset name. Options are:')
# Display a list of valid dataset names
for dataset in DATASETS:
print('\t* {}'.format(dataset.__name__))
raise ValueError(f'Dataset {name} not defined')
DatasetClass = mdict[name]
return DatasetClass
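# Usage sketch (the class names are the ones registered above via add_datasets):
#   DatasetClass = load_dataset('ScannetVoxelization2cmDataset')
#   train_set = DatasetClass(config, phase='train')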
| ContrastiveSceneContexts-main | downstream/semseg/datasets/__init__.py |
import logging
import unittest
import imageio
import os
import os.path as osp
import pickle
import numpy as np
from collections import defaultdict
from plyfile import PlyData
from lib.pc_utils import Camera, read_plyfile
from lib.dataset import DictDataset, VoxelizationDataset, TemporalVoxelizationDataset, \
str2datasetphase_type, DatasetPhase
from lib.transforms import cfl_collate_fn_factory
from lib.utils import read_txt, debug_on
class SynthiaDataset(DictDataset):
NUM_LABELS = 16
def __init__(self, data_path_file, input_transform=None, target_transform=None):
with open(data_path_file, 'r') as f:
data_paths = pickle.load(f)
super(SynthiaDataset, self).__init__(data_paths, input_transform, target_transform)
@staticmethod
def load_extrinsics(extrinsics_file):
"""Load the camera extrinsics from a .txt file.
"""
lines = read_txt(extrinsics_file)
params = [float(x) for x in lines[0].split(' ')]
extrinsics_matrix = np.asarray(params).reshape([4, 4])
return extrinsics_matrix
@staticmethod
def load_intrinsics(intrinsics_file):
"""Load the camera intrinsics from a intrinsics.txt file.
intrinsics.txt: a text file containing 4 values that represent (in this order) {focal length,
principal-point-x, principal-point-y, baseline (m) with the corresponding right
camera}
"""
lines = read_txt(intrinsics_file)
assert len(lines) == 7
intrinsics = {
'focal_length': float(lines[0]),
'pp_x': float(lines[2]),
'pp_y': float(lines[4]),
'baseline': float(lines[6]),
}
return intrinsics
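  # Illustrative note (not used elsewhere in this file): with these values, a
  # depth pixel (u, v, d) back-projects to camera coordinates via the usual
  # pinhole model, e.g.
  #   x = (u - pp_x) * d / focal_length
  #   y = (v - pp_y) * d / focal_length
  #   z = d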
@staticmethod
def load_depth(depth_file):
"""Read a single depth map (.png) file.
1280x760
760 rows, 1280 columns.
Depth is encoded in any of the 3 channels in centimetres as an ushort.
"""
img = np.asarray(imageio.imread(depth_file, format='PNG-FI')) # uint16
img = img.astype(np.int32) # Convert to int32 for torch compatibility
return img
@staticmethod
def load_label(label_file):
"""Load the ground truth semantic segmentation label.
Annotations are given in two channels. The first channel contains the class of that pixel
(see the table below). The second channel contains the unique ID of the instance for those
objects that are dynamic (cars, pedestrians, etc.).
Class R G B ID
Void 0 0 0 0
Sky 128 128 128 1
Building 128 0 0 2
Road 128 64 128 3
Sidewalk 0 0 192 4
Fence 64 64 128 5
Vegetation 128 128 0 6
Pole 192 192 128 7
Car 64 0 128 8
Traffic Sign 192 128 128 9
Pedestrian 64 64 0 10
Bicycle 0 128 192 11
Lanemarking 0 172 0 12
Reserved - - - 13
Reserved - - - 14
Traffic Light 0 128 128 15
"""
img = np.asarray(imageio.imread(label_file, format='PNG-FI')) # uint16
img = img.astype(np.int32) # Convert to int32 for torch compatibility
return img
@staticmethod
def load_rgb(rgb_file):
"""Load RGB images. 1280x760 RGB images used for training.
760 rows, 1280 columns.
"""
img = np.array(imageio.imread(rgb_file)) # uint8
return img
class SynthiaVoxelizationDataset(VoxelizationDataset):
"""Load the ground truth semantic segmentation label.
Annotations are given in two channels. The first channel contains the class of that pixel
(see the table below). The second channel contains the unique ID of the instance for those
objects that are dynamic (cars, pedestrians, etc.).
Class R G B ID
Void 0 0 0 0
Sky 128 128 128 1
Building 128 0 0 2
Road 128 64 128 3
Sidewalk 0 0 192 4
Fence 64 64 128 5
Vegetation 128 128 0 6
Pole 192 192 128 7
Car 64 0 128 8
Traffic Sign 192 128 128 9
Pedestrian 64 64 0 10
Bicycle 0 128 192 11
Lanemarking 0 172 0 12
Reserved - - - 13
Reserved - - - 14
Traffic Light 0 128 128 15
"""
CLASS_LABELS = ('building', 'road', 'sidewalk', 'fence', 'vegetation', 'pole', 'car',
'sign', 'pedestrian', 'cyclist', 'lanemarking', 'traffic light')
VALID_CLASS_IDS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15)
# Voxelization arguments
CLIP_BOUND = ((-1800, 1800), (-1800, 1800), (-1800, 1800))
TEST_CLIP_BOUND = ((-2500, 2500), (-2500, 2500), (-2500, 2500))
VOXEL_SIZE = 15 # cm
PREVOXELIZATION_VOXEL_SIZE = 7.5
# Elastic distortion, (granularity, magitude) pairs
# ELASTIC_DISTORT_PARAMS = ((80, 300),)
# Augmentation arguments
ROTATION_AUGMENTATION_BOUND = ((0, 0), (-np.pi, np.pi), (0, 0))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.1, 0.1), (0, 0), (-0.1, 0.1))
ROTATION_AXIS = 'y'
LOCFEAT_IDX = 1
NUM_LABELS = 16 # Automatically subtract ignore labels after processed
IGNORE_LABELS = (0, 1, 13, 14) # void, sky, reserved, reserved
# Split used in the Minkowski ConvNet, CVPR'19
DATA_PATH_FILE = {
DatasetPhase.Train: 'train_cvpr19.txt',
DatasetPhase.Val: 'val_cvpr19.txt',
DatasetPhase.Test: 'test_cvpr19.txt'
}
def __init__(self,
config,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
augment_data=True,
elastic_distortion=False,
cache=False,
phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:
self.CLIP_BOUND = self.TEST_CLIP_BOUND
data_root = config.data.synthia_path
data_paths = read_txt(osp.join('/checkpoint/jihou/data/synthia4d/splits', self.DATA_PATH_FILE[phase]))
if phase == DatasetPhase.Train:
data_paths = data_paths[:int(len(data_paths)*config.data.data_ratio)]
data_paths = [d.split()[0] for d in data_paths]
logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))
super().__init__(
data_paths,
data_root=data_root,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config.data.ignore_label,
return_transformation=config.data.return_transformation,
augment_data=augment_data,
elastic_distortion=elastic_distortion,
config=config)
def load_data(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['r'], data['g'], data['b']], dtype=np.float32).T
labels = np.array(data['l'], dtype=np.int32)
instances = np.zeros_like(labels)
return coords, feats, labels, instances
class SynthiaCVPR15cmVoxelizationDataset(SynthiaVoxelizationDataset):
pass
class SynthiaCVPR30cmVoxelizationDataset(SynthiaVoxelizationDataset):
VOXEL_SIZE = 30
class SynthiaAllSequencesVoxelizationDataset(SynthiaVoxelizationDataset):
DATA_PATH_FILE = {
DatasetPhase.Train: 'train_raw.txt',
DatasetPhase.Val: 'val_raw.txt',
DatasetPhase.Test: 'test_raw.txt'
}
class TestSynthia(unittest.TestCase):
@debug_on()
def test(self):
from torch.utils.data import DataLoader
from lib.utils import Timer
from config import get_config
config = get_config()
dataset = SynthiaVoxelizationDataset(config)
timer = Timer()
data_loader = DataLoader(
dataset=dataset,
collate_fn=cfl_collate_fn_factory(limit_numpoints=False),
num_workers=0,
batch_size=4,
shuffle=True)
# Start from index 1
# for i, batch in enumerate(data_loader, 1):
iter = data_loader.__iter__()
for i in range(100):
timer.tic()
batch = iter.next()
print(batch, timer.toc())
if __name__ == '__main__':
unittest.main()
| ContrastiveSceneContexts-main | downstream/semseg/datasets/synthia.py |
from abc import ABC
from pathlib import Path
from collections import defaultdict
import random
import numpy as np
from enum import Enum
import torch
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
from plyfile import PlyData
import datasets.transforms as t
from datasets.dataloader import InfSampler, DistributedInfSampler
from datasets.voxelizer import Voxelizer
from lib.distributed import get_world_size
class DatasetPhase(Enum):
Train = 0
Val = 1
Val2 = 2
TrainVal = 3
Test = 4
Debug = 5
def datasetphase_2str(arg):
if arg == DatasetPhase.Train:
return 'train'
elif arg == DatasetPhase.Val:
return 'val'
elif arg == DatasetPhase.Val2:
return 'val2'
elif arg == DatasetPhase.TrainVal:
return 'trainval'
elif arg == DatasetPhase.Test:
return 'test'
elif arg == DatasetPhase.Debug:
return 'debug'
else:
raise ValueError('phase must be one of dataset enum.')
def str2datasetphase_type(arg):
if arg.upper() == 'TRAIN':
return DatasetPhase.Train
elif arg.upper() == 'VAL':
return DatasetPhase.Val
elif arg.upper() == 'VAL2':
return DatasetPhase.Val2
elif arg.upper() == 'TRAINVAL':
return DatasetPhase.TrainVal
elif arg.upper() == 'TEST':
return DatasetPhase.Test
elif arg.upper() == 'DEBUG':
return DatasetPhase.Debug
else:
raise ValueError('phase must be one of train/val/test')
def cache(func):
def wrapper(self, *args, **kwargs):
# Assume that args[0] is index
index = args[0]
if self.cache:
if index not in self.cache_dict[func.__name__]:
results = func(self, *args, **kwargs)
self.cache_dict[func.__name__][index] = results
return self.cache_dict[func.__name__][index]
else:
return func(self, *args, **kwargs)
return wrapper
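# The decorator above memoizes per-sample loads when the dataset instance sets
# self.cache to a truthy value: results are keyed by method name and sample
# index in self.cache_dict, so a subclass method written as
#
#   @cache
#   def load_ply(self, index):
#       ...
#
# re-reads a given file from disk only once per index (see StanfordDataset).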
class DictDataset(Dataset, ABC):
IS_FULL_POINTCLOUD_EVAL = False
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
data_root='/'):
"""
data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]]
"""
Dataset.__init__(self)
# Allows easier path concatenation
if not isinstance(data_root, Path):
data_root = Path(data_root)
self.data_root = data_root
self.data_paths = sorted(data_paths)
self.prevoxel_transform = prevoxel_transform
self.input_transform = input_transform
self.target_transform = target_transform
# dictionary of input
self.data_loader_dict = {
'input': (self.load_input, self.input_transform),
'target': (self.load_target, self.target_transform)
}
# For large dataset, do not cache
self.cache = cache
self.cache_dict = defaultdict(dict)
self.loading_key_order = ['input', 'target']
def load_input(self, index):
raise NotImplementedError
def load_target(self, index):
raise NotImplementedError
def get_classnames(self):
pass
def reorder_result(self, result):
return result
def __getitem__(self, index):
out_array = []
for k in self.loading_key_order:
loader, transformer = self.data_loader_dict[k]
v = loader(index)
if transformer:
v = transformer(v)
out_array.append(v)
return out_array
def __len__(self):
return len(self.data_paths)
class VoxelizationDatasetBase(DictDataset, ABC):
IS_TEMPORAL = False
CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000)
ROTATION_AXIS = None
NUM_IN_CHANNEL = None
NUM_LABELS = -1 # Number of labels in the dataset, including all ignore classes
IGNORE_LABELS = None # labels that are not evaluated
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
data_root='/',
ignore_mask=255,
return_transformation=False,
**kwargs):
"""
ignore_mask: label value for ignore class. It will not be used as a class in the loss or evaluation.
"""
DictDataset.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
cache=cache,
data_root=data_root)
self.ignore_mask = ignore_mask
self.return_transformation = return_transformation
def __getitem__(self, index):
raise NotImplementedError
def load_ply(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T
labels = np.array(data['label'], dtype=np.int32)
return coords, feats, labels, None
def load_data(self, index):
raise NotImplementedError
def __len__(self):
num_data = len(self.data_paths)
return num_data
class VoxelizationDataset(VoxelizationDatasetBase):
"""This dataset loads RGB point clouds and their labels as a list of points
and voxelizes the pointcloud with sufficient data augmentation.
"""
# Voxelization arguments
VOXEL_SIZE = 0.05 # 5cm
# Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate
# augmentation has to be done before voxelization
SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2))
ELASTIC_DISTORT_PARAMS = None
# MISC.
PREVOXELIZATION_VOXEL_SIZE = None
# Augment coords to feats
AUGMENT_COORDS_TO_FEATS = False
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
data_root='/',
ignore_label=255,
return_transformation=False,
augment_data=False,
               config=None,
               cache=False,
               **kwargs):
self.augment_data = augment_data
self.config = config
VoxelizationDatasetBase.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
cache=cache,
data_root=data_root,
ignore_mask=ignore_label,
return_transformation=return_transformation)
# Prevoxel transformations
self.voxelizer = Voxelizer(
voxel_size=self.VOXEL_SIZE,
clip_bound=self.CLIP_BOUND,
use_augmentation=augment_data,
scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
ignore_label=ignore_label)
# map labels not evaluated to ignore_label
label_map = {}
n_used = 0
for l in range(self.NUM_LABELS):
if l in self.IGNORE_LABELS:
label_map[l] = self.ignore_mask
else:
label_map[l] = n_used
n_used += 1
label_map[self.ignore_mask] = self.ignore_mask
self.label_map = label_map
self.NUM_LABELS -= len(self.IGNORE_LABELS)
def _augment_coords_to_feats(self, coords, feats, labels=None):
norm_coords = coords - coords.mean(0)
# color must come first.
if isinstance(coords, np.ndarray):
feats = np.concatenate((feats, norm_coords), 1)
else:
feats = torch.cat((feats, norm_coords), 1)
return coords, feats, labels
def convert_mat2cfl(self, mat):
# Generally, xyz,rgb,label
return mat[:, :3], mat[:, 3:-1], mat[:, -1]
def get_instance_info(self, xyz, instance_ids):
'''
:param xyz: (n, 3)
:param instance_ids: (n), int, (1~nInst, -1)
:return: instance_num, dict
'''
    centers = np.ones((xyz.shape[0], 3), dtype=np.float32) * -1  # (n, 3), float: per-point instance center (cx, cy, cz); -1 for points without an instance
occupancy = {} # (nInst), int
bbox = {}
unique_ids = np.unique(instance_ids)
for id_ in unique_ids:
if id_ == -1:
continue
mask = (instance_ids == id_)
xyz_ = xyz[mask]
bbox_min = xyz_.min(0)
bbox_max = xyz_.max(0)
center = xyz_.mean(0)
centers[mask] = center
occupancy[id_] = mask.sum()
bbox[id_] = np.concatenate([bbox_min, bbox_max])
return {"ids": instance_ids, "center": centers, "occupancy": occupancy, "bbox": bbox}
def __getitem__(self, index):
coords, feats, labels = self.load_data(index)
# Downsample the pointcloud with finer voxel size before transformation for memory and speed
if self.PREVOXELIZATION_VOXEL_SIZE is not None:
inds = ME.utils.sparse_quantize(
coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
coords = coords[inds]
feats = feats[inds]
labels = labels[inds]
# Prevoxel transformations
if self.prevoxel_transform is not None:
coords, feats, labels = self.prevoxel_transform(coords, feats, labels)
coords, feats, labels, transformation = self.voxelizer.voxelize(
coords, feats, labels)
# map labels not used for evaluation to ignore_label
if self.input_transform is not None:
coords, feats, labels = self.input_transform(coords, feats, labels)
if self.target_transform is not None:
coords, feats, labels = self.target_transform(coords, feats, labels)
if self.augment_data:
# For some networks, making the network invariant to even, odd coords is important
coords += (torch.rand(3) * 100).int().numpy()
# ------------- label mapping --------------------
if self.IGNORE_LABELS is not None:
labels = np.array([self.label_map[x] for x in labels], dtype=np.int)
# Use coordinate features if config is set
if self.AUGMENT_COORDS_TO_FEATS:
coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels)
return_args = [coords, feats, labels]
if self.return_transformation:
return_args.append(transformation.astype(np.float32))
return tuple(return_args)
def initialize_data_loader(DatasetClass,
config,
phase,
num_workers,
shuffle,
repeat,
augment_data,
batch_size,
limit_numpoints,
input_transform=None,
target_transform=None):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if config.data.return_transformation:
collate_fn = t.cflt_collate_fn_factory(limit_numpoints)
else:
collate_fn = t.cfl_collate_fn_factory(limit_numpoints)
prevoxel_transform_train = []
if augment_data:
prevoxel_transform_train.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS))
if len(prevoxel_transform_train) > 0:
prevoxel_transforms = t.Compose(prevoxel_transform_train)
else:
prevoxel_transforms = None
input_transforms = []
if input_transform is not None:
input_transforms += input_transform
if augment_data:
input_transforms += [
t.RandomDropout(0.2),
t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL),
t.ChromaticAutoContrast(),
t.ChromaticTranslation(config.augmentation.data_aug_color_trans_ratio),
t.ChromaticJitter(config.augmentation.data_aug_color_jitter_std),
# t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
]
if len(input_transforms) > 0:
input_transforms = t.Compose(input_transforms)
else:
input_transforms = None
dataset = DatasetClass(
config,
prevoxel_transform=prevoxel_transforms,
input_transform=input_transforms,
target_transform=target_transform,
cache=config.data.cache_data,
augment_data=augment_data,
phase=phase)
data_args = {
'dataset': dataset,
'num_workers': num_workers,
'batch_size': batch_size,
'collate_fn': collate_fn,
}
if repeat:
if get_world_size() > 1:
data_args['sampler'] = DistributedInfSampler(dataset, shuffle=shuffle) # torch.utils.data.distributed.DistributedSampler(dataset)
else:
data_args['sampler'] = InfSampler(dataset, shuffle)
else:
data_args['shuffle'] = shuffle
data_loader = DataLoader(**data_args)
return data_loader
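# Wiring sketch (not part of the original file; the config fields are the ones
# read above, e.g. config.data.return_transformation and config.augmentation.*):
#
#   from datasets import load_dataset
#   DatasetClass = load_dataset('ScannetVoxelization2cmDataset')
#   train_loader = initialize_data_loader(
#       DatasetClass, config, phase='train', num_workers=4, shuffle=True,
#       repeat=True, augment_data=True, batch_size=8, limit_numpoints=1200000)
#   coords, feats, labels = next(iter(train_loader))  # coords column 0 is the batch index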
| ContrastiveSceneContexts-main | downstream/semseg/datasets/dataset.py |
import logging
import os
import sys
import numpy as np
from collections import defaultdict
from scipy import spatial
import torch
from plyfile import PlyData
from lib.utils import read_txt, fast_hist, per_class_iu
from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type, cache
import datasets.transforms as t
class StanfordVoxelizationDatasetBase:
# added
NUM_LABELS = 14
CLASS_LABELS = ('clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column',
'door', 'floor', 'sofa', 'table', 'wall', 'window')
VALID_CLASS_IDS = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13)
IGNORE_LABELS = tuple(set(range(14)) - set(VALID_CLASS_IDS))
CLASS_LABELS_INSTANCE = ('clutter', 'beam', 'board', 'bookcase', 'chair', 'column', 'door', 'sofa', 'table', 'window')
VALID_CLASS_IDS_INSTANCE = (0, 1, 2, 3, 5, 6, 7, 9, 11, 13)
IGNORE_LABELS_INSTANCE = tuple(set(range(14)) - set(VALID_CLASS_IDS_INSTANCE))
#---------
CLIP_SIZE = None
CLIP_BOUND = None
LOCFEAT_IDX = 2
ROTATION_AXIS = 'z'
#IGNORE_LABELS = (10,) # remove stairs, following SegCloud
# CLASSES = [
# 'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa',
# 'table', 'wall', 'window'
# ]
IS_FULL_POINTCLOUD_EVAL = True
DATA_PATH_FILE = {
DatasetPhase.Train: 'train.txt',
DatasetPhase.Val: 'val.txt',
DatasetPhase.TrainVal: 'trainval.txt',
DatasetPhase.Test: 'test.txt'
}
def test_pointcloud(self, pred_dir):
print('Running full pointcloud evaluation.')
# Join room by their area and room id.
room_dict = defaultdict(list)
for i, data_path in enumerate(self.data_paths):
area, room = data_path.split(os.sep)
room, _ = os.path.splitext(room)
room_id = '_'.join(room.split('_')[:-1])
room_dict[(area, room_id)].append(i)
# Test independently for each room.
sys.setrecursionlimit(100000) # Increase recursion limit for k-d tree.
pred_list = sorted(os.listdir(pred_dir))
hist = np.zeros((self.NUM_LABELS, self.NUM_LABELS))
for room_idx, room_list in enumerate(room_dict.values()):
print(f'Evaluating room {room_idx} / {len(room_dict)}.')
# Join all predictions and query pointclouds of split data.
pred = np.zeros((0, 4))
pointcloud = np.zeros((0, 7))
for i in room_list:
pred = np.vstack((pred, np.load(os.path.join(pred_dir, pred_list[i]))))
pointcloud = np.vstack((pointcloud, self.load_ply(i)[0]))
# Deduplicate all query pointclouds of split data.
pointcloud = np.array(list(set(tuple(l) for l in pointcloud.tolist())))
# Run test for each room.
pred_tree = spatial.KDTree(pred[:, :3], leafsize=500)
_, result = pred_tree.query(pointcloud[:, :3])
ptc_pred = pred[result, 3].astype(int)
ptc_gt = pointcloud[:, -1].astype(int)
if self.IGNORE_LABELS:
ptc_pred = self.label2masked[ptc_pred]
ptc_gt = self.label2masked[ptc_gt]
hist += fast_hist(ptc_pred, ptc_gt, self.NUM_LABELS)
# Print results.
ious = []
print('Per class IoU:')
for i, iou in enumerate(per_class_iu(hist) * 100):
result_str = ''
if hist.sum(1)[i]:
result_str += f'{iou}'
ious.append(iou)
else:
result_str += 'N/A' # Do not print if data not in ground truth.
print(result_str)
print(f'Average IoU: {np.nanmean(ious)}')
def _augment_coords_to_feats(self, coords, feats, labels=None):
# Center x,y
coords_center = coords.mean(0, keepdims=True)
coords_center[0, 2] = 0
norm_coords = coords - coords_center
feats = np.concatenate((feats, norm_coords), 1)
return coords, feats, labels
class StanfordDataset(StanfordVoxelizationDatasetBase, VoxelizationDataset):
# Voxelization arguments
VOXEL_SIZE = 0.05 # 5cm
CLIP_BOUND = 4 # [-N, N]
TEST_CLIP_BOUND = None
# Augmentation arguments
ROTATION_AUGMENTATION_BOUND = \
((-np.pi / 32, np.pi / 32), (-np.pi / 32, np.pi / 32), (-np.pi, np.pi))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (-0.05, 0.05))
# AUGMENT_COORDS_TO_FEATS = True
# NUM_IN_CHANNEL = 6
AUGMENT_COORDS_TO_FEATS = False
NUM_IN_CHANNEL = 3
def __init__(self,
config,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
augment_data=True,
elastic_distortion=False,
phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:
self.CLIP_BOUND = self.TEST_CLIP_BOUND
data_root = config.data.stanford3d_path
if isinstance(self.DATA_PATH_FILE[phase], (list, tuple)):
data_paths = []
for split in self.DATA_PATH_FILE[phase]:
data_paths += read_txt(os.path.join(data_root, 'splits', split))
else:
data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase]))
if config.data.voxel_size:
self.VOXEL_SIZE = config.data.voxel_size
logging.info('voxel size: {}'.format(self.VOXEL_SIZE))
logging.info('Loading {} {}: {}'.format(self.__class__.__name__, phase,
self.DATA_PATH_FILE[phase]))
VoxelizationDataset.__init__(
self,
data_paths,
data_root=data_root,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config.data.ignore_label,
return_transformation=config.data.return_transformation,
augment_data=augment_data,
elastic_distortion=elastic_distortion,
config=config)
@cache
def load_ply(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T
labels = np.array(data['label'], dtype=np.int32)
return coords, feats, labels, None
@cache
def load_data(self, index):
filepath = self.data_root / self.data_paths[index]
pointcloud = torch.load(filepath)
coords = pointcloud[:,:3].astype(np.float32)
feats = pointcloud[:,3:6].astype(np.float32)
labels = pointcloud[:,6].astype(np.int32)
return coords, feats, labels
class StanfordArea5Dataset(StanfordDataset):
DATA_PATH_FILE = {
DatasetPhase.Train: ['area1.txt', 'area2.txt', 'area3.txt', 'area4.txt', 'area6.txt'],
DatasetPhase.Val: 'area5.txt',
DatasetPhase.Test: 'area5.txt'
}
class StanfordArea53cmDataset(StanfordArea5Dataset):
CLIP_BOUND = 3.2
VOXEL_SIZE = 0.03
class StanfordArea57d5cmDataset(StanfordArea5Dataset):
VOXEL_SIZE = 0.075
class StanfordArea510cmDataset(StanfordArea5Dataset):
VOXEL_SIZE = 0.1
def test(config):
"""Test point cloud data loader.
"""
from torch.utils.data import DataLoader
from lib.utils import Timer
import open3d as o3d
def make_pcd(coords, feats):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(coords[:, :3].float().numpy())
pcd.colors = o3d.utility.Vector3dVector(feats[:, :3].numpy() / 255)
return pcd
timer = Timer()
DatasetClass = StanfordArea5Dataset
transformations = [
t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL),
t.ChromaticAutoContrast(),
t.ChromaticTranslation(config.data_aug_color_trans_ratio),
t.ChromaticJitter(config.data_aug_color_jitter_std),
]
dataset = DatasetClass(
config,
prevoxel_transform=t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS),
input_transform=t.Compose(transformations),
augment_data=True,
cache=True,
elastic_distortion=True)
data_loader = DataLoader(
dataset=dataset,
collate_fn=t.cfl_collate_fn_factory(limit_numpoints=False),
batch_size=1,
shuffle=True)
# Start from index 1
iter = data_loader.__iter__()
for i in range(100):
timer.tic()
coords, feats, labels = iter.next()
pcd = make_pcd(coords, feats)
o3d.visualization.draw_geometries([pcd])
print(timer.toc())
if __name__ == '__main__':
from config import get_config
config = get_config()
test(config)
| ContrastiveSceneContexts-main | downstream/semseg/datasets/stanford.py |
import collections
import numpy as np
import MinkowskiEngine as ME
from scipy.linalg import expm, norm
# Rotation matrix along axis with angle theta
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
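# Sanity-check sketch (not in the original file): expm of the skew-symmetric
# matrix of axis * theta is the Rodrigues rotation matrix, so a 90 degree
# rotation about z maps the x axis onto the y axis.
#   R = M(np.array([0., 0., 1.]), np.pi / 2)
#   np.allclose(R @ np.array([1., 0., 0.]), [0., 1., 0.])  # -> True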
class Voxelizer:
def __init__(self,
voxel_size=1,
clip_bound=None,
use_augmentation=False,
scale_augmentation_bound=None,
rotation_augmentation_bound=None,
translation_augmentation_ratio_bound=None,
ignore_label=255):
"""
Args:
voxel_size: side length of a voxel
clip_bound: boundary of the voxelizer. Points outside the bound will be deleted
expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)).
scale_augmentation_bound: None or (0.9, 1.1)
rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis.
Use random order of x, y, z to prevent bias.
translation_augmentation_bound: ((-5, 5), (0, 0), (-10, 10))
ignore_label: label assigned for ignore (not a training label).
"""
self.voxel_size = voxel_size
self.clip_bound = clip_bound
self.ignore_label = ignore_label
# Augmentation
self.use_augmentation = use_augmentation
self.scale_augmentation_bound = scale_augmentation_bound
self.rotation_augmentation_bound = rotation_augmentation_bound
self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound
def get_transformation_matrix(self):
voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4)
# Get clip boundary from config or pointcloud.
# Get inner clip bound to crop from.
# Transform pointcloud coordinate to voxel coordinate.
# 1. Random rotation
rot_mat = np.eye(3)
if self.use_augmentation and self.rotation_augmentation_bound is not None:
if isinstance(self.rotation_augmentation_bound, collections.Iterable):
rot_mats = []
for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound):
theta = 0
axis = np.zeros(3)
axis[axis_ind] = 1
if rot_bound is not None:
theta = np.random.uniform(*rot_bound)
rot_mats.append(M(axis, theta))
# Use random order
np.random.shuffle(rot_mats)
rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]
else:
raise ValueError()
rotation_matrix[:3, :3] = rot_mat
# 2. Scale and translate to the voxel space.
scale = 1 / self.voxel_size
if self.use_augmentation and self.scale_augmentation_bound is not None:
scale *= np.random.uniform(*self.scale_augmentation_bound)
np.fill_diagonal(voxelization_matrix[:3, :3], scale)
# Get final transformation matrix.
return voxelization_matrix, rotation_matrix
def clip(self, coords, center=None, trans_aug_ratio=None):
bound_min = np.min(coords, 0).astype(float)
bound_max = np.max(coords, 0).astype(float)
bound_size = bound_max - bound_min
if center is None:
center = bound_min + bound_size * 0.5
if trans_aug_ratio is not None:
trans = np.multiply(trans_aug_ratio, bound_size)
center += trans
lim = self.clip_bound
if isinstance(self.clip_bound, (int, float)):
if bound_size.max() < self.clip_bound:
return None
else:
clip_inds = ((coords[:, 0] >= (-lim + center[0])) & \
(coords[:, 0] < (lim + center[0])) & \
(coords[:, 1] >= (-lim + center[1])) & \
(coords[:, 1] < (lim + center[1])) & \
(coords[:, 2] >= (-lim + center[2])) & \
(coords[:, 2] < (lim + center[2])))
return clip_inds
# Clip points outside the limit
clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) & \
(coords[:, 0] < (lim[0][1] + center[0])) & \
(coords[:, 1] >= (lim[1][0] + center[1])) & \
(coords[:, 1] < (lim[1][1] + center[1])) & \
(coords[:, 2] >= (lim[2][0] + center[2])) & \
(coords[:, 2] < (lim[2][1] + center[2])))
return clip_inds
def voxelize(self, coords, feats, labels, center=None):
assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0]
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3])
# Align all coordinates to the origin.
min_coords = coords_aug.min(0)
M_t = np.eye(4)
M_t[:3, -1] = -min_coords
rigid_transformation = M_t @ rigid_transformation
coords_aug = np.floor(coords_aug - min_coords)
# key = self.hash(coords_aug) # floor happens by astype(np.uint64)
mapping, colabels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, return_index=True, ignore_label=self.ignore_label)
coords_aug = coords_aug[mapping]
feats = feats[mapping]
labels = colabels
return coords_aug, feats, labels, rigid_transformation.flatten()
def voxelize_temporal(self,
coords_t,
feats_t,
labels_t,
centers=None,
return_transformation=False):
# Legacy code, remove
if centers is None:
centers = [
None,
] * len(coords_t)
coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], []
# ######################### Data Augmentation #############################
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
# ######################### Voxelization #############################
# Voxelize coords
for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers):
###################################
# Clip the data if bound exists
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
###################################
homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3]
coords_aug, feats, labels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, ignore_label=self.ignore_label)
coords_tc.append(coords_aug)
feats_tc.append(feats)
labels_tc.append(labels)
transformation_tc.append(rigid_transformation.flatten())
return_args = [coords_tc, feats_tc, labels_tc]
if return_transformation:
return_args.append(transformation_tc)
return tuple(return_args)
def test():
N = 16575
coords = np.random.rand(N, 3) * 10
feats = np.random.rand(N, 4)
labels = np.floor(np.random.rand(N) * 3)
coords[:3] = 0
labels[:3] = 2
voxelizer = Voxelizer()
print(voxelizer.voxelize(coords, feats, labels))
if __name__ == '__main__':
test()
| ContrastiveSceneContexts-main | downstream/semseg/datasets/voxelizer.py |
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=False):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
perm = len(self.data_source)
if self.shuffle:
perm = torch.randperm(perm)
self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
next = __next__ # Python 2 compatibility
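# Usage note (sketch): wrapping a dataset with this sampler makes the DataLoader
# iterate indefinitely, e.g.
#   loader = DataLoader(dataset, batch_size=8, sampler=InfSampler(dataset, shuffle=True))
# so the number of iterations is bounded by the training loop rather than by
# epochs; len(sampler) still reports the dataset size.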
class DistributedInfSampler(InfSampler):
def __init__(self, data_source, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.data_source = data_source
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.it = 0
self.num_samples = int(math.ceil(len(self.data_source) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.reset_permutation()
def __next__(self):
it = self.it * self.num_replicas + self.rank
value = self._perm[it % len(self._perm)]
self.it = self.it + 1
if (self.it * self.num_replicas) >= len(self._perm):
self.reset_permutation()
self.it = 0
return value
def __len__(self):
    return self.num_samples
| ContrastiveSceneContexts-main | downstream/semseg/datasets/dataloader.py
import logging
import os
import sys
from pathlib import Path
import torch
import numpy as np
from scipy import spatial
from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type
from lib.pc_utils import read_plyfile, save_point_cloud
from lib.utils import read_txt, fast_hist, per_class_iu
from lib.io3d import write_triangle_mesh, create_color_palette
class ScannetVoxelizationDataset(VoxelizationDataset):
# added
NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS.
NUM_IN_CHANNEL = 3
CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')
VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)
IGNORE_LABELS = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS))
CLASS_LABELS_INSTANCE = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter',
'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS_INSTANCE = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
IGNORE_LABELS_INSTANCE = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS_INSTANCE))
# Voxelization arguments
CLIP_BOUND = None
TEST_CLIP_BOUND = None
VOXEL_SIZE = 0.05
# Augmentation arguments
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi,
np.pi))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0))
ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))
ROTATION_AXIS = 'z'
LOCFEAT_IDX = 2
IS_FULL_POINTCLOUD_EVAL = True
# If trainval.txt does not exist, copy train.txt and add contents from val.txt
DATA_PATH_FILE = {
DatasetPhase.Train: 'scannetv2_train.txt',
DatasetPhase.Val: 'scannetv2_val.txt',
DatasetPhase.TrainVal: 'scannetv2_trainval.txt',
DatasetPhase.Test: 'scannetv2_test.txt',
}
def __init__(self,
config,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
augment_data=True,
elastic_distortion=False,
cache=False,
phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
# Use cropped rooms for train/val
data_root = config.data.scannet_path
if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:
self.CLIP_BOUND = self.TEST_CLIP_BOUND
data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase]))
if phase == DatasetPhase.Train and config.data.train_file:
data_paths = read_txt(config.data.train_file)
# data efficiency by sampling points
self.sampled_inds = {}
if config.data.sampled_inds and phase == DatasetPhase.Train:
self.sampled_inds = torch.load(config.data.sampled_inds)
data_paths = [data_path + '.pth' for data_path in data_paths]
logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))
super().__init__(
data_paths,
data_root=data_root,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config.data.ignore_label,
return_transformation=config.data.return_transformation,
augment_data=augment_data,
elastic_distortion=elastic_distortion,
config=config)
def get_output_id(self, iteration):
return '_'.join(Path(self.data_paths[iteration]).stem.split('_')[:2])
def _augment_locfeat(self, pointcloud):
# Assuming that pointcloud is xyzrgb(...), append location feat.
pointcloud = np.hstack(
(pointcloud[:, :6], 100 * np.expand_dims(pointcloud[:, self.LOCFEAT_IDX], 1),
pointcloud[:, 6:]))
return pointcloud
def load_data(self, index):
filepath = self.data_root / self.data_paths[index]
pointcloud = torch.load(filepath)
coords = pointcloud[0].astype(np.float32)
feats = pointcloud[1].astype(np.float32)
labels = pointcloud[2].astype(np.int32)
if self.sampled_inds:
scene_name = self.get_output_id(index)
mask = np.ones_like(labels).astype(np.bool)
sampled_inds = self.sampled_inds[scene_name]
mask[sampled_inds] = False
labels[mask] = 0
return coords, feats, labels
def save_features(self, coords, upsampled_features, transformation, iteration, save_dir):
inds_mapping, xyz = self.get_original_pointcloud(coords, transformation, iteration)
ptc_feats = upsampled_features.cpu().numpy()[inds_mapping]
room_id = self.get_output_id(iteration)
torch.save(ptc_feats, f'{save_dir}/{room_id}')
def get_original_pointcloud(self, coords, transformation, iteration):
logging.info('===> Start testing on original pointcloud space.')
data_path = self.data_paths[iteration]
fullply_f = self.data_root / data_path
query_xyz, _, query_label, _ = torch.load(fullply_f)
coords = coords[:, 1:].numpy() + 0.5
curr_transformation = transformation[0, :16].numpy().reshape(4, 4)
coords = np.hstack((coords, np.ones((coords.shape[0], 1))))
coords = (np.linalg.inv(curr_transformation) @ coords.T).T
    # Map voxel-grid outputs back onto the original point cloud via a 1-NN search (KeOps).
    from pykeops.numpy import LazyTensor
    from pykeops.numpy.utils import IsGpuAvailable
    query_xyz = np.array(query_xyz)
    x_i = LazyTensor(query_xyz[:, None, :])      # (N_orig, 1, 3) original points
    y_j = LazyTensor(coords[:, :3][None, :, :])  # (1, N_voxel, 3) inverse-transformed voxel coords
    D_ij = ((x_i - y_j) ** 2).sum(-1)            # (N_orig, N_voxel) symbolic squared distances
    indKNN = D_ij.argKmin(1, dim=1)              # index of the nearest voxel coord per original point
    inds = indKNN[:, 0]
return inds, query_xyz
class ScannetVoxelization2cmDataset(ScannetVoxelizationDataset):
VOXEL_SIZE = 0.02
| ContrastiveSceneContexts-main | downstream/semseg/datasets/scannet.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
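# Both prediction and gt .txt files contain one integer label id per line
# (one line per vertex), e.g. a hypothetical file could start with:
#   1
#   1
#   39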
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_label.py --pred_path [path to predicted .txt files] --gt_path [path to gt .txt files] --output_file [output file]
# python imports
import math
import logging
import os, sys, argparse
import inspect
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from datasets.evaluation.scannet_benchmark_utils import util_3d
from datasets.evaluation.scannet_benchmark_utils import util
class Evaluator:
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS):
#CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
# 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
# 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1
self.gt = {}
self.pred = {}
max_id = self.UNKNOWN_ID
self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong)
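    # Typical usage (mirrors main() below; variable names are illustrative):
    #   evaluator = Evaluator(CLASS_LABELS, VALID_CLASS_IDS)
    #   evaluator.update_confusion(pred_ids, gt_ids)
    #   mean_iou = evaluator.evaluate_confusion(output_file)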
def update_confusion(self, pred_ids, gt_ids, sceneId=None):
# sanity checks
if not pred_ids.shape == gt_ids.shape:
            util.print_error('%s: number of predicted values does not match number of vertices' % sceneId, user_fault=True)
n = self.confusion.shape[0]
k = (gt_ids >= 0) & (gt_ids < n)
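        # flatten (gt, pred) index pairs into n*gt + pred and histogram them:
        # bincount + reshape yields an n x n confusion matrix (rows = gt, cols = pred)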
temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n)
for valid_class_row in self.VALID_CLASS_IDS:
for valid_class_col in self.VALID_CLASS_IDS:
self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col]
@staticmethod
def write_to_benchmark(base='benchmark_segmentation', scene_id=None, pred_ids=None):
os.makedirs(base, exist_ok=True)
util_3d.export_ids('{}.txt'.format(os.path.join(base, scene_id)), pred_ids)
def get_iou(self, label_id, confusion):
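        # Returns a (iou, tp, tp + fp + fn) tuple for a valid class, or nan when the
        # class id is not valid or the denominator is 0 (evaluate_confusion catches
        # the nan case via try/except).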
if not label_id in self.VALID_CLASS_IDS:
return float('nan')
# #true positives
tp = np.longlong(confusion[label_id, label_id])
# #false negatives
fn = np.longlong(confusion[label_id, :].sum()) - tp
# #false positives
not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id]
fp = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
return (float(tp) / denom, tp, denom)
def write_result_file(self, confusion, ious, filename):
with open(filename, 'w') as f:
f.write('iou scores\n')
for i in range(len(self.VALID_CLASS_IDS)):
label_id = self.VALID_CLASS_IDS[i]
label_name = self.CLASS_LABELS[i]
iou = ious[label_name][0]
f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou))
f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean()))
f.write('\nconfusion matrix\n')
f.write('\t\t\t')
for i in range(len(self.VALID_CLASS_IDS)):
#f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i]))
f.write('\n')
for r in range(len(self.VALID_CLASS_IDS)):
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r]))
for c in range(len(self.VALID_CLASS_IDS)):
f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]]))
f.write('\n')
print('wrote results to', filename)
def evaluate_confusion(self, output_file=None):
class_ious = {}
counter = 0
summation = 0
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
label_id = self.VALID_CLASS_IDS[i]
class_ious[label_name] = self.get_iou(label_id, self.confusion)
# print
logging.info('classes IoU')
logging.info('----------------------------')
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
try:
logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2]))
summation += class_ious[label_name][0]
counter += 1
except:
logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name))
logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter))
if output_file:
self.write_result_file(self.confusion, class_ious, output_file)
return summation / counter
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt')
opt = parser.parse_args()
return opt
def main():
opt = config()
#------------------------- ScanNet --------------------------
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if pred_file == 'semantic_label_evaluation.txt':
continue
gt_file = os.path.join(opt.gt_path, pred_file)
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True)
gt_ids = util_3d.load_ids(gt_file)
pred_file = os.path.join(opt.pred_path, pred_file)
pred_ids = util_3d.load_ids(pred_file)
evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0])
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
# evaluate
evaluator.evaluate_confusion(opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/evaluate_semantic_label.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file looks like:
# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence]
# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence]
# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence]
# ...
#
# NOTE: The prediction files must live in the root of the given prediction path.
# Predicted mask .txt files must live in a subfolder.
# Additionally, filenames must not contain spaces.
# The relative paths to predicted masks must contain one integer per line,
# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order).
# Non-zero integers indicate part of the predicted instance.
# The label ids specify the class of the corresponding mask.
# Confidence is a float confidence score of the mask.
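#
# A prediction file for one scene could therefore look like (hypothetical scene id):
#   predicted_masks/scene0707_00_000.txt 33 0.9731
#   predicted_masks/scene0707_00_001.txt 5 0.8823
# (this is exactly the format written by Evaluator.write_to_benchmark below)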
#
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_instance.py --pred_path [path to predicted .txt files] --gt_path [path to gt .txt files] --output_file [output file]
# python imports
import logging
import math
import os, sys, argparse
import inspect
from copy import deepcopy
import argparse
import numpy as np
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from datasets.evaluation.scannet_benchmark_utils import util_3d
from datasets.evaluation.scannet_benchmark_utils import util
def setup_logging():
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
class Evaluator:
# ---------- Evaluation params ---------- #
# overlaps for evaluation
overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25)
# minimum region size for evaluation [verts]
min_region_sizes = np.array( [ 10 ] )
# distance thresholds [m]
distance_threshes = np.array( [ float('inf') ] )
# distance confidences
distance_confs = np.array( [ -float('inf') ] )
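    # i.e. IoU thresholds 0.50:0.05:0.90 plus 0.25; 'all_ap' averages over the
    # 0.50-0.90 set, while AP_50% / AP_25% report single thresholds (see compute_averages)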
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False):
# ---------- Label info ---------- #
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
# 'window', 'bookshelf', 'picture', 'counter',
# 'desk', 'curtain', 'refrigerator', 'shower curtain',
# 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.ID_TO_LABEL = {}
self.LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
self.pred_instances = {}
self.gt_instances = {}
self.benchmark = benchmark
def evaluate_matches(self, matches):
# results: class x overlap
ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float )
for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)):
for oi, overlap_th in enumerate(self.overlaps):
pred_visited = {}
for m in matches:
for p in matches[m]['pred']:
for label_name in self.CLASS_LABELS:
for p in matches[m]['pred'][label_name]:
if 'filename' in p:
pred_visited[p['filename']] = False
for li, label_name in enumerate(self.CLASS_LABELS):
y_true = np.empty(0)
y_score = np.empty(0)
hard_false_negatives = 0
has_gt = False
has_pred = False
for m in matches:
pred_instances = matches[m]['pred'][label_name]
gt_instances = matches[m]['gt'][label_name]
# filter groups in ground truth
gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ]
if gt_instances:
has_gt = True
if pred_instances:
has_pred = True
cur_true = np.ones ( len(gt_instances) )
cur_score = np.ones ( len(gt_instances) ) * (-float("inf"))
cur_match = np.zeros( len(gt_instances) , dtype=np.bool )
# collect matches
for (gti,gt) in enumerate(gt_instances):
found_match = False
num_pred = len(gt['matched_pred'])
for pred in gt['matched_pred']:
# greedy assignments
if pred_visited[pred['filename']]:
continue
overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection'])
if overlap > overlap_th:
confidence = pred['confidence']
# if already have a prediction for this gt,
# the prediction with the lower score is automatically a false positive
if cur_match[gti]:
max_score = max( cur_score[gti] , confidence )
min_score = min( cur_score[gti] , confidence )
cur_score[gti] = max_score
# append false positive
cur_true = np.append(cur_true,0)
cur_score = np.append(cur_score,min_score)
cur_match = np.append(cur_match,True)
# otherwise set score
else:
found_match = True
cur_match[gti] = True
cur_score[gti] = confidence
pred_visited[pred['filename']] = True
if not found_match:
hard_false_negatives += 1
# remove non-matched ground truth instances
cur_true = cur_true [ cur_match==True ]
cur_score = cur_score[ cur_match==True ]
# collect non-matched predictions as false positive
for pred in pred_instances:
found_gt = False
for gt in pred['matched_gt']:
overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection'])
if overlap > overlap_th:
found_gt = True
break
if not found_gt:
num_ignore = pred['void_intersection']
for gt in pred['matched_gt']:
# group?
if gt['instance_id'] < 1000:
num_ignore += gt['intersection']
# small ground truth instances
if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf:
num_ignore += gt['intersection']
proportion_ignore = float(num_ignore)/pred['vert_count']
# if not ignored append false positive
if proportion_ignore <= overlap_th:
cur_true = np.append(cur_true,0)
confidence = pred["confidence"]
cur_score = np.append(cur_score,confidence)
# append to overall results
y_true = np.append(y_true,cur_true)
y_score = np.append(y_score,cur_score)
# compute average precision
if has_gt and has_pred:
# compute precision recall curve first
# sorting and cumsum
score_arg_sort = np.argsort(y_score)
y_score_sorted = y_score[score_arg_sort]
y_true_sorted = y_true[score_arg_sort]
y_true_sorted_cumsum = np.cumsum(y_true_sorted)
# unique thresholds
(thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True )
num_prec_recall = len(unique_indices) + 1
# prepare precision recall
num_examples = len(y_score_sorted)
try:
num_true_examples = y_true_sorted_cumsum[-1]
except:
num_true_examples = 0
precision = np.zeros(num_prec_recall)
recall = np.zeros(num_prec_recall)
# deal with the first point
y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 )
# deal with remaining
for idx_res,idx_scores in enumerate(unique_indices):
cumsum = y_true_sorted_cumsum[idx_scores-1]
tp = num_true_examples - cumsum
fp = num_examples - idx_scores - tp
fn = cumsum + hard_false_negatives
p = float(tp)/(tp+fp)
r = float(tp)/(tp+fn)
precision[idx_res] = p
recall [idx_res] = r
# first point in curve is artificial
precision[-1] = 1.
recall [-1] = 0.
# compute average of precision-recall curve
recall_for_conv = np.copy(recall)
recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
recall_for_conv = np.append(recall_for_conv, 0.)
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid')
# integrate is now simply a dot product
ap_current = np.dot(precision, stepWidths)
elif has_gt:
ap_current = 0.0
else:
ap_current = float('nan')
ap[di,li,oi] = ap_current
return ap
def compute_averages(self, aps):
d_inf = 0
o50 = np.where(np.isclose(self.overlaps,0.5))
o25 = np.where(np.isclose(self.overlaps,0.25))
oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25)))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(self.CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def assign_instances_for_scan(self, scene_id):
# get gt instances
gt_ids = self.gt_instances[scene_id]
gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL)
# associate
gt2pred = deepcopy(gt_instances)
for label in gt2pred:
for gt in gt2pred[label]:
gt['matched_pred'] = []
pred2gt = {}
for label in self.CLASS_LABELS:
pred2gt[label] = []
num_pred_instances = 0
# mask of void labels in the groundtruth
bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS))
# go thru all prediction masks
for instance_id in self.pred_instances[scene_id]:
label_id = int(self.pred_instances[scene_id][instance_id]['label_id'])
conf = self.pred_instances[scene_id][instance_id]['conf']
if not label_id in self.ID_TO_LABEL:
continue
label_name = self.ID_TO_LABEL[label_id]
# read the mask
pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask']
# convert to binary
num = np.count_nonzero(pred_mask)
if num < self.min_region_sizes[0]:
continue # skip if empty
pred_instance = {}
pred_instance['filename'] = str(scene_id) + '/' + str(instance_id)
pred_instance['pred_id'] = num_pred_instances
pred_instance['label_id'] = label_id
pred_instance['vert_count'] = num
pred_instance['confidence'] = conf
pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask))
# matched gt instances
matched_gt = []
# go thru all gt instances with matching label
for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
if intersection > 0:
gt_copy = gt_inst.copy()
pred_copy = pred_instance.copy()
gt_copy['intersection'] = intersection
pred_copy['intersection'] = intersection
matched_gt.append(gt_copy)
gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
pred_instance['matched_gt'] = matched_gt
num_pred_instances += 1
pred2gt[label_name].append(pred_instance)
return gt2pred, pred2gt
def print_results(self, avgs):
sep = ""
col1 = ":"
lineLen = 64
logging.info("")
logging.info("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP" ) + sep
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
logging.info(line)
logging.info("#"*lineLen)
for (li,label_name) in enumerate(self.CLASS_LABELS):
ap_avg = avgs["classes"][label_name]["ap"]
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_avg ) + sep
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
logging.info(line)
all_ap_avg = avgs["all_ap"]
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
logging.info("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_avg) + sep
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
logging.info(line)
logging.info("")
@staticmethod
def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}):
os.makedirs(output_path, exist_ok=True)
os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True)
f = open(os.path.join(output_path, scene_id + '.txt'), 'w')
for instance_id in pred_inst:
            # predicted instance ids start from 0; in the gt, valid instance ids start from 1
score = pred_inst[instance_id]['conf']
label = pred_inst[instance_id]['label_id']
mask = pred_inst[instance_id]['pred_mask']
f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score))
if instance_id < len(pred_inst) - 1:
f.write('\n')
util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask)
f.close()
def add_prediction(self, instance_info, id):
self.pred_instances[id] = instance_info
def add_gt(self, instance_info, id):
self.gt_instances[id] = instance_info
def add_gt_from_benchmark(self, scene_id):
try:
gt_file_path = '/rhome/jhou/.gt/gt_insts/'
gt_file = os.path.join(gt_file_path, scene_id + '.txt')
gt_ids = util_3d.load_ids(gt_file)
except:
gt_file_path = '/rhome/jhou/data/dataset/scannet/scannet_benchmark/gt_instance/'
gt_file = os.path.join(gt_file_path, scene_id + '.txt')
gt_ids = util_3d.load_ids(gt_file)
self.add_gt(gt_ids, scene_id)
def evaluate(self):
print('evaluating', len(self.pred_instances), 'scans...')
matches = {}
for i, scene_id in enumerate(self.pred_instances):
gt2pred, pred2gt = self.assign_instances_for_scan(scene_id)
matches[scene_id] = {}
matches[scene_id]['gt'] = gt2pred
matches[scene_id]['pred'] = pred2gt
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
print('')
ap_scores = self.evaluate_matches(matches)
avgs = self.compute_averages(ap_scores)
# print
self.print_results(avgs)
return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%']
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap = avgs["classes"][class_name]["ap"]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]')
opt = parser.parse_args()
return opt
if __name__ == '__main__':
opt = config()
setup_logging()
#-----------------scannet----------------------
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if os.path.isdir(os.path.join(opt.pred_path, pred_file)):
continue
scene_id = pred_file[:12]
sys.stdout.write("\rscans read: {}".format(i+1))
sys.stdout.flush()
gt_file = os.path.join(opt.gt_path, pred_file)
gt_ids = util_3d.load_ids(gt_file)
evaluator.add_gt(gt_ids, scene_id)
instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path)
for pred_mask_file in instances:
# read the mask
pred_mask = util_3d.load_ids(pred_mask_file)
instances[pred_mask_file]['pred_mask'] = pred_mask
evaluator.add_prediction(instances, scene_id)
print('')
_, _, _ = evaluator.evaluate()
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/evaluate_semantic_instance.py |
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int([key for key in mapping.keys()][0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/util.py |
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
from . import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'predicted_masks'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
# write mask indexing
output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt')
export_ids(output_mask_file, mask)
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
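    # ScanNet-benchmark-style encoding: instance_id = semantic label_id * 1000 + instance index,
    # so get_label_id below recovers the semantic label via integer division by 1000.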
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]')
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path')
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename))
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/util_3d.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
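# Both prediction and gt .txt files contain one integer label id per line
# (one line per vertex), e.g. a hypothetical file could start with:
#   1
#   1
#   39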
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_label.py --pred_path [path to predicted .txt files] --gt_path [path to gt .txt files] --output_file [output file]
# python imports
import math
import logging
import os, sys, argparse
import inspect
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
import util_3d
import util
class Evaluator:
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS):
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1
self.gt = {}
self.pred = {}
max_id = self.UNKNOWN_ID
self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong)
def update_confusion(self, pred_ids, gt_ids, sceneId=None):
# sanity checks
if not pred_ids.shape == gt_ids.shape:
            util.print_error('%s: number of predicted values does not match number of vertices' % sceneId, user_fault=True)
n = self.confusion.shape[0]
k = (gt_ids >= 0) & (gt_ids < n)
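        # flatten (gt, pred) index pairs into n*gt + pred and histogram them:
        # bincount + reshape yields an n x n confusion matrix (rows = gt, cols = pred)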
temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n)
for valid_class_row in self.VALID_CLASS_IDS:
for valid_class_col in self.VALID_CLASS_IDS:
self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col]
@staticmethod
def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None):
os.makedirs(base, exist_ok=True)
util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids)
def get_iou(self, label_id, confusion):
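        # Returns a (iou, tp, tp + fp + fn) tuple for a valid class, or nan when the
        # class id is not valid or the denominator is 0 (evaluate_confusion catches
        # the nan case via try/except).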
if not label_id in self.VALID_CLASS_IDS:
return float('nan')
# #true positives
tp = np.longlong(confusion[label_id, label_id])
# #false negatives
fn = np.longlong(confusion[label_id, :].sum()) - tp
# #false positives
not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id]
fp = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
return (float(tp) / denom, tp, denom)
def write_result_file(self, confusion, ious, filename):
with open(filename, 'w') as f:
f.write('iou scores\n')
for i in range(len(self.VALID_CLASS_IDS)):
label_id = self.VALID_CLASS_IDS[i]
label_name = self.CLASS_LABELS[i]
iou = ious[label_name][0]
f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou))
f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean()))
f.write('\nconfusion matrix\n')
f.write('\t\t\t')
for i in range(len(self.VALID_CLASS_IDS)):
#f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i]))
f.write('\n')
for r in range(len(self.VALID_CLASS_IDS)):
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r]))
for c in range(len(self.VALID_CLASS_IDS)):
f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]]))
f.write('\n')
print('wrote results to', filename)
def evaluate_confusion(self, output_file=None):
class_ious = {}
counter = 0
summation = 0
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
label_id = self.VALID_CLASS_IDS[i]
class_ious[label_name] = self.get_iou(label_id, self.confusion)
# print
logging.info('classes IoU')
logging.info('----------------------------')
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
try:
logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2]))
summation += class_ious[label_name][0]
counter += 1
except:
logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name))
logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter))
if output_file:
self.write_result_file(self.confusion, class_ious, output_file)
return summation / counter
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt')
opt = parser.parse_args()
return opt
def main():
opt = config()
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
#------------------------- ScanNet --------------------------
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if pred_file == 'semantic_label_evaluation.txt':
continue
gt_file = os.path.join(opt.gt_path, pred_file)
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True)
gt_ids = util_3d.load_ids(gt_file)
pred_file = os.path.join(opt.pred_path, pred_file)
pred_ids = util_3d.load_ids(pred_file)
evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0])
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
# evaluate
evaluator.evaluate_confusion(opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_label.py |
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int([key for key in mapping.keys()][0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/util.py |
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'predicted_masks'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
# write mask indexing
output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt')
export_ids(output_mask_file, mask)
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
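    # ScanNet-benchmark-style encoding: instance_id = semantic label_id * 1000 + instance index,
    # so get_label_id below recovers the semantic label via integer division by 1000.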
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]')
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path')
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename))
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/util_3d.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file looks like:
# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence]
# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence]
# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence]
# ...
#
# NOTE: The prediction files must live in the root of the given prediction path.
# Predicted mask .txt files must live in a subfolder.
# Additionally, filenames must not contain spaces.
# The relative paths to predicted masks must contain one integer per line,
# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order).
# Non-zero integers indicate part of the predicted instance.
# The label ids specify the class of the corresponding mask.
# Confidence is a float confidence score of the mask.
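#
# A prediction file for one scene could therefore look like (hypothetical scene id):
#   predicted_masks/scene0707_00_000.txt 33 0.9731
#   predicted_masks/scene0707_00_001.txt 5 0.8823
# (this is exactly the format written by Evaluator.write_to_benchmark below)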
#
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_instance.py --pred_path [path to predicted .txt files] --gt_path [path to gt .txt files] --output_file [output file]
# python imports
import logging
import math
import os, sys, argparse
import inspect
from copy import deepcopy
import argparse
import numpy as np
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
import util_3d
import util
def setup_logging():
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
class Evaluator:
# ---------- Evaluation params ---------- #
# overlaps for evaluation
overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25)
# minimum region size for evaluation [verts]
min_region_sizes = np.array( [ 100 ] )
# distance thresholds [m]
distance_threshes = np.array( [ float('inf') ] )
# distance confidences
distance_confs = np.array( [ -float('inf') ] )
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False):
# ---------- Label info ---------- #
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
# 'window', 'bookshelf', 'picture', 'counter',
# 'desk', 'curtain', 'refrigerator', 'shower curtain',
# 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.ID_TO_LABEL = {}
self.LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
self.pred_instances = {}
self.gt_instances = {}
self.benchmark = benchmark
def evaluate_matches(self, matches):
# results: class x overlap
ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float )
for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)):
for oi, overlap_th in enumerate(self.overlaps):
pred_visited = {}
for m in matches:
for p in matches[m]['pred']:
for label_name in self.CLASS_LABELS:
for p in matches[m]['pred'][label_name]:
if 'filename' in p:
pred_visited[p['filename']] = False
for li, label_name in enumerate(self.CLASS_LABELS):
y_true = np.empty(0)
y_score = np.empty(0)
hard_false_negatives = 0
has_gt = False
has_pred = False
for m in matches:
pred_instances = matches[m]['pred'][label_name]
gt_instances = matches[m]['gt'][label_name]
# filter groups in ground truth
gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ]
if gt_instances:
has_gt = True
if pred_instances:
has_pred = True
cur_true = np.ones ( len(gt_instances) )
cur_score = np.ones ( len(gt_instances) ) * (-float("inf"))
cur_match = np.zeros( len(gt_instances) , dtype=np.bool )
# collect matches
for (gti,gt) in enumerate(gt_instances):
found_match = False
num_pred = len(gt['matched_pred'])
for pred in gt['matched_pred']:
# greedy assignments
if pred_visited[pred['filename']]:
continue
overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection'])
if overlap > overlap_th:
confidence = pred['confidence']
# if already have a prediction for this gt,
# the prediction with the lower score is automatically a false positive
if cur_match[gti]:
max_score = max( cur_score[gti] , confidence )
min_score = min( cur_score[gti] , confidence )
cur_score[gti] = max_score
# append false positive
cur_true = np.append(cur_true,0)
cur_score = np.append(cur_score,min_score)
cur_match = np.append(cur_match,True)
# otherwise set score
else:
found_match = True
cur_match[gti] = True
cur_score[gti] = confidence
pred_visited[pred['filename']] = True
if not found_match:
hard_false_negatives += 1
# remove non-matched ground truth instances
cur_true = cur_true [ cur_match==True ]
cur_score = cur_score[ cur_match==True ]
# collect non-matched predictions as false positive
for pred in pred_instances:
found_gt = False
for gt in pred['matched_gt']:
overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection'])
if overlap > overlap_th:
found_gt = True
break
if not found_gt:
num_ignore = pred['void_intersection']
for gt in pred['matched_gt']:
# group?
if gt['instance_id'] < 1000:
num_ignore += gt['intersection']
# small ground truth instances
if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf:
num_ignore += gt['intersection']
proportion_ignore = float(num_ignore)/pred['vert_count']
# if not ignored append false positive
if proportion_ignore <= overlap_th:
cur_true = np.append(cur_true,0)
confidence = pred["confidence"]
cur_score = np.append(cur_score,confidence)
# append to overall results
y_true = np.append(y_true,cur_true)
y_score = np.append(y_score,cur_score)
# compute average precision
if has_gt and has_pred:
# compute precision recall curve first
# sorting and cumsum
score_arg_sort = np.argsort(y_score)
y_score_sorted = y_score[score_arg_sort]
y_true_sorted = y_true[score_arg_sort]
y_true_sorted_cumsum = np.cumsum(y_true_sorted)
# unique thresholds
(thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True )
num_prec_recall = len(unique_indices) + 1
# prepare precision recall
num_examples = len(y_score_sorted)
try:
num_true_examples = y_true_sorted_cumsum[-1]
except:
num_true_examples = 0
precision = np.zeros(num_prec_recall)
recall = np.zeros(num_prec_recall)
# deal with the first point
y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 )
# deal with remaining
for idx_res,idx_scores in enumerate(unique_indices):
cumsum = y_true_sorted_cumsum[idx_scores-1]
tp = num_true_examples - cumsum
fp = num_examples - idx_scores - tp
fn = cumsum + hard_false_negatives
p = float(tp)/(tp+fp)
r = float(tp)/(tp+fn)
precision[idx_res] = p
recall [idx_res] = r
# first point in curve is artificial
precision[-1] = 1.
recall [-1] = 0.
# compute average of precision-recall curve
recall_for_conv = np.copy(recall)
recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
recall_for_conv = np.append(recall_for_conv, 0.)
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid')
# integrate is now simply a dot product
ap_current = np.dot(precision, stepWidths)
elif has_gt:
ap_current = 0.0
else:
ap_current = float('nan')
ap[di,li,oi] = ap_current
return ap
def compute_averages(self, aps):
d_inf = 0
o50 = np.where(np.isclose(self.overlaps,0.5))
o25 = np.where(np.isclose(self.overlaps,0.25))
oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25)))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(self.CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def assign_instances_for_scan(self, scene_id):
# get gt instances
gt_ids = self.gt_instances[scene_id]
gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL)
# associate
gt2pred = deepcopy(gt_instances)
for label in gt2pred:
for gt in gt2pred[label]:
gt['matched_pred'] = []
pred2gt = {}
for label in self.CLASS_LABELS:
pred2gt[label] = []
num_pred_instances = 0
# mask of void labels in the groundtruth
bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS))
# go thru all prediction masks
for instance_id in self.pred_instances[scene_id]:
label_id = int(self.pred_instances[scene_id][instance_id]['label_id'])
conf = self.pred_instances[scene_id][instance_id]['conf']
if not label_id in self.ID_TO_LABEL:
continue
label_name = self.ID_TO_LABEL[label_id]
# read the mask
pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask']
# convert to binary
num = np.count_nonzero(pred_mask)
if num < self.min_region_sizes[0]:
continue # skip if empty
pred_instance = {}
pred_instance['filename'] = str(scene_id) + '/' + str(instance_id)
pred_instance['pred_id'] = num_pred_instances
pred_instance['label_id'] = label_id
pred_instance['vert_count'] = num
pred_instance['confidence'] = conf
pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask))
# matched gt instances
matched_gt = []
# go thru all gt instances with matching label
for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
if intersection > 0:
gt_copy = gt_inst.copy()
pred_copy = pred_instance.copy()
gt_copy['intersection'] = intersection
pred_copy['intersection'] = intersection
matched_gt.append(gt_copy)
gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
pred_instance['matched_gt'] = matched_gt
num_pred_instances += 1
pred2gt[label_name].append(pred_instance)
return gt2pred, pred2gt
def print_results(self, avgs):
sep = ""
col1 = ":"
lineLen = 64
logging.info("")
logging.info("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP" ) + sep
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
logging.info(line)
logging.info("#"*lineLen)
for (li,label_name) in enumerate(self.CLASS_LABELS):
ap_avg = avgs["classes"][label_name]["ap"]
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_avg ) + sep
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
logging.info(line)
all_ap_avg = avgs["all_ap"]
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
logging.info("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_avg) + sep
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
logging.info(line)
logging.info("")
@staticmethod
def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}):
os.makedirs(output_path, exist_ok=True)
os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True)
f = open(os.path.join(output_path, scene_id + '.txt'), 'w')
for instance_id in pred_inst:
            # predicted instance ids start from 0; in the gt, valid instance ids start from 1
score = pred_inst[instance_id]['conf']
label = pred_inst[instance_id]['label_id']
mask = pred_inst[instance_id]['pred_mask']
f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score))
if instance_id < len(pred_inst) - 1:
f.write('\n')
util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask)
f.close()
def add_prediction(self, instance_info, id):
self.pred_instances[id] = instance_info
def add_gt(self, instance_info, id):
self.gt_instances[id] = instance_info
def evaluate(self):
print('evaluating', len(self.pred_instances), 'scans...')
matches = {}
for i, scene_id in enumerate(self.pred_instances):
gt2pred, pred2gt = self.assign_instances_for_scan(scene_id)
matches[scene_id] = {}
matches[scene_id]['gt'] = gt2pred
matches[scene_id]['pred'] = pred2gt
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
print('')
ap_scores = self.evaluate_matches(matches)
avgs = self.compute_averages(ap_scores)
# print
self.print_results(avgs)
return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%']
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap = avgs["classes"][class_name]["ap"]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]')
opt = parser.parse_args()
return opt
if __name__ == '__main__':
opt = config()
setup_logging()
#-----------------scannet----------------------
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
    print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')  # -1 accounts for the predicted_masks subdirectory
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if os.path.isdir(os.path.join(opt.pred_path, pred_file)):
continue
scene_id = pred_file[:12]
sys.stdout.write("\rscans read: {}".format(i+1))
sys.stdout.flush()
gt_file = os.path.join(opt.gt_path, pred_file)
gt_ids = util_3d.load_ids(gt_file)
evaluator.add_gt(gt_ids, scene_id)
instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path)
for pred_mask_file in instances:
# read the mask
pred_mask = util_3d.load_ids(pred_mask_file)
instances[pred_mask_file]['pred_mask'] = pred_mask
evaluator.add_prediction(instances, scene_id)
print('')
_, _, _ = evaluator.evaluate()
| ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_instance.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import numpy as np
import os
import torch
from tqdm import tqdm
from lib.utils import mkdir_p
from lib.pc_utils import save_point_cloud, read_plyfile
import MinkowskiEngine as ME
STANFORD_3D_IN_PATH = '/checkpoint/jihou/data/stanford3d/Stanford3dDataset_v1.2/'
STANFORD_3D_OUT_PATH = '/checkpoint/jihou/data/stanford3d/pointcloud_pth'
STANFORD_3D_TO_SEGCLOUD_LABEL = {
4: 0,
8: 1,
12: 2,
1: 3,
6: 4,
13: 5,
7: 6,
5: 7,
11: 8,
3: 9,
9: 10,
2: 11,
0: 12,
}
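# Hedged sketch (not used by the converter below): apply the Stanford -> SegCloud
# remap above to a per-point label array, sending any id missing from the table to
# an ignore value. The ignore id 255 is an assumption chosen to match the
# ignore_label passed to sparse_quantize later in this file.
def remap_to_segcloud(labels, ignore_label=255):
  remapped = np.full_like(labels, ignore_label)
  for src_id, dst_id in STANFORD_3D_TO_SEGCLOUD_LABEL.items():
    remapped[labels == src_id] = dst_id
  return remapped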
class Stanford3DDatasetConverter:
CLASSES = [
'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa',
'stairs', 'table', 'wall', 'window'
]
TRAIN_TEXT = 'train'
VAL_TEXT = 'val'
TEST_TEXT = 'test'
@classmethod
def read_txt(cls, txtfile):
# Read txt file and parse its content.
with open(txtfile) as f:
pointcloud = []
for l in f:
try:
pointcloud += [[float(li) for li in l.split()]]
except Exception as e:
print(e, txtfile)
continue
# pointcloud = [l.split() for l in f]
# Load point cloud to named numpy array.
pointcloud = np.array(pointcloud).astype(np.float32)
assert pointcloud.shape[1] == 6
xyz = pointcloud[:, :3].astype(np.float32)
rgb = pointcloud[:, 3:].astype(np.uint8)
return xyz, rgb
@classmethod
def convert_to_ply(cls, root_path, out_path, save_pth=False):
"""Convert Stanford3DDataset to PLY format that is compatible with
Synthia dataset. Assumes file structure as given by the dataset.
Outputs the processed PLY files to `STANFORD_3D_OUT_PATH`.
"""
txtfiles = glob.glob(os.path.join(root_path, '*/*/*.txt'))
for txtfile in tqdm(txtfiles):
file_sp = os.path.normpath(txtfile).split(os.path.sep)
target_path = os.path.join(out_path, file_sp[-3])
out_file = os.path.join(target_path, file_sp[-2] + '.ply')
if save_pth:
out_file = os.path.join(target_path, file_sp[-2] + '.pth')
if os.path.exists(out_file):
print(out_file, ' exists')
continue
annotation, _ = os.path.split(txtfile)
subclouds = glob.glob(os.path.join(annotation, 'Annotations/*.txt'))
coords, feats, labels, instances = [], [], [], []
for inst, subcloud in enumerate(subclouds):
# Read ply file and parse its rgb values.
xyz, rgb = cls.read_txt(subcloud)
_, annotation_subfile = os.path.split(subcloud)
clsidx = cls.CLASSES.index(annotation_subfile.split('_')[0])
coords.append(xyz)
feats.append(rgb)
labels.append(np.ones((len(xyz), 1), dtype=np.int32) * clsidx)
instances.append(np.ones((len(xyz), 1), dtype=np.int32) * inst)
if len(coords) == 0:
print(txtfile, ' has 0 files.')
else:
# Concat
coords = np.concatenate(coords, 0)
feats = np.concatenate(feats, 0)
labels = np.concatenate(labels, 0)
instances = np.concatenate(instances, 0)
inds, collabels = ME.utils.sparse_quantize(
coords,
feats,
labels,
return_index=True,
ignore_label=255,
quantization_size=0.01 # 1cm
)
pointcloud = np.concatenate((coords[inds], feats[inds], collabels[:, None]), axis=1)
if save_pth:
pointcloud = np.concatenate((coords[inds], feats[inds], collabels[:, None], instances[inds]), axis=1)
# Write ply file.
mkdir_p(target_path)
if save_pth:
torch.save(pointcloud, out_file)
continue
save_point_cloud(pointcloud, out_file, with_label=True, verbose=False)
def generate_splits(stanford_out_path, suffix='ply'):
"""Takes preprocessed out path and generate txt files"""
split_path = './splits/stanford'
mkdir_p(split_path)
for i in range(1, 7):
curr_path = os.path.join(stanford_out_path, f'Area_{i}')
files = glob.glob(os.path.join(curr_path, '*.{}'.format(suffix)))
files = [os.path.relpath(full_path, stanford_out_path) for full_path in files]
out_txt = os.path.join(split_path, f'area{i}.txt')
with open(out_txt, 'w') as f:
f.write('\n'.join(files))
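# Hedged companion sketch (assumption, not in the original file): read back one of
# the area{i}.txt split files written by generate_splits() and resolve the relative
# entries against the preprocessed output root.
def load_split(split_txt, data_root=STANFORD_3D_OUT_PATH):
  with open(split_txt) as f:
    rel_paths = [line.strip() for line in f if line.strip()]
  return [os.path.join(data_root, p) for p in rel_paths]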
if __name__ == '__main__':
Stanford3DDatasetConverter.convert_to_ply(STANFORD_3D_IN_PATH, STANFORD_3D_OUT_PATH, save_pth=True)
generate_splits(STANFORD_3D_OUT_PATH, 'pth')
| ContrastiveSceneContexts-main | downstream/semseg/datasets/preprocessing/stanford/stanford.py |
import os
import sys
import plyfile
import json
import time
import torch
import argparse
import numpy as np
def get_raw2scannet_label_map():
    # Read the label-mapping tsv (expected in the working directory) and drop the header row.
    with open('scannetv2-labels.combined.tsv') as f:
        lines = [line.rstrip() for line in f][1:]
raw2scannet = {}
for i in range(len(lines)):
elements = lines[i].split('\t')
        # Column 1: raw category name, column 4: NYU40 id, column 7: NYU40 name.
raw_name = elements[1]
nyu40_id = elements[4]
nyu40_name = elements[7]
raw2scannet[raw_name] = nyu40_id
return raw2scannet
g_raw2scannet = get_raw2scannet_label_map()
RAW2SCANNET = g_raw2scannet
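# Hedged helper (not used by main() below): look up a raw ScanNet category name and
# fall back to a default id when the tsv has no entry, instead of raising the
# KeyError that a direct RAW2SCANNET[...] lookup would produce. The default '0'
# is an assumed "unannotated" id, not something defined by this script.
def raw_label_to_nyu40(raw_name, default='0'):
    return RAW2SCANNET.get(raw_name, default)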
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', default='/canis/Datasets/ScanNet/public/v2/scans/')
parser.add_argument('--output', default='./output')
opt = parser.parse_args()
return opt
def main(config):
for scene_name in os.listdir(config.input):
print(scene_name)
# Over-segmented segments: maps from segment to vertex/point IDs
segid_to_pointid = {}
segfile = os.path.join(config.input, scene_name, '%s_vh_clean_2.0.010000.segs.json'%(scene_name))
with open(segfile) as jsondata:
d = json.load(jsondata)
seg = d['segIndices']
for i in range(len(seg)):
if seg[i] not in segid_to_pointid:
segid_to_pointid[seg[i]] = []
segid_to_pointid[seg[i]].append(i)
# Raw points in XYZRGBA
ply_filename = os.path.join(config.input, scene_name, '%s_vh_clean_2.ply' % (scene_name))
f = plyfile.PlyData().read(ply_filename)
points = np.array([list(x) for x in f.elements[0]])
# Instances over-segmented segment IDs: annotation on segments
instance_segids = []
labels = []
annotation_filename = os.path.join(config.input, scene_name, '%s.aggregation.json'%(scene_name))
with open(annotation_filename) as jsondata:
d = json.load(jsondata)
for x in d['segGroups']:
instance_segids.append(x['segments'])
labels.append(x['label'])
# Each instance's points
instance_labels = np.zeros(points.shape[0])
semantic_labels = np.zeros(points.shape[0])
for i in range(len(instance_segids)):
segids = instance_segids[i]
pointids = []
for segid in segids:
pointids += segid_to_pointid[segid]
pointids = np.array(pointids)
instance_labels[pointids] = i+1
semantic_labels[pointids] = RAW2SCANNET[labels[i]]
colors = points[:,3:6]
        points = points[:,0:3]  # keep XYZ only; RGB was split into `colors` above
torch.save((points, colors, semantic_labels, instance_labels), os.path.join(config.output, scene_name+'.pth'))
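# Hedged sketch of the reader side (not in the original script): the tuple layout
# matches the torch.save call at the end of main() above.
def load_scene(pth_path):
    points, colors, semantic_labels, instance_labels = torch.load(pth_path)
    return points, colors, semantic_labels, instance_labels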
if __name__=='__main__':
config = parse_args()
os.makedirs(config.output, exist_ok=True)
main(config)
| ContrastiveSceneContexts-main | downstream/semseg/datasets/preprocessing/scannet/collect_indoor3d_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from torch.nn import Module
from MinkowskiEngine import SparseTensor
class Wrapper(Module):
"""
Wrapper for the segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, NetClass, in_nchannel, out_nchannel, config):
super(Wrapper, self).__init__()
self.initialize_filter(NetClass, in_nchannel, out_nchannel, config)
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
raise NotImplementedError('Must initialize a model and a filter')
def forward(self, x, coords, colors=None):
soutput = self.model(x)
# During training, make the network invariant to the filter
if not self.training or random.random() < 0.5:
# Filter requires the model to finish the forward pass
wrapper_coords = self.filter.initialize_coords(self.model, coords, colors)
finput = SparseTensor(soutput.F, wrapper_coords)
soutput = self.filter(finput)
return soutput
| ContrastiveSceneContexts-main | downstream/semseg/models/wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, BasicBlockINBN, Bottleneck
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
stride=1,
dilation=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, stride=1, dilation=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1)
return self.final(out)
class ResUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1)
class ResUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2)
class ResUNet18INBN(ResUNet18):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class ResUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3, 2, 2)
class ResUNet14D(ResUNet14):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet18D(ResUNet18):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34D(ResUNet34):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34E(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 64)
class ResUNet34F(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 32)
class MinkUNetHyper(MinkUNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr4 = ME.MinkowskiPoolingTranspose(kernel_size=8, stride=8, dimension=D)
out_pool4 = self.inplanes
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr5 = ME.MinkowskiPoolingTranspose(kernel_size=4, stride=4, dimension=D)
out_pool5 = self.inplanes
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr6 = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D)
out_pool6 = self.inplanes
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
out_pool5 + out_pool6 + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out_5 = self.pool_tr5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out_6 = self.pool_tr6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1, out_6, out_5)
return self.final(out)
class MinkUNetHyper14INBN(MinkUNetHyper):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class STMinkUNetBase(MinkUNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STMinkUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResUNet14(STMinkUNetBase, ResUNet14):
pass
class STResUNet18(STMinkUNetBase, ResUNet18):
pass
class STResUNet34(STMinkUNetBase, ResUNet34):
pass
class STResUNet50(STMinkUNetBase, ResUNet50):
pass
class STResUNet101(STMinkUNetBase, ResUNet101):
pass
class STResTesseractUNetBase(STMinkUNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14):
pass
class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18):
pass
class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34):
pass
class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50):
pass
class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101):
pass
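# Hedged usage sketch (not part of the original file): construct ResUNet18 for
# 3-channel RGB features and 20 output classes. The config fields below are
# assumptions based on the attributes this module reads (bn_momentum,
# conv1_kernel_size); ResNetBase may expect additional fields in some repo
# versions, and building a SparseTensor input is omitted because its coordinate
# layout depends on the installed MinkowskiEngine version.
if __name__ == '__main__':
  from types import SimpleNamespace
  cfg = SimpleNamespace(bn_momentum=0.02, conv1_kernel_size=3)
  net = ResUNet18(in_channels=3, out_channels=20, config=cfg, D=3)
  print('ResUNet18 parameters:', sum(p.numel() for p in net.parameters()))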
| ContrastiveSceneContexts-main | downstream/semseg/models/resunet.py |