python_code | repo_name | file_path |
---|---|---
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
__all__ = ["DetrDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
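# Illustrative example (added, not from the original file): with the usual detectron2
# defaults INPUT.MIN_SIZE_TEST = 800 and INPUT.MAX_SIZE_TEST = 1333, calling
# build_transform_gen(cfg, is_train=False) returns [ResizeShortestEdge(800, 1333, "choice")].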
class DetrDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format
and maps it into a format used by DETR.
The callable currently does the following:
1. Read the image from "file_name"
2. Apply geometric transforms to the image and annotations
3. Find and apply a suitable crop to the image and annotations
4. Prepare the image and annotations as Tensors
"""
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
assert not cfg.MODEL.MASK_ON, "Mask is not supported"
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
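# Added note: when crop augmentation is enabled, half of the training images (chosen at
# random below) have the crop generators (a resize to a short edge in {400, 500, 600}
# followed by a random crop) spliced in just before the final ResizeShortestEdge, following
# the DETR augmentation recipe; the other half only go through self.tfm_gens.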
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
| detr-master | d2/detr/dataset_mapper.py |
#! /usr/bin/env python
# Script to launch AllenNLP Beaker jobs.
import argparse
import os
import json
import random
import tempfile
import subprocess
import sys
# This has to happen before we import spacy (even indirectly), because for some crazy reason spacy
# thought it was a good idea to set the random seed on import...
random_int = random.randint(0, 2**32)
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir))))
from allennlp.common.params import Params
def main(param_file: str, args: argparse.Namespace):
commit = subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True).strip()
image = f"allennlp/sparc_rc:{commit}"
overrides = ""
# Reads params and sets environment.
params = Params.from_file(param_file, overrides)
flat_params = params.as_flat_dict()
env = {}
for k, v in flat_params.items():
k = str(k).replace('.', '_')
env[k] = str(v)
# If the git repository is dirty, add a random hash.
result = subprocess.run('git diff-index --quiet HEAD --', shell=True)
if result.returncode != 0:
dirty_hash = "%x" % random_int
image += "-" + dirty_hash
if args.blueprint:
blueprint = args.blueprint
print(f"Using the specified blueprint: {blueprint}")
else:
print(f"Building the Docker image ({image})...")
subprocess.run(f'docker build -t {image} .', shell=True, check=True)
print(f"Create a Beaker blueprint...")
blueprint = subprocess.check_output(f'beaker blueprint create --quiet {image}', shell=True, universal_newlines=True).strip()
print(f" Blueprint created: {blueprint}")
config_dataset_id = subprocess.check_output(f'beaker dataset create --quiet {param_file}', shell=True, universal_newlines=True).strip()
allennlp_command = [
"python",
"-m",
"allennlp.run",
"train",
"/config.json",
"-s",
"/output",
"--file-friendly-logging",
"--include-package",
"reading_comprehension"
]
dataset_mounts = []
for source in args.source + [f"{config_dataset_id}:/config.json"]:
datasetId, containerPath = source.split(":")
dataset_mounts.append({
"datasetId": datasetId,
"containerPath": containerPath
})
for var in args.env:
key, value = var.split("=")
env[key] = value
requirements = {}
if args.cpu:
requirements["cpu"] = float(args.cpu)
if args.memory:
requirements["memory"] = args.memory
if args.gpu_count:
requirements["gpuCount"] = int(args.gpu_count)
config_spec = {
"description": args.desc,
"blueprint": blueprint,
"resultPath": "/output",
"args": allennlp_command,
"datasetMounts": dataset_mounts,
"requirements": requirements,
"env": env
}
config_task = {"spec": config_spec, "name": "training"}
config = {
"tasks": [config_task]
}
output_path = args.spec_output_path if args.spec_output_path else tempfile.mkstemp(".yaml",
"beaker-config-")[1]
with open(output_path, "w") as output:
output.write(json.dumps(config, indent=4))
print(f"Beaker spec written to {output_path}.")
experiment_command = ["beaker", "experiment", "create", "--file", output_path]
if args.name:
experiment_command.append("--name")
experiment_command.append(args.name.replace(" ", "-"))
if args.dry_run:
print(f"This is a dry run (--dry-run). Launch your job with the following command:")
print(f" " + " ".join(experiment_command))
else:
print(f"Running the experiment:")
print(f" " + " ".join(experiment_command))
subprocess.run(experiment_command)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('param_file', type=str, help='The qanet configuration file.')
parser.add_argument('--name', type=str, help='A name for the experiment.')
parser.add_argument('--spec_output_path', type=str, help='The destination to write the experiment spec.')
parser.add_argument('--dry-run', action='store_true', help='If specified, an experiment will not be created.')
parser.add_argument('--blueprint', type=str, help='The Blueprint to use (if unspecified one will be built)')
parser.add_argument('--desc', type=str, help='A description for the experiment.')
parser.add_argument('--env', action='append', default=[], help='Set environment variables (e.g. NAME=value)')
parser.add_argument('--source', action='append', default=[], help='Bind a remote data source (e.g. source-id:/target/path)')
parser.add_argument('--cpu', help='CPUs to reserve for this experiment (e.g., 0.5)')
parser.add_argument('--gpu-count', default=1, help='GPUs to use for this experiment (e.g., 1 (default))')
parser.add_argument('--memory', help='Memory to reserve for this experiment (e.g., 1GB)')
args = parser.parse_args()
main(args.param_file, args)
| allennlp-reading-comprehension-research-master | run_with_beaker.py |
| allennlp-reading-comprehension-research-master | tests/__init__.py |
#pylint: disable=unused-import
import pathlib
from allennlp.common.testing import ModelTestCase
from reading_comprehension.drop_models.augmented_qanet import AugmentedQANet
from reading_comprehension.data.drop_reader import DROPReader
class QANetModelTest(ModelTestCase):
PROJECT_ROOT = (pathlib.Path(__file__).parent / "..").resolve() # pylint: disable=no-member
MODULE_ROOT = PROJECT_ROOT / "reading_comprehension"
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "fixtures"
def setUp(self):
super().setUp()
self.set_up_model(self.FIXTURES_ROOT / "aug_qanet" / "experiment.json",
self.FIXTURES_ROOT / "aug_qanet" / "drop.json")
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
| allennlp-reading-comprehension-research-master | tests/test_aug_qanet.py |
def test_travis_integration():
# Remove this file once we have actual code and tests.
assert True
| allennlp-reading-comprehension-research-master | tests/test_travis.py |
| allennlp-reading-comprehension-research-master | reading_comprehension/__init__.py |
from allennlp.data.tokenizers import Token
def split_tokens_by_hyphen(tokens):
hyphens = ["-", "–", "~"]
new_tokens = []
def split_token_by_hyphen(token, hyphen):
split_tokens = []
char_offset = token.idx
for sub_str in token.text.split(hyphen):
if sub_str:
split_tokens.append(Token(text=sub_str, idx=char_offset))
char_offset += len(sub_str)
split_tokens.append(Token(text=hyphen, idx=char_offset))
char_offset += len(hyphen)
if split_tokens:
split_tokens.pop(-1)
char_offset -= len(hyphen)
return split_tokens
else:
return [token]
for token in tokens:
if any(hyphen in token.text for hyphen in hyphens):
unsplit_tokens, split_tokens = [token], []
for hyphen in hyphens:
for unsplit_token in unsplit_tokens:
if hyphen in unsplit_token.text:  # check the current sub-token, not the original token
split_tokens += split_token_by_hyphen(unsplit_token, hyphen)
else:
split_tokens.append(unsplit_token)
unsplit_tokens, split_tokens = split_tokens, []
new_tokens += unsplit_tokens
else:
new_tokens.append(token)
return new_tokens
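# Illustrative example (added): a single Token("1914-1918", idx=10) becomes
# [Token("1914", 10), Token("-", 14), Token("1918", 15)]; tokens that contain none of the
# listed hyphens are passed through unchanged.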
| allennlp-reading-comprehension-research-master | reading_comprehension/utils.py |
import string
import re
from typing import Tuple, List, Union
from overrides import overrides
from allennlp.tools.squad_eval import metric_max_over_ground_truths
from allennlp.training.metrics.metric import Metric
from reading_comprehension.data.drop_official_evaluate import get_metrics as drop_em_and_f1
from reading_comprehension.data.drop_official_evaluate import to_string as convert_annotation_to_string
STOPWORDS = set(["a", "an", "the"])
PUNCTUATIONS = set(string.punctuation)
def string_to_bag(raw_text):
text = raw_text.lower()
text_tokens = set()
for token in text.strip().split(" "):
if not re.match(r"\d*\.\d+", token):
token = ''.join(ch for ch in token if ch not in PUNCTUATIONS)
if token != '':
text_tokens.add(token)
return set(text_tokens) - STOPWORDS
def bag_of_words_exact_match(prediction: str, ground_truth: str):
return string_to_bag(prediction) == string_to_bag(ground_truth)
def bag_of_words_f1(prediction: str, ground_truth: str):
prediction_bag = string_to_bag(prediction)
gold_bag = string_to_bag(ground_truth)
hit = len(gold_bag.intersection(prediction_bag))
if hit > 0:
precision = 1.0 * hit / len(prediction_bag)
recall = 1.0 * hit / len(gold_bag)
return 2.0 * precision * recall / (precision + recall)
else:
return 0.0
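# Illustrative example (added): bag_of_words_f1("the Bears", "Chicago Bears") == 2/3,
# since "the" is dropped as a stopword, leaving one hit out of a one-token prediction bag
# and a two-token gold bag (precision 1.0, recall 0.5).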
@Metric.register("drop")
class DropEmAndF1(Metric):
"""
This :class:`Metric` takes the best span string computed by a model, along with the answer
strings labeled in the data, and computes exact match and F1 scores based on bag-of-words overlap.
"""
def __init__(self) -> None:
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
@overrides
def __call__(self, prediction: Union[str, List], ground_truths: List):
"""
Parameters
----------
prediction: ``Union[str, List]``
The predicted answer from the model being evaluated. This could be a string, or a list of
strings when multiple spans are predicted as the answer.
ground_truths: ``List``
All the ground truth answer annotations.
"""
ground_truth_answer_strings = [convert_annotation_to_string(annotation)[0] for annotation in ground_truths]
# pylint: disable=unused-variable
ground_truth_answer_types = [convert_annotation_to_string(annotation)[1] for annotation in ground_truths]
exact_match, f1_score = metric_max_over_ground_truths(
drop_em_and_f1,
prediction,
ground_truth_answer_strings
)
self._total_em += exact_match
self._total_f1 += f1_score
self._count += 1
@overrides
def get_metric(self, reset: bool = False) -> Tuple[float, float]:
"""
Returns
-------
Average exact match and F1 score (in that order), as computed by the official DROP
evaluation script, over all inputs.
"""
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return exact_match, f1_score
@overrides
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"DropEmAndF1(em={self._total_em}, f1={self._total_f1})"
| allennlp-reading-comprehension-research-master | reading_comprehension/drop_metrics.py |
import sys
from allennlp.predictors import Predictor
from allennlp.models.archival import load_archive
from allennlp.common.util import import_submodules
# The path to the augmented qanet project dir
sys.path.append('../../')
import_submodules('reading_comprehension')
# This maps from the name of the task
# to the ``DemoModel`` indicating the location of the trained model
# and the type of the ``Predictor``. This is necessary, as you might
# have multiple models (for example, a NER tagger and a POS tagger)
# that have the same ``Predictor`` wrapper. The corresponding model
# will be served at the `/predict/<name-of-task>` API endpoint.
class DemoModel:
"""
A demo model is determined by both an archive file
(representing the trained model)
and a choice of predictor
"""
def __init__(self, archive_file: str, predictor_name: str) -> None:
self.archive_file = archive_file
self.predictor_name = predictor_name
def predictor(self) -> Predictor:
archive = load_archive(self.archive_file)
return Predictor.from_archive(archive, self.predictor_name)
MODELS = {'machine-comprehension': DemoModel('../../model.tar.gz', # the path to the model archive file
'machine-comprehension')}
| allennlp-reading-comprehension-research-master | reading_comprehension/demo/models.py |
| allennlp-reading-comprehension-research-master | reading_comprehension/demo/__init__.py |
from typing import Any, Dict, List, Optional
import logging
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.models.reading_comprehension.util import get_best_span
from reading_comprehension.drop_metrics import DropEmAndF1
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("bert_rc_marginal")
class BertRcMarginal(Model):
"""
This class adapts the BERT RC model to do question answering on the DROP dataset.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._span_start_predictor = torch.nn.Linear(self._text_field_embedder.get_output_dim(), 1)
self._span_end_predictor = torch.nn.Linear(self._text_field_embedder.get_output_dim(), 1)
self._drop_metrics = DropEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout)
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
question_and_passage: Dict[str, torch.LongTensor],
answer_as_passage_spans: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ, unused-argument
# logger.info("="*10)
# logger.info([len(metadata[i]["passage_tokens"]) for i in range(len(metadata))])
# logger.info([len(metadata[i]["question_tokens"]) for i in range(len(metadata))])
# logger.info(question_and_passage["bert"].shape)
# The segment labels should be as following:
# <CLS> + question_word_pieces + <SEP> + passage_word_pieces + <SEP>
# 0 0 0 1 1
# We get this in a tricky way here
expanded_question_bert_tensor = torch.zeros_like(question_and_passage["bert"])
expanded_question_bert_tensor[:, :question["bert"].shape[1]] = question["bert"]
segment_labels = (question_and_passage["bert"] - expanded_question_bert_tensor > 0).long()
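# Added note: the subtraction above assumes the question word pieces form a strict prefix of
# `question_and_passage` and that 0 is the padding id, so positions covered by the question
# cancel to zero (segment 0) while the remaining passage positions stay positive (segment 1).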
question_and_passage["segment_labels"] = segment_labels
embedded_question_and_passage = self._text_field_embedder(question_and_passage)
# We also get the passage mask for the concatenated question and passage in a similar way
expanded_question_mask = torch.zeros_like(question_and_passage["mask"])
# We shift the 1s to one column right here, to mask the [SEP] token in the middle
expanded_question_mask[:, 1:question["mask"].shape[1]+1] = question["mask"]
expanded_question_mask[:, 0] = 1
passage_mask = question_and_passage["mask"] - expanded_question_mask
batch_size = embedded_question_and_passage.size(0)
span_start_logits = self._span_start_predictor(embedded_question_and_passage).squeeze(-1)
span_end_logits = self._span_end_predictor(embedded_question_and_passage).squeeze(-1)
# Shape: (batch_size, passage_length)
passage_span_start_log_probs = util.masked_log_softmax(span_start_logits, passage_mask)
passage_span_end_log_probs = util.masked_log_softmax(span_end_logits, passage_mask)
passage_span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e32)
passage_span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e32)
best_passage_span = get_best_span(passage_span_start_logits, passage_span_end_logits)
output_dict = {"passage_span_start_probs": passage_span_start_log_probs.exp(),
"passage_span_end_probs": passage_span_end_log_probs.exp()}
# If answer is given, compute the loss for training.
if answer_as_passage_spans is not None:
# Shape: (batch_size, # of answer spans)
gold_passage_span_starts = answer_as_passage_spans[:, :, 0]
gold_passage_span_ends = answer_as_passage_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_passage_span_mask = (gold_passage_span_starts != -1).long()
clamped_gold_passage_span_starts = util.replace_masked_values(gold_passage_span_starts,
gold_passage_span_mask,
0)
clamped_gold_passage_span_ends = util.replace_masked_values(gold_passage_span_ends,
gold_passage_span_mask,
0)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_span_starts = \
torch.gather(passage_span_start_log_probs, 1, clamped_gold_passage_span_starts)
log_likelihood_for_passage_span_ends = \
torch.gather(passage_span_end_log_probs, 1, clamped_gold_passage_span_ends)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_spans = \
log_likelihood_for_passage_span_starts + log_likelihood_for_passage_span_ends
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_passage_spans = \
util.replace_masked_values(log_likelihood_for_passage_spans, gold_passage_span_mask, -1e32)
# Shape: (batch_size, )
log_marginal_likelihood_for_passage_span = util.logsumexp(log_likelihood_for_passage_spans)
output_dict["loss"] = - log_marginal_likelihood_for_passage_span.mean()
# Compute the metrics and add the tokenized input to the output.
if metadata is not None:
output_dict["question_id"] = []
output_dict["answer"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]['question_tokens'])
passage_tokens.append(metadata[i]['passage_tokens'])
# We did not consider multi-mention answers here
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['passage_token_offsets']
predicted_span = tuple(best_passage_span[i].detach().cpu().numpy())
# Remove the offsets of question tokens and the [SEP] token
predicted_span = (predicted_span[0] - len(metadata[i]['question_tokens']) - 1,
predicted_span[1] - len(metadata[i]['question_tokens']) - 1)
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_answer_str = passage_str[start_offset:end_offset]
output_dict["question_id"].append(metadata[i]["question_id"])
output_dict["answer"].append(best_answer_str)
answer_annotations = metadata[i].get('answer_annotations', [])
if answer_annotations:
self._drop_metrics(best_answer_str, answer_annotations)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._drop_metrics.get_metric(reset)
return {'em': exact_match, 'f1': f1_score}
| allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/bert_rc_marginal.py |
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from allennlp.models.reading_comprehension.util import get_best_span
from reading_comprehension.drop_metrics import DropEmAndF1
# TODO: Change this to marginal loss
@Model.register("passage_only")
class PassageOnlyRcModel(Model):
"""
This class encodes the passage with an encoder and then predicts an answer span
without considering the question.
If you want to test a question-only baseline, just replace the passage with the question
when loading data in the data reader.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
encoding_layer: Seq2SeqEncoder,
dropout_prob: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
text_embed_dim = text_field_embedder.get_output_dim()
encoding_in_dim = encoding_layer.get_input_dim()
encoding_out_dim = encoding_layer.get_output_dim()
self._text_field_embedder = text_field_embedder
self._embedding_proj_layer = torch.nn.Linear(text_embed_dim, encoding_in_dim)
self._highway_layer = Highway(encoding_in_dim, num_highway_layers)
self._encoding_layer = encoding_layer
self._span_start_predictor = torch.nn.Linear(encoding_out_dim * 2, 1)
self._span_end_predictor = torch.nn.Linear(encoding_out_dim * 2, 1)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._drop_metrics = DropEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout_prob)
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ, unused-argument
passage_mask = util.get_text_field_mask(passage).float()
embedded_passage = self._dropout(self._text_field_embedder(passage))
batch_size = embedded_passage.size(0)
embedded_passage = self._highway_layer(self._embedding_proj_layer(embedded_passage))
encoded_passage_list = [embedded_passage]
for _ in range(3):
encoded_passage = self._dropout(self._encoding_layer(encoded_passage_list[-1], passage_mask))
encoded_passage_list.append(encoded_passage)
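# Added note: after the three shared-weight encoder passes, encoded_passage_list is
# [embedded, M0, M1, M2]; as in QANet, the span-start head reads [M0; M1] and the
# span-end head reads [M0; M2] below.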
# Shape: (batch_size, passage_length, modeling_dim * 2)
span_start_input = torch.cat([encoded_passage_list[-3], encoded_passage_list[-2]], dim=-1)
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length, modeling_dim * 2)
span_end_input = torch.cat([encoded_passage_list[-3], encoded_passage_list[-1]], dim=-1)
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e32)
span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e32)
best_span = get_best_span(span_start_logits, span_end_logits)
output_dict = {}
# Compute the loss for training.
if span_start is not None:
loss = nll_loss(util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1))
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1))
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.stack([span_start, span_end], -1))
output_dict["loss"] = loss
# Compute the metrics and add the tokenized input to the output.
if metadata is not None:
output_dict["question_id"] = []
output_dict["answer"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]['question_tokens'])
passage_tokens.append(metadata[i]['passage_tokens'])
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['token_offsets']
predicted_span = tuple(best_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict["question_id"].append(metadata[i]["question_id"])
output_dict["answer"].append(best_span_string)
answer_annotations = metadata[i].get('answer_annotations', [])
if answer_annotations:
self._drop_metrics(best_span_string, answer_annotations)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._drop_metrics.get_metric(reset)
return {
'start_acc': self._span_start_accuracy.get_metric(reset),
'end_acc': self._span_end_accuracy.get_metric(reset),
'span_acc': self._span_accuracy.get_metric(reset),
'em': exact_match,
'f1': f1_score,
}
| allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/passage_only.py |
| allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/__init__.py |
from typing import Any, Dict, List, Optional
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.bidaf import BidirectionalAttentionFlow
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from reading_comprehension.drop_metrics import DropEmAndF1
@Model.register("bidaf_marginal")
class BiDAFMarginal(Model):
"""
This class adapts the BiDAF model to do question answering on the DROP dataset.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
similarity_function: SimilarityFunction,
modeling_layer: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(BiDAFMarginal, self).__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(Highway(text_field_embedder.get_output_dim(),
num_highway_layers))
self._phrase_layer = phrase_layer
self._matrix_attention = LegacyMatrixAttention(similarity_function)
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
span_start_input_dim = encoding_dim * 4 + modeling_dim
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
# Bidaf has lots of layer dimensions which need to match up - these aren't necessarily
# obvious from the configuration files, so we check here.
check_dimensions_match(modeling_layer.get_input_dim(), 4 * encoding_dim,
"modeling layer input dim", "4 * encoding dim")
check_dimensions_match(text_field_embedder.get_output_dim(), phrase_layer.get_input_dim(),
"text field embedder output dim", "phrase layer input dim")
check_dimensions_match(span_end_encoder.get_input_dim(), 4 * encoding_dim + 3 * modeling_dim,
"span end encoder input dim", "4 * encoding dim + 3 * modeling dim")
self._drop_metrics = DropEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout)
self._mask_lstms = mask_lstms
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
numbers_in_passage: Dict[str, torch.LongTensor],
number_indices: torch.LongTensor,
answer_as_passage_spans: torch.LongTensor = None,
answer_as_question_spans: torch.LongTensor = None,
answer_as_add_sub_expressions: torch.LongTensor = None,
answer_as_counts: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ, unused-argument
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question).float()
passage_mask = util.get_text_field_mask(passage).float()
question_lstm_mask = question_mask if self._mask_lstms else None
passage_lstm_mask = passage_mask if self._mask_lstms else None
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size(-1)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = util.masked_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = util.replace_masked_values(passage_question_similarity,
question_mask.unsqueeze(1),
-1e7)
# Shape: (batch_size, passage_length)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
# Shape: (batch_size, passage_length)
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
# Shape: (batch_size, encoding_dim)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(batch_size,
passage_length,
encoding_dim)
# Shape: (batch_size, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat([encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * tiled_question_passage_vector],
dim=-1)
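# Added note: this concatenation is the standard BiDAF "G" representation
# [H; U~; H * U~; H * H~], combining the passage encoding with the context-to-query and
# query-to-context attention vectors.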
modeled_passage = self._dropout(self._modeling_layer(final_merged_passage, passage_lstm_mask))
modeling_dim = modeled_passage.size(-1)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim)
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=-1))
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length)
span_start_probs = util.masked_softmax(span_start_logits, passage_mask)
# Shape: (batch_size, modeling_dim)
span_start_representation = util.weighted_sum(modeled_passage, span_start_probs)
# Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation = span_start_representation.unsqueeze(1).expand(batch_size,
passage_length,
modeling_dim)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim * 3)
span_end_representation = torch.cat([final_merged_passage,
modeled_passage,
tiled_start_representation,
modeled_passage * tiled_start_representation],
dim=-1)
# Shape: (batch_size, passage_length, encoding_dim)
encoded_span_end = self._dropout(self._span_end_encoder(span_end_representation,
passage_lstm_mask))
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_input = self._dropout(torch.cat([final_merged_passage, encoded_span_end], dim=-1))
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
# Shape: (batch_size, passage_length)
passage_span_start_log_probs = util.masked_log_softmax(span_start_logits, passage_mask)
passage_span_end_log_probs = util.masked_log_softmax(span_end_logits, passage_mask)
passage_span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e7)
passage_span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e7)
# Shape: (batch_size, 2)
best_passage_span = \
BidirectionalAttentionFlow.get_best_span(passage_span_start_logits, passage_span_end_logits)
output_dict = {"passage_question_attention": passage_question_attention,
"passage_span_start_probs": passage_span_start_log_probs.exp(),
"passage_span_end_probs": passage_span_end_log_probs.exp()}
# If answer is given, compute the loss for training.
if answer_as_passage_spans is not None:
# Shape: (batch_size, # of answer spans)
gold_passage_span_starts = answer_as_passage_spans[:, :, 0]
gold_passage_span_ends = answer_as_passage_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_passage_span_mask = (gold_passage_span_starts != -1).long()
clamped_gold_passage_span_starts = util.replace_masked_values(gold_passage_span_starts,
gold_passage_span_mask,
0)
clamped_gold_passage_span_ends = util.replace_masked_values(gold_passage_span_ends,
gold_passage_span_mask,
0)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_span_starts = \
torch.gather(passage_span_start_log_probs, 1, clamped_gold_passage_span_starts)
log_likelihood_for_passage_span_ends = \
torch.gather(passage_span_end_log_probs, 1, clamped_gold_passage_span_ends)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_spans = \
log_likelihood_for_passage_span_starts + log_likelihood_for_passage_span_ends
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_passage_spans = \
util.replace_masked_values(log_likelihood_for_passage_spans, gold_passage_span_mask, -1e32)
# Shape: (batch_size, )
log_marginal_likelihood_for_passage_span = util.logsumexp(log_likelihood_for_passage_spans)
output_dict["loss"] = - log_marginal_likelihood_for_passage_span.mean()
# Compute the metrics and add the tokenized input to the output.
if metadata is not None:
output_dict["question_id"] = []
output_dict["answer"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]['question_tokens'])
passage_tokens.append(metadata[i]['passage_tokens'])
# We did not consider multi-mention answers here
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['passage_token_offsets']
predicted_span = tuple(best_passage_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_answer_str = passage_str[start_offset:end_offset]
output_dict["question_id"].append(metadata[i]["question_id"])
output_dict["answer"].append(best_answer_str)
answer_annotations = metadata[i].get('answer_annotations', [])
if answer_annotations:
self._drop_metrics(best_answer_str, answer_annotations)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._drop_metrics.get_metric(reset)
return {'em': exact_match, 'f1': f1_score}
| allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/bidaf_marginal.py |
from typing import Any, Dict, List, Iterable, Optional
import logging
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.util import get_best_span
from allennlp.modules import Highway
from allennlp.nn.activations import Activation
from allennlp.modules.feedforward import FeedForward
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import masked_softmax
from reading_comprehension.drop_metrics import DropEmAndF1
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("augmented_qanet")
class AugmentedQANet(Model):
"""
This class adapts the QANet model to do question answering on the DROP dataset.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention_layer: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
dropout_prob: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
answering_abilities: Iterable[str] = ("passage_span_extraction",
"question_span_extraction",
"addition_subtraction",
"counting")) -> None:
super().__init__(vocab, regularizer)
# The answering abilities to include in this model
self.answering_abilities = list(answering_abilities)
text_embed_dim = text_field_embedder.get_output_dim()
encoding_in_dim = phrase_layer.get_input_dim()
encoding_out_dim = phrase_layer.get_output_dim()
modeling_in_dim = modeling_layer.get_input_dim()
modeling_out_dim = modeling_layer.get_output_dim()
self._text_field_embedder = text_field_embedder
self._embedding_proj_layer = torch.nn.Linear(text_embed_dim, encoding_in_dim)
self._highway_layer = Highway(encoding_in_dim, num_highway_layers)
self._encoding_proj_layer = torch.nn.Linear(encoding_in_dim, encoding_in_dim)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention_layer
self._modeling_proj_layer = torch.nn.Linear(encoding_out_dim * 4, modeling_in_dim)
self._modeling_layer = modeling_layer
self._passage_weights_predictor = torch.nn.Linear(modeling_out_dim, 1)
self._question_weights_predictor = torch.nn.Linear(encoding_out_dim, 1)
if len(self.answering_abilities) > 1:
self._answer_ability_predictor = FeedForward(modeling_out_dim + encoding_out_dim,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim,
len(self.answering_abilities)],
num_layers=2,
dropout=dropout_prob)
if "passage_span_extraction" in self.answering_abilities:
self._passage_span_extraction_index = self.answering_abilities.index("passage_span_extraction")
self._passage_span_start_predictor = FeedForward(modeling_out_dim * 2,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2)
self._passage_span_end_predictor = FeedForward(modeling_out_dim * 2,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2)
if "question_span_extraction" in answering_abilities:
self._question_span_extraction_index = self.answering_abilities.index("question_span_extraction")
self._question_span_start_predictor = FeedForward(modeling_out_dim * 2,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2)
self._question_span_end_predictor = FeedForward(modeling_out_dim * 2,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2)
if "addition_subtraction" in answering_abilities:
self._addition_subtraction_index = self.answering_abilities.index("addition_subtraction")
self._number_sign_predictor = FeedForward(modeling_out_dim * 3,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim, 3],
num_layers=2)
if "counting" in answering_abilities:
self._counting_index = self.answering_abilities.index("counting")
self._count_number_predictor = FeedForward(modeling_out_dim,
activations=[Activation.by_name('relu')(),
Activation.by_name('linear')()],
hidden_dims=[modeling_out_dim, 10],
num_layers=2)
self._drop_metrics = DropEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout_prob)
initializer(self)
def forward(self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
numbers_in_passage: Dict[str, torch.LongTensor],
number_indices: torch.LongTensor,
answer_as_passage_spans: torch.LongTensor = None,
answer_as_question_spans: torch.LongTensor = None,
answer_as_add_sub_expressions: torch.LongTensor = None,
answer_as_counts: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ, unused-argument
question_mask = util.get_text_field_mask(question).float()
passage_mask = util.get_text_field_mask(passage).float()
embedded_question = self._dropout(self._text_field_embedder(question))
embedded_passage = self._dropout(self._text_field_embedder(passage))
embedded_question = self._highway_layer(self._embedding_proj_layer(embedded_question))
embedded_passage = self._highway_layer(self._embedding_proj_layer(embedded_passage))
batch_size = embedded_question.size(0)
projected_embedded_question = self._encoding_proj_layer(embedded_question)
projected_embedded_passage = self._encoding_proj_layer(embedded_passage)
encoded_question = self._dropout(self._phrase_layer(projected_embedded_question, question_mask))
encoded_passage = self._dropout(self._phrase_layer(projected_embedded_passage, passage_mask))
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = masked_softmax(passage_question_similarity,
question_mask,
memory_efficient=True)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# Shape: (batch_size, question_length, passage_length)
question_passage_attention = masked_softmax(passage_question_similarity.transpose(1, 2),
passage_mask,
memory_efficient=True)
# Shape: (batch_size, passage_length, passage_length)
passage_attention_over_attention = torch.bmm(passage_question_attention, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
passage_passage_vectors = util.weighted_sum(encoded_passage, passage_attention_over_attention)
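# Added note: composing passage-to-question attention with question-to-passage attention
# (the bmm above) yields a passage-to-passage attention routed through the question; its
# weighted sum over the encoded passage gives the co-attended passage_passage_vectors used
# in the merged representation below.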
# Shape: (batch_size, passage_length, encoding_dim * 4)
merged_passage_attention_vectors = self._dropout(
torch.cat([encoded_passage, passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * passage_passage_vectors],
dim=-1))
# The recurrent modeling layers. Since these layers share the same parameters,
# we don't construct them conditioned on answering abilities.
modeled_passage_list = [self._modeling_proj_layer(merged_passage_attention_vectors)]
for _ in range(4):
modeled_passage = self._dropout(self._modeling_layer(modeled_passage_list[-1], passage_mask))
modeled_passage_list.append(modeled_passage)
# Pop the first one, which is input
modeled_passage_list.pop(0)
# The first modeling layer is used to calculate the vector representation of passage
passage_weights = self._passage_weights_predictor(modeled_passage_list[0]).squeeze(-1)
passage_weights = masked_softmax(passage_weights, passage_mask)
passage_vector = util.weighted_sum(modeled_passage_list[0], passage_weights)
# The vector representation of question is calculated based on the unmatched encoding,
# because we may want to infer the answer ability only based on the question words.
question_weights = self._question_weights_predictor(encoded_question).squeeze(-1)
question_weights = masked_softmax(question_weights, question_mask)
question_vector = util.weighted_sum(encoded_question, question_weights)
if len(self.answering_abilities) > 1:
# Shape: (batch_size, number_of_abilities)
answer_ability_logits = \
self._answer_ability_predictor(torch.cat([passage_vector, question_vector], -1))
answer_ability_log_probs = torch.nn.functional.log_softmax(answer_ability_logits, -1)
best_answer_ability = torch.argmax(answer_ability_log_probs, 1)
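# Added note: the answering ability is treated as a latent choice. During training the
# per-ability marginal likelihoods are mixed with these ability log probabilities and
# marginalized with logsumexp; at prediction time the argmax ability selects which head's
# answer is returned.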
if "counting" in self.answering_abilities:
# Shape: (batch_size, 10)
count_number_logits = self._count_number_predictor(passage_vector)
count_number_log_probs = torch.nn.functional.log_softmax(count_number_logits, -1)
# Info about the best count number prediction
# Shape: (batch_size,)
best_count_number = torch.argmax(count_number_log_probs, -1)
best_count_log_prob = \
torch.gather(count_number_log_probs, 1, best_count_number.unsqueeze(-1)).squeeze(-1)
if len(self.answering_abilities) > 1:
best_count_log_prob += answer_ability_log_probs[:, self._counting_index]
if "passage_span_extraction" in self.answering_abilities:
# Shape: (batch_size, passage_length, modeling_dim * 2)
passage_for_span_start = torch.cat([modeled_passage_list[0], modeled_passage_list[1]], dim=-1)
# Shape: (batch_size, passage_length)
passage_span_start_logits = self._passage_span_start_predictor(passage_for_span_start).squeeze(-1)
# Shape: (batch_size, passage_length, modeling_dim * 2)
passage_for_span_end = torch.cat([modeled_passage_list[0], modeled_passage_list[2]], dim=-1)
# Shape: (batch_size, passage_length)
passage_span_end_logits = self._passage_span_end_predictor(passage_for_span_end).squeeze(-1)
# Shape: (batch_size, passage_length)
passage_span_start_log_probs = util.masked_log_softmax(passage_span_start_logits, passage_mask)
passage_span_end_log_probs = util.masked_log_softmax(passage_span_end_logits, passage_mask)
# Info about the best passage span prediction
passage_span_start_logits = util.replace_masked_values(passage_span_start_logits, passage_mask, -1e7)
passage_span_end_logits = util.replace_masked_values(passage_span_end_logits, passage_mask, -1e7)
# Shape: (batch_size, 2)
best_passage_span = get_best_span(passage_span_start_logits, passage_span_end_logits)
# Shape: (batch_size,)
best_passage_start_log_probs = \
torch.gather(passage_span_start_log_probs, 1, best_passage_span[:, 0].unsqueeze(-1)).squeeze(-1)
best_passage_end_log_probs = \
torch.gather(passage_span_end_log_probs, 1, best_passage_span[:, 1].unsqueeze(-1)).squeeze(-1)
# Shape: (batch_size,)
best_passage_span_log_prob = best_passage_start_log_probs + best_passage_end_log_probs
if len(self.answering_abilities) > 1:
best_passage_span_log_prob += answer_ability_log_probs[:, self._passage_span_extraction_index]
if "question_span_extraction" in self.answering_abilities:
# Shape: (batch_size, question_length, encoding_dim + modeling_dim)
encoded_question_for_span_prediction = \
torch.cat([encoded_question,
passage_vector.unsqueeze(1).repeat(1, encoded_question.size(1), 1)], -1)
question_span_start_logits = \
self._question_span_start_predictor(encoded_question_for_span_prediction).squeeze(-1)
# Shape: (batch_size, question_length)
question_span_end_logits = \
self._question_span_end_predictor(encoded_question_for_span_prediction).squeeze(-1)
question_span_start_log_probs = util.masked_log_softmax(question_span_start_logits, question_mask)
question_span_end_log_probs = util.masked_log_softmax(question_span_end_logits, question_mask)
# Info about the best question span prediction
question_span_start_logits = \
util.replace_masked_values(question_span_start_logits, question_mask, -1e7)
question_span_end_logits = \
util.replace_masked_values(question_span_end_logits, question_mask, -1e7)
# Shape: (batch_size, 2)
best_question_span = get_best_span(question_span_start_logits, question_span_end_logits)
# Shape: (batch_size,)
best_question_start_log_probs = \
torch.gather(question_span_start_log_probs, 1, best_question_span[:, 0].unsqueeze(-1)).squeeze(-1)
best_question_end_log_probs = \
torch.gather(question_span_end_log_probs, 1, best_question_span[:, 1].unsqueeze(-1)).squeeze(-1)
# Shape: (batch_size,)
best_question_span_log_prob = best_question_start_log_probs + best_question_end_log_probs
if len(self.answering_abilities) > 1:
best_question_span_log_prob += answer_ability_log_probs[:, self._question_span_extraction_index]
if "addition_subtraction" in self.answering_abilities:
# Shape: (batch_size, # of numbers in the passage)
number_indices = number_indices.squeeze(-1)
number_mask = (number_indices != -1).long()
clamped_number_indices = util.replace_masked_values(number_indices, number_mask, 0)
encoded_passage_for_numbers = torch.cat([modeled_passage_list[0], modeled_passage_list[3]], dim=-1)
# Shape: (batch_size, # of numbers in the passage, modeling_dim * 2)
encoded_numbers = torch.gather(
encoded_passage_for_numbers,
1,
clamped_number_indices.unsqueeze(-1).expand(-1, -1, encoded_passage_for_numbers.size(-1)))
# Shape: (batch_size, # of numbers in the passage, modeling_dim * 3)
encoded_numbers = torch.cat(
[encoded_numbers, passage_vector.unsqueeze(1).repeat(1, encoded_numbers.size(1), 1)], -1)
# Shape: (batch_size, # of numbers in the passage, 3)
number_sign_logits = self._number_sign_predictor(encoded_numbers)
number_sign_log_probs = torch.nn.functional.log_softmax(number_sign_logits, -1)
# Shape: (batch_size, # of numbers in passage).
best_signs_for_numbers = torch.argmax(number_sign_log_probs, -1)
# For padding numbers, the best sign is masked to 0 (i.e. not included in the sum).
best_signs_for_numbers = util.replace_masked_values(best_signs_for_numbers, number_mask, 0)
# Shape: (batch_size, # of numbers in passage)
best_signs_log_probs = torch.gather(
number_sign_log_probs, 2, best_signs_for_numbers.unsqueeze(-1)).squeeze(-1)
# The probabilities at the masked positions should be 1, so that they do not affect the joint probability.
# TODO: this is not quite right, since if there are many numbers in the passage,
# TODO: the joint probability would be very small.
best_signs_log_probs = util.replace_masked_values(best_signs_log_probs, number_mask, 0)
# Shape: (batch_size,)
best_combination_log_prob = best_signs_log_probs.sum(-1)
if len(self.answering_abilities) > 1:
best_combination_log_prob += answer_ability_log_probs[:, self._addition_subtraction_index]
output_dict = {}
# If answer is given, compute the loss.
if answer_as_passage_spans is not None or answer_as_question_spans is not None \
or answer_as_add_sub_expressions is not None or answer_as_counts is not None:
log_marginal_likelihood_list = []
for answering_ability in self.answering_abilities:
if answering_ability == "passage_span_extraction":
# Shape: (batch_size, # of answer spans)
gold_passage_span_starts = answer_as_passage_spans[:, :, 0]
gold_passage_span_ends = answer_as_passage_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_passage_span_mask = (gold_passage_span_starts != -1).long()
clamped_gold_passage_span_starts = \
util.replace_masked_values(gold_passage_span_starts, gold_passage_span_mask, 0)
clamped_gold_passage_span_ends = \
util.replace_masked_values(gold_passage_span_ends, gold_passage_span_mask, 0)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_span_starts = \
torch.gather(passage_span_start_log_probs, 1, clamped_gold_passage_span_starts)
log_likelihood_for_passage_span_ends = \
torch.gather(passage_span_end_log_probs, 1, clamped_gold_passage_span_ends)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_spans = \
log_likelihood_for_passage_span_starts + log_likelihood_for_passage_span_ends
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_passage_spans = \
util.replace_masked_values(log_likelihood_for_passage_spans, gold_passage_span_mask, -1e7)
# Shape: (batch_size, )
log_marginal_likelihood_for_passage_span = util.logsumexp(log_likelihood_for_passage_spans)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_passage_span)
elif answering_ability == "question_span_extraction":
# Shape: (batch_size, # of answer spans)
gold_question_span_starts = answer_as_question_spans[:, :, 0]
gold_question_span_ends = answer_as_question_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_question_span_mask = (gold_question_span_starts != -1).long()
clamped_gold_question_span_starts = \
util.replace_masked_values(gold_question_span_starts, gold_question_span_mask, 0)
clamped_gold_question_span_ends = \
util.replace_masked_values(gold_question_span_ends, gold_question_span_mask, 0)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_question_span_starts = \
torch.gather(question_span_start_log_probs, 1, clamped_gold_question_span_starts)
log_likelihood_for_question_span_ends = \
torch.gather(question_span_end_log_probs, 1, clamped_gold_question_span_ends)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_question_spans = \
log_likelihood_for_question_span_starts + log_likelihood_for_question_span_ends
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_question_spans = \
util.replace_masked_values(log_likelihood_for_question_spans,
gold_question_span_mask,
-1e7)
# Shape: (batch_size, )
# pylint: disable=invalid-name
log_marginal_likelihood_for_question_span = \
util.logsumexp(log_likelihood_for_question_spans)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_question_span)
elif answering_ability == "addition_subtraction":
# The padded add-sub combinations use 0 as the signs for all numbers, and we mask them here.
# Shape: (batch_size, # of combinations)
gold_add_sub_mask = (answer_as_add_sub_expressions.sum(-1) > 0).float()
# Shape: (batch_size, # of numbers in the passage, # of combinations)
gold_add_sub_signs = answer_as_add_sub_expressions.transpose(1, 2)
# Shape: (batch_size, # of numbers in the passage, # of combinations)
log_likelihood_for_number_signs = torch.gather(number_sign_log_probs, 2, gold_add_sub_signs)
# The log likelihood at the masked positions should be 0,
# so that they do not affect the joint probability.
log_likelihood_for_number_signs = \
util.replace_masked_values(log_likelihood_for_number_signs, number_mask.unsqueeze(-1), 0)
# Shape: (batch_size, # of combinations)
log_likelihood_for_add_subs = log_likelihood_for_number_signs.sum(1)
# For those padded combinations, we set their log probabilities to be very small negative value
log_likelihood_for_add_subs = \
util.replace_masked_values(log_likelihood_for_add_subs, gold_add_sub_mask, -1e7)
# Shape: (batch_size, )
log_marginal_likelihood_for_add_sub = util.logsumexp(log_likelihood_for_add_subs)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_add_sub)
elif answering_ability == "counting":
# Count answers are padded with label -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
# Shape: (batch_size, # of count answers)
gold_count_mask = (answer_as_counts != -1).long()
# Shape: (batch_size, # of count answers)
clamped_gold_counts = util.replace_masked_values(answer_as_counts, gold_count_mask, 0)
log_likelihood_for_counts = torch.gather(count_number_log_probs, 1, clamped_gold_counts)
# For those padded spans, we set their log probabilities to be very small negative value
log_likelihood_for_counts = \
util.replace_masked_values(log_likelihood_for_counts, gold_count_mask, -1e7)
# Shape: (batch_size, )
log_marginal_likelihood_for_count = util.logsumexp(log_likelihood_for_counts)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_count)
else:
raise ValueError(f"Unsupported answering ability: {answering_ability}")
if len(self.answering_abilities) > 1:
                # Add the ability log probabilities if there is more than one ability
all_log_marginal_likelihoods = torch.stack(log_marginal_likelihood_list, dim=-1)
all_log_marginal_likelihoods = all_log_marginal_likelihoods + answer_ability_log_probs
marginal_log_likelihood = util.logsumexp(all_log_marginal_likelihoods)
else:
marginal_log_likelihood = log_marginal_likelihood_list[0]
output_dict["loss"] = - marginal_log_likelihood.mean()
# Compute the metrics and add the tokenized input to the output.
if metadata is not None:
output_dict["question_id"] = []
output_dict["answer"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]['question_tokens'])
passage_tokens.append(metadata[i]['passage_tokens'])
if len(self.answering_abilities) > 1:
predicted_ability_str = self.answering_abilities[best_answer_ability[i].detach().cpu().numpy()]
else:
predicted_ability_str = self.answering_abilities[0]
# We did not consider multi-mention answers here
if predicted_ability_str == "passage_span_extraction":
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['passage_token_offsets']
predicted_span = tuple(best_passage_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
predicted_answer = passage_str[start_offset:end_offset]
elif predicted_ability_str == "question_span_extraction":
question_str = metadata[i]['original_question']
offsets = metadata[i]['question_token_offsets']
predicted_span = tuple(best_question_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
predicted_answer = question_str[start_offset:end_offset]
elif predicted_ability_str == "addition_subtraction": # plus_minus combination answer
original_numbers = metadata[i]['original_numbers']
sign_remap = {0: 0, 1: 1, 2: -1}
predicted_signs = [sign_remap[it] for it in best_signs_for_numbers[i].detach().cpu().numpy()]
result = sum([sign * number for sign, number in zip(predicted_signs, original_numbers)])
predicted_answer = str(result)
elif predicted_ability_str == "counting":
predicted_count = best_count_number[i].detach().cpu().numpy()
predicted_answer = str(predicted_count)
else:
raise ValueError(f"Unsupported answer ability: {predicted_ability_str}")
output_dict["question_id"].append(metadata[i]["question_id"])
output_dict["answer"].append(predicted_answer)
answer_annotations = metadata[i].get('answer_annotations', [])
if answer_annotations:
self._drop_metrics(predicted_answer, answer_annotations)
# This is used for the demo.
output_dict["passage_question_attention"] = passage_question_attention
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
# The demo takes `best_span_str` as a key to find the predicted answer
output_dict["best_span_str"] = output_dict["answer"]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._drop_metrics.get_metric(reset)
return {'em': exact_match, 'f1': f1_score}
| allennlp-reading-comprehension-research-master | reading_comprehension/drop_models/augmented_qanet.py |
allennlp-reading-comprehension-research-master | reading_comprehension/data/__init__.py |
|
# pylint: skip-file
import json
import sys
import argparse
import string
import numpy as np
import re
# Copied from: https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def remove_punc(text):
if not is_number(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
else:
return text
def lower(text):
return text.lower()
# use the number instead of string, if it is one
def norm_number(text):
if is_number(text):
return str(float(text))
else:
return text
sp = ' '.join([white_space_fix(remove_articles(norm_number(remove_punc(lower(tok))))) for tok in s.split()])
return sp
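# Illustrative behaviour of `normalize_answer`, assuming the helpers above (hypothetical inputs):
#     normalize_answer("25 points")  -> "25.0 points"   (numbers canonicalized via float())
#     normalize_answer("Tom Brady!") -> "tom brady"     (lowercased, punctuation stripped)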
def answer_to_bags(answer):
span_bag = set()
raw_spans = []
if isinstance(answer, list) or isinstance(answer, tuple):
raw_spans = answer
if isinstance(answer, str):
raw_spans = [answer]
span_bag = set()
token_bag = set()
for raw_span in raw_spans:
span = normalize_answer(raw_span)
span_bag.add(span)
token_bag.update(span.split())
return span_bag, token_bag
def get_metrics(predicted, gold):
predicted_bags = answer_to_bags(predicted)
gold_bags = answer_to_bags(gold)
exact_match = 1 if predicted_bags[0] == gold_bags[0] else 0
intersection = len(gold_bags[1].intersection(predicted_bags[1]))
if len(predicted_bags[1]) == 0:
precision = 1.0
else:
precision = intersection / len(predicted_bags[1])
if len(gold_bags[1]) == 0:
recall = 1.0
else:
recall = intersection / len(gold_bags[1])
f1 = (2 * precision * recall) / (precision + recall) if not (precision == 0 and recall == 0) else 0
return exact_match, f1
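# Worked example of the bag-based scoring above (hypothetical answers):
#   predicted "Tom Brady" vs. gold ("Brady",):
#     span bags  {"tom brady"} vs. {"brady"}     -> exact_match = 0
#     token bags {"tom", "brady"} vs. {"brady"}  -> precision = 0.5, recall = 1.0, f1 ≈ 0.67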
def to_string(answer):
if answer["number"] != "":
return tuple([str(answer["number"])]), "number"
elif len(answer["spans"]) > 0:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
else:
return tuple(
["{0} {1} {2}".format(answer["date"]["day"], answer["date"]["month"], answer["date"]["year"])]), "date"
def _run_evaluation(annotations, predicted_answers):
"""
    Evaluation for programmatic use.
"""
exact_match = []
f1 = []
# for each type as well
type_to_em = {}
type_to_f1 = {}
for pid, annotation in annotations.items():
for qa_pair in annotation["qa_pairs"]:
query_id = qa_pair["query_id"]
max_em_score = 0
max_f1_score = 0
max_type = None
if query_id in predicted_answers:
if "answer" in predicted_answers[query_id]:
predicted = predicted_answers[query_id]["answer"]
else:
predicted = predicted_answers[query_id]
else:
print("Missing prediction for question: {}".format(query_id))
predicted = None
for answer in [qa_pair["answer"]] + qa_pair["validated_answers"]:
gold_answer, gold_type = to_string(answer)
em_score, f1_score = get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
if max_em_score == em_score or max_f1_score == f1_score: max_type = gold_type
exact_match.append(max_em_score)
f1.append(max_f1_score)
if max_type not in type_to_em:
type_to_em[max_type] = []
type_to_em[max_type].append(max_em_score)
if max_type not in type_to_f1:
type_to_f1[max_type] = []
type_to_f1[max_type].append(max_f1_score)
global_em = np.mean(exact_match)
global_f1 = np.mean(f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
print("----")
total = np.sum([len(v) for v in type_to_em.values()])
for typ in sorted(type_to_em.keys()):
print("{0}: {1} ({2:.2f}%)".format(typ, len(type_to_em[typ]), 100. * len(type_to_em[typ]) / total))
print(" Exact-match accuracy {0:.3f}".format(100. * np.mean(type_to_em[typ])))
print(" F1 score {0:.3f}".format(100. * np.mean(type_to_f1[typ])))
return global_em, global_f1
def run_evaluation(args):
predicted_answers = json.load(open(args.prediction_path, encoding='utf-8'))
annotations = json.load(open(args.gold_path, encoding='utf-8'))
return _run_evaluation(annotations, predicted_answers)
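# Expected input shapes, as consumed by the loops above (a sketch, not a formal schema):
#   gold file:       {passage_id: {"qa_pairs": [{"query_id": ..., "answer": {...},
#                                                "validated_answers": [...]}, ...]}, ...}
#   prediction file: {query_id: <answer string or list of spans>}
#                    or {query_id: {"answer": <answer string or list of spans>}}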
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Evaluate on DROP dataset')
parser.add_argument("--gold_path", type=str, required=False, default="drop_dataset_test.gold.json",
help='location of the gold file')
parser.add_argument("--prediction_path", type=str, required=True,
help='location of the prediction file')
args = parser.parse_args()
run_evaluation(args)
| allennlp-reading-comprehension-research-master | reading_comprehension/data/drop_official_evaluate.py |
import json
import logging
import itertools
import string
from typing import Dict, List, Union, Tuple, Any
from collections import defaultdict
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.reading_comprehension.util import make_reading_comprehension_instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.dataset_readers.reading_comprehension.util import IGNORED_TOKENS, STRIPPED_CHARACTERS
from allennlp.data.fields import Field, TextField, MetadataField, LabelField, ListField, \
SequenceLabelField, SpanField, IndexField
from word2number.w2n import word_to_num
from reading_comprehension.utils import split_tokens_by_hyphen
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
WORD_NUMBER_MAP = {"zero": 0, "one": 1, "two": 2, "three": 3, "four": 4,
"five": 5, "six": 6, "seven": 7, "eight": 8,
"nine": 9, "ten": 10, "eleven": 11, "twelve": 12,
"thirteen": 13, "fourteen": 14, "fifteen": 15,
"sixteen": 16, "seventeen": 17, "eighteen": 18, "nineteen": 19}
@DatasetReader.register("drop")
class DROPReader(DatasetReader):
def __init__(self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_when_all_empty: List[str] = None,
instance_format: str = "drop",
relaxed_span_match_for_finding_labels: bool = True) -> None:
"""
Reads a JSON-formatted DROP dataset file and returns a ``Dataset``
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
            Default is ``WordTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
lazy : ``bool``, optional (default=False)
If this is true, ``instances()`` will return an object whose ``__iter__`` method
reloads the dataset each time it's called. Otherwise, ``instances()`` returns a list.
passage_length_limit : ``int``, optional (default=None)
If specified, we will cut the passage if the length of passage exceeds this limit.
question_length_limit : ``int``, optional (default=None)
            If specified, we will cut the question if the length of the question exceeds this limit.
skip_when_all_empty: ``List[str]``, optional (default=None)
In some cases such as preparing for training examples, you may want to skip some examples
            when there are no gold labels. You can specify the conditions under which examples should be
            skipped. Currently, you can put "passage_span", "question_span", "addition_subtraction",
            or "counting" in this list, to tell the reader to skip an example when no such label is found.
If not specified, we will keep all the examples.
instance_format: ``str``, optional (default="drop")
            We want to test different kinds of models on the DROP dataset, and they may require
            different instance formats. Currently, we support three formats: "drop", "squad" and "bert".
relaxed_span_match_for_finding_labels : ``bool``, optional (default=True)
            The DROP dataset contains multi-span answers, and date-type answers usually cannot be
            matched to a single passage span either. In order to use as many examples as possible
to train the model, we may not want a strict match for such cases when finding the gold
span labels. If this argument is true, we will treat every span in the multi-span answers
as correct, and every token in the date answer as correct, too. Note that this will not
affect evaluation.
"""
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.passage_length_limit = passage_length_limit
self.question_length_limit = question_length_limit
        self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
        for item in self.skip_when_all_empty:
            assert item in ["passage_span", "question_span", "addition_subtraction", "counting"], \
                f"Unsupported skip type: {item}"
self.instance_format = instance_format
self.relaxed_span_match_for_finding_labels = relaxed_span_match_for_finding_labels
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
logger.info("Reading the dataset")
instances, skip_count = [], 0
for passage_id, passage_info in dataset.items():
passage_text = passage_info["passage"]
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
for question_answer in passage_info["qa_pairs"]:
question_id = question_answer["query_id"]
question_text = question_answer["question"].strip()
answer_annotations = []
if "answer" in question_answer:
answer_annotations.append(question_answer["answer"])
if "validated_answers" in question_answer:
answer_annotations += question_answer["validated_answers"]
instance = self.text_to_instance(question_text,
passage_text,
question_id,
passage_id,
answer_annotations,
passage_tokens)
if instance is not None:
instances.append(instance)
else:
skip_count += 1
# pylint: disable=logging-fstring-interpolation
logger.info(f"Skipped {skip_count} questions, kept {len(instances)} questions.")
return instances
@overrides
def text_to_instance(self, # type: ignore
question_text: str,
passage_text: str,
question_id: str = None,
passage_id: str = None,
answer_annotations: List[Dict] = None,
passage_tokens: List[Token] = None) -> Union[Instance, None]:
# pylint: disable=arguments-differ
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
question_tokens = self._tokenizer.tokenize(question_text)
question_tokens = split_tokens_by_hyphen(question_tokens)
# passage_text = question_text
# passage_tokens = question_tokens
if self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit]
if self.question_length_limit is not None:
question_tokens = question_tokens[: self.question_length_limit]
answer_type, answer_texts = None, []
if answer_annotations:
# Currently we only use the first annotated answer here, but actually this doesn't affect
# the training, because we only have one annotation for the train set.
answer_type, answer_texts = self.extract_answer_info_from_annotation(answer_annotations[0])
# Tokenize the answer text in order to find the matched span based on token
tokenized_answer_texts = []
for answer_text in answer_texts:
answer_tokens = self._tokenizer.tokenize(answer_text)
answer_tokens = split_tokens_by_hyphen(answer_tokens)
tokenized_answer_texts.append(' '.join(token.text for token in answer_tokens))
if self.instance_format == "squad":
valid_passage_spans = \
self.find_valid_spans(passage_tokens, tokenized_answer_texts) if tokenized_answer_texts else []
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append((len(passage_tokens) - 1, len(passage_tokens) - 1))
return make_reading_comprehension_instance(question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
valid_passage_spans,
# this `answer_texts` will not be used for evaluation
answer_texts,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"valid_passage_spans": valid_passage_spans,
"answer_annotations": answer_annotations})
elif self.instance_format == "bert":
question_concat_passage_tokens = question_tokens + [Token("[SEP]")] + passage_tokens
valid_passage_spans = []
for span in self.find_valid_spans(passage_tokens, tokenized_answer_texts):
# This span is for `question + [SEP] + passage`.
valid_passage_spans.append((span[0] + len(question_tokens) + 1,
span[1] + len(question_tokens) + 1))
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append((len(question_concat_passage_tokens) - 1,
len(question_concat_passage_tokens) - 1))
answer_info = {"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans}
return self.make_bert_drop_instance(question_tokens,
passage_tokens,
question_concat_passage_tokens,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"answer_annotations": answer_annotations})
elif self.instance_format == "drop":
numbers_in_passage = []
number_indices = []
for token_index, token in enumerate(passage_tokens):
number = self.convert_word_to_number(token.text)
if number is not None:
numbers_in_passage.append(number)
number_indices.append(token_index)
# hack to guarantee minimal length of padded number
numbers_in_passage.append(0)
number_indices.append(-1)
numbers_as_tokens = [Token(str(number)) for number in numbers_in_passage]
valid_passage_spans = \
self.find_valid_spans(passage_tokens, tokenized_answer_texts) if tokenized_answer_texts else []
valid_question_spans = \
self.find_valid_spans(question_tokens, tokenized_answer_texts) if tokenized_answer_texts else []
target_numbers = []
# `answer_texts` is a list of valid answers.
for answer_text in answer_texts:
number = self.convert_word_to_number(answer_text)
if number is not None:
target_numbers.append(number)
valid_signs_for_add_sub_expressions = []
valid_counts = []
if answer_type in ["number", "date"]:
valid_signs_for_add_sub_expressions = \
self.find_valid_add_sub_expressions(numbers_in_passage, target_numbers)
if answer_type in ["number"]:
# Currently we only support count number 0 ~ 9
numbers_for_count = list(range(10))
valid_counts = self.find_valid_counts(numbers_for_count, target_numbers)
type_to_answer_map = {"passage_span": valid_passage_spans,
"question_span": valid_question_spans,
"addition_subtraction": valid_signs_for_add_sub_expressions,
"counting": valid_counts}
if self.skip_when_all_empty \
and not any(type_to_answer_map[skip_type] for skip_type in self.skip_when_all_empty):
return None
answer_info = {"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
"answer_question_spans": valid_question_spans,
"signs_for_add_sub_expressions": valid_signs_for_add_sub_expressions,
"counts": valid_counts}
return self.make_marginal_drop_instance(question_tokens,
passage_tokens,
numbers_as_tokens,
number_indices,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"original_numbers": numbers_in_passage,
"passage_id": passage_id,
"question_id": question_id,
"answer_info": answer_info,
"answer_annotations": answer_annotations})
else:
raise ValueError(f"Expect the instance format to be \"drop\", \"squad\" or \"bert\", "
f"but got {self.instance_format}")
@staticmethod
def make_marginal_drop_instance(question_tokens: List[Token],
passage_tokens: List[Token],
number_tokens: List[Token],
number_indices: List[int],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
question_offsets = [(token.idx, token.idx + len(token.text)) for token in question_tokens]
# This is separate so we can reference it later with a known type.
fields["passage"] = TextField(passage_tokens, token_indexers)
fields["question"] = TextField(question_tokens, token_indexers)
number_index_fields = [IndexField(index, fields["passage"]) for index in number_indices]
fields["number_indices"] = ListField(number_index_fields)
        # This field is actually not required in the model;
        # it is only used to create the `answer_as_add_sub_expressions` field, which is a `SequenceLabelField`.
        # We cannot use the `number_indices` field for creating that, because the `ListField` will not be empty
        # when we want to create a new empty field. That would lead to an error.
fields["numbers_in_passage"] = TextField(number_tokens, token_indexers)
metadata = {"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_token_offsets": question_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
"number_tokens": [token.text for token in number_tokens],
"number_indices": number_indices}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields = \
[SpanField(span[0], span[1], fields["passage"]) for span in answer_info["answer_passage_spans"]]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, fields["passage"]))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
question_span_fields = \
[SpanField(span[0], span[1], fields["question"]) for span in answer_info["answer_question_spans"]]
if not question_span_fields:
question_span_fields.append(SpanField(-1, -1, fields["question"]))
fields["answer_as_question_spans"] = ListField(question_span_fields)
add_sub_signs_field = []
for signs_for_one_add_sub_expression in answer_info["signs_for_add_sub_expressions"]:
add_sub_signs_field.append(
SequenceLabelField(signs_for_one_add_sub_expression, fields["numbers_in_passage"]))
if not add_sub_signs_field:
add_sub_signs_field.append(
SequenceLabelField([0] * len(fields["numbers_in_passage"]), fields["numbers_in_passage"]))
fields["answer_as_add_sub_expressions"] = ListField(add_sub_signs_field)
count_fields = [LabelField(count_label, skip_indexing=True) for count_label in answer_info["counts"]]
if not count_fields:
count_fields.append(LabelField(-1, skip_indexing=True))
fields["answer_as_counts"] = ListField(count_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def make_bert_drop_instance(question_tokens: List[Token],
passage_tokens: List[Token],
question_concat_passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
fields['passage'] = TextField(passage_tokens, token_indexers)
fields['question'] = TextField(question_tokens, token_indexers)
        question_and_passage_field = TextField(question_concat_passage_tokens, token_indexers)
        fields['question_and_passage'] = question_and_passage_field
metadata = {'original_passage': passage_text, 'passage_token_offsets': passage_offsets,
'question_tokens': [token.text for token in question_tokens],
'passage_tokens': [token.text for token in passage_tokens], }
if answer_info:
metadata['answer_texts'] = answer_info["answer_texts"]
passage_span_fields = \
[SpanField(span[0], span[1], fields["question_and_passage"])
for span in answer_info["answer_passage_spans"]]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, fields["question_and_passage"]))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
metadata.update(additional_metadata)
fields['metadata'] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def extract_answer_info_from_annotation(answer_annotation: Dict[str, Any]) -> Tuple[str, List]:
answer_type = None
if answer_annotation["spans"]:
answer_type = "spans"
elif answer_annotation["number"]:
answer_type = "number"
elif any(answer_annotation["date"].values()):
answer_type = "date"
answer_content = answer_annotation[answer_type] if answer_type is not None else None
answer_texts = []
if answer_type is None: # No answer
pass
elif answer_type == "spans":
# answer_content is a list of string in this case
answer_texts = answer_content
elif answer_type == "date":
# answer_content is a dict with "month", "day", "year" as the keys
date_tokens = [answer_content[key]
for key in ["month", "day", "year"] if key in answer_content and answer_content[key]]
answer_texts = date_tokens
elif answer_type == "number":
# answer_content is a string of number
answer_texts = [answer_content]
return answer_type, answer_texts
@staticmethod
def convert_word_to_number(word: str, try_to_include_more_numbers=False):
"""
Currently we only support limited types of conversion.
"""
if try_to_include_more_numbers:
            # strip all punctuation from the sides of the word, except for the negative sign
            punctuations = string.punctuation.replace('-', '')
            word = word.strip(punctuations)
            # some words may contain a comma as a delimiter
            word = word.replace(",", "")
            # word_to_num will convert "hundred", "thousand", etc. to numbers, but we skip those here.
if word in ["hundred", "thousand", "million", "billion", "trillion"]:
return None
try:
number = word_to_num(word)
except ValueError:
try:
number = int(word)
except ValueError:
try:
number = float(word)
except ValueError:
number = None
return number
else:
no_comma_word = word.replace(",", "")
if no_comma_word in WORD_NUMBER_MAP:
number = WORD_NUMBER_MAP[no_comma_word]
else:
try:
number = int(no_comma_word)
except ValueError:
number = None
return number
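    # Illustrative outputs of `convert_word_to_number` with the default arguments:
    #     "three" -> 3      (via WORD_NUMBER_MAP)
    #     "4,000" -> 4000   (comma stripped, then int())
    #     "many"  -> None   (not a recognized number)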
@staticmethod
def find_valid_spans(passage_tokens: List[Token],
answer_texts: List[str]) -> List[Tuple[int, int]]:
normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens]
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
num_answer_tokens = len(answer_tokens)
if answer_tokens[0] not in word_positions:
continue
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index].strip(STRIPPED_CHARACTERS) == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
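    # Example of `find_valid_spans` (hypothetical tokens): with passage tokens
    # ["He", "scored", "25", "points", "."] and answer_texts ["25 points"], the returned
    # spans are [(2, 3)], i.e. token indices with an inclusive end.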
@staticmethod
def find_valid_add_sub_expressions(numbers: List[int],
targets: List[int],
max_number_of_numbers_to_consider: int = 2) -> List[List[int]]:
valid_signs_for_add_sub_expressions = []
# TODO: Try smaller numbers?
for number_of_numbers_to_consider in range(2, max_number_of_numbers_to_consider + 1):
possible_signs = list(itertools.product((-1, 1), repeat=number_of_numbers_to_consider))
for number_combination in itertools.combinations(enumerate(numbers), number_of_numbers_to_consider):
indices = [it[0] for it in number_combination]
values = [it[1] for it in number_combination]
for signs in possible_signs:
eval_value = sum(sign * value for sign, value in zip(signs, values))
if eval_value in targets:
labels_for_numbers = [0] * len(numbers) # 0 represents ``not included''.
for index, sign in zip(indices, signs):
labels_for_numbers[index] = 1 if sign == 1 else 2 # 1 for positive, 2 for negative
valid_signs_for_add_sub_expressions.append(labels_for_numbers)
return valid_signs_for_add_sub_expressions
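    # Example of `find_valid_add_sub_expressions` (made-up numbers): numbers=[7, 3, 10] and
    # targets=[4] yield [[1, 2, 0]], i.e. +7 - 3 = 4 with 10 unused
    # (label 1 = plus sign, 2 = minus sign, 0 = not included).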
@staticmethod
def find_valid_counts(count_numbers: List[int],
targets: List[int]) -> List[int]:
valid_indices = []
for index, number in enumerate(count_numbers):
if number in targets:
valid_indices.append(index)
return valid_indices
| allennlp-reading-comprehension-research-master | reading_comprehension/data/drop_reader.py |
import os
import random
import signal
import sys
import time
import uuid
from typing import Dict, Iterable, List, Optional
import click
import petname
import rich
import yaml
from beaker import Beaker, CanceledCode, CurrentJobStatus, ExperimentSpec, TaskResources
from rich import pretty, print, traceback
VERSION = "1.2.1"
class TermInterrupt(Exception):
pass
def handle_sigterm(sig, frame):
del sig, frame
raise TermInterrupt
def generate_name() -> str:
return petname.generate() + "-" + str(uuid.uuid4())[:8]
def symbol_for_status(status: CurrentJobStatus) -> str:
if status == CurrentJobStatus.finalized:
return ":white_check_mark:"
elif status == CurrentJobStatus.running:
return ":runner:"
elif status == CurrentJobStatus.created:
return ":thumbsup:"
elif status == CurrentJobStatus.scheduled:
return ":stopwatch:"
elif status == CurrentJobStatus.canceled:
return ":no_entry_sign:"
elif status == CurrentJobStatus.preempted:
return ":warning:"
else:
return ""
def display_logs(logs: Iterable[bytes]):
console = rich.get_console()
def print_line(line: str):
# Remove timestamp
try:
_, line = line.split("Z ", maxsplit=1)
except ValueError:
pass
console.print(line, highlight=False, markup=False)
line_buffer = ""
for bytes_chunk in logs:
chunk = line_buffer + bytes_chunk.decode(errors="ignore")
chunk = chunk.replace("\r", "\n")
lines = chunk.split("\n")
if chunk.endswith("\n"):
line_buffer = ""
else:
            # The last line of the chunk is probably incomplete.
lines, line_buffer = lines[:-1], lines[-1]
for line in lines:
print_line(line)
if line_buffer:
print_line(line_buffer)
@click.command()
@click.version_option(VERSION)
@click.argument("spec", type=str)
@click.option(
"--token",
required=True,
help="Your Beaker user token",
default=lambda: os.environ.get("BEAKER_TOKEN"),
)
@click.option("--workspace", required=True, help="The Beaker workspace to use")
@click.option(
"--clusters",
help="""A comma-separated list of clusters that can be used to override the
cluster for a task if any of them have enough resources available.""",
)
@click.option("--org", default="ai2", help="The Beaker organization")
@click.option("-n", "--name", default=None, help="A name to assign to the experiment")
@click.option(
"--timeout",
type=int,
default=-1,
help="""Time to wait (in seconds) for the experiment to finish.
A timeout of -1 means wait indefinitely. A timeout of 0 means don't wait at all.""",
)
@click.option(
"--poll-interval",
type=int,
default=5,
help="""Time to wait (in seconds) between polling for status changes of the experiment's jobs.""",
)
def main(
spec: str,
token: str,
workspace: str,
clusters: str, # type: ignore
org: str = "ai2",
name: Optional[str] = None, # type: ignore
timeout: int = -1,
poll_interval: int = 5,
):
"""
Submit and await a Beaker experiment defined by the SPEC.
SPEC can be a JSON or Yaml string or file.
"""
beaker = Beaker.from_env(user_token=token, default_workspace=workspace, default_org=org)
print(f"- Authenticated as [b]'{beaker.account.name}'[/]")
name: str = name or generate_name()
print(f"- Experiment name: [b]'{name}'[/]")
# Load experiment spec.
serialized_spec: str
if os.path.exists(spec):
with open(spec, "rt") as spec_file:
serialized_spec = spec_file.read()
else:
serialized_spec = spec
spec_dict = yaml.load(serialized_spec, Loader=yaml.SafeLoader)
exp_spec = ExperimentSpec.from_json(spec_dict)
if exp_spec.tasks[0].image.beaker is not None:
# Validate Beaker image.
image_full_name = beaker.image.get(exp_spec.tasks[0].image.beaker).full_name
exp_spec.tasks[0].image.beaker = image_full_name
print("- Experiment spec:", exp_spec.to_json())
# Find best cluster to use.
cluster_to_use: Optional[str] = None
clusters: List[str] = [] if not clusters else clusters.split(",")
if clusters:
for i, task_spec in enumerate(exp_spec.tasks):
available_clusters = beaker.cluster.filter_available(
task_spec.resources or TaskResources(), *clusters
)
random.shuffle(available_clusters)
for cluster_utilization in available_clusters:
if cluster_utilization.queued_jobs == 0:
cluster_to_use = cluster_utilization.cluster.full_name
task_spec.context.cluster = cluster_to_use
print(
f"- Found cluster with enough free resources for task [i]'{task_spec.name or i}'[/]: "
f"[b]'{cluster_to_use}'[/]"
)
break
# Submit experiment.
print("- Submitting experiment...")
experiment = beaker.experiment.create(name, exp_spec)
print(f" :eyes: See progress at {beaker.experiment.url(experiment)}")
# Can return right away if timeout is 0.
if timeout == 0:
return
# Otherwise we wait for all tasks to complete and then display the logs.
try:
print("- Waiting for tasks to complete...")
task_to_status: Dict[str, Optional[CurrentJobStatus]] = {}
start_time = time.time()
time.sleep(poll_interval)
while timeout < 0 or time.time() - start_time <= timeout:
# Get tasks and check for status changes.
tasks = beaker.experiment.tasks(experiment)
for task in tasks:
job = task.latest_job
status: Optional[CurrentJobStatus] = None
if job is not None:
if job.status.canceled_code in {
CanceledCode.system_preemption,
CanceledCode.user_preemption,
}:
status = CurrentJobStatus.preempted
else:
status = job.status.current
if task.id not in task_to_status or status != task_to_status[task.id]:
print(
f" Task [i]'{task.display_name}'[/]",
"submitted..." if status is None else status,
"" if status is None else symbol_for_status(status),
)
task_to_status[task.id] = status
# Check if all tasks have been completed.
if task_to_status and all(
[status == CurrentJobStatus.finalized for status in task_to_status.values()]
):
break
else:
time.sleep(poll_interval)
else:
print("[red]Timeout exceeded![/]")
raise TimeoutError()
# Get logs and exit codes.
for task in beaker.experiment.tasks(experiment):
job = task.latest_job
assert job is not None
print()
rich.get_console().rule(f"Logs from task [i]'{task.display_name}'[/] :point_down:")
display_logs(beaker.job.logs(job, quiet=True))
rich.get_console().rule(f"End of logs from task [i]'{task.display_name}'[/]")
print("- Summary:")
exit_code = 0
for task in beaker.experiment.tasks(experiment):
job = task.latest_job
assert job is not None
if job.status.exit_code is not None and job.status.exit_code > 0:
exit_code = job.status.exit_code
print(f" :x: Task '{task.display_name}' failed with exit code {exit_code}")
elif job.status.failed is not None:
exit_code = 1
print(f" :x: Task '{task.display_name}' failed")
if job.status.message is not None:
print(job.status.message)
else:
print(f" :white_check_mark: Task '{task.display_name}' succeeded")
sys.exit(exit_code)
except (KeyboardInterrupt, TermInterrupt, TimeoutError):
print("[yellow]Canceling jobs...[/]")
beaker.experiment.stop(experiment)
sys.exit(1)
if __name__ == "__main__":
rich.reconfigure(
width=max(rich.get_console().width, 180), force_terminal=True, force_interactive=False
)
pretty.install()
traceback.install(width=180, show_locals=True, suppress=[click])
signal.signal(signal.SIGTERM, handle_sigterm)
main()
| beaker-run-action-main | beaker_run.py |
from datetime import datetime
from pathlib import Path
from beaker_run import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int = -1
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unreleased"):
insert_index = i + 1
elif line.startswith(f"## [v{VERSION}]"):
print("CHANGELOG already up-to-date")
return
elif line.startswith("## [v"):
break
if insert_index < 0:
raise RuntimeError("Couldn't find 'Unreleased' section")
lines.insert(insert_index, "\n")
lines.insert(
insert_index + 1,
f"## [v{VERSION}](https://github.com/allenai/beaker-run-action/releases/tag/v{VERSION}) - "
f"{datetime.now().strftime('%Y-%m-%d')}\n",
)
with changelog.open("w") as f:
f.writelines(lines)
if __name__ == "__main__":
main()
| beaker-run-action-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List, Optional
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
new_version = packaging.version.parse(TAG)
# Get all tags sorted by version, latest first.
all_tags = os.popen("git tag -l --sort=-version:refname 'v*'").read().split("\n")
# Out of `all_tags`, find the latest previous version so that we can collect all
# commits between that version and the new version we're about to publish.
# Note that we ignore pre-releases unless the new version is also a pre-release.
last_tag: Optional[str] = None
for tag in all_tags:
if not tag.strip(): # could be blank line
continue
version = packaging.version.parse(tag)
if new_version.pre is None and version.pre is not None:
continue
if version < new_version:
last_tag = tag
break
if last_tag is not None:
commits = os.popen(f"git log {last_tag}..{TAG}^ --oneline --first-parent").read()
else:
commits = os.popen("git log --oneline --first-parent").read()
return "## Commits\n\n" + commits
def main():
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| beaker-run-action-main | scripts/release_notes.py |
# pylint: disable=wildcard-import
from my_library.dataset_readers import *
from my_library.models import *
from my_library.predictors import *
| allennlp-as-a-library-example-master | my_library/__init__.py |
from my_library.dataset_readers.semantic_scholar_papers import SemanticScholarDatasetReader
| allennlp-as-a-library-example-master | my_library/dataset_readers/__init__.py |
from typing import Dict
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("s2_papers")
class SemanticScholarDatasetReader(DatasetReader):
"""
Reads a JSON-lines file containing papers from the Semantic Scholar database, and creates a
dataset suitable for document classification using these papers.
Expected format for each input line: {"paperAbstract": "text", "title": "text", "venue": "text"}
The JSON could have other fields, too, but they are ignored.
The output of ``read`` is a list of ``Instance`` s with the fields:
title: ``TextField``
abstract: ``TextField``
label: ``LabelField``
where the ``label`` is derived from the venue of the paper.
Parameters
----------
lazy : ``bool`` (optional, default=False)
Passed to ``DatasetReader``. If this is ``True``, training will start sooner, but will
take longer per batch. This also allows training with datasets that are too large to fit
in memory.
tokenizer : ``Tokenizer``, optional
        Tokenizer to use to split the title and abstract into words or other kinds of tokens.
Defaults to ``WordTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
Indexers used to define input token representations. Defaults to ``{"tokens":
SingleIdTokenIndexer()}``.
"""
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
line = line.strip("\n")
if not line:
continue
paper_json = json.loads(line)
title = paper_json['title']
abstract = paper_json['paperAbstract']
venue = paper_json['venue']
yield self.text_to_instance(title, abstract, venue)
@overrides
def text_to_instance(self, title: str, abstract: str, venue: str = None) -> Instance: # type: ignore
# pylint: disable=arguments-differ
tokenized_title = self._tokenizer.tokenize(title)
tokenized_abstract = self._tokenizer.tokenize(abstract)
title_field = TextField(tokenized_title, self._token_indexers)
abstract_field = TextField(tokenized_abstract, self._token_indexers)
fields = {'title': title_field, 'abstract': abstract_field}
if venue is not None:
fields['label'] = LabelField(venue)
return Instance(fields)
| allennlp-as-a-library-example-master | my_library/dataset_readers/semantic_scholar_papers.py |
from my_library.predictors.paper_classifier_predictor import PaperClassifierPredictor
| allennlp-as-a-library-example-master | my_library/predictors/__init__.py |
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('paper-classifier')
class PaperClassifierPredictor(Predictor):
""""Predictor wrapper for the AcademicPaperClassifier"""
def predict_json(self, inputs: JsonDict) -> JsonDict:
instance = self._json_to_instance(inputs)
output_dict = self.predict_instance(instance)
# label_dict will be like {0: "ACL", 1: "AI", ...}
label_dict = self._model.vocab.get_index_to_token_vocabulary('labels')
# Convert it to list ["ACL", "AI", ...]
all_labels = [label_dict[i] for i in range(len(label_dict))]
output_dict["all_labels"] = all_labels
return output_dict
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
title = json_dict['title']
abstract = json_dict['paperAbstract']
return self._dataset_reader.text_to_instance(title=title, abstract=abstract)
| allennlp-as-a-library-example-master | my_library/predictors/paper_classifier_predictor.py |
from my_library.models.academic_paper_classifier import AcademicPaperClassifier
| allennlp-as-a-library-example-master | my_library/models/__init__.py |
from typing import Dict, Optional
import numpy
from overrides import overrides
import torch
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import FeedForward, Seq2VecEncoder, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("paper_classifier")
class AcademicPaperClassifier(Model):
"""
This ``Model`` performs text classification for an academic paper. We assume we're given a
title and an abstract, and we predict some output label.
The basic model structure: we'll embed the title and the abstract, and encode each of them with
separate Seq2VecEncoders, getting a single vector representing the content of each. We'll then
concatenate those two vectors, and pass the result through a feedforward network, the output of
which we'll use as our scores for each label.
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
title_encoder : ``Seq2VecEncoder``
The encoder that we will use to convert the title to a vector.
abstract_encoder : ``Seq2VecEncoder``
The encoder that we will use to convert the abstract to a vector.
classifier_feedforward : ``FeedForward``
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
title_encoder: Seq2VecEncoder,
abstract_encoder: Seq2VecEncoder,
classifier_feedforward: FeedForward,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(AcademicPaperClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.title_encoder = title_encoder
self.abstract_encoder = abstract_encoder
self.classifier_feedforward = classifier_feedforward
if text_field_embedder.get_output_dim() != title_encoder.get_input_dim():
raise ConfigurationError("The output dimension of the text_field_embedder must match the "
"input dimension of the title_encoder. Found {} and {}, "
"respectively.".format(text_field_embedder.get_output_dim(),
title_encoder.get_input_dim()))
if text_field_embedder.get_output_dim() != abstract_encoder.get_input_dim():
raise ConfigurationError("The output dimension of the text_field_embedder must match the "
"input dimension of the abstract_encoder. Found {} and {}, "
"respectively.".format(text_field_embedder.get_output_dim(),
abstract_encoder.get_input_dim()))
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3)
}
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
@overrides
def forward(self, # type: ignore
title: Dict[str, torch.LongTensor],
abstract: Dict[str, torch.LongTensor],
label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
title : Dict[str, Variable], required
The output of ``TextField.as_array()``.
abstract : Dict[str, Variable], required
The output of ``TextField.as_array()``.
label : Variable, optional (default = None)
A variable representing the label for each instance in the batch.
Returns
-------
An output dictionary consisting of:
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_classes)`` representing a distribution over the
label classes for each instance.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_title = self.text_field_embedder(title)
title_mask = util.get_text_field_mask(title)
encoded_title = self.title_encoder(embedded_title, title_mask)
embedded_abstract = self.text_field_embedder(abstract)
abstract_mask = util.get_text_field_mask(abstract)
encoded_abstract = self.abstract_encoder(embedded_abstract, abstract_mask)
logits = self.classifier_feedforward(torch.cat([encoded_title, encoded_abstract], dim=-1))
output_dict = {'logits': logits}
if label is not None:
loss = self.loss(logits, label)
for metric in self.metrics.values():
metric(logits, label)
output_dict["loss"] = loss
return output_dict
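        # Rough shape walkthrough for the forward pass above:
        #   embedded_title:   (batch_size, title_len, embedding_dim)
        #   encoded_title:    (batch_size, title_encoder_output_dim)
        #   encoded_abstract: (batch_size, abstract_encoder_output_dim)
        #   logits:           (batch_size, num_classes)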
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the class probabilities, converts indices to string labels, and
adds a ``"label"`` key to the dictionary with the result.
"""
class_probabilities = F.softmax(output_dict['logits'], dim=-1)
output_dict['class_probabilities'] = class_probabilities
predictions = class_probabilities.cpu().data.numpy()
argmax_indices = numpy.argmax(predictions, axis=-1)
labels = [self.vocab.get_token_from_index(x, namespace="labels")
for x in argmax_indices]
output_dict['label'] = labels
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}
| allennlp-as-a-library-example-master | my_library/models/academic_paper_classifier.py |
allennlp-as-a-library-example-master | tests/dataset_readers/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import ensure_list
from my_library.dataset_readers import SemanticScholarDatasetReader
class TestSemanticScholarDatasetReader(AllenNlpTestCase):
def test_read_from_file(self):
reader = SemanticScholarDatasetReader()
instances = ensure_list(reader.read('tests/fixtures/s2_papers.jsonl'))
instance1 = {"title": ["Interferring", "Discourse", "Relations", "in", "Context"],
"abstract": ["We", "investigate", "various", "contextual", "effects"],
"venue": "ACL"}
instance2 = {"title": ["GRASPER", ":", "A", "Permissive", "Planning", "Robot"],
"abstract": ["Execut", "ion", "of", "classical", "plans"],
"venue": "AI"}
instance3 = {"title": ["Route", "Planning", "under", "Uncertainty", ":", "The", "Canadian",
"Traveller", "Problem"],
"abstract": ["The", "Canadian", "Traveller", "problem", "is"],
"venue": "AI"}
assert len(instances) == 10
fields = instances[0].fields
assert [t.text for t in fields["title"].tokens] == instance1["title"]
assert [t.text for t in fields["abstract"].tokens[:5]] == instance1["abstract"]
assert fields["label"].label == instance1["venue"]
fields = instances[1].fields
assert [t.text for t in fields["title"].tokens] == instance2["title"]
assert [t.text for t in fields["abstract"].tokens[:5]] == instance2["abstract"]
assert fields["label"].label == instance2["venue"]
fields = instances[2].fields
assert [t.text for t in fields["title"].tokens] == instance3["title"]
assert [t.text for t in fields["abstract"].tokens[:5]] == instance3["abstract"]
assert fields["label"].label == instance3["venue"]
| allennlp-as-a-library-example-master | tests/dataset_readers/semantic_scholar_dataset_reader_test.py |
# pylint: disable=no-self-use,invalid-name,unused-import
from unittest import TestCase
from pytest import approx
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
# required so that our custom model + predictor + dataset reader
# will be registered by name
import my_library
class TestPaperClassifierPredictor(TestCase):
def test_uses_named_inputs(self):
inputs = {
"title": "Interferring Discourse Relations in Context",
"paperAbstract": (
"We investigate various contextual effects on text "
"interpretation, and account for them by providing "
"contextual constraints in a logical theory of text "
"interpretation. On the basis of the way these constraints "
"interact with the other knowledge sources, we draw some "
"general conclusions about the role of domain-specific "
"information, top-down and bottom-up discourse information "
"flow, and the usefulness of formalisation in discourse theory."
)
}
archive = load_archive('tests/fixtures/model.tar.gz')
predictor = Predictor.from_archive(archive, 'paper-classifier')
result = predictor.predict_json(inputs)
label = result.get("label")
assert label in {'AI', 'ML', 'ACL'}
all_labels = result.get("all_labels")
assert all_labels == ['AI', 'ACL', 'ML']
class_probabilities = result.get("class_probabilities")
assert class_probabilities is not None
assert all(cp > 0 for cp in class_probabilities)
assert sum(class_probabilities) == approx(1.0)
| allennlp-as-a-library-example-master | tests/predictors/predictor_test.py |
# pylint: disable=invalid-name,protected-access
from allennlp.common.testing import ModelTestCase
class AcademicPaperClassifierTest(ModelTestCase):
def setUp(self):
super(AcademicPaperClassifierTest, self).setUp()
self.set_up_model('tests/fixtures/academic_paper_classifier.json',
'tests/fixtures/s2_papers.jsonl')
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
| allennlp-as-a-library-example-master | tests/models/academic_paper_classifier_test.py |
allennlp-as-a-library-example-master | tests/models/__init__.py |
|
from invoke import task
import boto3
import subprocess
import os
import glob
import tempfile
import platform
@task
def extract_store_nvidia_driver(context, cuda_url):
if platform.system() != "Linux":
raise Exception("CUDA driver extraction can only be run on Linu")
name = os.path.basename(cuda_url)
subprocess.run("wget -O '%s' '%s'" % (name, cuda_url), shell=True)
os.chmod(name, 0o755)
with tempfile.TemporaryDirectory() as tf_name:
subprocess.check_call("./%s --extract=%s" % (name, tf_name), shell=True)
drivers = list(glob.glob('%s/NVIDIA-Linux-x86_64-*.run' % tf_name))
if drivers:
driver_path = drivers[0]
print("Storing driver %s at s3://ai2thor-vision-nvidia" % driver_path)
with open(driver_path, "rb") as f:
data = f.read()
key = os.path.basename(driver_path)
s3 = boto3.resource("s3")
s3.Object('ai2-vision-nvidia', key).put(
Body=data, ACL="public-read", ContentType="text/x-shellscript"
)
else:
raise Exception("no drivers found in %s" % name)
os.unlink(name)
| ai2thor-docker-main | tasks.py |
import ai2thor.controller
import ai2thor.platform
from pprint import pprint
if __name__ == '__main__':
controller = ai2thor.controller.Controller(platform=ai2thor.platform.CloudRendering, scene='FloorPlan28')
event = controller.step(action='RotateRight')
pprint(event.metadata['agent'])
| ai2thor-docker-main | example_agent.py |
import csv
import os
import pickle
csv.field_size_limit(2147483647)
from collections import Counter
class EHRDataset:
def __init__(self, train_path, dev_path, test_path, do_train=True, do_test=True):
        assert do_train or do_test, "if neither train nor test is enabled, which data should it load?"
self.train_data = self.read_csv(train_path)
self.dev_data = self.read_csv(dev_path)
self.test_data = self.read_csv(test_path)
self.do_train = do_train
self.do_test = do_test
def read_csv(self, path):
reader = csv.reader(open(path))
data = {}
next(reader, None)
for row in reader:
data[row[0]] = {'ehr': row[1], 'outcome': int(row[2])}
len_data = len(data)
ten_percent = int(0.1*len_data)
#data = {x[0]: x[1] for x in list(data.items())[:ten_percent]} for debug purposes (less data)
return data
def compute_class_weights(self):
class_counts = Counter()
data = self.train_data
for example in data:
class_counts.update([data[example]['outcome']])
num_samples = sum(list(class_counts.values()))
num_classes = len(class_counts)
balance_coeff = float(num_samples)/num_classes
self.class_weights = {k:balance_coeff/float(v) for k,v in class_counts.items()}
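    # Worked example for the weighting above (made-up counts): with 80 examples of class 0 and
    # 20 of class 1, balance_coeff = 100 / 2 = 50, so class_weights = {0: 0.625, 1: 2.5}.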
def add_relevant_literature(self, lit_dir, topk, lit_file):
all_texts = pickle.load(open(lit_file, 'rb'))
rankings_files = os.listdir(lit_dir)
num_rankings_files = len(rankings_files)
missing_lit_counter = 0
ehrs_to_process = set()
if self.do_train:
ehrs_to_process = ehrs_to_process | set(self.train_data.keys()) | set(self.dev_data.keys())
if self.do_test:
ehrs_to_process = ehrs_to_process | set(self.test_data.keys())
all_ehrs = set(self.train_data.keys()) | set(self.dev_data.keys()) | set(self.test_data.keys())
for i, file in enumerate(rankings_files):
id = file.split('.pkl')[0]
if id not in all_ehrs:
print(f"id {id} not in train/dev/test datasets")
if id not in ehrs_to_process:
continue
docs = pickle.load(open(os.path.join(lit_dir, file), 'rb'))
if isinstance(docs, dict) and len(docs.keys()) == 1:
docs = docs[id]
docs = list(reversed(sorted(docs, key=lambda x:x[1])))
docs_nums = [x[0] for x in docs]
not_found_docs = set(docs_nums) - set(all_texts)
num_not_found_docs = len(not_found_docs)
if num_not_found_docs > 0:
print(f"not found: {num_not_found_docs}")
chosen_docs = docs[:int(topk)] if topk >= 1 else [x for x in docs if x[1] >= topk]
try:
chosen_docs = [[x[0], all_texts[x[0]]['text'], x[1]] for x in chosen_docs] # We may want to include year later?
except:
missing_lit_counter += 1
if id in self.train_data:
self.train_data[id]['pubmed_docs'] = chosen_docs
elif id in self.dev_data:
self.dev_data[id]['pubmed_docs'] = chosen_docs
elif id in self.test_data:
self.test_data[id]['pubmed_docs'] = chosen_docs
print(f"added docs to {i + 1}/{len(rankings_files)} ehr files", end="\r", flush=True)
def add_literature_matrices(self, lit_embed_file):
lit_embeds = pickle.load(open(lit_embed_file, 'rb'))
if self.do_train:
for id in self.train_data:
self.train_data[id]['pubmed_doc_embeds'] = {x[0]:lit_embeds[x[0]] for x in self.train_data[id]['pubmed_docs']}
for id in self.dev_data:
self.dev_data[id]['pubmed_doc_embeds'] = {x[0]:lit_embeds[x[0]] for x in self.dev_data[id]['pubmed_docs']}
if self.do_test:
for id in self.test_data:
self.test_data[id]['pubmed_doc_embeds'] = {x[0]:lit_embeds[x[0]] for x in self.test_data[id]['pubmed_docs']}
| BEEP-main | outcome-prediction/data_loader.py |
import argparse
import random
import os
import pickle
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score, average_precision_score, \
RocCurveDisplay, PrecisionRecallDisplay, \
precision_score, recall_score, precision_recall_curve, roc_curve
from matplotlib import pyplot as plt
import pandas as pd
import setproctitle
import time
from data_loader import EHRDataset
from transformers import AdamW, BertConfig, BertTokenizer, BertForSequenceClassification, \
AutoTokenizer, AutoConfig, AutoModel, BertTokenizerFast, set_seed, get_linear_schedule_with_warmup
from transformers.models.longformer.modeling_longformer import LongformerSelfAttention
from outcome_models import BertLongForSequenceClassification, LitAugPredictorBienc, LitAugPredictorCrossenc, L2RLitAugPredictorBienc
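# Convert a pretrained BERT classifier into a "long" variant: the position embedding matrix
# is tiled up to max_pos and every BertSelfAttention module is replaced by a
# LongformerSelfAttention that reuses the original query/key/value weights.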
def create_long_model(init_model, save_model_to, attention_window, max_pos, num_labels):
config = BertConfig.from_pretrained(init_model,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)},
cache_dir='../cache'
)
model = BertForSequenceClassification.from_pretrained(init_model, config=config, cache_dir='../cache')
tokenizer = BertTokenizerFast.from_pretrained(init_model, model_max_length=max_pos, cache_dir='../cache')
config = model.config
# extend position embeddings
tokenizer.model_max_length = max_pos
tokenizer.init_kwargs['model_max_length'] = max_pos
current_max_pos, embed_size = model.bert.embeddings.position_embeddings.weight.shape
config.max_position_embeddings = max_pos
assert max_pos > current_max_pos
# allocate a larger position embedding matrix
new_pos_embed = model.bert.embeddings.position_embeddings.weight.new_empty(max_pos, embed_size)
# copy position embeddings over and over to initialize the new position embeddings
k = 0
step = current_max_pos
while k < max_pos - 1:
new_pos_embed[k:(k + step)] = model.bert.embeddings.position_embeddings.weight
k += step
model.bert.embeddings.position_embeddings.weight.data = new_pos_embed
model.bert.embeddings.position_ids.data = torch.tensor([i for i in range(max_pos)]).reshape(1, max_pos)
# replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
config.attention_window = [attention_window] * config.num_hidden_layers
for i, layer in enumerate(model.bert.encoder.layer):
longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
longformer_self_attn.query = layer.attention.self.query
longformer_self_attn.key = layer.attention.self.key
longformer_self_attn.value = layer.attention.self.value
longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)
longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)
longformer_self_attn.value_global = copy.deepcopy(layer.attention.self.value)
layer.attention.self = longformer_self_attn
model.save_pretrained(save_model_to)
tokenizer.save_pretrained(save_model_to)
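# Training loop: class-weighted cross-entropy (NLL for the voting strategies), gradient
# accumulation every acc_steps examples, and periodic validation that saves the best model
# by dev loss (or by AUROC when stop_on_roc is set).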
def train(model, train_data, dev_data, out_dir, epochs, lr, class_weights, acc_steps, strategy,
use_warmup, warmup_steps, stop_on_roc, dump_test_preds):
    # print('Dropout default: {}'.format(model.config.hidden_dropout_prob))
weights = torch.cuda.FloatTensor([x[1] for x in list(sorted(class_weights.items(), key=lambda x:x[0]))])
weighted_ce_loss = nn.CrossEntropyLoss(weight=weights)
if 'vote' in strategy:
weighted_ce_loss = nn.NLLLoss(weight=weights)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
if use_warmup:
# optimizer = AdamW(model.parameters(), lr=lr)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
print('Using linear schedule with warmup for {} steps'.format(warmup_steps))
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, epochs*len(train_data))
prev_dev_loss = 10000
prev_auroc = -10000
batch_size = len(train_data[0]['ehr_id'])
acc_factor = acc_steps/batch_size
    val_steps = batch_size * (800 // batch_size)  # keep the validation interval close to the original 800 steps (at batch size 1)
for epoch in range(epochs):
step = 0
random.shuffle(train_data)
model.train()
epoch_loss = 0.0
optimizer.zero_grad()
start_time = time.time()
num_batches = len(train_data)
num_train_examples = num_batches * batch_size
for batch in train_data:
gpu_batch = {x:y.cuda() for x,y in batch.items()
if x not in ['ehr_id', 'pubmed_docs', 'pubmed_doc_weights',
'ehr_rerank_tokens', 'pubmed_doc_ids']}
if 'pubmed_docs' in batch:
gpu_batch['pubmed_docs'] = batch['pubmed_docs']
gpu_batch['pubmed_doc_weights'] = batch['pubmed_doc_weights']
if 'pubmed_doc_ids' in batch:
gpu_batch['pubmed_doc_ids'] = batch['pubmed_doc_ids']
if 'ehr_rerank_tokens' in batch:
gpu_batch['ehr_rerank_tokens'] = {x:y.cuda() for x,y in batch['ehr_rerank_tokens'].items()}
outputs = model(**gpu_batch)
logits = outputs[1]
wloss = weighted_ce_loss(logits, gpu_batch["labels"])
if outputs[0] is not None:
wloss += outputs[0]
wloss /= acc_factor
epoch_loss += wloss.item()
wloss.backward()
step += batch_size
if step%acc_steps == 0:
optimizer.step()
optimizer.zero_grad()
if step%val_steps == 0:
print('Completed {}/{} training steps'.format(step, num_train_examples))
dev_loss, auroc = test(model, dev_data, dump_test_preds, out_dir, epoch, step=step,
return_loss=True, class_weights=class_weights, strategy=strategy)
if not stop_on_roc and dev_loss < prev_dev_loss: # stop on loss
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model.pt'))
if stop_on_roc and auroc > prev_auroc: # stops on AUROC
prev_auroc = auroc
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model.pt'))
if not use_warmup:
if not stop_on_roc:
scheduler.step(dev_loss)
else:
scheduler.step(auroc)
else:
print('Different step for linear warmup')
scheduler.step()
end_so_far = time.time()
elapsed_so_far = end_so_far - start_time
ETA = 1 / (step/num_train_examples) * elapsed_so_far
print(f"{step} steps + validation took {elapsed_so_far//60} minutes, ETA for epoch: {ETA//60} minutes")
epoch_loss /= (len(train_data)/acc_factor)
print('Training loss after epoch {}: {}'.format(epoch, epoch_loss))
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': epoch_loss,
}, os.path.join(out_dir, 'checkpoints/checkpoint_{}.pt'.format(epoch)))
dev_loss, auroc = test(model, dev_data, dump_test_preds, out_dir, epoch, step="end",
return_loss=True, class_weights=class_weights, strategy=strategy)
if dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model.pt'))
scheduler.step(dev_loss)
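# Evaluation: run the model without gradients, collect per-example predictions and
# probabilities (optionally dumping them to disk), and report classification metrics;
# when return_loss is set, the weighted dev loss and AUROC are returned for early stopping.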
def test(model, dev_data, dump_test_preds, out_dir, epoch, step,
return_loss=False, class_weights=None, strategy='average'):
with torch.no_grad():
model.eval()
unique_labels = list(class_weights.keys())
weights = torch.cuda.FloatTensor([x[1] for x in list(sorted(class_weights.items(), key=lambda x:x[0]))])
weighted_ce_loss = nn.CrossEntropyLoss(weight=weights)
if 'vote' in strategy:
weighted_ce_loss = nn.NLLLoss(weight=weights)
softmax = nn.Softmax(dim=1)
dev_loss = 0.0
all_preds = []
all_pred_probs = []
all_labels = []
all_ids = []
all_pred_probs_dump = []
for i, batch in enumerate(dev_data):
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['ehr_id', 'pubmed_docs', 'pubmed_doc_weights', 'ehr_rerank_tokens', 'pubmed_doc_ids']}
if 'pubmed_docs' in batch:
gpu_batch['pubmed_docs'] = batch['pubmed_docs']
gpu_batch['pubmed_doc_weights'] = batch['pubmed_doc_weights']
if 'pubmed_doc_ids' in batch:
gpu_batch['pubmed_doc_ids'] = batch['pubmed_doc_ids']
if 'ehr_rerank_tokens' in batch:
gpu_batch['ehr_rerank_tokens'] = {x:y.cuda() for x,y in batch['ehr_rerank_tokens'].items()}
outputs = model(**gpu_batch)
logits = outputs[1]
all_preds += torch.argmax(logits, dim=1).detach().cpu().numpy().tolist()
probs = softmax(logits) if 'average' in strategy else torch.exp(logits)
all_pred_probs_dump += probs.detach().cpu().numpy().tolist()
probs = probs if len(unique_labels) > 2 else probs[:,1]
wloss = weighted_ce_loss(logits, gpu_batch["labels"])
dev_loss += wloss.item()
all_pred_probs += probs.detach().cpu().numpy().tolist()
all_labels += gpu_batch["labels"].cpu().numpy().tolist()
all_ids += batch['ehr_id']
print(f"completed {i+1}/{len(dev_data)} validation batches", end="\r", flush=True)
prediction_dict = dict(zip(all_ids, all_preds))
pred_prob_dict = dict(zip(all_ids, all_pred_probs_dump))
if not return_loss and dump_test_preds: # return_loss flag is not used for the test data
pickle.dump(prediction_dict, open(os.path.join(out_dir, 'test_predictions.pkl'), 'wb'))
pickle.dump(pred_prob_dict, open(os.path.join(out_dir, 'test_probabilities.pkl'), 'wb'))
metrics_dict = compute_classification_metrics(all_preds, all_pred_probs, all_labels, epoch, step, out_dir)
dev_loss /= len(dev_data)
print('Validation loss after epoch {}: {}'.format(epoch, dev_loss))
print('------------------Validation Scores for Epoch {}-------------------'.format(epoch))
for metric_name, metric_value in metrics_dict.items():
print(f'{metric_name}: {metric_value}')
auroc = metrics_dict["ROC AUC"]
if return_loss:
return dev_loss, auroc
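# Precision/recall within the top k fraction of examples ranked by predicted class
# probability, reported under two regimes: counting only top-ranked examples whose argmax
# prediction matches the class, and treating every top-ranked example as a positive.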
def prec_rec_at_k(k, labels, probs):
from collections import Counter
if k>=1:
k = k/100.0
print(f"if we take only those in top {k} AND P(y(x)==class)>0.5")
for class_num in set(labels.values()): # range(len(probs[list(probs.keys())[0]])): ???
select_count = int(k * len(probs))
top_probs = list(reversed(sorted(probs.items(), key=lambda x: x[1][class_num])))[:select_count]
eval_probs = [probs[x[0]] for x in top_probs]
eval_labels = [labels[x[0]] for x in top_probs]
eval_probs = np.array(eval_probs)
eval_preds = np.argmax(eval_probs, axis=1)
correct = 0.0
all_labels = list(labels.values())
class_counter = Counter(all_labels)
pred_counter = Counter(eval_preds.tolist())
for x, y in zip(eval_preds, eval_labels):
if x == y and x == class_num:
correct += 1
        print('Class {}'.format(class_num))
        if correct != 0.0:
            print('Precision@{:.0%}: {}'.format(k, correct / pred_counter[class_num]))
            print('Recall@{:.0%}: {}'.format(k, correct / class_counter[class_num]))
        else:
            print('Precision and recall are 0.0!!')
print()
print()
print(f"if we take all those in top {k} as belonging to that class")
for class_num in set(labels.values()): # range(len(probs[list(probs.keys())[0]])): ???
select_count = int(k * len(probs))
top_probs = list(reversed(sorted(probs.items(), key=lambda x: x[1][class_num])))[:select_count]
eval_probs = [probs[x[0]] for x in top_probs]
eval_labels = [labels[x[0]] for x in top_probs]
eval_probs = np.array(eval_probs)
#eval_preds = np.argmax(eval_probs, axis=1) # todo: replace that...
eval_preds = class_num * np.ones(select_count) # todo: ... by that
correct = 0.0
all_labels = list(labels.values())
class_counter = Counter(all_labels)
pred_counter = Counter(eval_preds.tolist())
for x, y in zip(eval_preds, eval_labels):
if x == y and x == class_num:
correct += 1
        print('Class {}'.format(class_num))
        if correct != 0.0:
            print('Precision@{:.0%}: {}'.format(k, correct / pred_counter[class_num]))
            print('Recall@{:.0%}: {}'.format(k, correct / class_counter[class_num]))
        else:
            print('Precision and recall are 0.0!!')
print()
print()
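# NOTE: much of this function assumes a binary outcome (probs holds P(class 1)); the
# sklearn binary defaults in precision_score/recall_score and the 1-D probability sorting
# below would not work as-is for the multi-class los outcome.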
def compute_classification_metrics(preds, probs, labels, epoch, step, out_dir):
print("precision/recall @10 using Aakanasha's function:")
n = len(labels)
probs_for_aakanasha = {i: [1 - probs[i], probs[i]] for i in range(n)}
labels_for_aakanasha = {i: labels[i] for i in range(n)}
prec_rec_at_k(0.1, labels_for_aakanasha, probs_for_aakanasha)
unique_labels, counts = np.unique(labels, return_counts=True)
p_labels = counts / sum(counts)
is_binary = len(unique_labels) == 2
probs = np.array(probs)
labels = np.array(labels)
preds = np.array(preds)
argsort_probs = np.argsort(probs)
probs = probs[argsort_probs]
preds = preds[argsort_probs]
labels = labels[argsort_probs]
metrics_dict = {}
# from the documentation: average parameter will be ignored if y_true is binary.
# in this case it's auprc and roc_auc w.r.t. class 1
auprc = average_precision_score(y_true=labels, y_score=probs, average="macro")
metrics_dict["precision (using threshold 0.5)"] = precision_score(y_true=labels, y_pred=preds)
metrics_dict["recall (using threshold 0.5)"] = recall_score(y_true=labels, y_pred=preds)
metrics_dict["AUPRC"] = auprc
roc_auc = roc_auc_score(y_true=labels, y_score=probs, average="macro", multi_class="ovo")
metrics_dict["ROC AUC"] = roc_auc
if is_binary:
f1 = f1_score(y_true=labels, y_pred=preds)
metrics_dict["f1 (w.r.t. class 1)"] = f1
micro_f1 = f1_score(y_true=labels, y_pred=preds, average='micro')
metrics_dict["micro_f1"] = micro_f1
macro_f1 = f1_score(y_true=labels, y_pred=preds, average='macro')
metrics_dict["macro_f1"] = macro_f1
weighted_f1 = f1_score(y_true=labels, y_pred=preds, average='weighted')
metrics_dict["weighted_f1"] = weighted_f1
"""
fig, ax = plt.subplots()
df = pd.DataFrame(data={"prob": probs, "class": labels})
df.groupby("class").prob.plot(kind='kde', ax=ax)
plt.legend()
plt.xlim((-0.05, 1.05))
plt.xlabel("p")
title = f"separation between the classes, epoch: {epoch}, step: {step}"
plt.title(title)
separation_path = os.path.join(out_dir, f"separation_epoch_{epoch}_step_{step}.jpg")
plt.savefig(separation_path)
plt.show(block=False)
plt.close()
"""
data_distribution = {unique_labels[i]: round(100*p_labels[i], 2) for i in range(len(unique_labels))}
metrics_dict["true distribution"] = data_distribution
if is_binary:
true_share_of_1 = np.mean(labels)
true_share_of_1_size = int(true_share_of_1 * len(labels))
true_percent_of_1 = np.round(100*true_share_of_1, 2)
true_share_of_0 = 1-np.mean(labels)
true_share_of_0_size = int(true_share_of_0 * len(labels))
true_percent_of_0 = np.round(100*true_share_of_0, 2)
precision_at_true_percent_class_0 = sum(labels[:true_share_of_0_size]==0) / true_share_of_0_size
recall_at_true_percent_class_0 = sum(labels[:true_share_of_0_size]==0) / sum(labels==0)
#metrics_dict[f"precision@{true_percent_of_0} (true percent) (class 0)"] = precision_at_true_percent_class_0
#metrics_dict[f"recall@{true_percent_of_0} (true percent) (class 0)"] = recall_at_true_percent_class_0
precision_at_true_percent_class_1 = sum(labels[-true_share_of_1_size:]==1) / true_share_of_1_size
recall_at_true_percent_class_1 = sum(labels[-true_share_of_1_size:]==1) / sum(labels==1)
#metrics_dict[f"precision@{true_percent_of_1} (true percent) (class 1)"] = precision_at_true_percent_class_1
#metrics_dict[f"recall@{true_percent_of_1} (true percent) (class 1)"] = recall_at_true_percent_class_1
k_percent = 10 # precision/recall@k%
percent_size = int(k_percent/100 * len(labels))
precision_at_top_k_class_0 = sum(labels[:percent_size]==0) / percent_size
recall_at_top_k_class_0 = sum(labels[:percent_size]==0) / sum(labels==0)
metrics_dict[f"precision@{k_percent} (class 0)"] = precision_at_top_k_class_0
metrics_dict[f"recall@{k_percent} (class 0)"] = recall_at_top_k_class_0
precision_at_top_k_class_1 = sum(labels[-percent_size:]==1) / percent_size
recall_at_top_k_class_1 = sum(labels[-percent_size:]==1) / sum(labels==1)
metrics_dict[f"precision@{k_percent} (class 1)"] = precision_at_top_k_class_1
metrics_dict[f"recall@{k_percent} (class 1)"] = recall_at_top_k_class_1
"""
plt.figure()
PrecisionRecallDisplay.from_predictions(labels, probs)
title = f"precision-recall curve, epoch: {epoch}, step: {step}"
plt.title(title)
pr_path = os.path.join(out_dir, f"pr_curve_epoch_{epoch}_step_{step}.jpg")
plt.savefig(pr_path)
#plt.show(block=False)
plt.close()
plt.figure()
RocCurveDisplay.from_predictions(labels, probs)
title = f"ROC curve, epoch: {epoch}, step: {step}"
plt.title(title)
roc_path = os.path.join(out_dir, f"roc_curve_epoch_{epoch}_step_{step}.jpg")
plt.savefig(roc_path)
#plt.show(block=False)
plt.close()
"""
return metrics_dict
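# End-to-end driver: load the EHR splits, optionally attach retrieved literature and
# precomputed abstract embeddings, build the (optionally long / literature-augmented)
# classifier, then train and/or evaluate according to the do_train/do_test flags.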
def run(train_path, dev_path, test_path, lit_ranks, lit_file, init_model,
rerank_model_path, rerank_checkpoint, longmodel_dir, out_dir,
do_train, do_test, checkpoint, attention_window, max_pos,
batch_size, lr, epochs, seed, accumulation_steps, num_top_docs, strategy, enc_strategy,
use_warmup, warmup_steps, stop_on_roc, dump_test_preds, use_pico, doc_embeds, l2r_top_docs,
outcome, retrieval_labels, query_proj, query_loss):
assert accumulation_steps % batch_size == 0, "accumulation_steps must be a multiple of batch_size"
if longmodel_dir is not None and not os.path.exists(longmodel_dir):
os.makedirs(longmodel_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
checkpoint_dir = os.path.join(out_dir, 'checkpoints')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
set_seed(seed)
setproctitle.setproctitle("python")
outcome_questions = {'mortality': 'What is the hospital mortality? ',
'pmv': 'What is the probability of prolonged mechanical ventilation? ',
'los': 'What is the probable length of stay? '}
dataset = EHRDataset(train_path, dev_path, test_path, do_train, do_test)
dataset.compute_class_weights()
if lit_ranks is not None:
dataset.add_relevant_literature(lit_ranks, num_top_docs, lit_file)
missing_lit = 0
if do_train:
for doc in list(dataset.train_data.keys()):
if 'pubmed_docs' not in dataset.train_data[doc]:
missing_lit += 1
dataset.train_data.pop(doc)
for doc in list(dataset.dev_data.keys()):
if 'pubmed_docs' not in dataset.dev_data[doc]:
missing_lit += 1
dataset.dev_data.pop(doc)
if do_test:
for doc in list(dataset.test_data.keys()):
if 'pubmed_docs' not in dataset.test_data[doc]:
missing_lit += 1
dataset.test_data.pop(doc)
print('{} documents do not have PubMed abstracts'.format(missing_lit))
if doc_embeds:
dataset.add_literature_matrices(doc_embeds)
num_labels = len(list(dataset.class_weights.keys()))
if retrieval_labels is not None:
retrieval_labels = pickle.load(open(retrieval_labels, 'rb'))
if longmodel_dir is not None:
create_long_model(
init_model=init_model,
save_model_to=longmodel_dir,
attention_window=attention_window,
max_pos=max_pos,
num_labels=num_labels
)
model_path = longmodel_dir if longmodel_dir is not None else init_model
config = BertConfig.from_pretrained(model_path,
num_labels=num_labels,
label2id={x: x for x in range(num_labels)},
id2label={x: x for x in range(num_labels)},
cache_dir='../cache')
tokenizer = BertTokenizerFast.from_pretrained(model_path, cache_dir='../cache') if 'Discharge' not in model_path \
else AutoTokenizer.from_pretrained(model_path, cache_dir='../cache')
model = BertLongForSequenceClassification.from_pretrained(model_path, config=config, cache_dir='../cache') \
if longmodel_dir is not None \
else BertForSequenceClassification.from_pretrained(model_path, config=config, cache_dir='../cache')
rerank_config, rerank_tokenizer, rerank_model = None, None, None
if rerank_model_path is not None:
rerank_label_vocab = {'Relevant': 1, 'Irrelevant': 0}
rerank_config = AutoConfig.from_pretrained(
rerank_model_path,
num_labels=len(list(rerank_label_vocab.keys())),
label2id=rerank_label_vocab,
id2label={i: l for l, i in rerank_label_vocab.items()},
cache_dir='../cache',
)
rerank_tokenizer = AutoTokenizer.from_pretrained(
rerank_model_path,
cache_dir='../cache',
use_fast=True,
)
rerank_model = AutoModel.from_pretrained(
rerank_model_path,
from_tf=bool(".ckpt" in rerank_model_path),
config=rerank_config,
cache_dir='../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = rerank_tokenizer.add_special_tokens(special_tokens_dict)
rerank_model.resize_token_embeddings(len(rerank_tokenizer))
if rerank_checkpoint is not None and do_train: # Only load pretrained reranker if training is to be carried out
rerank_model.load_state_dict(
torch.load(rerank_checkpoint)) # Otherwise full model will contain reranker weights too
if use_pico:
special_tokens_dict = {'additional_special_tokens': ['<PAR>', '</PAR>', '<INT>', '</INT>', '<OUT>', '</OUT>']}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
print('Added additional special tokens for PICO highlights')
if lit_ranks is not None and doc_embeds is None: # we're training with literature, and we don't use existing embeddings
if enc_strategy == 'bienc':
model = LitAugPredictorBienc(config, model, num_top_docs, strategy)
elif enc_strategy == 'crossenc':
model = LitAugPredictorCrossenc(config, model, num_top_docs, strategy)
if lit_ranks is not None and doc_embeds is not None:
if query_proj is None:
            # pass the outcome tokenizer as well, matching L2RLitAugPredictorBienc's constructor
            model = L2RLitAugPredictorBienc(config, model, tokenizer, l2r_top_docs, strategy, rerank_model)
        else:
            model = L2RLitAugPredictorBienc(config, model, tokenizer, l2r_top_docs, strategy, rerank_model,
                                            query_proj)
if query_loss is not None:
model.query_loss = query_loss
model = model.cuda()
# print('Initialized longformer model with pretrained LM...')
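    # Tokenize a list of [id, ehr, label, ...] examples into model inputs; with literature,
    # each of the top-k abstracts is tokenized separately (bienc) or paired with the note
    # (crossenc), and L2R runs additionally get rerank-query tokens and abstract embeddings.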
def preprocess_function(examples, split):
data_args = (([x[1] for x in examples], None))
max_length = max_pos if longmodel_dir is not None else 512
result = tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True,
return_tensors='pt')
result["labels"] = torch.LongTensor([(x[2]) for x in examples])
result["ehr_id"] = [(x[0]) for x in examples]
if doc_embeds is not None:
if rerank_tokenizer is not None:
data_args = (([outcome_questions[outcome] + x[1] for x in examples], None))
result["ehr_rerank_tokens"] = rerank_tokenizer(*data_args, padding='max_length', max_length=max_length,
truncation=True, return_tensors='pt')
else:
data_args = (([outcome_questions[outcome] + x[1] for x in examples], None))
result["ehr_rerank_tokens"] = tokenizer(*data_args, padding='max_length', max_length=max_length,
truncation=True, return_tensors='pt')
if lit_ranks is not None and doc_embeds is None:
result["pubmed_docs"] = []
result["pubmed_doc_weights"] = []
k_range = int(num_top_docs) if num_top_docs >= 1 else max([len(x[-1]) for x in examples])
if k_range > 0:
if enc_strategy == 'bienc':
for k in range(k_range):
result["pubmed_doc_weights"].append([x[-1][k][2] if len(x[-1]) > k else 0.0 for x in examples])
data_args = (([x[-1][k][1] if len(x[-1]) > k else '' for x in examples], None))
result["pubmed_docs"].append(
tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True,
return_tensors='pt'))
if enc_strategy == 'crossenc':
for k in range(k_range):
result["pubmed_doc_weights"].append([x[-1][k][2] if len(x[-1]) > k else 0.0 for x in examples])
data_args = (
([x[1] for x in examples], [x[-1][k][1] if len(x[-1]) > k else '' for x in examples]))
result["pubmed_docs"].append(
tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True,
return_tensors='pt'))
if doc_embeds is not None:
result["pubmed_docs"] = []
result["pubmed_doc_weights"] = []
result["pubmed_doc_embeds"] = []
result["pubmed_doc_ids"] = []
if retrieval_labels is not None and split == 'train':
result["pubmed_doc_labels"] = []
for x in examples:
result["pubmed_doc_ids"].append([y[0] for y in x[-1]])
result["pubmed_doc_weights"].append([y[2] for y in x[-1]])
data_args = (([y[1] for y in x[-1]], None)) # y[0] will be Pubmed ID of doc
result["pubmed_docs"].append(
tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True,
return_tensors='pt'))
result["pubmed_doc_embeds"].append(np.vstack([x[3][y[0]] for y in x[-1]])[np.newaxis, :, :])
if retrieval_labels is not None and split == 'train':
result["pubmed_doc_labels"].append([retrieval_labels[x[0]][y[0]] for y in x[-1]])
if retrieval_labels is not None and split == 'train':
result["pubmed_doc_labels"] = torch.LongTensor(np.vstack(result["pubmed_doc_labels"]))
result["pubmed_doc_embeds"] = np.vstack(result["pubmed_doc_embeds"])
result["pubmed_doc_embeds"] = torch.FloatTensor(result["pubmed_doc_embeds"])
return result
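    # Group examples into fixed-size batches and run preprocess_function on each group.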
def batch_and_tokenize_data(examples, batch_size, split):
example_list = []
for file in list(examples.keys()):
example = examples[file]
if lit_ranks is None:
example_list.append([file, example['ehr'], example['outcome']])
elif doc_embeds is None:
example_list.append([file, example['ehr'], example['outcome'], example['pubmed_docs']])
else:
example_list.append(
[file, example['ehr'], example['outcome'], example['pubmed_doc_embeds'], example['pubmed_docs']])
batches = []
# if longmodel_dir is not None and (split == 'dev' or split == 'test'):
# batch_size = 1
#num_tokens_in_ehrs = []
#num_tokens_in_lits = []
for i in range(0, len(example_list), batch_size):
start = i
end = min(start + batch_size, len(example_list))
batch = preprocess_function(example_list[start:end], split)
batches.append(batch)
#num_tokens_in_ehr = batch["attention_mask"].numpy().sum()
#num_tokens_in_ehrs.append(num_tokens_in_ehr)
#num_tokens_in_lit = batch["pubmed_docs"][0]["attention_mask"][0].numpy().sum()
#num_tokens_in_lits.append(num_tokens_in_lit)
if len(batches) % 100 == 0:
print('Created {} batches'.format(len(batches)), end="\r", flush=True)
return batches
print('Started batch creation')
train_batches, dev_batches, test_batches = None, None, None
if do_train:
train_batches = batch_and_tokenize_data(dataset.train_data, batch_size, 'train')
print('Created {} train batches'.format(len(train_batches)))
dev_batches = batch_and_tokenize_data(dataset.dev_data, batch_size, 'dev')
print('Created {} dev batches'.format(len(dev_batches)))
if do_test:
test_batches = batch_and_tokenize_data(dataset.test_data, batch_size, 'test')
print('Created {} test batches'.format(len(test_batches)))
if do_train:
train(model, train_batches, dev_batches, out_dir, epochs, lr, dataset.class_weights,
accumulation_steps, strategy, use_warmup, warmup_steps, stop_on_roc, dump_test_preds)
if do_test:
if checkpoint is not None:
if 'checkpoint' in checkpoint:
full_checkpoint = torch.load(checkpoint)
model.load_state_dict(full_checkpoint['model_state_dict'])
else:
model.load_state_dict(torch.load(checkpoint))
print('Loaded checkpoint')
else:
model.load_state_dict(torch.load(os.path.join(out_dir, 'best_model.pt')))
test(model, test_batches, dump_test_preds, out_dir, epoch="end", step="test",
class_weights=dataset.class_weights, strategy=strategy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', type=str, action='store', required=True, help='Path to training file')
parser.add_argument('--dev_path', type=str, action='store', required=True, help='Path to development file')
parser.add_argument('--test_path', type=str, action='store', required=True, help='Path to test file')
parser.add_argument('--lit_ranks', type=str, action='store',
help='Path to directory containing files of ehr : literature similarity ranks ')
parser.add_argument('--lit_file', type=str, action='store', help='Path to file containing literature ')
parser.add_argument('--init_model', type=str, action='store', default='microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', \
help='Pretrained model to initialize weights from')
parser.add_argument('--rerank_model_path', type=str, action='store', help='Pretrained model to initialize reranker weights from')
parser.add_argument('--rerank_checkpoint', type=str, action='store', help='Checkpoint to load reranker weights from')
parser.add_argument('--longmodel_dir', type=str, action='store', help='Path to dump longformer version of model')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--do_train', action='store_true', default=False, help='Specify if training should be performed')
parser.add_argument('--do_test', action='store_true', default=False, help='Specify if evaluation on test data should be performed')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--attention_window', type=int, action='store', default=512, help='Attention window size')
parser.add_argument('--max_pos', type=int, action='store', default=4096, help='Maximum position embedding size')
parser.add_argument('--batch_size', type=int, action='store', default=1, help='Specify batch size')
parser.add_argument('--lr', type=float, action='store', default=2e-5, help='Specify learning rate')
parser.add_argument('--epochs', type=int, action='store', default=20, help='Specify number of epochs')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
parser.add_argument('--accumulation_steps', type=int, action='store', default=32, help='Specify number of steps for gradient accumulation')
parser.add_argument('--num_top_docs', type=float, action='store', default=1, help='Number of top ranked abstracts from PubMed to include')
parser.add_argument('--strategy', type=str, action='store', default='average', help='Strategy to use to combine literature with EHR')
parser.add_argument('--enc_strategy', type=str, action='store', default='bienc', help='Encoding strategy to use for notes and articles (bienc/crossenc)')
parser.add_argument('--use_warmup', action='store_true', default=False, help='Choose whether to use LR warmup or not')
parser.add_argument('--warmup_steps', type=int, action='store', default=5000, help='Choose number of warmup steps')
parser.add_argument('--stop_on_roc', action='store_true', default=False, help='Use AUROC as early stopping metric')
parser.add_argument('--dump_test_preds', action='store_true', default=False, help='Dump predictions on test set')
parser.add_argument('--use_pico', action='store_true', default=False, help='Add PICO highlights to chosen literature docs')
parser.add_argument('--doc_embeds', type=str, action='store', help='Embeddings of top ranked abstracts for learning to retrieve')
    parser.add_argument('--l2r_top_docs', type=int, action='store', default=5, help='Number of top documents to choose in learning to retrieve')
parser.add_argument('--outcome', type=str, action='store', required=True, help='Choose outcome to predict (pmv/los/mortality)')
parser.add_argument('--retrieval_labels', type=str, action='store', default=None,
help='Path to file containing pseudo labels for retrieval training L2R')
parser.add_argument('--query_proj', type=str, action='store', help='Projection layer to use for queries in L2R')
parser.add_argument('--query_loss', type=str, action='store', help='Direct loss term for query encoding (pred/reg)')
    parser.add_argument('--run_name', type=str, default="default run name", action='store', help='name of the run')
args = parser.parse_args()
args_dict = vars(args)
print(f"run name: {args_dict['run_name']}")
for key, value in args_dict.items():
print(f"{key}: {value}")
args_dict.pop("run_name")
run(**args_dict)
| BEEP-main | outcome-prediction/run_outcome_prediction.py |
import os
import math
import copy
import numpy as np
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from transformers import BertForSequenceClassification
from transformers.models.longformer.modeling_longformer import LongformerSelfAttention
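# Adapter that lets LongformerSelfAttention replace a BertSelfAttention layer: BERT's
# (batch, 1, 1, seq) extended attention mask (large negative values at padded positions)
# is squeezed to (batch, seq) and used to derive the is_index_masked tensor.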
class BertLongSelfAttention(LongformerSelfAttention):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
is_index_masked = attention_mask < 0
is_index_masked = is_index_masked.squeeze(1).squeeze(1)
attention_mask = attention_mask.squeeze(1).squeeze(1)
# print('Running self-attention layer #: {}'.format(self.layer_id))
return super().forward(hidden_states, \
attention_mask=attention_mask, \
is_index_masked=is_index_masked) # output_attentions=output_attentions [Arg not present in v4.1.1]
class BertLongForSequenceClassification(BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
for i, layer in enumerate(self.bert.encoder.layer):
# replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
layer.attention.self = BertLongSelfAttention(config, layer_id=i)
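# Cross-encoder literature augmentation: each pubmed_docs batch holds note-abstract pairs
# already tokenized together, and the per-document predictions are combined by soft or
# weighted voting (over class probabilities) or by (weighted) averaging of pooled outputs.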
class LitAugPredictorCrossenc(nn.Module):
def __init__(self, bert_config, bert_model, topk, strategy='average'):
super().__init__()
self.bert_model = bert_model
self.bert_config = bert_config
self.topk = topk
self.strategy = strategy
self.softmax = nn.Softmax(dim=1)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
pubmed_docs=None,
pubmed_doc_weights=None
):
note_lit_reps = []
if 'vote' in self.strategy:
prob_matrices = []
for doc_batch in pubmed_docs:
doc_batch = {x:y.cuda() for x,y in doc_batch.items()}
cur_logits = self.bert_model(**doc_batch)[0]
cur_logits_softmax = self.softmax(cur_logits)
prob_matrices.append(cur_logits_softmax)
averaged_probs = None
if self.strategy == 'softvote':
averaged_probs = torch.mean(torch.stack(prob_matrices), dim=0)
if self.strategy == 'weightvote':
if len(prob_matrices) == 1:
averaged_probs = torch.mean(torch.stack(prob_matrices), dim=0)
else:
weighted_matrices = []
total_weight = torch.zeros(prob_matrices[0].size()).cuda()
for prob_matrix, weights in zip(prob_matrices, pubmed_doc_weights):
weights = torch.cuda.FloatTensor(weights).unsqueeze(1).repeat(1, self.bert_config.num_labels)
weighted_matrices.append(weights * prob_matrix)
total_weight += weights
weighted_matrices = [x/total_weight for x in weighted_matrices]
averaged_probs = torch.sum(torch.stack(weighted_matrices), dim=0)
averaged_log_probs = torch.log(averaged_probs)
return (None, averaged_log_probs)
if self.strategy == 'average':
rep_list = []
for doc_batch in pubmed_docs:
doc_batch = {x:y.cuda() for x,y in doc_batch.items()}
cur_outputs = self.bert_model.bert(**doc_batch)[1] # 0 - last state, 1 - pooled output
rep_list.append(cur_outputs)
final_lit_rep = torch.mean(torch.stack(rep_list), dim=0)
logits = self.bert_model.classifier(final_lit_rep)
return (None, logits)
if self.strategy == 'weightaverage':
rep_list = []
total_weight = torch.zeros((input_ids.size()[0], self.bert_config.hidden_size)).cuda()
for doc_batch, weights in zip(pubmed_docs, pubmed_doc_weights):
doc_batch = {x:y.cuda() for x,y in doc_batch.items()}
cur_outputs = self.bert_model.bert(**doc_batch)[1]
weights = torch.cuda.FloatTensor(weights).unsqueeze(1).repeat(1, self.bert_config.hidden_size)
rep_list.append(weights * cur_outputs)
total_weight += weights
rep_list = [x/total_weight for x in rep_list]
averaged_reps = torch.sum(torch.stack(rep_list), dim=0)
logits = self.bert_model.classifier(averaged_reps)
return (None, logits)
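# Bi-encoder literature augmentation: the note and each abstract are encoded separately,
# abstract representations are aggregated (average / weighted average / voting), and the
# concatenated [note; literature] vector is fed to a linear outcome predictor.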
class LitAugPredictorBienc(nn.Module):
def __init__(self, bert_config, bert_model, topk, strategy='average'):
super().__init__()
self.input_size = 2 * bert_config.hidden_size # embeddings of note + literature
self.output_size = bert_config.num_labels
self.bert_model = bert_model
self.bert_config = bert_config
self.topk = topk
self.strategy = strategy
self.predictor = nn.Linear(self.input_size, self.output_size)
self.softmax = nn.Softmax(dim=1)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
pubmed_docs=None,
pubmed_doc_weights=None,
split='train'
):
note_outputs = self.bert_model.bert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
note_reps = note_outputs[1]
lit_reps = []
if len(pubmed_docs) >= 50:
pubmed_docs = pubmed_docs[:50]
# print(len(pubmed_docs))
if len(pubmed_docs) == 0:
lit_reps.append(torch.zeros(note_reps.size()).cuda())
for doc_batch in pubmed_docs:
doc_batch = {x:y.cuda() for x,y in doc_batch.items()}
cur_outputs = self.bert_model.bert(**doc_batch)
lit_reps.append(cur_outputs[1])
if self.strategy == 'average':
final_lit_rep = torch.mean(torch.stack(lit_reps), dim=0)
final_rep = torch.cat([note_reps, final_lit_rep], dim=1)
logits = self.predictor(final_rep)
return (None, logits)
if self.strategy == 'weightaverage':
total_lit_rep = torch.zeros(lit_reps[0].size()).cuda()
total_weight = torch.zeros((input_ids.size()[0], self.bert_config.hidden_size)).cuda()
for cur_lit_rep, weights in zip(lit_reps, pubmed_doc_weights):
weights = torch.cuda.FloatTensor(weights).unsqueeze(1).repeat(1, self.bert_config.hidden_size)
total_weight += weights
total_lit_rep += (weights * cur_lit_rep)
if torch.sum(total_weight).item() != 0.0:
total_lit_rep /= total_weight
final_rep = torch.cat([note_reps, total_lit_rep], dim=1)
logits = self.predictor(final_rep)
return (None, logits)
if self.strategy == 'softvote' or self.strategy == 'weightvote':
prob_matrices = []
for cur_lit_rep in lit_reps:
cur_final_rep = torch.cat([note_reps, cur_lit_rep], dim=1)
cur_logits = self.predictor(cur_final_rep)
cur_logits_softmax = self.softmax(cur_logits)
prob_matrices.append(cur_logits_softmax)
averaged_probs = None
if self.strategy == 'softvote':
averaged_probs = torch.mean(torch.stack(prob_matrices), dim=0)
if self.strategy == 'weightvote':
if len(prob_matrices) == 1:
averaged_probs = torch.mean(torch.stack(prob_matrices), dim=0)
else:
weighted_matrices = []
total_weight = torch.zeros(prob_matrices[0].size()).cuda()
for prob_matrix, weights in zip(prob_matrices, pubmed_doc_weights):
weights = torch.cuda.FloatTensor(weights).unsqueeze(1).repeat(1, self.output_size)
weighted_matrices.append(weights * prob_matrix)
total_weight += weights
weighted_matrices = [x/total_weight for x in weighted_matrices if torch.sum(total_weight).item() != 0.0]
averaged_probs = torch.sum(torch.stack(weighted_matrices), dim=0)
averaged_log_probs = torch.log(averaged_probs)
return (None, averaged_log_probs)
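# Learning-to-retrieve bi-encoder: an outcome-question query built from the note (encoded by
# the reranker or the outcome LM) is compared to precomputed abstract embeddings by cosine
# similarity, the top-k abstracts are re-tokenized and re-encoded on the fly, and optional
# retrieval losses (pred/reg or pseudo-label softmax) supervise the query encoder.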
class L2RLitAugPredictorBienc(nn.Module):
def __init__(self, bert_config, bert_model, tokenizer, topk, strategy='average', rerank_model=None, query_proj=None):
super().__init__()
self.input_size = 2 * bert_config.hidden_size # embeddings of note + literature
self.output_size = bert_config.num_labels
self.bert_model = bert_model
self.tokenizer = tokenizer
self.bert_config = bert_config
self.topk = topk
self.strategy = strategy
self.predictor = nn.Linear(self.input_size, self.output_size)
self.softmax = nn.Softmax(dim=1)
self.cosine = nn.CosineSimilarity(dim=2)
if rerank_model is not None:
self.rerank_model = rerank_model
if query_proj is not None:
self.query_proj = query_proj
if query_proj == 'linear':
self.query_proj_layer = nn.Linear(bert_config.hidden_size, bert_config.hidden_size)
if query_proj == 'transformer':
encoder_layer = nn.TransformerEncoderLayer(d_model=bert_config.hidden_size, nhead=8)
self.query_proj_layer = nn.TransformerEncoder(encoder_layer, num_layers=1)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
pubmed_docs=None,
pubmed_doc_weights=None,
pubmed_doc_embeds=None,
ehr_rerank_tokens=None,
pubmed_doc_ids=None,
pubmed_doc_labels=None,
split='train'
):
note_question_outputs, note_question_hidden_states = None, None
retrieval_loss = 0.0
if hasattr(self, 'rerank_model'):
note_question_outputs = self.rerank_model(**ehr_rerank_tokens)
note_question_outputs = note_question_outputs['last_hidden_state'][:,0,:]
else:
note_question_outputs = self.bert_model.bert(**ehr_rerank_tokens)
note_question_hidden_states = note_question_outputs[0]
note_question_outputs = note_question_outputs[1]
if hasattr(self, 'query_proj_layer'):
if self.query_proj == 'linear':
note_question_outputs = self.query_proj_layer(note_question_outputs)
if self.query_proj == 'transformer':
note_question_hidden_states = note_question_hidden_states.permute(1,0,2)
note_question_outputs = self.query_proj_layer(note_question_hidden_states)
note_question_outputs = torch.mean(note_question_outputs.permute(1,0,2), dim=1)
if hasattr(self, 'query_loss'):
if self.query_loss == 'pred':
empty_lit_reps = torch.zeros(note_question_outputs.size()).cuda()
note_question_lit_reps = torch.cat([note_question_outputs, empty_lit_reps], dim=1)
note_question_probs = self.predictor(note_question_lit_reps)
retrieval_loss = nn.CrossEntropyLoss()(note_question_probs, labels)
note_question_reps = note_question_outputs.unsqueeze(1)
note_question_rep_repeat = note_question_reps.repeat(1,pubmed_doc_embeds.size()[1],1)
note_lit_sim = self.cosine(note_question_rep_repeat, pubmed_doc_embeds)
# note_lit_sim = torch.nan_to_num(note_lit_sim, nan=-1.1)
# note_lit_sim = torch.inner(note_question_rep_repeat, pubmed_doc_embeds)
# note_lit_sim = -1 * torch.cdist(note_question_reps, pubmed_doc_embeds)
# note_lit_sim = note_lit_sim.squeeze(1)
corrected_note_lit_sim = torch.FloatTensor(np.nan_to_num(note_lit_sim.detach().cpu().numpy(), nan=-1.1)).cuda()
top_doc_scores, top_doc_inds = torch.topk(corrected_note_lit_sim, self.topk, dim=1) # Should break graph here
if pubmed_doc_labels is not None:
max_sim_array = torch.max(note_lit_sim.detach(), dim=1)[0].unsqueeze(-1)
max_sim_array = max_sim_array.repeat(1,note_lit_sim.size()[-1])
note_lit_softmax = self.softmax(note_lit_sim - max_sim_array)
retrieval_loss -= torch.log(torch.sum(note_lit_softmax * pubmed_doc_labels))
# Recompute note reps (without question) using outcome prediction LM
note_outputs = self.bert_model.bert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
note_outputs = note_outputs[1]
if hasattr(self, 'query_loss'):
if self.query_loss == 'reg':
retrieval_loss += nn.MSELoss()(note_question_outputs, note_outputs)
note_reps = note_outputs.unsqueeze(1)
if split == 'test' and torch.sum(torch.isnan(note_outputs)) > 0:
note_reps = torch.FloatTensor(np.nan_to_num(note_reps.detach().cpu().numpy(), nan=0)).cuda()
print('Note rep contains NaNs!!!')
output_array = []
for i in range(top_doc_inds.size()[0]):
cur_doc_inds = top_doc_inds[i,:].detach().cpu().numpy().tolist()
cur_args = (([pubmed_docs[i][0][0][x] for x in cur_doc_inds], None))
cur_doc_input = self.tokenizer(*cur_args, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
cur_doc_input = {k:v.cuda() for k,v in cur_doc_input.items()}
# print(cur_doc_input)
# print(cur_doc_inds)
# cur_doc_input = {k:torch.index_select(v.cuda(), 0, cur_doc_inds) for k,v in pubmed_docs[i].items()}
cur_outputs = self.bert_model.bert(**cur_doc_input)[1]
if split == 'test' and torch.sum(torch.isnan(cur_outputs)) > 0:
cur_outputs = torch.FloatTensor(np.nan_to_num(cur_outputs.detach().cpu().numpy(), nan=0)).cuda()
if self.strategy == 'average':
final_lit_rep = torch.mean(cur_outputs, dim=0).unsqueeze(0)
final_rep = torch.cat([note_reps[i,:,:], final_lit_rep], dim=1)
logits = self.predictor(final_rep)
max_val = max(logits.detach().cpu().numpy().tolist()[0])
output_array.append(logits - max_val)
if self.strategy == 'weightaverage':
weights = top_doc_scores[i,:].unsqueeze(1).detach()
total_weight = torch.sum(weights).item()
final_lit_rep = []
if split == 'test' and torch.sum(torch.isnan(cur_outputs)) > 0:
print('Lit rep contains NaNs!!!!')
if math.isnan(total_weight):
final_lit_rep = torch.mean(cur_outputs, dim=0).unsqueeze(0)
else:
final_lit_rep = torch.sum((cur_outputs * weights)/total_weight, dim=0).unsqueeze(0)
final_rep = torch.cat([note_reps[i,:,:], final_lit_rep], dim=1)
logits = self.predictor(final_rep)
max_val = max(logits.detach().cpu().numpy().tolist()[0])
output_array.append(logits - max_val)
if 'vote' in self.strategy:
cur_note_rep = note_reps[i,:,:].repeat(self.topk,1)
final_rep = torch.cat([cur_note_rep, cur_outputs], dim=1)
logits = self.predictor(final_rep)
max_val = max(logits.detach().cpu().numpy().tolist()[0])
logits_softmax = self.softmax(logits - max_val)
if self.strategy == 'softvote':
output_array.append(torch.mean(logits_softmax, dim=0))
if self.strategy == 'weightvote':
weights = top_doc_scores[i,:].unsqueeze(1).detach()
total_weight = torch.sum(weights).item()
if math.isnan(total_weight):
output_array.append(torch.mean(logits_softmax, dim=0))
else:
output_array.append(torch.sum((logits_softmax * weights)/total_weight, dim=0))
final_output = torch.stack(output_array).squeeze(1)
if 'vote' in self.strategy:
final_output = torch.log(final_output)
return (retrieval_loss, final_output)
| BEEP-main | outcome-prediction/outcome_models.py |
import argparse
import math
import random
import os
import pickle
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score
import setproctitle
from data_loader import EHRDataset
from transformers import AdamW, BertConfig, BertTokenizer, BertForSequenceClassification, \
AutoTokenizer, AutoConfig, AutoModel, BertTokenizerFast, set_seed, get_linear_schedule_with_warmup
from transformers.models.longformer.modeling_longformer import LongformerSelfAttention
from outcome_models import BertLongForSequenceClassification, LitAugPredictorBienc, LitAugPredictorCrossenc, L2RLitAugPredictorBienc
from ray import tune
from ray.tune.schedulers import MedianStoppingRule
from shutil import copyfile
os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "1"
def create_long_model(init_model, save_model_to, attention_window, max_pos, num_labels):
config = BertConfig.from_pretrained(init_model,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
model = BertForSequenceClassification.from_pretrained(init_model, config=config)
tokenizer = BertTokenizerFast.from_pretrained(init_model, model_max_length=max_pos)
config = model.config
# extend position embeddings
tokenizer.model_max_length = max_pos
tokenizer.init_kwargs['model_max_length'] = max_pos
current_max_pos, embed_size = model.bert.embeddings.position_embeddings.weight.shape
config.max_position_embeddings = max_pos
assert max_pos > current_max_pos
# allocate a larger position embedding matrix
new_pos_embed = model.bert.embeddings.position_embeddings.weight.new_empty(max_pos, embed_size)
# copy position embeddings over and over to initialize the new position embeddings
k = 0
step = current_max_pos
while k < max_pos - 1:
new_pos_embed[k:(k + step)] = model.bert.embeddings.position_embeddings.weight
k += step
model.bert.embeddings.position_embeddings.weight.data = new_pos_embed
model.bert.embeddings.position_ids.data = torch.tensor([i for i in range(max_pos)]).reshape(1, max_pos)
# replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
config.attention_window = [attention_window] * config.num_hidden_layers
for i, layer in enumerate(model.bert.encoder.layer):
longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
longformer_self_attn.query = layer.attention.self.query
longformer_self_attn.key = layer.attention.self.key
longformer_self_attn.value = layer.attention.self.value
longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)
longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)
longformer_self_attn.value_global = copy.deepcopy(layer.attention.self.value)
layer.attention.self = longformer_self_attn
model.save_pretrained(save_model_to)
tokenizer.save_pretrained(save_model_to)
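# Hyperparameter-sweep variant of the training loop: same as the main trainer, but dev
# loss / AUROC / step are reported to Ray Tune and the best checkpoint files are rotated
# (best_model -> checkpoint_1 -> checkpoint_2) so earlier bests are kept.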
def train(model, train_data, dev_data, out_dir, epochs, lr, class_weights, acc_steps, strategy, config_string):
    # print('Dropout default: {}'.format(model.config.hidden_dropout_prob))
weights = torch.cuda.FloatTensor([x[1] for x in list(sorted(class_weights.items(), key=lambda x:x[0]))])
weighted_ce_loss = nn.CrossEntropyLoss(weight=weights)
if 'vote' in strategy:
weighted_ce_loss = nn.NLLLoss(weight=weights)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
if args.use_warmup:
# optimizer = AdamW(model.parameters(), lr=lr)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
print('Using linear schedule with warmup for {} steps'.format(args.warmup_steps))
scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, epochs*len(train_data))
step = 0
prev_dev_loss = 10000
prev_auroc = -10000
batch_size = len(train_data[0]['ehr_id'])
acc_factor = acc_steps/batch_size
for epoch in range(epochs):
random.shuffle(train_data)
model.train()
epoch_loss = 0.0
optimizer.zero_grad()
for batch in train_data:
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['ehr_id', 'pubmed_docs', 'pubmed_doc_weights', 'ehr_rerank_tokens', 'pubmed_doc_ids']}
if 'pubmed_docs' in batch:
gpu_batch['pubmed_docs'] = batch['pubmed_docs']
gpu_batch['pubmed_doc_weights'] = batch['pubmed_doc_weights']
if 'pubmed_doc_ids' in batch:
gpu_batch['pubmed_doc_ids'] = batch['pubmed_doc_ids']
if 'ehr_rerank_tokens' in batch:
gpu_batch['ehr_rerank_tokens'] = {x:y.cuda() for x,y in batch['ehr_rerank_tokens'].items()}
outputs = model(**gpu_batch)
logits = outputs[1]
wloss = weighted_ce_loss(logits, gpu_batch["labels"])
if outputs[0] is not None and not math.isinf(outputs[0].item()) and not math.isnan(outputs[0].item()):
wloss += (outputs[0] / batch_size)
wloss /= acc_factor
epoch_loss += wloss.item()
wloss.backward()
step += batch_size
if step%acc_steps == 0:
# NOTE: Uncomment gradient clipping for L2R experiments
# nn.utils.clip_grad_norm_(model.parameters(), 0.1)
optimizer.step()
optimizer.zero_grad()
if step%800 == 0:
print('Completed {} training steps'.format(step))
dev_loss, auroc = test(model, dev_data, epoch=epoch, return_loss=True, class_weights=class_weights, strategy=strategy)
tune.report(dev_loss=dev_loss)
tune.report(auroc=auroc)
tune.report(step=step)
if not args.stop_on_roc and dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
if os.path.exists(os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string))):
copyfile(os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string)),
os.path.join(out_dir, 'checkpoint_2_{}.pt'.format(config_string)))
copyfile(os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)),
os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string)))
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
elif os.path.exists(os.path.join(out_dir, 'best_model_{}.pt'.format(config_string))):
print('Need to move best model')
copyfile(os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)),
os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string)))
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
else:
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
if args.stop_on_roc and auroc > prev_auroc:
prev_auroc = auroc
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
if not args.use_warmup:
if not args.stop_on_roc:
scheduler.step(dev_loss)
else:
scheduler.step(auroc)
else:
print('Different step for linear warmup')
scheduler.step()
print('Current epoch loss: {}'.format(epoch_loss))
epoch_loss /= (len(train_data)/acc_factor)
print('Training loss after epoch {}: {}'.format(epoch, epoch_loss))
# NOTE: Uncomment to enable per-epoch checkpointing
# This can take up a lot of space during hyperparamter sweeps
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': epoch_loss,
# }, os.path.join(out_dir, 'checkpoints/checkpoint_{}_{}.pt'.format(epoch, config_string)))
dev_loss, auroc = test(model, dev_data, epoch=epoch, return_loss=True, class_weights=class_weights, strategy=strategy)
tune.report(dev_loss=dev_loss)
tune.report(auroc=auroc)
tune.report(step=step)
if dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
scheduler.step(dev_loss)
def test(model, dev_data, epoch=0, return_loss=False, class_weights=None, strategy='average'):
model.eval()
unique_labels = list(class_weights.keys())
weights = torch.cuda.FloatTensor([x[1] for x in list(sorted(class_weights.items(), key=lambda x:x[0]))])
weighted_ce_loss = nn.CrossEntropyLoss(weight=weights)
if 'vote' in strategy:
weighted_ce_loss = nn.NLLLoss(weight=weights)
softmax = nn.Softmax(dim=1)
dev_loss = 0.0
all_preds = []
all_pred_probs = []
all_labels = []
all_ids = []
all_pred_probs_dump = []
for batch in dev_data:
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['ehr_id', 'pubmed_docs', 'pubmed_doc_weights', 'ehr_rerank_tokens', 'pubmed_doc_ids']}
if 'pubmed_docs' in batch:
gpu_batch['pubmed_docs'] = batch['pubmed_docs']
gpu_batch['pubmed_doc_weights'] = batch['pubmed_doc_weights']
if 'pubmed_doc_ids' in batch:
gpu_batch['pubmed_doc_ids'] = batch['pubmed_doc_ids']
if 'ehr_rerank_tokens' in batch:
gpu_batch['ehr_rerank_tokens'] = {x:y.cuda() for x,y in batch['ehr_rerank_tokens'].items()}
outputs = model(**gpu_batch, split='test')
logits = outputs[1]
all_preds += torch.argmax(logits, dim=1).detach().cpu().numpy().tolist()
probs = softmax(logits) if 'average' in strategy else torch.exp(logits)
all_pred_probs_dump += probs.detach().cpu().numpy().tolist()
probs = probs if len(unique_labels) > 2 else probs[:,1]
wloss = weighted_ce_loss(logits, gpu_batch["labels"])
dev_loss += wloss.item()
all_pred_probs += probs.detach().cpu().numpy().tolist()
all_labels += gpu_batch["labels"].cpu().numpy().tolist()
all_ids += batch['ehr_id']
prediction_dict = dict(zip(all_ids, all_preds))
pred_prob_dict = dict(zip(all_ids, all_pred_probs_dump))
if not return_loss and args.dump_test_preds: # return_loss flag is not used for the test data
pickle.dump(prediction_dict, open(os.path.join(args.out_dir, 'dev_predictions.pkl'), 'wb'))
pickle.dump(pred_prob_dict, open(os.path.join(args.out_dir, 'dev_probabilities.pkl'), 'wb'))
auroc, f1, mf1 = compute_classification_metrics(all_preds, all_pred_probs, all_labels)
dev_loss /= len(dev_data)
print('Validation loss after epoch {}: {}'.format(epoch, dev_loss))
print('------------------Validation Scores for Epoch {}-------------------'.format(epoch))
print('AUROC: {}'.format(auroc))
print('Micro F1: {}'.format(f1))
print('Macro F1: {}'.format(mf1))
if return_loss:
return dev_loss, auroc
def compute_classification_metrics(preds, probs, labels):
unique_labels = set(labels)
probs = np.array(probs)
labels = np.array(labels)
preds = np.array(preds)
roc_auc = roc_auc_score(y_true=labels, y_score=probs, average="macro", multi_class="ovo")
f1 = f1_score(y_true=labels, y_pred=preds, average='micro')
mf1 = f1_score(y_true=labels, y_pred=preds, average='macro')
return roc_auc, f1, mf1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train', type=str, action='store', required=True, help='Path to training file')
parser.add_argument('--dev', type=str, action='store', required=True, help='Path to development file')
parser.add_argument('--test', type=str, action='store', required=True, help='Path to test file')
parser.add_argument('--lit_dir', type=str, action='store', help='Path to directory containing literature ')
parser.add_argument('--init_model', type=str, action='store', default='microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', \
help='Pretrained model to initialize weights from')
parser.add_argument('--rerank_model', type=str, action='store', help='Pretrained model to initialize reranker weights from')
parser.add_argument('--rerank_checkpoint', type=str, action='store', help='Checkpoint to load reranker weights from')
parser.add_argument('--longmodel_dir', type=str, action='store', help='Path to dump longformer version of model')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--do_train', action='store_true', default=False, help='Specify if training should be performed')
parser.add_argument('--do_test', action='store_true', default=False, help='Specify if evaluation on test data should be performed')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--attention_window', type=int, action='store', default=512, help='Attention window size')
parser.add_argument('--max_pos', type=int, action='store', default=4096, help='Maximum position embedding size')
parser.add_argument('--batch_size', type=int, action='store', default=1, help='Specify batch size')
parser.add_argument('--lr', type=float, action='store', default=2e-5, help='Specify learning rate')
parser.add_argument('--epochs', type=int, action='store', default=20, help='Specify number of epochs')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
parser.add_argument('--accumulation_steps', type=int, action='store', default=32, help='Specify number of steps for gradient accumulation')
parser.add_argument('--num_top_docs', type=float, action='store', default=1, help='Number of top ranked abstracts from PubMed to include')
parser.add_argument('--strategy', type=str, action='store', default='average', help='Strategy to use to combine literature with EHR')
parser.add_argument('--enc_strategy', type=str, action='store', default='bienc', help='Encoding strategy to use for notes and articles (bienc/crossenc)')
parser.add_argument('--use_warmup', action='store_true', default=False, help='Choose whether to use LR warmup or not')
parser.add_argument('--warmup_steps', type=int, action='store', default=5000, help='Choose number of warmup steps')
parser.add_argument('--stop_on_roc', action='store_true', default=False, help='Use AUROC as early stopping metric')
parser.add_argument('--dump_test_preds', action='store_true', default=False, help='Dump predictions on test set')
parser.add_argument('--use_pico', action='store_true', default=False, help='Add PICO highlights to chosen literature docs')
parser.add_argument('--doc_embeds', type=str, action='store', help='Embeddings of top ranked abstracts for learning to retrieve')
parser.add_argument('--l2r_top_docs', type=int, action='store', default=5, help='Number of top documents to choose in learning to retrieve')
parser.add_argument('--outcome', type=str, action='store', required=True, help='Choose outcome to predict (pmv/los/mortality)')
parser.add_argument('--retrieval_labels', type=str, action='store', help='Path to file containing pseudo labels for retrieval training L2R')
parser.add_argument('--query_proj', type=str, action='store', help='Projection layer to use for queries in L2R')
parser.add_argument('--query_loss', type=str, action='store', help='Direct loss term for query encoding (pred/reg)')
args = parser.parse_args()
if args.longmodel_dir is not None and not os.path.exists(args.longmodel_dir):
os.makedirs(args.longmodel_dir)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
checkpoint_dir = os.path.join(args.out_dir, 'checkpoints')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
set_seed(args.seed)
setproctitle.setproctitle("python")
'''
dataset = EHRDataset(args.train, args.dev, args.test)
dataset.compute_class_weights()
model_path = args.longmodel_dir if args.longmodel_dir is not None else args.init_model
num_labels = 2 if args.outcome != 'los' else 4
model_config = BertConfig.from_pretrained(model_path,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
tokenizer = BertTokenizerFast.from_pretrained(model_path) if 'Discharge' not in model_path \
else AutoTokenizer.from_pretrained(model_path)
model = BertLongForSequenceClassification.from_pretrained(model_path, config=model_config) if args.longmodel_dir is not None \
else BertForSequenceClassification.from_pretrained(model_path, config=model_config)
'''
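# Tokenize a list of [id, ehr_text, label, (optional literature)] examples into one batch:
# attaches labels and EHR ids, optionally builds reranker inputs (outcome question + EHR text),
# and tokenizes retrieved PubMed abstracts according to the bi-encoder / cross-encoder strategy.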
def preprocess_function(tokenizer, examples, split, topk, rerank_tokenizer=None, outcome_questions=None, retrieval_labels=None):
data_args = (([x[1] for x in examples], None))
max_length = args.max_pos if args.longmodel_dir is not None else 512
result = tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
result["labels"] = torch.LongTensor([(x[2]) for x in examples])
result["ehr_id"] = [(x[0]) for x in examples]
if args.doc_embeds is not None:
if rerank_tokenizer is not None:
data_args = (([outcome_questions[args.outcome] + x[1] for x in examples], None))
result["ehr_rerank_tokens"] = rerank_tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
else:
data_args = (([outcome_questions[args.outcome] + x[1] for x in examples], None))
result["ehr_rerank_tokens"] = tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
if args.lit_dir is not None and args.doc_embeds is None:
result["pubmed_docs"] = []
result["pubmed_doc_weights"] = []
k_range = int(topk) if topk >= 1 else max([len(x[-1]) for x in examples])
if k_range > 0:
if args.enc_strategy == 'bienc':
for k in range(k_range):
result["pubmed_doc_weights"].append([x[-1][k][2] if len(x[-1]) > k else 0.0 for x in examples])
data_args = (([x[-1][k][1] if len(x[-1]) > k else '' for x in examples], None))
result["pubmed_docs"].append(tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'))
if args.enc_strategy == 'crossenc':
for k in range(k_range):
result["pubmed_doc_weights"].append([x[-1][k][2] if len(x[-1]) > k else 0.0 for x in examples])
data_args = (([x[1] for x in examples], [x[-1][k][1] if len(x[-1]) > k else '' for x in examples]))
result["pubmed_docs"].append(tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'))
if args.doc_embeds is not None:
result["pubmed_docs"] = []
result["pubmed_doc_weights"] = []
result["pubmed_doc_embeds"] = []
result["pubmed_doc_ids"] = []
if args.retrieval_labels is not None and split=='train':
result["pubmed_doc_labels"] = []
for x in examples:
result["pubmed_doc_ids"].append([y[0] for y in x[-1]])
result["pubmed_doc_weights"].append([y[2] for y in x[-1]])
data_args = (([y[1] for y in x[-1]], None)) # y[0] will be Pubmed ID of doc
result["pubmed_docs"].append(data_args)
# result["pubmed_docs"].append(tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'))
result["pubmed_doc_embeds"].append(np.vstack([x[3][y[0]] for y in x[-1]])[np.newaxis,:,:])
if retrieval_labels is not None and split=='train':
result["pubmed_doc_labels"].append([retrieval_labels[x[0]][y[0]] for y in x[-1]])
if retrieval_labels is not None and split=='train':
result["pubmed_doc_labels"] = torch.LongTensor(np.vstack(result["pubmed_doc_labels"]))
result["pubmed_doc_embeds"] = np.vstack(result["pubmed_doc_embeds"])
result["pubmed_doc_embeds"] = torch.FloatTensor(result["pubmed_doc_embeds"])
return result
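# Flatten the dataset dict into [id, ehr, outcome, (literature...)] examples and tokenize them
# into fixed-size batches via preprocess_function.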
def batch_and_tokenize_data(tokenizer, examples, batch_size, split, topk, rerank_tokenizer=None, outcome_questions=None, retrieval_labels=None):
example_list = []
for file in list(examples.keys()):
example = examples[file]
if args.lit_dir is None:
example_list.append([file, example['ehr'], example['outcome']])
elif args.doc_embeds is None:
example_list.append([file, example['ehr'], example['outcome'], example['pubmed_docs']])
else:
example_list.append([file, example['ehr'], example['outcome'], example['pubmed_doc_embeds'], example['pubmed_docs']])
batches = []
# if args.longmodel_dir is not None and (split == 'dev' or split == 'test'):
# batch_size = 1
for i in range(0, len(example_list), batch_size):
start = i
end = min(start+batch_size, len(example_list))
batch = preprocess_function(tokenizer, example_list[start:end], split, topk, rerank_tokenizer, outcome_questions, retrieval_labels)
batches.append(batch)
if len(batches) % 100 == 0:
print('Created {} batches'.format(len(batches)))
return batches
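# One Ray Tune trial: build the dataset (optionally with retrieved literature), instantiate the
# (optionally literature-augmented) classifier, create batches, then train and/or evaluate for
# the sampled hyperparameter configuration.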
def run_outcome_prediction_pipeline(config):
outcome_questions = {'mortality': 'What is the hospital mortality? ', \
'pmv': 'What is the probability of prolonged mechanical ventilation? ', \
'los': 'What is the probable length of stay? '}
dataset = EHRDataset(args.train, args.dev, args.test)
dataset.compute_class_weights()
if args.lit_dir is not None:
top_doc_num = config["k"] if not args.doc_embeds else int(args.num_top_docs)
dataset.add_relevant_literature(args.lit_dir, top_doc_num, args.use_pico)
missing_lit = 0
for doc in dataset.train_data:
if 'pubmed_docs' not in dataset.train_data[doc]:
missing_lit += 1
for doc in dataset.dev_data:
if 'pubmed_docs' not in dataset.dev_data[doc]:
missing_lit += 1
for doc in dataset.test_data:
if 'pubmed_docs' not in dataset.test_data[doc]:
missing_lit += 1
print('{} documents do not have PubMed abstracts'.format(missing_lit))
if args.doc_embeds:
dataset.add_literature_matrices(args.doc_embeds)
num_labels = len(list(dataset.class_weights.keys()))
retrieval_labels = None
if args.retrieval_labels is not None:
retrieval_labels = pickle.load(open(args.retrieval_labels, 'rb'))
if args.longmodel_dir is not None:
create_long_model(
init_model=args.init_model,
save_model_to=args.longmodel_dir,
attention_window=args.attention_window,
max_pos=args.max_pos,
num_labels=num_labels
)
model_path = args.longmodel_dir if args.longmodel_dir is not None else args.init_model
model_config = BertConfig.from_pretrained(model_path,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
tokenizer = BertTokenizerFast.from_pretrained(model_path) if 'Discharge' not in model_path \
else AutoTokenizer.from_pretrained(model_path)
model = BertLongForSequenceClassification.from_pretrained(model_path, config=model_config) if args.longmodel_dir is not None \
else BertForSequenceClassification.from_pretrained(model_path, config=model_config)
rerank_config, rerank_tokenizer, rerank_model = None, None, None
if args.rerank_model is not None:
rerank_label_vocab = {'Relevant': 1, 'Irrelevant': 0}
rerank_config = AutoConfig.from_pretrained(
args.rerank_model,
num_labels=len(list(rerank_label_vocab.keys())),
label2id=rerank_label_vocab,
id2label={i: l for l, i in rerank_label_vocab.items()},
cache_dir='../cache',
)
rerank_tokenizer = AutoTokenizer.from_pretrained(
args.rerank_model,
cache_dir='../cache',
use_fast=True,
)
rerank_model = AutoModel.from_pretrained(
args.rerank_model,
from_tf=bool(".ckpt" in args.rerank_model),
config=rerank_config,
cache_dir='../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = rerank_tokenizer.add_special_tokens(special_tokens_dict)
rerank_model.resize_token_embeddings(len(rerank_tokenizer))
if args.rerank_checkpoint is not None and args.do_train: # Only load pretrained reranker if training is to be carried out
rerank_model.load_state_dict(torch.load(args.rerank_checkpoint)) # Otherwise full model will contain reranker weights too
if args.use_pico:
special_tokens_dict = {'additional_special_tokens': ['<PAR>', '</PAR>', '<INT>', '</INT>', '<OUT>', '</OUT>']}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
print('Added additional special tokens for PICO highlights')
if args.lit_dir is not None and args.doc_embeds is None:
if args.enc_strategy == 'bienc':
model = LitAugPredictorBienc(model_config, model, config["k"], args.strategy)
elif args.enc_strategy == 'crossenc':
model = LitAugPredictorCrossenc(model_config, model, config["k"], args.strategy)
if args.lit_dir is not None and args.doc_embeds is not None:
if args.query_proj is None:
model = L2RLitAugPredictorBienc(model_config, model, tokenizer, config["k"], args.strategy, rerank_model)
else:
model = L2RLitAugPredictorBienc(model_config, model, tokenizer, config["k"], args.strategy, rerank_model, args.query_proj)
if args.query_loss is not None:
model.query_loss = args.query_loss
model = model.cuda()
# print('Initialized longformer model with pretrained LM...')
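# Smaller batches are used when more abstracts are retrieved per EHR, presumably to keep
# the literature-augmented inputs within GPU memory.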
batch_size = 5 if config["k"] == 1 else 1
print('Started batch creation')
train_batches = batch_and_tokenize_data(tokenizer, dataset.train_data, batch_size, 'train', config["k"], rerank_tokenizer, outcome_questions, retrieval_labels)
print('Created {} train batches'.format(len(train_batches)))
dev_batches = batch_and_tokenize_data(tokenizer, dataset.dev_data, batch_size, 'dev', config["k"], rerank_tokenizer, outcome_questions, retrieval_labels)
print('Created {} dev batches'.format(len(dev_batches)))
test_batches = batch_and_tokenize_data(tokenizer, dataset.test_data, batch_size, 'test', config["k"], rerank_tokenizer, outcome_questions, retrieval_labels)
print('Created {} test batches'.format(len(test_batches)))
config_string = '{}_{}_{}'.format(config["lr"], config["k"], config["acc"])
if args.do_train:
train(model, train_batches, dev_batches, args.out_dir, args.epochs, config["lr"], dataset.class_weights, \
config["acc"], args.strategy, config_string)
if args.do_test:
if args.checkpoint is not None:
if 'checkpoint' in args.checkpoint:
full_checkpoint = torch.load(args.checkpoint)
model.load_state_dict(full_checkpoint['model_state_dict'])
else:
model.load_state_dict(torch.load(args.checkpoint))
else:
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model_{}.pt'.format(config_string))))
test(model, dev_batches, class_weights=dataset.class_weights, strategy=args.strategy)
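# Hyperparameter sweep: grid over learning rate, number of retrieved abstracts (k) and gradient
# accumulation steps, with a median stopping rule on validation loss.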
scheduler = MedianStoppingRule(time_attr='step', metric='dev_loss', mode='min', grace_period=10000, min_time_slice=800)
analysis = tune.run(
run_outcome_prediction_pipeline,
num_samples=1,
config={
"num_workers": 3,
"lr": tune.grid_search([5e-4, 1e-5, 5e-5, 1e-6, 5e-6]),
"k": tune.grid_search([1, 5, 10]),
"acc": tune.grid_search([10, 20])
},
resources_per_trial={'gpu': 1},
scheduler=scheduler
)
print("best config: ", analysis.get_best_config(metric="dev_loss", mode="min"))
# NOTE: To run testing on a specific configuration only, comment out the following two lines
# and uncomment the two lines after those, which allow you to specify config parameters of your choice
best_config = analysis.get_best_config(metric="dev_loss", mode="min")
config_string = '{}_{}_{}'.format(best_config["lr"], best_config["k"], best_config["acc"])
# best_config = {"k": 5, "acc": 20, "lr": 1e-5, "num_workers": 3}
# config_string = '{}_{}_{}'.format(best_config["lr"], best_config["k"], best_config["acc"])
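# Final evaluation: rebuild the tokenizer, model and dataset for the best configuration found by
# the sweep and score its saved best checkpoint on the test split.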
outcome_questions = {'mortality': 'What is the hospital mortality? ', \
'pmv': 'What is the probability of prolonged mechanical ventilation? ', \
'los': 'What is the probable length of stay? '}
model_path = args.longmodel_dir if args.longmodel_dir is not None else args.init_model
num_labels = 2 if args.outcome != 'los' else 4
retrieval_labels = None
if args.retrieval_labels is not None:
retrieval_labels = pickle.load(open(args.retrieval_labels, 'rb'))
model_config = BertConfig.from_pretrained(model_path,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
tokenizer = BertTokenizerFast.from_pretrained(model_path) if 'Discharge' not in model_path \
else AutoTokenizer.from_pretrained(model_path)
model = BertLongForSequenceClassification.from_pretrained(model_path, config=model_config) if args.longmodel_dir is not None \
else BertForSequenceClassification.from_pretrained(model_path, config=model_config)
dataset = EHRDataset(args.train, args.dev, args.test)
dataset.compute_class_weights()
if args.lit_dir is not None:
top_doc_num = best_config["k"] if not args.doc_embeds else int(args.num_top_docs)
dataset.add_relevant_literature(args.lit_dir, top_doc_num, args.use_pico)
if args.doc_embeds:
dataset.add_literature_matrices(args.doc_embeds)
rerank_config, rerank_tokenizer, rerank_model = None, None, None
if args.rerank_model is not None:
rerank_label_vocab = {'Relevant': 1, 'Irrelevant': 0}
rerank_config = AutoConfig.from_pretrained(
args.rerank_model,
num_labels=len(list(rerank_label_vocab.keys())),
label2id=rerank_label_vocab,
id2label={i: l for l, i in rerank_label_vocab.items()},
cache_dir='../cache',
)
rerank_tokenizer = AutoTokenizer.from_pretrained(
args.rerank_model,
cache_dir='../cache',
use_fast=True,
)
rerank_model = AutoModel.from_pretrained(
args.rerank_model,
from_tf=bool(".ckpt" in args.rerank_model),
config=rerank_config,
cache_dir='../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = rerank_tokenizer.add_special_tokens(special_tokens_dict)
rerank_model.resize_token_embeddings(len(rerank_tokenizer))
batch_size = 5 if best_config["k"] == 1 else 1
test_batches = batch_and_tokenize_data(tokenizer, dataset.test_data, batch_size, 'test', best_config["k"], rerank_tokenizer, outcome_questions, retrieval_labels)
if args.lit_dir is not None and args.doc_embeds is None:
if args.enc_strategy == 'bienc':
model = LitAugPredictorBienc(model_config, model, best_config["k"], args.strategy)
elif args.enc_strategy == 'crossenc':
model = LitAugPredictorCrossenc(model_config, model, best_config["k"], args.strategy)
if args.lit_dir is not None and args.doc_embeds is not None:
if args.query_proj is None:
model = L2RLitAugPredictorBienc(model_config, model, tokenizer, best_config["k"], args.strategy, rerank_model)
else:
model = L2RLitAugPredictorBienc(model_config, model, tokenizer, best_config["k"], args.strategy, rerank_model, args.query_proj)
if args.query_loss is not None:
model.query_loss = args.query_loss
model = model.cuda()
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model_{}.pt'.format(config_string))))
test(model, test_batches, class_weights=dataset.class_weights, strategy=args.strategy)
| BEEP-main | outcome-prediction/run_outcome_prediction_hpo.py |
import argparse
import random
import os
import pickle
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score
import setproctitle
from data_loader import EHRDataset
from transformers import AdamW, BertConfig, BertTokenizer, BertForSequenceClassification, \
AutoTokenizer, AutoConfig, AutoModel, BertTokenizerFast, set_seed, get_linear_schedule_with_warmup
from transformers.models.longformer.modeling_longformer import LongformerSelfAttention
from outcome_models import BertLongForSequenceClassification, LitAugPredictorBienc, LitAugPredictorCrossenc, L2RLitAugPredictorBienc
from ray import tune
from ray.tune.schedulers import MedianStoppingRule
from shutil import copyfile
os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "1"
def create_long_model(init_model, save_model_to, attention_window, max_pos, num_labels):
config = BertConfig.from_pretrained(init_model,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
model = BertForSequenceClassification.from_pretrained(init_model, config=config)
tokenizer = BertTokenizerFast.from_pretrained(init_model, model_max_length=max_pos)
config = model.config
# extend position embeddings
tokenizer.model_max_length = max_pos
tokenizer.init_kwargs['model_max_length'] = max_pos
current_max_pos, embed_size = model.bert.embeddings.position_embeddings.weight.shape
config.max_position_embeddings = max_pos
assert max_pos > current_max_pos
# allocate a larger position embedding matrix
new_pos_embed = model.bert.embeddings.position_embeddings.weight.new_empty(max_pos, embed_size)
# copy position embeddings over and over to initialize the new position embeddings
k = 0
step = current_max_pos
while k < max_pos - 1:
new_pos_embed[k:(k + step)] = model.bert.embeddings.position_embeddings.weight
k += step
model.bert.embeddings.position_embeddings.weight.data = new_pos_embed
model.bert.embeddings.position_ids.data = torch.tensor([i for i in range(max_pos)]).reshape(1, max_pos)
# replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
config.attention_window = [attention_window] * config.num_hidden_layers
for i, layer in enumerate(model.bert.encoder.layer):
longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
longformer_self_attn.query = layer.attention.self.query
longformer_self_attn.key = layer.attention.self.key
longformer_self_attn.value = layer.attention.self.value
longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)
longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)
longformer_self_attn.value_global = copy.deepcopy(layer.attention.self.value)
layer.attention.self = longformer_self_attn
model.save_pretrained(save_model_to)
tokenizer.save_pretrained(save_model_to)
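# Training loop: class-weighted cross-entropy (or NLL for voting strategies), gradient accumulation,
# dev evaluation every 800 training examples, Ray Tune metric reporting, and best-model checkpointing.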
def train(model, train_data, dev_data, out_dir, epochs, lr, class_weights, acc_steps, strategy, config_string):
# print('Dropout default" {}'.format(model.config.hidden_dropout_prob))
weights = torch.cuda.FloatTensor([x[1] for x in list(sorted(class_weights.items(), key=lambda x:x[0]))])
weighted_ce_loss = nn.CrossEntropyLoss(weight=weights)
if 'vote' in strategy:
weighted_ce_loss = nn.NLLLoss(weight=weights)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
if args.use_warmup:
# optimizer = AdamW(model.parameters(), lr=lr)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
print('Using linear schedule with warmup for {} steps'.format(args.warmup_steps))
scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, epochs*len(train_data))
step = 0
prev_dev_loss = 10000
prev_auroc = -10000
batch_size = len(train_data[0]['ehr_id'])
acc_factor = acc_steps/batch_size
for epoch in range(epochs):
random.shuffle(train_data)
model.train()
epoch_loss = 0.0
optimizer.zero_grad()
for batch in train_data:
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['ehr_id', 'pubmed_docs', 'pubmed_doc_weights', 'ehr_rerank_tokens', 'pubmed_doc_ids']}
if 'pubmed_docs' in batch:
gpu_batch['pubmed_docs'] = batch['pubmed_docs']
gpu_batch['pubmed_doc_weights'] = batch['pubmed_doc_weights']
if 'pubmed_doc_ids' in batch:
gpu_batch['pubmed_doc_ids'] = batch['pubmed_doc_ids']
if 'ehr_rerank_tokens' in batch:
gpu_batch['ehr_rerank_tokens'] = {x:y.cuda() for x,y in batch['ehr_rerank_tokens'].items()}
outputs = model(**gpu_batch)
logits = outputs[1]
wloss = weighted_ce_loss(logits, gpu_batch["labels"])
if outputs[0] is not None:
wloss += outputs[0]
wloss /= acc_factor
epoch_loss += wloss.item()
wloss.backward()
step += batch_size
if step%acc_steps == 0:
optimizer.step()
optimizer.zero_grad()
if step%800 == 0:
print('Completed {} training steps'.format(step))
dev_loss, auroc = test(model, dev_data, epoch=epoch, return_loss=True, class_weights=class_weights, strategy=strategy)
tune.report(dev_loss=dev_loss)
tune.report(auroc=auroc)
tune.report(step=step)
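# Keep the current best model and rotate the two previous bests into checkpoint_1/checkpoint_2
# whenever validation loss improves (or track AUROC instead when --stop_on_roc is set).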
if not args.stop_on_roc and dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
if os.path.exists(os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string))):
copyfile(os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string)),
os.path.join(out_dir, 'checkpoint_2_{}.pt'.format(config_string)))
copyfile(os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)),
os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string)))
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
elif os.path.exists(os.path.join(out_dir, 'best_model_{}.pt'.format(config_string))):
print('Need to move best model')
copyfile(os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)),
os.path.join(out_dir, 'checkpoint_1_{}.pt'.format(config_string)))
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
else:
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
if args.stop_on_roc and auroc > prev_auroc:
prev_auroc = auroc
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
if not args.use_warmup:
if not args.stop_on_roc:
scheduler.step(dev_loss)
else:
scheduler.step(auroc)
else:
print('Different step for linear warmup')
scheduler.step()
epoch_loss /= (len(train_data)/acc_factor)
print('Training loss after epoch {}: {}'.format(epoch, epoch_loss))
# NOTE: Uncomment this to enable per-epoch checkpointing
# However, this is likely to occupy a lot of space during hyperparameter sweeps
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': epoch_loss,
# }, os.path.join(out_dir, 'checkpoints/checkpoint_{}_{}.pt'.format(epoch, config_string)))
dev_loss, auroc = test(model, dev_data, epoch=epoch, return_loss=True, class_weights=class_weights, strategy=strategy)
tune.report(dev_loss=dev_loss)
tune.report(auroc=auroc)
tune.report(step=step)
if dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_{}.pt'.format(config_string)))
scheduler.step(dev_loss)
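# Evaluate on dev/test batches: accumulate weighted loss, predictions and probabilities,
# report AUROC / micro-F1 / macro-F1, and optionally dump per-EHR predictions to disk.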
def test(model, dev_data, epoch=0, return_loss=False, class_weights=None, strategy='average'):
model.eval()
unique_labels = list(class_weights.keys())
weights = torch.cuda.FloatTensor([x[1] for x in list(sorted(class_weights.items(), key=lambda x:x[0]))])
weighted_ce_loss = nn.CrossEntropyLoss(weight=weights)
if 'vote' in strategy:
weighted_ce_loss = nn.NLLLoss(weight=weights)
softmax = nn.Softmax(dim=1)
dev_loss = 0.0
all_preds = []
all_pred_probs = []
all_labels = []
all_ids = []
all_pred_probs_dump = []
for batch in dev_data:
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['ehr_id', 'pubmed_docs', 'pubmed_doc_weights', 'ehr_rerank_tokens', 'pubmed_doc_ids']}
if 'pubmed_docs' in batch:
gpu_batch['pubmed_docs'] = batch['pubmed_docs']
gpu_batch['pubmed_doc_weights'] = batch['pubmed_doc_weights']
if 'pubmed_doc_ids' in batch:
gpu_batch['pubmed_doc_ids'] = batch['pubmed_doc_ids']
if 'ehr_rerank_tokens' in batch:
gpu_batch['ehr_rerank_tokens'] = {x:y.cuda() for x,y in batch['ehr_rerank_tokens'].items()}
outputs = model(**gpu_batch)
logits = outputs[1]
all_preds += torch.argmax(logits, dim=1).detach().cpu().numpy().tolist()
probs = softmax(logits) if 'average' in strategy else torch.exp(logits)
all_pred_probs_dump += probs.detach().cpu().numpy().tolist()
probs = probs if len(unique_labels) > 2 else probs[:,1]
wloss = weighted_ce_loss(logits, gpu_batch["labels"])
dev_loss += wloss.item()
all_pred_probs += probs.detach().cpu().numpy().tolist()
all_labels += gpu_batch["labels"].cpu().numpy().tolist()
all_ids += batch['ehr_id']
prediction_dict = dict(zip(all_ids, all_preds))
pred_prob_dict = dict(zip(all_ids, all_pred_probs_dump))
if not return_loss and args.dump_test_preds: # return_loss flag is not used for the test data
pickle.dump(prediction_dict, open(os.path.join(args.out_dir, 'dev_predictions.pkl'), 'wb'))
pickle.dump(pred_prob_dict, open(os.path.join(args.out_dir, 'dev_probabilities.pkl'), 'wb'))
auroc, f1, mf1 = compute_classification_metrics(all_preds, all_pred_probs, all_labels)
dev_loss /= len(dev_data)
print('Validation loss after epoch {}: {}'.format(epoch, dev_loss))
print('------------------Validation Scores for Epoch {}-------------------'.format(epoch))
print('AUROC: {}'.format(auroc))
print('Micro F1: {}'.format(f1))
print('Macro F1: {}'.format(mf1))
if return_loss:
return dev_loss, auroc
def compute_classification_metrics(preds, probs, labels):
unique_labels = set(labels)
probs = np.array(probs)
labels = np.array(labels)
preds = np.array(preds)
roc_auc = roc_auc_score(y_true=labels, y_score=probs, average="macro", multi_class="ovo")
f1 = f1_score(y_true=labels, y_pred=preds, average='micro')
mf1 = f1_score(y_true=labels, y_pred=preds, average='macro')
return roc_auc, f1, mf1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train', type=str, action='store', required=True, help='Path to training file')
parser.add_argument('--dev', type=str, action='store', required=True, help='Path to development file')
parser.add_argument('--test', type=str, action='store', required=True, help='Path to test file')
parser.add_argument('--lit_dir', type=str, action='store', help='Path to directory containing literature ')
parser.add_argument('--init_model', type=str, action='store', default='microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', \
help='Pretrained model to initialize weights from')
parser.add_argument('--rerank_model', type=str, action='store', help='Pretrained model to initialize reranker weights from')
parser.add_argument('--rerank_checkpoint', type=str, action='store', help='Checkpoint to load reranker weights from')
parser.add_argument('--longmodel_dir', type=str, action='store', help='Path to dump longformer version of model')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--do_train', action='store_true', default=False, help='Specify if training should be performed')
parser.add_argument('--do_test', action='store_true', default=False, help='Specify if evaluation on test data should be performed')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--attention_window', type=int, action='store', default=512, help='Attention window size')
parser.add_argument('--max_pos', type=int, action='store', default=4096, help='Maximum position embedding size')
parser.add_argument('--batch_size', type=int, action='store', default=1, help='Specify batch size')
parser.add_argument('--lr', type=float, action='store', default=2e-5, help='Specify learning rate')
parser.add_argument('--epochs', type=int, action='store', default=20, help='Specify number of epochs')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
parser.add_argument('--accumulation_steps', type=int, action='store', default=32, help='Specify number of steps for gradient accumulation')
parser.add_argument('--num_top_docs', type=float, action='store', default=1, help='Number of top ranked abstracts from PubMed to include')
parser.add_argument('--strategy', type=str, action='store', default='average', help='Strategy to use to combine literature with EHR')
parser.add_argument('--enc_strategy', type=str, action='store', default='bienc', help='Encoding strategy to use for notes and articles (bienc/crossenc)')
parser.add_argument('--use_warmup', action='store_true', default=False, help='Choose whether to use LR warmup or not')
parser.add_argument('--warmup_steps', type=int, action='store', default=5000, help='Choose number of warmup steps')
parser.add_argument('--stop_on_roc', action='store_true', default=False, help='Use AUROC as early stopping metric')
parser.add_argument('--dump_test_preds', action='store_true', default=False, help='Dump predictions on test set')
parser.add_argument('--use_pico', action='store_true', default=False, help='Add PICO highlights to chosen literature docs')
parser.add_argument('--doc_embeds', type=str, action='store', help='Embeddings of top ranked abstracts for learning to retrieve')
parser.add_argument('--l2r_top_docs', type=int, action='store', default=5, help='Number of top documents to choose in learning to retrieve')
parser.add_argument('--outcome', type=str, action='store', required=True, help='Choose outcome to predict (pmv/los/mortality)')
parser.add_argument('--retrieval_labels', type=str, action='store', help='Path to file containing pseudo labels for retrieval training L2R')
parser.add_argument('--query_proj', type=str, action='store', help='Projection layer to use for queries in L2R')
parser.add_argument('--query_loss', type=str, action='store', help='Direct loss term for query encoding (pred/reg)')
args = parser.parse_args()
if args.longmodel_dir is not None and not os.path.exists(args.longmodel_dir):
os.makedirs(args.longmodel_dir)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
checkpoint_dir = os.path.join(args.out_dir, 'checkpoints')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
set_seed(args.seed)
setproctitle.setproctitle("python")
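# Assumption: preprocess_function below references outcome_questions when --doc_embeds is set,
# but this script only defines the prompts inside run_outcome_prediction_pipeline; defining the
# same prompts here avoids a NameError in that case.
outcome_questions = {'mortality': 'What is the hospital mortality? ', \
'pmv': 'What is the probability of prolonged mechanical ventilation? ', \
'los': 'What is the probable length of stay? '}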
def preprocess_function(tokenizer, examples, split, topk, rerank_tokenizer=None):
data_args = (([x[1] for x in examples], None))
max_length = args.max_pos if args.longmodel_dir is not None else 512
result = tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
result["labels"] = torch.LongTensor([(x[2]) for x in examples])
result["ehr_id"] = [(x[0]) for x in examples]
if args.doc_embeds is not None:
if rerank_tokenizer is not None:
data_args = (([outcome_questions[args.outcome] + x[1] for x in examples], None))
result["ehr_rerank_tokens"] = rerank_tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
else:
data_args = (([outcome_questions[args.outcome] + x[1] for x in examples], None))
result["ehr_rerank_tokens"] = tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
if args.lit_dir is not None and args.doc_embeds is None:
result["pubmed_docs"] = []
result["pubmed_doc_weights"] = []
k_range = int(topk) if topk >= 1 else max([len(x[-1]) for x in examples])
if k_range > 0:
if args.enc_strategy == 'bienc':
for k in range(k_range):
result["pubmed_doc_weights"].append([x[-1][k][2] if len(x[-1]) > k else 0.0 for x in examples])
data_args = (([x[-1][k][1] if len(x[-1]) > k else '' for x in examples], None))
result["pubmed_docs"].append(tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'))
if args.enc_strategy == 'crossenc':
for k in range(k_range):
result["pubmed_doc_weights"].append([x[-1][k][2] if len(x[-1]) > k else 0.0 for x in examples])
data_args = (([x[1] for x in examples], [x[-1][k][1] if len(x[-1]) > k else '' for x in examples]))
result["pubmed_docs"].append(tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'))
if args.doc_embeds is not None:
result["pubmed_docs"] = []
result["pubmed_doc_weights"] = []
result["pubmed_doc_embeds"] = []
result["pubmed_doc_ids"] = []
if args.retrieval_labels is not None and split=='train':
result["pubmed_doc_labels"] = []
for x in examples:
result["pubmed_doc_ids"].append([y[0] for y in x[-1]])
result["pubmed_doc_weights"].append([y[2] for y in x[-1]])
data_args = (([y[1] for y in x[-1]], None)) # y[0] will be Pubmed ID of doc
result["pubmed_docs"].append(tokenizer(*data_args, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'))
result["pubmed_doc_embeds"].append(np.vstack([x[3][y[0]] for y in x[-1]])[np.newaxis,:,:])
if retrieval_labels is not None and split=='train':
result["pubmed_doc_labels"].append([retrieval_labels[x[0]][y[0]] for y in x[-1]])
if retrieval_labels is not None and split=='train':
result["pubmed_doc_labels"] = torch.LongTensor(np.vstack(result["pubmed_doc_labels"]))
result["pubmed_doc_embeds"] = np.vstack(result["pubmed_doc_embeds"])
result["pubmed_doc_embeds"] = torch.FloatTensor(result["pubmed_doc_embeds"])
return result
def batch_and_tokenize_data(tokenizer, examples, batch_size, split, topk, rerank_tokenizer=None):
example_list = []
for file in list(examples.keys()):
example = examples[file]
if args.lit_dir is None:
example_list.append([file, example['ehr'], example['outcome']])
elif args.doc_embeds is None:
example_list.append([file, example['ehr'], example['outcome'], example['pubmed_docs']])
else:
example_list.append([file, example['ehr'], example['outcome'], example['pubmed_doc_embeds'], example['pubmed_docs']])
batches = []
# if args.longmodel_dir is not None and (split == 'dev' or split == 'test'):
# batch_size = 1
for i in range(0, len(example_list), batch_size):
start = i
end = min(start+batch_size, len(example_list))
batch = preprocess_function(tokenizer, example_list[start:end], split, topk, rerank_tokenizer)
batches.append(batch)
if len(batches) % 100 == 0:
print('Created {} batches'.format(len(batches)))
return batches
def run_outcome_prediction_pipeline(config):
outcome_questions = {'mortality': 'What is the hospital mortality? ', \
'pmv': 'What is the probability of prolonged mechanical ventilation? ', \
'los': 'What is the probable length of stay? '}
dataset = EHRDataset(args.train, args.dev, args.test)
dataset.compute_class_weights()
if args.lit_dir is not None:
dataset.add_relevant_literature(args.lit_dir, args.num_top_docs, args.use_pico)
missing_lit = 0
for doc in dataset.train_data:
if 'pubmed_docs' not in dataset.train_data[doc]:
missing_lit += 1
for doc in dataset.dev_data:
if 'pubmed_docs' not in dataset.dev_data[doc]:
missing_lit += 1
for doc in dataset.test_data:
if 'pubmed_docs' not in dataset.test_data[doc]:
missing_lit += 1
print('{} documents do not have PubMed abstracts'.format(missing_lit))
if args.doc_embeds:
dataset.add_literature_matrices(args.doc_embeds)
num_labels = len(list(dataset.class_weights.keys()))
retrieval_labels = None
if args.retrieval_labels is not None:
retrieval_labels = pickle.load(open(args.retrieval_labels, 'rb'))
if args.longmodel_dir is not None:
create_long_model(
init_model=args.init_model,
save_model_to=args.longmodel_dir,
attention_window=args.attention_window,
max_pos=args.max_pos,
num_labels=num_labels
)
model_path = args.longmodel_dir if args.longmodel_dir is not None else args.init_model
model_config = BertConfig.from_pretrained(model_path,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
tokenizer = BertTokenizerFast.from_pretrained(model_path) if 'Discharge' not in model_path \
else AutoTokenizer.from_pretrained(model_path)
model = BertLongForSequenceClassification.from_pretrained(model_path, config=model_config) if args.longmodel_dir is not None \
else BertForSequenceClassification.from_pretrained(model_path, config=model_config)
rerank_config, rerank_tokenizer, rerank_model = None, None, None
if args.rerank_model is not None:
rerank_label_vocab = {'Relevant': 1, 'Irrelevant': 0}
rerank_config = AutoConfig.from_pretrained(
args.rerank_model,
num_labels=len(list(rerank_label_vocab.keys())),
label2id=rerank_label_vocab,
id2label={i: l for l, i in rerank_label_vocab.items()},
cache_dir='../cache',
)
rerank_tokenizer = AutoTokenizer.from_pretrained(
args.rerank_model,
cache_dir='../cache',
use_fast=True,
)
rerank_model = AutoModel.from_pretrained(
args.rerank_model,
from_tf=bool(".ckpt" in args.rerank_model),
config=rerank_config,
cache_dir='../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = rerank_tokenizer.add_special_tokens(special_tokens_dict)
rerank_model.resize_token_embeddings(len(rerank_tokenizer))
if args.rerank_checkpoint is not None and args.do_train: # Only load pretrained reranker if training is to be carried out
rerank_model.load_state_dict(torch.load(args.rerank_checkpoint)) # Otherwise full model will contain reranker weights too
if args.use_pico:
special_tokens_dict = {'additional_special_tokens': ['<PAR>', '</PAR>', '<INT>', '</INT>', '<OUT>', '</OUT>']}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
print('Added additional special tokens for PICO highlights')
if args.lit_dir is not None and args.doc_embeds is None:
if args.enc_strategy == 'bienc':
model = LitAugPredictorBienc(model_config, model, args.num_top_docs, args.strategy)
elif args.enc_strategy == 'crossenc':
model = LitAugPredictorCrossenc(model_config, model, args.num_top_docs, args.strategy)
if args.lit_dir is not None and args.doc_embeds is not None:
if args.query_proj is None:
model = L2RLitAugPredictorBienc(model_config, model, args.l2r_top_docs, args.strategy, rerank_model)
else:
model = L2RLitAugPredictorBienc(model_config, model, args.l2r_top_docs, args.strategy, rerank_model, args.query_proj)
if args.query_loss is not None:
model.query_loss = args.query_loss
model = model.cuda()
# print('Initialized longformer model with pretrained LM...')
print('Started batch creation')
train_batches = batch_and_tokenize_data(tokenizer, dataset.train_data, args.batch_size, 'train', args.num_top_docs, rerank_tokenizer)
print('Created {} train batches'.format(len(train_batches)))
dev_batches = batch_and_tokenize_data(tokenizer, dataset.dev_data, args.batch_size, 'dev', args.num_top_docs, rerank_tokenizer)
print('Created {} dev batches'.format(len(dev_batches)))
test_batches = batch_and_tokenize_data(tokenizer, dataset.test_data, args.batch_size, 'test', args.num_top_docs, rerank_tokenizer)
print('Created {} test batches'.format(len(test_batches)))
config_string = '{}_{}'.format(config["lr"], config["acc"])
if args.do_train:
train(model, train_batches, dev_batches, args.out_dir, args.epochs, config["lr"], dataset.class_weights, \
config["acc"], args.strategy, config_string)
if args.do_test:
if args.checkpoint is not None:
if 'checkpoint' in args.checkpoint:
full_checkpoint = torch.load(args.checkpoint)
model.load_state_dict(full_checkpoint['model_state_dict'])
else:
model.load_state_dict(torch.load(args.checkpoint))
else:
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model_{}.pt'.format(config_string))))
test(model, dev_batches, class_weights=dataset.class_weights, strategy=args.strategy)
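# Baseline sweep: grid over learning rate and accumulation steps only; the number of retrieved
# abstracts is fixed by --num_top_docs rather than tuned.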
scheduler = MedianStoppingRule(time_attr='step', metric='dev_loss', mode='min', grace_period=10000, min_time_slice=800)
analysis = tune.run(
run_outcome_prediction_pipeline,
num_samples=1,
config={
"num_workers": 2,
"lr": tune.grid_search([5e-4, 1e-5, 5e-5, 1e-6, 5e-6]),
"acc": tune.grid_search([10, 20])
},
resources_per_trial={'gpu': 1},
scheduler=scheduler
)
print("best config: ", analysis.get_best_config(metric="dev_loss", mode="min"))
best_config = analysis.get_best_config(metric="dev_loss", mode="min")
config_string = '{}_{}'.format(best_config["lr"], best_config["acc"])
model_path = args.longmodel_dir if args.longmodel_dir is not None else args.init_model
num_labels = 2 if args.outcome != 'los' else 4
model_config = BertConfig.from_pretrained(model_path,
num_labels=num_labels,
label2id={x:x for x in range(num_labels)},
id2label={x:x for x in range(num_labels)}
)
tokenizer = BertTokenizerFast.from_pretrained(model_path) if 'Discharge' not in model_path \
else AutoTokenizer.from_pretrained(model_path)
model = BertLongForSequenceClassification.from_pretrained(model_path, config=model_config) if args.longmodel_dir is not None \
else BertForSequenceClassification.from_pretrained(model_path, config=model_config)
dataset = EHRDataset(args.train, args.dev, args.test)
dataset.compute_class_weights()
if args.lit_dir is not None:
dataset.add_relevant_literature(args.lit_dir, args.num_top_docs, args.use_pico) # the baseline grid does not tune "k", so best_config has no such key; use --num_top_docs as elsewhere in this script
if args.doc_embeds:
dataset.add_literature_matrices(args.doc_embeds)
test_batches = batch_and_tokenize_data(tokenizer, dataset.test_data, args.batch_size, 'test', args.num_top_docs, None)
rerank_config, rerank_tokenizer, rerank_model = None, None, None # placeholders so the L2R branch below does not hit a NameError; any reranker weights are assumed to come from the saved checkpoint loaded below
if args.lit_dir is not None and args.doc_embeds is None:
if args.enc_strategy == 'bienc':
model = LitAugPredictorBienc(model_config, model, args.num_top_docs, args.strategy)
elif args.enc_strategy == 'crossenc':
model = LitAugPredictorCrossenc(model_config, model, args.num_top_docs, args.strategy)
if args.lit_dir is not None and args.doc_embeds is not None:
if args.query_proj is None:
model = L2RLitAugPredictorBienc(model_config, model, args.l2r_top_docs, args.strategy, rerank_model)
else:
model = L2RLitAugPredictorBienc(model_config, model, args.l2r_top_docs, args.strategy, rerank_model, args.query_proj)
if args.query_loss is not None:
model.query_loss = args.query_loss
model = model.cuda()
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model_{}.pt'.format(config_string))))
test(model, test_batches, class_weights=dataset.class_weights, strategy=args.strategy)
| BEEP-main | outcome-prediction/run_outcome_prediction_baseline_hpo.py |
import time
import pickle
import csv
import math
import datetime
import os
import argparse
EMAILID = "[email protected]"
TOOLNAME = ""
from Bio import Entrez
Entrez.email = EMAILID
# Function to retrieve articles from a specified database using a provided query string
# Query string can be a single word/phrase or a list of words/phrase separated using '_'
# Note that if a list of words/phrases is provided, this search will require every term
# to be present in any articles it retrieves (i.e., 'AND' operation for multiple-term lists)
# TODO: Please set your email ID (and optionally a tool name) in the EMAILID and TOOLNAME variables above
def db_extract(db, query):
query_enterz = "+".join([x+"[MeSH Terms]" for x in query.split("_")])
handle = Entrez.esearch(db=db, term=query_enterz, retmax=100000)
record = Entrez.read(handle)
num_papers = int(record["Count"])
id_list = [str(x) for x in record["IdList"]]
articles_ids = set()
print(f"{num_papers} papers found.")
if len(id_list) == num_papers:
articles_ids.update(id_list)
else:
print("cannot get them at once, taking them year by year") # in pubmed when they have > 9999 papers
today = datetime.datetime.today()
max_date = today
min_date = max_date.replace(year=max_date.year-1)
while len(articles_ids) < num_papers:
min_date_str = str(min_date.date()).replace("-", "/") # Entrez expects YYYY/MM/DD date strings
max_date_str = str(max_date.date()).replace("-", "/")
handle = Entrez.esearch(db=db, term=query_enterz, retmax=100000,
mindate=min_date_str, maxdate=max_date_str)
record = Entrez.read(handle)
id_list = [str(x) for x in record["IdList"]]
assert len(id_list) == int(record["Count"]), f"failed to get all {min_date} - {max_date} papers"
articles_ids.update(id_list)
max_date = min_date
min_date = max_date.replace(year=max_date.year-1)
return articles_ids
# Procedure to combine articles retrieved from both PMC and PubMed databases
# To do this combination, PMC article IDs need to be mapped to their corresponding PubMed IDs first
# to avoid double-counting of articles included in both databases
def combine_ids(pmc, pubmed, pmc_ids_map):
reader = csv.reader(open(pmc_ids_map))
id_dict = {}
next(reader, None)
for row in reader:
id_dict[row[-4][3:]] = row[-3]
correct_pmc = set()
for id in pmc:
if id not in id_dict or id_dict[id] == '':
correct_pmc.add('PMC'+id)
continue
correct_pmc.add(id_dict[id])
final_ids = correct_pmc.union(pubmed)
return final_ids
# Split abstracts according to database they are retrieved from
# This needs to be done to ensure that we are checking the correct database while retrieving text
def split_ids(ids_strings):
pubmed = []
pmc = []
for id_string in ids_strings:
if id_string.startswith('PMC'):
pmc.append(id_string[3:]) # Drop PMC prefix since it is no longer needed to distinguish between PubMed/PMC
else:
pubmed.append(id_string)
return pubmed, pmc
def get_abstract_text(subrec):
if "Abstract" in subrec:
return "\n".join(subrec["Abstract"]["AbstractText"]) + "\n"
else:
return ""
def get_abstract_dict(abstract_record):
pmid = str(abstract_record["MedlineCitation"]["PMID"])
text = get_abstract_text(abstract_record["MedlineCitation"]["Article"])
year = int(abstract_record["MedlineCitation"]["DateCompleted"]["Year"])
return pmid, {"text": text, "year": year}
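# Fetch abstracts in batches of up to 200 IDs, keep the non-empty ones as {pmid: {"text", "year"}},
# and pickle the accumulated dictionary to outpath.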
def retrieve_all_abstracts(id_list, database, outpath, error_log):
max_query_size = 200 # PubMed only accepts 200 IDs at a time when retrieving abstract text
print('Retrieval will require {} queries'.format(math.ceil(len(id_list)/float(max_query_size))))
texts = {}
total_texts = 0
for i in range(0, len(id_list), max_query_size):
start = i
end = min(len(id_list), start+max_query_size)
cur_ids = id_list[start:end]
handle = Entrez.efetch(db=database, id=cur_ids, retmode="xml") # 'retmode' (not 'retmod') selects XML output for Entrez.read
record = Entrez.read(handle)
d = map(get_abstract_dict, record["PubmedArticle"])
cur_texts = dict((x, y) for x, y in d if y["text"]!="")
total_texts += len(cur_texts)
texts.update(cur_texts)
if end % 1000 == 0 or end == len(id_list):
print(f'After {end} calls, have {total_texts} abstracts ({end-total_texts} were empty)')
pickle.dump(texts, open(outpath, 'wb'))
return outpath
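# Run every query against each requested database, union the returned IDs, map PMC IDs onto
# PubMed IDs where possible, and pickle the combined ID set.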
def extract_ids(outcome_name, queries, ids_outpath, dbs, pmc_ids_map):
know_to_handle = set(['pubmed', "pmc"])
assert set(dbs).issubset(know_to_handle), f"not provided how to handle dbs {set(dbs) - know_to_handle}"
dbs_ids = {}
for db in dbs:
print(db)
db_ids = set()
for query in queries:
print(f"query: {query}")
db_query_ids = db_extract(db, query)
db_ids.update(db_query_ids)
print(f"union of {db}_ids: {len(db_ids)} ids\n")
dbs_ids[db] = db_ids
if "pmc" in dbs:
pubmed_ids = dbs_ids.get("pubmed", set())
articles_ids = combine_ids(dbs_ids["pmc"], pubmed_ids, pmc_ids_map)
else:
articles_ids = dbs_ids["pubmed"]
print("Final collection for {} has {} articles".format(outcome_name, len(articles_ids)))
pickle.dump(articles_ids, open(ids_outpath, 'wb'))
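# End-to-end retrieval for one outcome: collect (or reload) the article IDs, then scrape and
# pickle the corresponding abstracts.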
def extract_outcome_papers(outcome_name, queries, dbs, out_dir, PMC_ids_map):
abstracts_outpath = os.path.join(out_dir, f"{outcome_name}_texts_and_dates.pkl")
ids_outpath = os.path.join(out_dir, f'{outcome_name}_ids.pkl')
if not os.path.isfile(ids_outpath):
extract_ids(outcome_name, queries, ids_outpath, dbs, PMC_ids_map)
articles_ids = pickle.load(open(ids_outpath, "rb"))
print(f"have {len(articles_ids)} ids")
# Running text retrieval for IDs retrieved by outcome-specific queries
pubmed_ids, pmc_ids = split_ids(articles_ids)
pubmed_ids = sorted(pubmed_ids)
pmc_ids = sorted(pmc_ids)
print('{} abstracts will be scraped from PubMed'.format(len(pubmed_ids)))
print('{} abstracts will be scraped from PMC'.format(len(pmc_ids)))
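# Note: only the PubMed IDs are fetched below; PMC-only IDs are counted but not retrieved in this script.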
error_log = open(f'retrieval_errors.txt', 'w')
retrieve_all_abstracts(pubmed_ids, 'pubmed', abstracts_outpath, error_log)
error_log.close()
return os.path.abspath(abstracts_outpath)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--outcome_name', type=str, action='store', required=True, help='name of outcome')
parser.add_argument('--queries', type=str, action='store', required=True,
help='queries for enterz. encapsulated with " ", separated with ,')
parser.add_argument('--dbs', type=str, nargs="*", action='store', required=True, help='dbs to look in',
default=["pmc", "pubmed"])
parser.add_argument('--out_dir', type=str, action='store', help="directory to save the abstracts")
parser.add_argument('--PMC_ids_map', type=str, action="store", default="../data/PMC_id_map.csv")
args = parser.parse_args()
args_dict = vars(args)
args_dict["queries"] = args_dict["queries"].split(",")
abstracts_outpath = extract_outcome_papers(**args_dict)
print(f"abstracts written to {abstracts_outpath}")
# example:
# outcome_name = "mortality"
# queries = "hospital mortality, mortality_risk factors_humans"
# --> ["hospital mortality", "mortality_risk factors_humans"] | BEEP-main | literature-retrieval/enterz_outcome_specific_retreival.py |
import pickle
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import math
from collections import Counter
import argparse
from scipy.sparse import coo_matrix, coo_array, vstack as sparse_vstack
import time
import psutil
import gc
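# TF-IDF sparse retriever: represents each EHR and each outcome-specific abstract as a bag of
# linked MeSH terms, builds TF-IDF vectors over the shared vocabulary, and keeps the 1000 most
# cosine-similar abstracts per EHR.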
def sparse_rank(lit_mentions_file, ehr_mentions_file, outcome, outpath):
GB = 1024**3
# Loading MeSH terms for EHRs for patient cohort
print("reading EHR mentions file...")
mention_info = pickle.load(open(ehr_mentions_file, 'rb'))
print(f"{len(mention_info)} EHRs")
# Loading MeSH terms for documents from outcome-specific literature collection
print("reading literature mentions file...")
doc_tags_info = pickle.load(open(lit_mentions_file, 'rb'))
print(f"{len(doc_tags_info)} abstracts")
#mentions_info = {key: mention_info[key] for key in list(mention_info.keys())[:1000]}
#doc_tags_info = {key: doc_tags_info[key] for key in list(doc_tags_info.keys())[:1000]}
# Note that the cohort for PMV prediction is smaller than other outcomes
# So we need to filter out patients for whom PMV information is not available
ids2keep = pickle.load(open('../data/pmv_ids.pkl', 'rb')) if outcome == 'pmv' else None
# Reformat EHR MeSH term data
print("preprocessing EHR mentions...")
ehr_tags = {}
for file in mention_info:
if ids2keep is not None and file not in ids2keep:
continue
ehr_mesh_terms = []
for sent in mention_info[file]:
for mention in mention_info[file][sent]:
if 'mesh_ids' not in mention:
continue
for pair in mention['mesh_ids']:
ehr_mesh_terms.append(pair[0])
ehr_tags[file] = ehr_mesh_terms
# Reformat literature MeSH term data
print("preprocessing literature mentions...")
doc_tags = {}
for file in doc_tags_info:
"""
if ids2keep is not None and file not in ids2keep:
continue
"""
doc_mesh_terms = []
for sent in doc_tags_info[file]:
for mention in doc_tags_info[file][sent]:
if 'mesh_ids' not in mention:
continue
for pair in mention['mesh_ids']:
doc_mesh_terms.append(pair[0])
doc_tags[file] = doc_mesh_terms
doc_tags_unique = set([x for y in doc_tags.values() for x in y])
ehr_tags_unique = set([x for y in ehr_tags.values() for x in y])
# Compute vocabulary of MeSH terms for TF-IDF vector building
mesh_vocab = doc_tags_unique & ehr_tags_unique
print('MeSH vocabulary size: {}'.format(len(mesh_vocab)))
mesh_vocab = dict(list(zip(list(mesh_vocab), range(len(mesh_vocab)))))
# Construct TF-IDF vectors for both outcome-specific literature and EHRs
doc_freq = Counter()
ehr_vectors_sparse = {}
article_vectors_sparse = {}
# Term frequency computation
# saving in sparse matrix type,
# which has same API as numpy array but saves- and computes on- actually the non-zeros only
# thus saving a lot of memory and compute
print("computing TF for EHR files...")
for file in ehr_tags:
term_list = [x for x in ehr_tags[file] if x in mesh_vocab]
doc_freq.update(set(term_list))
unique_values, counts = np.unique(term_list, return_counts=True)
indices = [mesh_vocab[x] for x in unique_values]
cur_vec_sparse = coo_matrix((counts, (np.zeros_like(indices), indices)), shape=(1, len(mesh_vocab)))
ehr_vectors_sparse[file] = cur_vec_sparse
print("computing TF for literature files...")
for file in doc_tags:
term_list = [x for x in doc_tags[file] if x in mesh_vocab]
doc_freq.update(set(term_list))
unique_values, counts = np.unique(term_list, return_counts=True)
indices = [mesh_vocab[x] for x in unique_values]
cur_vec_sparse = coo_matrix((counts, (np.zeros_like(indices), indices)), shape=(1, len(mesh_vocab)))
article_vectors_sparse[file] = cur_vec_sparse
print("computing IDF...")
num_docs = len(doc_tags) + len(ehr_tags)
inverse_doc_freq = {k: math.log(num_docs / float(v)) for k, v in doc_freq.items()}
inverse_doc_freq_vector = [1] * len(mesh_vocab)
for x in mesh_vocab:
inverse_doc_freq_vector[mesh_vocab[x]] = inverse_doc_freq[x]
inverse_doc_freq_vector_sparse = coo_array(inverse_doc_freq_vector)
# Construct TF-IDF vector matrices for both literature and outcomes
# This helps speed up cosine similarity computation
print("constructing TF-IDF vectors for EHR files ...")
ehr_items_sparse = list(ehr_vectors_sparse.items())
ehr_ids, ehr_matrix_sparse = [x[0] for x in ehr_items_sparse], [x[1] for x in ehr_items_sparse]
ehr_matrix_sparse = sparse_vstack(ehr_matrix_sparse)
ehr_matrix_sparse *= inverse_doc_freq_vector_sparse
print("constructing TF-IDF vectors for literature files ...")
article_items_sparse = list(article_vectors_sparse.items())
article_ids, article_matrix_sparse = [x[0] for x in article_items_sparse], [x[1] for x in article_items_sparse]
article_matrix_sparse = sparse_vstack(article_matrix_sparse)
article_matrix_sparse *= inverse_doc_freq_vector_sparse
# Computing cosine similarities and identifying top ranked documents
keep_var_names = ["ehr_ids", "ehr_matrix_sparse", "article_ids", "article_matrix_sparse", "outpath",
"locals_dict", "local_var_names", "var_name", "keep_var_names"]
num_unreferenced_but_not_freed = gc.collect()
ranked_pairs = {}
available_bytes = psutil.virtual_memory().available
print(f"available before: {available_bytes}")
print(f"available before: {available_bytes/GB} GB")
num_articles = len(article_ids)
row_size_in_bytes = num_articles * np.dtype("float64").itemsize
num_rows_fitting_in_memory = available_bytes // (6 * row_size_in_bytes)
needed_available = int(2 * row_size_in_bytes * num_rows_fitting_in_memory)
print(f"needed available: {needed_available/GB}")
if ehr_matrix_sparse.shape[0] < num_rows_fitting_in_memory:
print("computing similarities...")
similarities = cosine_similarity(ehr_matrix_sparse, article_matrix_sparse)
print("ranking...")
print("argsort...")
top_indices = np.argsort(similarities)[:, -1:-1001:-1]
print("taking along axis...")
top_similarities = np.take_along_axis(similarities, top_indices, axis=-1)
del similarities
print("top pairs...")
top_pairs = np.stack((top_indices, top_similarities), axis=2).tolist()
print("ranked pairs...")
for i, file in enumerate(ehr_ids):
ranked_pairs[file] = [(article_ids[int(x[0])], x[1]) for x in top_pairs[i]]
else:
for start in range(0, ehr_matrix_sparse.shape[0], num_rows_fitting_in_memory):
print(f"waiting for free memory...")
i=0
while psutil.virtual_memory().available < needed_available:
time.sleep(1)
i += 1
print(f"waited {i} secs")
end = min(start + num_rows_fitting_in_memory, ehr_matrix_sparse.shape[0])
print(f"Computing similarities for EHRs {start}-{end}")
cur_ehr_matrix = ehr_matrix_sparse[start:end, :]
cur_similarities = cosine_similarity(cur_ehr_matrix, article_matrix_sparse)
print("ranking...")
print("argsort...")
top_indices = np.argsort(cur_similarities)[:, -1:-1001:-1]
print("taking along axis...")
top_similarities = np.take_along_axis(cur_similarities, top_indices, axis=-1)
top_pairs = np.stack((top_indices, top_similarities), axis=2).tolist()
cur_ehr_ids = ehr_ids[start:end]
for i, file in enumerate(cur_ehr_ids):
ranked_pairs[file] = [(article_ids[int(x[0])], x[1]) for x in top_pairs[i]]
print(f"before deleting have {psutil.virtual_memory().available/GB}")
del cur_similarities
del top_similarities
gc.collect()
print(f"after deleting have {psutil.virtual_memory().available / GB}")
# Store ranked results from sparse retriever
print("dumping...")
pickle.dump(ranked_pairs, open(outpath, 'wb'))
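# Example invocation (paths are illustrative, not from the repo):
#   python sparse_retriever.py --lit_mentions_file mortality_literature_linked_mentions.pkl \
#       --ehr_mentions_file mortality_ehr_linked_mentions.pkl --outcome mortality --outpath mortality_sparse_ranked.pkl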
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lit_mentions_file', type=str, action='store', required=True,
help='Provide path to pkl file containing outcome-specific literature linked mentions')
parser.add_argument('--ehr_mentions_file', type=str, action='store', required=True,
help='Provide path to pkl file containing ehr linked mentions')
parser.add_argument('--outcome', type=str, action='store', required=True,
help='name of the outcome')
parser.add_argument('--outpath', type=str, action='store', required=True,
help='path for out file')
args = parser.parse_args()
start = time.time()
sparse_rank(**vars(args))
end = time.time()
print(f"took {end-start} secs")
| BEEP-main | literature-retrieval/sparse_retriever.py |
import os.path
import pickle
import pip
# Initialize MeSH entity linker to link filtered mentions
import spacy
from scispacy.linking import EntityLinker
import glob
en_core_sci_md_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_md-0.4.0.tar.gz"
try:
import en_core_sci_md
except ImportError:
print('downloading "en_core_sci_md"')
pip.main(["install", en_core_sci_md_url])
import en_core_sci_md
def link_mentions(filtered_mentions_path, outpath):
print(f"reading mentions: {filtered_mentions_path}...")
filtered_mentions = pickle.load(open(filtered_mentions_path, 'rb'))
print("loading linker...")
nlp = en_core_sci_md.load(disable=["tok2vec", "tagger", "parser", "attribute_ruler", "lemmatizer"])
nlp.add_pipe("merge_noun_chunks")
nlp.add_pipe("scispacy_linker", config={"linker_name": "mesh", "resolve_abbreviations": True})
#linker = nlp.get_pipe("scispacy_linker")
# Perform MeSH linking on filtered entities
for i, file in enumerate(list(filtered_mentions.keys())):
print('Linking entities for file {} ({})'.format(file, i))
for sent in filtered_mentions[file]:
cur_ments = filtered_mentions[file][sent]
for mention in cur_ments:
doc = nlp(mention['mention'])
if not doc.ents:
continue
                entity = doc.ents[0]
                cuis = [cui for cui in entity._.kb_ents]  # each element is a (MeSH ID, score) pair; add `if cui[1] >= 0.75` to threshold by confidence
if not cuis:
continue
mention['mesh_ids'] = cuis
pickle.dump(filtered_mentions, open(outpath, 'wb'))
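# Driver: run MeSH linking over every filtered-mentions pickle for this outcome, writing a
# matching "*linked*" pickle and skipping files whose output already exists.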
outcome = "mortality"
filtered_mentions_files = glob.glob("./mortality_literature_filtered_mentions*.pkl")
for filtered_file in filtered_mentions_files:
out_path = filtered_file.replace("filtered", "linked")
if os.path.isfile(out_path):
continue
link_mentions(filtered_file, out_path) | BEEP-main | literature-retrieval/mention_linking.py |
import os
import pickle
import csv
import spacy.cli
from nltk import word_tokenize, sent_tokenize
from spacy.tokens.doc import Doc
from spacy.tokens import Span
from medspacy.context import ConTextComponent, ConTextRule
import glob
"""
try:
import en_core_web_sm
except:
print('downloading "en_core_web_sm"')
spacy.cli.download("en_core_web_sm")
import en_core_web_sm
"""
print('downloading "en_core_web_sm"')
spacy.cli.download("en_core_web_sm")
import en_core_web_sm
import warnings
warnings.filterwarnings('ignore')
# Read in mentions and raw texts
def read_raw_text_files(file):
files = {}
if file.endswith(".csv"):
reader = csv.reader(open(file))
next(reader, None)
elif file.endswith(".pkl"):
reader = pickle.load(open(file, "rb"))
reader = [(key, value["text"]) for key, value in reader.items()]
else:
reader = None
raise (ValueError("file extension not recognized"))
for row in reader:
files[row[0]] = []
text = row[1]
sents = sent_tokenize(text)
for sent in sents:
words = word_tokenize(sent)
files[row[0]].append(words)
return files
def filter_mentions(mentions_file, text_file, outpath):
print("loading en_core_web_sm")
nlp = en_core_web_sm.load(disable=["tokenizer", "ner"])
# spacy.load("en_core_web_sm", disable=["tokenizer","ner"])
context = ConTextComponent(nlp, rules="default", use_context_window=True, max_scope=5)
# Add paths to files containing extracted mentions and raw texts here
# In our pipeline, mentions are extracted using a model trained on the i2b2 2010 dataset
print(f"reading datasets: {os.path.basename(text_file)}, {os.path.basename(mentions_file)}")
texts = read_raw_text_files(text_file)
mentions = pickle.load(open(mentions_file, 'rb'))
print("done.")
# Apply ConText algorithm to identify and filter negated entities
filtered_mentions = {}
file_ind = 0
for file in mentions: #texts:
        print('Processing file {} ({})'.format(file, file_ind))
file_ind += 1
filtered_mentions[file] = {}
file_lines = texts[file]
file_mentions = mentions[file]
assert len(file_lines) == len(file_mentions)
for i, line in enumerate(file_lines):
cur_ment = file_mentions[i]
if not cur_ment:
continue
# If mentions are present in sentence, perform negation-based filtering
filtered_mentions[file][i] = []
doc = Doc(nlp.vocab, line)
for name, proc in nlp.pipeline:
doc = proc(doc)
entities = []
for mention in cur_ment:
mention_words = mention["mention"]
line_words = line[mention['start_offset']:(mention['end_offset'] + 1)]
assert mention_words == line_words
entities.append(Span(doc, mention['start_offset'], mention['end_offset'] + 1, mention['pred_type']))
doc.ents = tuple(entities)
doc = context(doc)
for ent in doc.ents:
if ent._.is_negated:
continue
filtered_mentions[file][i].append(
{'mention': ent.text, 'start_offset': ent.start, 'end_offset': ent.end, 'pred_type': ent.label_})
all_mention_count = sum([len(y) for x, y in mentions[file].items()])
filtered_mention_count = sum([len(y) for x, y in filtered_mentions[file].items()])
print('{} mentions kept out of {} for file {}'.format(filtered_mention_count, all_mention_count, file))
pickle.dump(filtered_mentions, open(outpath, 'wb'))
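# Driver: apply ConText negation filtering to every literature mentions pickle for this outcome;
# each output keeps the same name with "mentions" replaced by "filtered_mentions".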
outcome = "mortality"
mentions_files = glob.glob("./mortality_literature_mentions*.pkl")
print(mentions_files)
for mention_file in mentions_files:
#mention_file = fr"../data/{outcome}/mentions/mortality_{split}_mentions.pkl"
text_file = r"../data/outcome-literature/mortality_texts_and_dates.pkl"
outpath = mention_file.replace("mentions", "filtered_mentions")
filter_mentions(mention_file, text_file, outpath)
| BEEP-main | literature-retrieval/mention_filtering.py |
'''
Code to run the LM-based reranker over abstracts retrieved per query
Command: python text_reranker.py --doc_ranks <RANKED_DOCS_PICKLE> --ranking_type <similarity|distance> --abstracts <ABSTRACTS_PICKLE> --outcome <OUTCOME> --model_name_or_path <MODEL> --checkpoint <MODEL_CHECKPOINT> --top_k <K> --out_dir <OUT_DIR>
'''
print("started text_reranker...")
import torch
print("is cuda available?", torch.cuda.is_available())
if not torch.cuda.is_available():
print("cuda not available. exiting")
exit(0)
import argparse
import os
import pickle
import time
from matplotlib import pyplot as plt
from datetime import datetime
import numpy as np
from optimum.bettertransformer import BetterTransformer
import torch.nn as nn
from scipy import stats
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
set_seed,
)
print("importing TextDataset")
from data_loader import TextDataset
def predict(model, dev_data):
with torch.no_grad():
model.eval()
softmax = nn.Softmax(dim=1)
model_predictions = {}
i = 1
sum_of_iters = 0
start_for = time.time()
cur_preds_array = []
relevances_array = []
batches_inds = []
batches_articles_ids = []
for batch in dev_data:
start_iter = time.time()
start = time.time()
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['query_id', 'article_id']}
end = time.time()
gpuing = round(end-start, 4)
start = time.time()
outputs = model(**gpu_batch)
end = time.time()
applying_model = round(end-start, 4)
start = time.time()
torch.cuda.synchronize()
end = time.time()
sync = round(end-start, 4)
start = time.time()
cur_preds = outputs[1]
end = time.time()
taking_output = round(end-start, 4)
start = time.time()
cur_preds = cur_preds.detach()
#cur_preds_array.append(cur_preds)
end = time.time()
detaching = round(end-start, 4)
#batches_inds.append(batch['query_id'][0])
#batches_articles_ids.append(batch["article_id"])
start = time.time()
cur_preds = cur_preds.cpu()
end = time.time()
cpuing = round(end-start, 4)
start = time.time()
cur_probs = softmax(cur_preds)
end = time.time()
softmaxing = round(end-start, 4)
start = time.time()
relevance = cur_probs.numpy()[:,1].tolist()
#relevances_array.append(cur_probs.numpy())
end = time.time()
relev = round(end-start, 4)
if batch['query_id'][0] not in model_predictions:
model_predictions[batch['query_id'][0]] = []
start = time.time()
model_predictions[batch['query_id'][0]] += list(zip(batch['article_id'], relevance))
end = time.time()
zipping = round(end-start, 4)
i += 1
end_iter = time.time()
iter_time = round(end_iter-start_iter, 4)
sum_of_iters += iter_time
#print(f"iter took {iter_time}: gpuing: {gpuing}, applying model: {applying_model}, synchronizing: {sync}, taking_output: {taking_output}, detaching: {detaching}, cpuing: {cpuing}, softmaxing: {softmaxing}, relevance: {relev}, zipping: {zipping}", flush=True)
end_for = time.time()
print(f"for {i} batches took {round(end_for-start_for, 4)}, with sum_of_iters: {round(sum_of_iters, 4)}", flush=True)
"""
for cur_preds, batch_ind, batch_articles_ids in zip(cur_preds_array, batches_inds, batches_articles_ids):
start_iter = time.time()
cur_preds = cur_preds.cpu()
cur_probs = softmax(cur_preds)
relevance = cur_probs.numpy()[:, 1].tolist()
if batch_ind not in model_predictions:
model_predictions[batch_ind] = []
model_predictions[batch_ind] += list(zip(batch_articles_ids, relevance))
end_iter = time.time()
iter_time = round(end_iter-start_iter, 4)
print(f"iter after took {iter_time}", flush=True)
"""
return model_predictions
def rerank(doc_ranks, outcome, ids2keep_file, ranking_type, top_k, abstracts, out_dir,
query_format, query_type, ehr_entities, ehr_texts,
model_name_or_path, checkpoint, batch_size, seed, mod=None):
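    # Overall flow: load the (ehr_id -> ranked articles) pickle, keep the top_k articles per EHR,
    # build (query, abstract) pairs (query = EHR text or its linked entities, per query_format),
    # score each pair with the fine-tuned relevance classifier, and dump the reranked list per EHR to out_dir.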
print("started rerank...", flush=True)
if outcome == "pmv" and ids2keep_file is None:
raise(ValueError("for outcome 'pmv' ids2keep should be provided."))
if query_format == "text":
assert ehr_texts is not None, "if query_format is text, path to texts should be provided"
elif query_format == "entity":
assert ehr_entities is not None, "if query_format is entity, path to ehr's entities should be provided"
else:
raise(ValueError(f"query format '{query_format}' is not recognized"))
if seed is not None:
set_seed(seed)
dataset = TextDataset(doc_ranks, abstracts, ids2keep_file)
dataset.select_articles(top_k, ranking_type)
dataset.create_dataset(outcome)
if query_format == "text":
print("adding ehr texts...", flush=True)
dataset.add_ehr_text(ehr_texts)
print('Added texts to dataset...', flush=True)
elif query_format == "entity":
print("adding ehr entities", flush=True)
dataset.add_ehr_entities(ehr_entities)
print('Added entities to dataset...', flush=True)
else:
raise(ValueError(f"query format '{query_format}' is not recognized"))
label_vocab = {'Relevant': 1, 'Irrelevant': 0}
config = AutoConfig.from_pretrained(
model_name_or_path,
num_labels=len(list(label_vocab.keys())),
label2id=label_vocab,
id2label={i: l for l, i in label_vocab.items()},
cache_dir='../../cache',
)
tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir='../../cache',
use_fast=True,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_name_or_path,
from_tf=bool(".ckpt" in model_name_or_path),
config=config,
cache_dir='../../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
def preprocess_function(examples):
# Tokenize the texts
args = (([x[2] for x in examples], [x[3] for x in examples]))
# We can try out longformer models at some point??
result = tokenizer(*args, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
result["labels"] = torch.cuda.LongTensor([(x[-1]) for x in examples])
result["query_id"] = [(x[0]) for x in examples]
result["article_id"] = [(x[1]) for x in examples]
return result
    # TODO: why do we need questions at all?
    outcome_questions = {'mortality': 'What is the hospital mortality?',  # the probability of?
'pmv': 'What is the probability of prolonged mechanical ventilation?',
'los': 'What is the probable length of stay?'}
def batch_and_tokenize_data(examples, batch_size):
example_list = []
for file in examples:
example = examples[file]
if query_format == "text":
query = outcome_questions[example['outcome']] + ' ' + example['text']
elif query_format == "entity":
if query_type == "mesh":
query = example['outcome'] + ' [ENTSEP] ' + ' [ENTSEP] '.join(example['entities']['mesh'])
elif query_type == "all":
query = example['outcome'] + ' [ENTSEP] ' + ' [ENTSEP] '.join(example['entities']['mesh'])
query += ' ' + ' [ENTSEP] '.join(example['entities']['non-mesh'])
else:
raise (ValueError(f"query type '{query_type}' is not recognized"))
else:
raise(ValueError(f"query format '{query_format}' is not recognized"))
for id in example['articles']:
article = example['articles'][id]
example_list.append([file, id, query, article['article_text'], article['judgement']])
batches = []
for i in range(0, len(example_list), batch_size):
print(f"{i} out of {len(example_list)}", flush=True)
start = i
end = min(start+batch_size, len(example_list))
batch = preprocess_function(example_list[start:end])
#yield batch
batches.append(batch)
print('Created {} batches'.format(len(batches)), flush=True)
return batches
model = model.cuda()
if 'checkpoint' in checkpoint:
full_checkpoint = torch.load(checkpoint)
model.load_state_dict(full_checkpoint['model_state_dict'])
else:
model.load_state_dict(torch.load(checkpoint))
model = BetterTransformer.transform(model)
fnum = 0
reranked_all = {}
num_files = len(dataset.data.keys())
for file in sorted(dataset.data.keys()):
out_file_path = os.path.join(out_dir, file+'.pkl')
fnum += 1
if mod is not None and fnum%10 != mod:
continue
print('Ranking documents for query {} ({}/{})'.format(file, fnum, num_files), flush=True)
if os.path.isfile(out_file_path):
time_stamp_last_modified = os.path.getmtime(out_file_path)
time_last_modified = datetime.fromtimestamp(time_stamp_last_modified)
print(f"{file} existing from {time_last_modified}", flush=True)
with open(out_file_path, "rb") as f:
reranked_docs = pickle.load(f)
else:
start = time.time()
cur_query = {file: dataset.data[file]}
try:
batches = batch_and_tokenize_data(cur_query, batch_size)
except BaseException as e:
print(e)
continue
end = time.time()
print(f"tokenizing and batching for {top_k} docs took {end-start}", flush=True)
start_predicting = time.time()
reranked_docs = predict(model, batches)
if file not in reranked_docs:
print('Warning: No reranking performed for {}'.format(file), flush=True)
continue
end_predicting = time.time()
            print('predicting for {} docs took: {}'.format(top_k, end_predicting-start_predicting), flush=True)
reranked_docs = reranked_docs[file]
ranked = [x[1] for x in sorted(dataset.doc_ranks[file], key= lambda x: x[0])]
reranked = [x[1] for x in sorted(reranked_docs, key=lambda x: x[0])]
try:
pearson_corr = np.corrcoef(ranked, reranked)[0, 1]
spearman_corr = stats.spearmanr(ranked, reranked).statistic
except Exception as e:
print(e)
continue
print(f"pearson corr: {pearson_corr}, spearman corr: {spearman_corr}")
reranked_docs = sorted(reranked_docs, key=lambda x: x[1], reverse=True)
reranked_all[file] = reranked_docs
pickle.dump(reranked_docs, open(out_file_path, 'wb'))
out_file_name = os.path.basename(doc_ranks).replace(".pkl", "_reranked.pkl")
pickle.dump(reranked_all, open(os.path.join(out_dir, out_file_name), 'wb'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--doc_ranks', type=str, action='store', required=True,
help='Path to file {ehr_id: [(doc_id, rank) for m doc_ids ]')
parser.add_argument('--ranking_type', type=str, choices=["similarity", "distance"], action='store', required=True,
help='type of ranking "similarity"/"distance" (relevant in taking top k)')
parser.add_argument('--abstracts', type=str, action='store', required=True,
help='Path to file containing abstract texts')
parser.add_argument('--outcome', type=str, action='store', required=True, help='Target outcome to predict')
parser.add_argument('--query_format', type=str, action='store', choices=["text", "entity"], default='text',
help='Indicate how query should be framed (text/entity)')
parser.add_argument('--query_type', type=str, action='store', choices=["mesh", "all"], default='all',
help='Indicate which entity types to include in query [mesh/all]')
parser.add_argument('--ehr_entities', type=str, action='store', default=None,
help='Path to file containing extracted entities for queries')
parser.add_argument('--ehr_texts', type=str, action='store', default=None,
help='Path to file containing raw texts from EHRs')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--top_k', type=int, action='store', help='Number of top results to rerank')
parser.add_argument('--model_name_or_path', type=str, action='store', required=True,
help='Path to pretrained LM to be used')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--ids2keep_file', type=str, action='store', default=None,
help='file for ehr ids to keep (e.g. for pmv)')
parser.add_argument('--batch_size', type=int, action='store', default=20, help='Specify batch size')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
parser.add_argument('--mod', type=int, action='store', default=None)
args = parser.parse_args()
rerank(**vars(args))
print("Done.")
| BEEP-main | literature-retrieval/reranker/text_reranker.py |
import gc
import os
import csv
csv.field_size_limit(2147483647)
import pickle
import spacy
import scispacy
from scispacy.linking import EntityLinker
en_core_sci_sm_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.5.1/en_core_sci_sm-0.5.1.tar.gz"
try:
print("trying to load en_core_sci_sm")
nlp = spacy.load("en_core_sci_sm")
except OSError:
print("downloading en_core_sci_sm...")
import pip
pip.main(["install", en_core_sci_sm_url])
nlp = spacy.load("en_core_sci_sm")
print("adding pipe...")
nlp.add_pipe("scispacy_linker", config={"resolve_abbreviations": True, "linker_name": "mesh"})
print("linking...")
linker = nlp.get_pipe("scispacy_linker")
print("done linking")
class TextDataset:
def __init__(self, doc_rank_file, doc_text_file, ids2keep_file=None):
self.doc_ranks = pickle.load(open(doc_rank_file, 'rb'))
self.doc_texts = pickle.load(open(doc_text_file, 'rb'))
self.data = {}
if ids2keep_file is not None:
ids2keep = pickle.load(open(ids2keep_file, 'rb'))
self.doc_ranks = {ehr_id: doc_ranks for (ehr_id, doc_ranks) in self.doc_ranks.items()
if ehr_id in ids2keep}
gc.collect()
# TODO: Retrieve texts for each query/article pair and construct final dataset ???
# TODO: Make sure to add outcome too ???
def select_articles(self, k, ranking_type):
print(f"selecting {k} top articles")
# Order articles and select top k
selected_ids = {}
for qid in self.doc_ranks:
sorted_ = sorted(self.doc_ranks[qid], key=lambda x: x[1])
if ranking_type == "similarity":
ordered_ids = list(reversed(sorted_))
elif ranking_type == "distance":
ordered_ids = list(sorted_)
else:
raise(ValueError(f"ranking_type {ranking_type} is not recognized"))
end = min(len(ordered_ids), k) if k is not None else len(ordered_ids)
selected_ids[qid] = ordered_ids[:end]
self.doc_ranks = selected_ids
gc.collect()
def create_dataset(self, outcome):
data = {}
for qid in self.doc_ranks:
data[qid] = {'outcome': outcome, 'articles': {}}
for pair in self.doc_ranks[qid]:
aid, score = pair
data[qid]['articles'][aid] = {'article_text': self.doc_texts[aid]['text'], 'judgement': 0} # Dummy judgement labels
self.data = data
def add_ehr_text(self, filepath):
reader = csv.reader(open(filepath))
next(reader, None)
for row in reader:
if row[0] in self.data:
self.data[row[0]]['text'] = row[1]
else:
print(f"{row[0]} not in data")
def add_ehr_entities(self, filepath, cutoff=0.9):
ehr_entities = pickle.load(open(filepath, 'rb'))
for file in ehr_entities:
if file not in self.data:
continue
# print('Adding entities for {}'.format(file))
entities = {'mesh': [], 'non-mesh': []}
for sent in ehr_entities[file]:
for entity in ehr_entities[file][sent]:
if 'mesh_ids' not in entity:
entities['non-mesh'].append(entity['mention'])
continue
for pair in entity['mesh_ids']:
term, prob = pair
if prob < cutoff:
continue
entity_mesh_text = linker.kb.cui_to_entity[term][1] if term in linker.kb.cui_to_entity else ''
if entity_mesh_text != '':
entities['mesh'].append(entity_mesh_text)
self.data[file]['entities'] = entities
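# Typical usage, mirroring text_reranker.py (paths are illustrative):
#   dataset = TextDataset("sparse_ranked.pkl", "abstracts.pkl")
#   dataset.select_articles(1000, "similarity")
#   dataset.create_dataset("mortality")
#   dataset.add_ehr_entities("ehr_linked_mentions.pkl")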
class TRECDataset:
def __init__(self, base_dir, years=None):
train_file = os.path.join(base_dir, 'train-split-filtered.csv')
dev_file = os.path.join(base_dir, 'dev-split-filtered.csv')
test_file = os.path.join(base_dir, 'test-split-filtered.csv')
self.train_data = self.read_file(train_file, years)
self.dev_data = self.read_file(dev_file, years)
self.test_data = self.read_file(test_file, years)
def read_file(self, filepath, years=None):
reader = csv.reader(open(filepath))
next(reader, None)
data = {}
for row in reader:
if row[0] not in data:
if years is not None: # Year-based filtering out of data
query_year = int(row[0][1:5])
if query_year not in years:
continue
data[row[0]] = {'outcome': row[1], 'text': row[2], 'articles': {}}
data[row[0]]['articles'][row[3]] = {'article_text': row[4], 'judgement': 1 if int(row[5]) >=1 else 0} # Categories 1 and 2 count as relevant
return data
def add_entities(self, entity_file, cutoff=0.9):
entity_file = pickle.load(open(entity_file, 'rb'))
data_files = list(self.train_data.keys()) + list(self.dev_data.keys()) + list(self.test_data.keys())
for file in entity_file:
if file not in data_files:
continue
entities = {'mesh': [], 'non-mesh': []}
for sent in entity_file[file]:
for entity in entity_file[file][sent]:
if 'mesh_ids' not in entity:
entities['non-mesh'].append(entity['mention'])
continue
for pair in entity['mesh_ids']:
term, prob = pair
if prob < cutoff:
continue
entity_mesh_text = linker.kb.cui_to_entity[term][1] if term in linker.kb.cui_to_entity else ''
if entity_mesh_text != '':
entities['mesh'].append(entity_mesh_text)
if file in self.train_data:
self.train_data[file]['entities'] = entities
elif file in self.dev_data:
self.dev_data[file]['entities'] = entities
else:
self.test_data[file]['entities'] = entities
| BEEP-main | literature-retrieval/reranker/data_loader.py |
'''
Code to run LM-based reranker over abstracts retrieved per query
Command: python run_reranker_cv.py --data <DATA_DIR> --entities <ENTITY_PICKLE_FILE> --out_dir <OUT_DIR> --model_name_or_path <MODEL> --do_train --do_test
'''
import argparse
import os
import random
import statistics
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
set_seed,
)
from data_loader import TRECDataset
def train(model, train_data, dev_data, out_dir, label_vocab, epochs, lr, fold):
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
step = 0
prev_dev_loss = 10000
for epoch in range(epochs):
random.shuffle(train_data)
model.train()
epoch_loss = 0.0
for batch in train_data:
optimizer.zero_grad()
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['query_id', 'article_id']} # Tensors need to be moved to GPU
outputs = model(**gpu_batch) # Batch is a dictionary from HF that needs to be unpacked
loss = outputs[0]
epoch_loss += loss.item()
loss.backward()
optimizer.step()
step += 1
if step%1000 == 0:
print('Completed {} training steps'.format(step))
epoch_loss /= len(train_data)
print('Training loss after epoch {}: {}'.format(epoch, epoch_loss))
# Checkpoint model after each epoch anyway, in addition to storing best loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': epoch_loss,
}, os.path.join(out_dir, 'checkpoints/checkpoint_{}_fold_{}.pt'.format(epoch, fold)))
dev_loss, _ = test(model, dev_data, label_vocab, epoch=epoch, return_loss=True)
if dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_fold_{}.pt'.format(fold)))
scheduler.step(dev_loss)
def test(model, dev_data, label_vocab, epoch=0, return_loss=False):
model.eval()
softmax = nn.Softmax(dim=1)
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
dev_loss = 0.0
model_predictions = {}
gold_labels = []
for batch in dev_data:
gpu_batch = {x:y.cuda() for x,y in batch.items() if x not in ['query_id', 'article_id']}
outputs = model(**gpu_batch)
loss = outputs[0]
dev_loss += loss.item()
cur_preds = outputs[1].detach().cpu()
cur_probs = softmax(cur_preds)
relevance = cur_probs.numpy()[:,1].tolist()
cur_gold_relevance = batch['labels'].detach().cpu().numpy().tolist()
for i, qid in enumerate(batch['query_id']):
aid = batch['article_id'][i]
if qid not in model_predictions:
model_predictions[qid] = []
model_predictions[qid].append((aid, relevance[i], cur_gold_relevance[i]))
dev_loss /= len(dev_data)
# pickle.dump(model_predictions, open('trec_2016_filtered_lowlr.pkl', 'wb'))
print('Validation loss after epoch {}: {}'.format(epoch, dev_loss))
scores = compute_precision_at_k(model_predictions, k=10)
score_list = list(scores.values())
mean_preck = sum(score_list) / len(score_list)
median_preck = statistics.median(score_list)
print('------------------Precision@K Scores for Epoch {}-------------------'.format(epoch))
print('Mean Precision@K: {}'.format(mean_preck))
print('Median Precision@K: {}'.format(median_preck))
for file in scores:
print('{}\t{}'.format(file, scores[file]))
# TODO: Maybe add NDCG or other metric implementations too
if return_loss:
return dev_loss, scores
return scores
def compute_precision_at_k(preds, k=10):
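    # Precision@K: the fraction of the K highest-scoring articles for a query whose gold judgement is relevant (label 1).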
precision = {}
for file in preds:
cur_ordered_preds = list(reversed(sorted(preds[file], key=lambda x : x[1])))
cur_prec = 0.0
for chosen_id in cur_ordered_preds[:k]:
cur_prec += 1 if chosen_id[-1] == 1 else 0
cur_prec /= float(k)
precision[file] = cur_prec
return precision
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, action='store', required=True, help='Path to directory containing train/dev/test data')
parser.add_argument('--entities', type=str, action='store', required=True, help='Path to file containing extracted entities for queries')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--model_name_or_path', type=str, action='store', required=True, help='Path to pretrained LM to be used')
parser.add_argument('--query_type', type=str, action='store', default='all', help='Indicate which entity types to include in query [mesh/all]')
parser.add_argument('--query_format', type=str, action='store', default='entity', help='Indicate how query should be framed (text/entity)')
parser.add_argument('--do_train', action='store_true', default=False, help='Specify if training should be performed')
parser.add_argument('--do_test', action='store_true', default=False, help='Specify if evaluation on test data should be performed')
parser.add_argument('--rebalance', action='store_true', default=False, help='Specify whether training data for reranker should be rebalanced')
parser.add_argument('--batch_size', type=int, action='store', default=8, help='Specify batch size')
parser.add_argument('--lr', type=float, action='store', default=2e-5, help='Specify learning rate')
parser.add_argument('--epochs', type=int, action='store', default=20, help='Specify number of epochs')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--years', type=str, action='store', help='Provide years from which TREC data should be used')
parser.add_argument('--folds', type=int, action='store', default=5, help='Provide number of folds to use in cross-validation')
args = parser.parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
checkpoint_dir = os.path.join(args.out_dir, 'checkpoints')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
set_seed(args.seed)
years = None
if args.years is not None:
years = [int(x) for x in args.years.split(',')] if ',' in args.years else [int(args.years)]
dataset = TRECDataset(args.data, years)
print('Loaded train, dev and test splits...')
dataset.add_entities(args.entities)
print('Added entities to dataset...')
label_vocab = {'Relevant': 1, 'Irrelevant': 0}
# Initialized within each fold
tokenizer = None
model = None
config = None
def preprocess_function(examples):
# Tokenize the texts
args = (([x[2] for x in examples], [x[3] for x in examples]))
# We can try out longformer models at some point??
result = tokenizer(*args, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
result["labels"] = torch.cuda.LongTensor([(x[-1]) for x in examples])
result["query_id"] = [(x[0]) for x in examples]
result["article_id"] = [(x[1]) for x in examples]
return result
def rebalance(examples):
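        # Oversample relevant articles per query until relevant/irrelevant counts match; each extra copy
        # drops one random word from the abstract as a light augmentation so duplicates are not identical.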
per_query_relevance = {}
for sample in examples:
if sample[0] not in per_query_relevance:
per_query_relevance[sample[0]] = {0: 0, 1: 0}
per_query_relevance[sample[0]][sample[-1]] += 1
extra_samples = []
for query in per_query_relevance:
gap = per_query_relevance[query][0] - per_query_relevance[query][1]
rel_samples = [x for x in examples if x[0] == query and x[-1] == 1]
for i in range(gap):
chosen_sample = random.choice(rel_samples)
article_tokens = chosen_sample[-2].split()
while len(article_tokens) == 0:
chosen_sample = random.choice(rel_samples)
article_tokens = chosen_sample[-2].split()
# Drop out one word
del article_tokens[random.choice(range(len(article_tokens)))]
chosen_sample[-2] = ' '.join(article_tokens)
extra_samples.append(chosen_sample)
final_samples = examples + extra_samples
random.shuffle(final_samples)
return final_samples
def batch_and_tokenize_data(examples, batch_size, split):
example_list = []
for file in examples:
example = examples[file]
query = example['outcome'] + ' [ENTSEP] ' + ' [ENTSEP] '.join(example['entities']['mesh'])
if args.query_type == 'all': # Flag to include non-mesh linked entities too
query += ' ' + ' [ENTSEP] '.join(example['entities']['non-mesh'])
if args.query_format == 'text':
query = 'What is the {}? '.format(example['outcome']) + example['text']
for id in example['articles']:
article = example['articles'][id]
example_list.append([file, id, query, article['article_text'], article['judgement']])
if split == 'train':
random.shuffle(example_list)
if args.rebalance:
example_list = rebalance(example_list)
batches = []
for i in range(0, len(example_list), batch_size):
start = i
end = min(start+batch_size, len(example_list))
batch = preprocess_function(example_list[start:end])
batches.append(batch)
if len(batches) % 1000 == 0:
print('Created {} batches'.format(len(batches)))
return batches
all_data = {**dataset.train_data, **dataset.dev_data, **dataset.test_data}
print('Creating {} folds...'.format(args.folds))
fold_size = 1.0/args.folds
test_size = len(all_data) * fold_size
train_size = len(all_data) * ((args.folds-1) * fold_size)
queries = list(all_data.keys())
random.shuffle(queries)
scores = {}
for i in range(args.folds):
config = AutoConfig.from_pretrained(
args.model_name_or_path,
num_labels=len(list(label_vocab.keys())),
label2id=label_vocab,
id2label={i: l for l, i in label_vocab.items()},
# finetuning_task='mnli',
cache_dir='../../cache',
)
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path,
cache_dir='../../cache',
use_fast=True,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir='../../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
model = model.cuda()
test_start =int(i*test_size)
test_end = int((i+1)*test_size)
test_queries = queries[test_start:test_end]
train_queries = [x for x in queries if x not in test_queries]
random.shuffle(train_queries)
dev_cutoff = int(0.5*test_size)
dev_queries = train_queries[:dev_cutoff]
train_queries = train_queries[dev_cutoff:]
train_data = {k:v for k,v in all_data.items() if k in train_queries}
dev_data = {k:v for k,v in all_data.items() if k in dev_queries}
test_data = {k:v for k,v in all_data.items() if k in test_queries}
print('Started batch creation for fold {}'.format(i))
train_batches = batch_and_tokenize_data(train_data, args.batch_size, 'train')
print('Created {} train batches'.format(len(train_batches)))
dev_batches = batch_and_tokenize_data(dev_data, args.batch_size, 'dev')
print('Created {} dev batches'.format(len(dev_batches)))
test_batches = batch_and_tokenize_data(test_data, args.batch_size, 'test')
print('Created {} test batches'.format(len(test_batches)))
print('Running training/testing loop for fold {}'.format(i))
if args.do_train:
train(model, train_batches, dev_batches, args.out_dir, label_vocab, args.epochs, args.lr, fold=i)
if args.do_test:
if args.checkpoint is not None:
if 'checkpoint' in args.checkpoint:
full_checkpoint = torch.load(args.checkpoint)
model.load_state_dict(full_checkpoint['model_state_dict'])
else:
model.load_state_dict(torch.load(args.checkpoint))
else:
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model_fold_{}.pt').format(i)))
fold_scores = test(model, test_batches, label_vocab)
scores = {**scores, **fold_scores}
print('Final Scores On All Queries:')
print('Mean Precision@K: {}'.format(statistics.mean(list(scores.values()))))
print('Median Precision@K: {}'.format(statistics.median(list(scores.values()))))
for file in scores:
print('{}: {}'.format(file, scores[file]))
| BEEP-main | literature-retrieval/reranker/run_reranker_cv.py |
import os.path
import pickle
import argparse
import numpy as np
import os
def take_top_n_and_untie(input_file_path, ranking_type, out_dir, top_n):
os.makedirs(out_dir, exist_ok=True)
if ranking_type != "similarity":
print("note that the similarity score are some decreasing function of the distances,"
"and may not represent the 'correct' similarity score")
data = pickle.load(open(input_file_path, "rb"))
counter = 0
out_of = len(data)
if ranking_type == "similarity":
for ehr in data.keys():
num_files = min(top_n, len(data[ehr]))
top_n_docs_with_similarity_score = sorted(data[ehr], key=lambda x: x[1])[-num_files:][::-1]
out_path = os.path.join(out_dir, f"{ehr}.pkl")
pickle.dump(top_n_docs_with_similarity_score, open(out_path, "wb"))
counter += 1
print(f"processed {counter}/{out_of} files", end="\r", flush=True)
elif ranking_type == "distance":
all_dists_min = float("inf")
all_dists_max = -float("inf")
all_dists_counts = 0
all_dists_sum = 0
all_dists_squared_sum = 0
for ehr in data.keys():
distances = np.array([x[1] for x in data[ehr]])
all_dists_min = min(all_dists_min, min(distances))
all_dists_max = max(all_dists_max, max(distances))
all_dists_sum += sum(distances)
all_dists_squared_sum += sum(distances**2)
all_dists_counts += len(distances)
dists_mean = all_dists_sum / all_dists_counts
dists_var = all_dists_squared_sum / all_dists_counts - dists_mean**2
dists_std = dists_var ** 0.5
all_dists_range = all_dists_max - all_dists_min
for ehr in data.keys():
num_files = min(top_n, len(data[ehr]))
top_n_docs_with_distance_score = sorted(data[ehr], key=lambda x: x[1])[:num_files]
top_distances = np.array([x[1] for x in top_n_docs_with_distance_score])
# mean std normalization
#normalized_top_distances = (top_distances-distances_mean)/distances_std # minus sign to make them positive
            # min-max normalization: the globally smallest distance maps to sim=1, the largest to sim=0, and the rest fall in between
normalized_top_distances = 1 - (top_distances - all_dists_min) / all_dists_range
assert np.array_equal(sorted(normalized_top_distances, reverse=True), normalized_top_distances)
top_n_docs_with_similarity_score = [(x[0], y)
for x,y in zip(top_n_docs_with_distance_score, normalized_top_distances)]
out_path = os.path.join(out_dir, f"{ehr}.pkl")
pickle.dump(top_n_docs_with_similarity_score, open(out_path, "wb"))
counter += 1
print(f"processed {counter}/{out_of} files", end="\r", flush=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_file_path", required=True, type=str)
parser.add_argument("--ranking_type", required=True, type=str, choices=["similarity", "distance"])
parser.add_argument("--out_dir", type=str, required=True)
parser.add_argument("--top_n", type=int, default=1000, required=True)
args = parser.parse_args()
args = vars(args)
take_top_n_and_untie(**args)
| BEEP-main | literature-retrieval/reranker/take_top_n.py |
import pickle
import argparse
import os
def merge(sparse_ranked_path, dense_ranked_path, out_path, top_n):
sparse_ranked = pickle.load(open(sparse_ranked_path, "rb"))
dense_ranked = pickle.load(open(dense_ranked_path, "rb"))
sparse_keys = set(sparse_ranked.keys())
dense_keys = set(dense_ranked.keys())
common_keys = sparse_keys & dense_keys
dense_not_sparse_keys = dense_keys - sparse_keys
sparse_not_dense_keys = sparse_keys - dense_keys
if len(dense_not_sparse_keys) > 0:
print(f"{len(dense_not_sparse_keys)} EHRs have sparse ranking but not dense ranking")
if len(sparse_not_dense_keys) > 0:
print(f"{len(sparse_not_dense_keys)} EHRs have dense ranking but not sparse ranking")
half_n = top_n // 2
merged = {}
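    # Interleave the two lists: position i contributes the i-th best sparse doc and then the i-th best dense doc
    # (duplicates skipped), both assigned the synthetic score 1 - i/half_n so earlier positions score higher.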
for k, ehr in enumerate(common_keys):
ehr_sparse_ranking = sparse_ranked[ehr] # by similarity. the bigger the closer
top_half_n_sparse = sorted(ehr_sparse_ranking, key=lambda x: x[1])[-half_n:][::-1]
ehr_dense_ranking = dense_ranked[ehr] # by distance. the smaller the closer
top_half_n_dense = sorted(ehr_dense_ranking, key=lambda x: x[1])[:half_n]
top_half_n_sparse_docs = [x[0] for x in top_half_n_sparse]
top_half_n_dense_docs = [x[0] for x in top_half_n_dense]
ehr_docs_combined = []
existing = set()
for i in range(half_n):
sparse_ith = top_half_n_sparse_docs[i]
dense_ith = top_half_n_dense_docs[i]
rank = 1-i/half_n
if sparse_ith not in existing:
ehr_docs_combined.append((sparse_ith, rank))
existing.add(sparse_ith)
if dense_ith not in existing:
ehr_docs_combined.append((dense_ith, rank))
existing.add(dense_ith)
merged[ehr] = ehr_docs_combined
print(f"processed {k+1}/{len(common_keys)} docs", end="\r", flush=True)
pickle.dump(merged, open(out_path, "wb"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sparse_ranked_path", required=True, type=str,
help="path to sparse ranked (assuming the ranking are similairties, the bigger the closer")
parser.add_argument("--dense_ranked_path", required=True, type=str,
help="path to dense ranked (assuming the ranking are distances, the less the closer")
parser.add_argument("--out_path", type=str, required=True)
parser.add_argument("--top_n", type=int, default=1000, required=True)
args = parser.parse_args()
args = vars(args)
merge(**args)
| BEEP-main | literature-retrieval/reranker/merge_rankers.py |
import gc
import os
import csv
import pickle
import spacy
# not used directly, but importing scispacy registers the "scispacy_linker" pipeline factory with spaCy
import scispacy
from scispacy.linking import EntityLinker
csv.field_size_limit(2147483647)
en_core_sci_sm_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.5.1/en_core_sci_sm-0.5.1.tar.gz"
try:
nlp = spacy.load("en_core_sci_sm")
except OSError:
print("downloading en_core_sci_sm...")
import pip
pip.main(["install", en_core_sci_sm_url])
nlp = spacy.load("en_core_sci_sm")
nlp.add_pipe("scispacy_linker", config={"resolve_abbreviations": True, "linker_name": "mesh"})
linker = nlp.get_pipe("scispacy_linker")
class TextDataset:
def __init__(self, doc_text_file, ehr_entity_file):
self.doc_texts = pickle.load(open(doc_text_file, 'rb'))
self.ehr_entities = pickle.load(open(ehr_entity_file, 'rb'))
self.all_doc_ids = list(self.doc_texts.keys())
self.query_texts = {}
self.texts = {}
self.data = {}
def add_ehr_text(self, filepath, ids2keep=None):
reader = csv.reader(open(filepath))
next(reader, None)
if ids2keep is not None:
ids2keep = pickle.load(open(ids2keep, 'rb'))
for row in reader:
if ids2keep is not None:
if row[0] not in ids2keep:
continue
self.query_texts[row[0]] = row[1]
def create_dataset(self, outcome, censor_after_year):
data = {}
aid = [x for x in self.all_doc_ids if not x.startswith('PMC')]
for id in aid:
if int(self.doc_texts[id]['year']) > censor_after_year or self.doc_texts[id]['text'] == '': # why censoring > 2016?
continue
self.texts[id] = self.doc_texts[id]['text']
del self.doc_texts
gc.collect()
for qid in self.query_texts.keys():
data[qid] = {'outcome': outcome}
self.data = data
def add_entities(self, cutoff=0.9):
for file in self.ehr_entities:
if file not in self.data:
print(f"file {file} not in self.data")
continue
entities = {'mesh': [], 'non-mesh': []}
for sent in self.ehr_entities[file]:
for entity in self.ehr_entities[file][sent]:
if 'mesh_ids' not in entity:
entities['non-mesh'].append(entity['mention'])
continue
for pair in entity['mesh_ids']:
term, prob = pair
if prob < cutoff:
continue
entity_mesh_text = linker.kb.cui_to_entity[term][1] if term in linker.kb.cui_to_entity else ''
if entity_mesh_text != '':
entities['mesh'].append(entity_mesh_text)
self.data[file]['entities'] = entities
del self.ehr_entities
gc.collect()
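# Typical usage, mirroring the dense ranker script (arguments are illustrative):
#   dataset = TextDataset("abstracts.pkl", "ehr_linked_mentions.pkl")
#   dataset.add_ehr_text("ehr_texts.csv")
#   dataset.create_dataset("mortality", censor_after_year=2016)
#   dataset.add_entities(cutoff=0.9)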
class TRECDataset:
def __init__(self, base_dir, years=None):
train_file = os.path.join(base_dir, 'train-split-filtered.csv')
dev_file = os.path.join(base_dir, 'dev-split-filtered.csv')
test_file = os.path.join(base_dir, 'test-split-filtered.csv')
self.train_data, train_texts = self.read_file(train_file, years)
self.dev_data, dev_texts = self.read_file(dev_file, years)
self.test_data, test_texts = self.read_file(test_file, years)
self.texts = {**train_texts, **dev_texts, **test_texts}
def read_file(self, filepath, years=None):
reader = csv.reader(open(filepath))
next(reader, None)
data = {}
texts = {}
for row in reader:
if row[0] not in data:
if years is not None: # Year-based filtering out of data
query_year = int(row[0][1:5])
if query_year not in years:
continue
data[row[0]] = {'outcome': row[1], 'articles': {}}
texts[row[0]] = 'What is the {}? '.format(row[1])+row[2]
texts[row[3]] = row[4]
data[row[0]]['articles'][row[3]] = {'judgement': 1 if int(row[5]) >=1 else -1} # Categories 1 and 2 count as relevant
return data, texts
def add_entities(self, entity_file, cutoff=0.9):
entity_file = pickle.load(open(entity_file, 'rb'))
data_files = list(self.train_data.keys()) + list(self.dev_data.keys()) + list(self.test_data.keys())
for file in entity_file:
if file not in data_files:
continue
entities = {'mesh': [], 'non-mesh': []}
for sent in entity_file[file]:
for entity in entity_file[file][sent]:
if 'mesh_ids' not in entity:
entities['non-mesh'].append(entity['mention'])
continue
for pair in entity['mesh_ids']:
term, prob = pair
if prob < cutoff:
continue
entity_mesh_text = linker.kb.cui_to_entity[term][1] if term in linker.kb.cui_to_entity else ''
if entity_mesh_text != '':
entities['mesh'].append(entity_mesh_text)
if file in self.train_data:
self.train_data[file]['entities'] = entities
elif file in self.dev_data:
self.dev_data[file]['entities'] = entities
else:
self.test_data[file]['entities'] = entities
| BEEP-main | literature-retrieval/dense-retriever/data_loader_bireranker.py |
'''
Code to train and evaluate a triplet-loss bi-encoder (dense) retriever over TREC abstracts with cross-validation
Command: python run_triplet_bireranker_cv.py --data <DATA_DIR> --entities <ENTITY_PICKLE_FILE> --out_dir <OUT_DIR> --model_name_or_path <MODEL> --do_train --do_test
'''
import argparse
import os
import random
import statistics
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
set_seed,
)
from data_loader_bireranker import TRECDataset
def encode_texts(model, tokenizer, texts):
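    # Embed a list of texts with the bi-encoder and return the [CLS] (first-token) vector of the last hidden layer.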
texts = tokenizer(texts, padding='max_length', max_length=512, truncation=True, return_tensors="pt")
texts = {x:y.cuda() for x,y in texts.items()}
embed = model(**texts)
embed = embed['last_hidden_state'][:,0,:]
return embed
# Default value of margin parameter from sentence transformers:
# https://github.com/UKPLab/sentence-transformers/blob/fb7b7a3044f21de5b54f466f732baa48ba1ae4ba/sentence_transformers/losses/TripletLoss.py
def train(model, tokenizer, texts, train_data, dev_data, out_dir, label_vocab, epochs, lr, fold, margin=5.0):
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
triplet_loss = nn.TripletMarginLoss(margin=margin)
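    # TripletMarginLoss: L(a, p, n) = max(d(a, p) - d(a, n) + margin, 0); the query (anchor) is pulled
    # towards the relevant abstract (positive) and pushed away from the irrelevant one (negative).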
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
step = 0
prev_dev_loss = 10000
for epoch in range(epochs):
random.shuffle(train_data)
model.train()
epoch_loss = 0.0
for batch in train_data:
optimizer.zero_grad()
            # Encode every anchor (EHR query), positive (relevant abstract) and negative (irrelevant abstract) in the batch in one pass each
            anchor = encode_texts(model, tokenizer, [texts[x[0]] for x in batch])
            positive = encode_texts(model, tokenizer, [texts[x[1]] for x in batch])
            negative = encode_texts(model, tokenizer, [texts[x[2]] for x in batch])
loss = triplet_loss(anchor, positive, negative)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
step += 1
if step%1000 == 0:
print('Completed {} training steps'.format(step))
epoch_loss /= len(train_data)
print('Training loss after epoch {}: {}'.format(epoch, epoch_loss))
# Checkpoint model after each epoch anyway, in addition to storing best loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': epoch_loss,
}, os.path.join(out_dir, 'checkpoints/checkpoint_{}_fold_{}.pt'.format(epoch, fold)))
dev_loss, _ = test(model, tokenizer, texts, dev_data, label_vocab, epoch=epoch, fold=fold, return_loss=True)
if dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model_fold_{}.pt'.format(fold)))
scheduler.step(dev_loss)
def test(model, tokenizer, texts, dev_data, label_vocab, epoch=0, fold=0, return_loss=False):
model.eval()
dist = nn.PairwiseDistance(p=2.0)
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
model_predictions = {}
gold_labels = []
for batch in dev_data:
        queries = encode_texts(model, tokenizer, [texts[x[0]] for x in batch])
        articles = encode_texts(model, tokenizer, [texts[x[1]] for x in batch])
        judgements = [x[2] for x in batch]
        distance = dist(queries, articles).detach().cpu().numpy().tolist()
for i, sample in enumerate(batch):
qid, aid, rel = sample
if qid not in model_predictions:
model_predictions[qid] = []
model_predictions[qid].append((aid, distance[i], rel))
pickle.dump(model_predictions, open('trec_triplet_bireranker_preds_{}.pkl'.format(fold), 'wb'))
# print('Validation loss after epoch {}: {}'.format(epoch, dev_loss))
scores = compute_precision_at_k(model_predictions, k=10)
score_list = list(scores.values())
mean_preck = sum(score_list) / len(score_list)
median_preck = statistics.median(score_list)
print('------------------Precision@K Scores for Epoch {}-------------------'.format(epoch))
print('Mean Precision@K: {}'.format(mean_preck))
print('Median Precision@K: {}'.format(median_preck))
for file in scores:
print('{}\t{}'.format(file, scores[file]))
# TODO: Maybe add NDCG or other metric implementations too
if return_loss:
return mean_preck, scores
return scores
def compute_precision_at_k(preds, k=10):
precision = {}
for file in preds:
cur_ordered_preds = list(sorted(preds[file], key=lambda x : x[1]))
cur_prec = 0.0
for chosen_id in cur_ordered_preds[:k]:
cur_prec += 1 if chosen_id[-1] == 1 else 0
cur_prec /= float(k)
precision[file] = cur_prec
return precision
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, action='store', required=True, help='Path to directory containing train/dev/test data')
parser.add_argument('--entities', type=str, action='store', required=True, help='Path to file containing extracted entities for queries')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--model_name_or_path', type=str, action='store', required=True, help='Path to pretrained LM to be used')
parser.add_argument('--query_type', type=str, action='store', default='all', help='Indicate which entity types to include in query [mesh/all]')
parser.add_argument('--query_format', type=str, action='store', default='entity', help='Indicate how query should be framed (text/entity)')
parser.add_argument('--do_train', action='store_true', default=False, help='Specify if training should be performed')
parser.add_argument('--do_test', action='store_true', default=False, help='Specify if evaluation on test data should be performed')
parser.add_argument('--rebalance', action='store_true', default=False, help='Specify whether training data for reranker should be rebalanced')
parser.add_argument('--batch_size', type=int, action='store', default=8, help='Specify batch size')
parser.add_argument('--lr', type=float, action='store', default=2e-5, help='Specify learning rate')
parser.add_argument('--epochs', type=int, action='store', default=20, help='Specify number of epochs')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--years', type=str, action='store', help='Provide years from which TREC data should be used')
parser.add_argument('--folds', type=int, action='store', default=5, help='Provide number of folds to use in cross-validation')
parser.add_argument('--strategy', type=str, action='store', default='hard', help='Choose negative sampling strategy to use (hard/easy)')
parser.add_argument('--margin', type=float, action='store', default=5.0, help='Margin to use for triplet loss')
args = parser.parse_args()
# TODO: Maybe add margin as a command line argument
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
checkpoint_dir = os.path.join(args.out_dir, 'checkpoints')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
set_seed(args.seed)
years = None
if args.years is not None:
years = [int(x) for x in args.years.split(',')] if ',' in args.years else [int(args.years)]
dataset = TRECDataset(args.data, years)
print('Loaded train, dev and test splits...')
dataset.add_entities(args.entities)
print('Added entities to dataset...')
label_vocab = {'Relevant': 1, 'Irrelevant': 0}
# Initialized within each fold
tokenizer = None
model = None
config = None
def preprocess(examples):
# Tokenize the texts
args = (([x[2] for x in examples], [x[3] for x in examples]))
result = tokenizer(*args, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
result["labels"] = torch.cuda.LongTensor([(x[-1]) for x in examples])
result["query_id"] = [(x[0]) for x in examples]
result["article_id"] = [(x[1]) for x in examples]
return result
def rebalance(examples):
per_query_relevance = {}
for sample in examples:
if sample[0] not in per_query_relevance:
per_query_relevance[sample[0]] = {0: 0, 1: 0}
per_query_relevance[sample[0]][sample[-1]] += 1
extra_samples = []
for query in per_query_relevance:
gap = per_query_relevance[query][0] - per_query_relevance[query][1]
rel_samples = [x for x in examples if x[0] == query and x[-1] == 1]
for i in range(gap):
chosen_sample = random.choice(rel_samples)
article_tokens = chosen_sample[-2].split()
while len(article_tokens) == 0:
chosen_sample = random.choice(rel_samples)
article_tokens = chosen_sample[-2].split()
# Drop out one word
del article_tokens[random.choice(range(len(article_tokens)))]
chosen_sample[-2] = ' '.join(article_tokens)
extra_samples.append(chosen_sample)
final_samples = examples + extra_samples
random.shuffle(final_samples)
return final_samples
def batch_and_tokenize_data(examples, batch_size, split):
example_list = []
all_irrelevant = [article for file in examples for article in examples[file]['articles'] if examples[file]['articles'][article]['judgement'] == -1]
for file in examples:
example = examples[file]
# query = example['outcome'] + ' [ENTSEP] ' + ' [ENTSEP] '.join(example['entities']['mesh'])
# if args.query_type == 'all': # Flag to include non-mesh linked entities too
# query += ' ' + ' [ENTSEP] '.join(example['entities']['non-mesh'])
# if args.query_format == 'text':
# query = 'What is the {}? '.format(example['outcome']) + example['text']
pos_ex = []
neg_ex = []
for id in example['articles']:
article = example['articles'][id]
if article['judgement'] == 1:
pos_ex.append([file, id, article['judgement']])
else:
neg_ex.append([file, id, article['judgement']])
# article = example['articles'][id]
# example_list.append([file, id, article['judgement']])
random.shuffle(pos_ex)
random.shuffle(neg_ex)
chosen_neg_ex = []
if split == 'train':
if args.strategy == 'hard':
chosen_neg_ex = neg_ex[:len(pos_ex)]
example_list += [[file, x[1], y[1]] for x,y in zip(pos_ex, chosen_neg_ex)]
elif args.strategy == 'easy':
random.shuffle(all_irrelevant)
chosen_neg_ex = all_irrelevant[:len(pos_ex)]
example_list += [[file, x[1], y] for x,y in zip(pos_ex, chosen_neg_ex)]
else:
example_list += pos_ex + neg_ex
batches = []
for i in range(0, len(example_list), batch_size):
start = i
end = min(start+batch_size, len(example_list))
batch = example_list[start:end]
batches.append(batch)
if len(batches) % 1000 == 0:
print('Created {} batches'.format(len(batches)))
return batches
all_data = {**dataset.train_data, **dataset.dev_data, **dataset.test_data}
print('Creating {} folds...'.format(args.folds))
fold_size = 1.0/args.folds
test_size = len(all_data) * fold_size
train_size = len(all_data) * ((args.folds-1) * fold_size)
queries = list(all_data.keys())
random.shuffle(queries)
scores = {}
for i in range(args.folds):
config = AutoConfig.from_pretrained(
args.model_name_or_path,
num_labels=len(list(label_vocab.keys())),
label2id=label_vocab,
id2label={i: l for l, i in label_vocab.items()},
# finetuning_task='mnli',
cache_dir='../../cache',
)
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path,
cache_dir='../../cache',
use_fast=True,
)
model = AutoModel.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir='../../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
model = model.cuda()
# tokenized_texts = {}
# for file in dataset.texts:
# text = dataset.texts[file]
# tokenized_texts[file] = tokenizer(text, padding='max_length', max_length=512, truncation=True, return_tensors="pt")
test_start =int(i*test_size)
test_end = int((i+1)*test_size)
test_queries = queries[test_start:test_end]
train_queries = [x for x in queries if x not in test_queries]
random.shuffle(train_queries)
dev_cutoff = int(0.5*test_size)
dev_queries = train_queries[:dev_cutoff]
train_queries = train_queries[dev_cutoff:]
train_data = {k:v for k,v in all_data.items() if k in train_queries}
dev_data = {k:v for k,v in all_data.items() if k in dev_queries}
test_data = {k:v for k,v in all_data.items() if k in test_queries}
if args.folds == 1:
train_data = {**dataset.train_data, **dataset.test_data}
dev_data = dataset.dev_data
test_data = dataset.test_data
print('Started batch creation for fold {}'.format(i))
train_batches = batch_and_tokenize_data(train_data, args.batch_size, 'train')
print('Created {} train batches'.format(len(train_batches)))
dev_batches = batch_and_tokenize_data(dev_data, args.batch_size, 'dev')
print('Created {} dev batches'.format(len(dev_batches)))
test_batches = batch_and_tokenize_data(test_data, args.batch_size, 'test')
print('Created {} test batches'.format(len(test_batches)))
# TODO: Comment after testing
# train_batches = train_batches[:10]
# dev_batches = dev_batches[:10]
# test_batches = test_batches[:10]
print('Running training/testing loop for fold {}'.format(i))
if args.do_train:
train(model, tokenizer, dataset.texts, train_batches, dev_batches, args.out_dir, label_vocab, args.epochs, args.lr, fold=i, margin=args.margin)
if args.do_test:
if args.checkpoint is not None:
if 'checkpoint' in args.checkpoint:
full_checkpoint = torch.load(args.checkpoint)
model.load_state_dict(full_checkpoint['model_state_dict'])
else:
model.load_state_dict(torch.load(args.checkpoint))
else:
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model_fold_{}.pt').format(i)))
fold_scores = test(model, tokenizer, dataset.texts, test_batches, label_vocab, fold=i)
scores = {**scores, **fold_scores}
print('Final Scores On All Queries:')
print('Mean Precision@K: {}'.format(statistics.mean(list(scores.values()))))
print('Median Precision@K: {}'.format(statistics.median(list(scores.values()))))
for file in scores:
print('{}: {}'.format(file, scores[file]))
| BEEP-main | literature-retrieval/dense-retriever/run_triplet_bireranker_cv.py |
'''
Code to run a dense (bi-encoder) triplet reranker that scores outcome-specific abstracts for each EHR query
Command: python text_triplet_bireranker.py --text_file <EHR_TEXT_CSV> --ehr_entities <ENTITY_PICKLE_FILE> --abstracts <ABSTRACT_PICKLE_FILE> --out_dir <OUT_DIR> --outcome <OUTCOME> --model_name_or_path <MODEL> --checkpoint <MODEL_CHECKPOINT>
'''
import argparse
import gc
import os
import pickle
from sklearn.metrics.pairwise import euclidean_distances
import numpy as np
import torch
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
set_seed,
)
from data_loader_bireranker import TextDataset
def dense_ranker(abstracts, ehr_entities, text_file, outcome, ids2keep,
checkpoint, model_name_or_path, censor_after_year, out_dir,
query_type, query_format,
top_k, seed, cutoff):
if seed is not None:
set_seed(seed)
if outcome == "pmv" and ids2keep is None:
raise(ValueError("for outcome 'pmv' ids2keep should be provided."))
dataset = TextDataset(abstracts, ehr_entities)
dataset.add_ehr_text(text_file, ids2keep)
dataset.create_dataset(outcome, censor_after_year)
dataset.add_entities(cutoff)
print('Added entities to dataset...')
label_vocab = {'Relevant': 1, 'Irrelevant': 0}
config = AutoConfig.from_pretrained(
model_name_or_path,
num_labels=len(list(label_vocab.keys())),
label2id=label_vocab,
id2label={i: l for l, i in label_vocab.items()},
# finetuning_task='mnli',
cache_dir='../../cache',
)
tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir='../../cache',
use_fast=True,
)
model = AutoModel.from_pretrained(
model_name_or_path,
from_tf=bool(".ckpt" in model_name_or_path),
config=config,
cache_dir='../../cache',
)
special_tokens_dict = {'additional_special_tokens': ['[ENTSEP]']}
tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
model = model.cuda()
    if 'checkpoint' in checkpoint:
        full_checkpoint = torch.load(checkpoint)
        model.load_state_dict(full_checkpoint['model_state_dict'])
    else:
        model.load_state_dict(torch.load(checkpoint))
    # inference only: disable dropout so the embeddings are deterministic
    model.eval()
    def encode_texts(texts):
        embeds = {}
        # encoding is inference-only: skip gradient tracking to save memory and time
        with torch.no_grad():
            for i, file in enumerate(texts):
                print('Encoding article {}'.format(i))
                text = texts[file]
                text = tokenizer(text, padding='max_length', max_length=512, truncation=True, return_tensors="pt")
                text = {x: y.cuda() for x, y in text.items()}
                embed = model(**text)
                embeds[file] = embed['last_hidden_state'][0, 0, :].detach().cpu().numpy()
        return embeds
encoded_pubmed_articles = encode_texts(dataset.texts)
print('Encoded all outcome-specific articles')
del dataset.texts
gc.collect()
article_items = list(encoded_pubmed_articles.items())
article_ids, article_matrix = [x[0] for x in article_items], [x[1] for x in article_items]
article_matrix = np.vstack(article_matrix)
outcome_questions = {'mortality': 'What is the hospital mortality?', # the probability of?
'pmv': 'What is the probability of prolonged mechanical ventilation?',
'los': 'What is the probable length of stay?'}
query_question = outcome_questions[outcome]
fnum = 0
all_ehrs_ranked = {}
for file in list(dataset.data.keys()):
print('Ranking documents for query {} ({})'.format(file, fnum))
fnum += 1
if query_format == "text":
query_data = dataset.query_texts[file]
elif query_format == "entity":
entities = dataset.data[file]["entities"]
mesh_entities = entities["mesh"]
non_mesh_entities = entities["non-mesh"]
if query_type == "all":
query_terms = mesh_entities + non_mesh_entities
elif query_type == "mesh":
query_terms = mesh_entities
else:
raise(ValueError(f"query type {query_type} is not recognized"))
query_data = ' [ENTSEP] ' + ' [ENTSEP] '.join(query_terms)
else:
raise (ValueError(f"query format {query_format} is not recognized"))
cur_query_text = query_question + ' ' + query_data # TODO: why do we need question at all?
cur_query_text = tokenizer(cur_query_text, padding='max_length', max_length=512, truncation=True,
return_tensors="pt")
cur_query_text = {x:y.cuda() for x,y in cur_query_text.items()}
cur_query_embed = model(**cur_query_text)
cur_query_embed = cur_query_embed['last_hidden_state'][0,0,:].detach().cpu().numpy().transpose()
cur_query_embed = cur_query_embed.reshape(1,-1)
distances = euclidean_distances(cur_query_embed, article_matrix).astype(np.float16).tolist()[0]
ranked_docs = list(zip(article_ids, distances))
ranked_docs = list(sorted(ranked_docs, key=lambda x:x[1]))
all_ehrs_ranked[file] = ranked_docs[:top_k]
out_path = os.path.join(out_dir, file + ".pkl")
pickle.dump(ranked_docs[:top_k], open(out_path, 'wb'))
split = ""
if "train" in text_file:
split = "train"
elif "test" in text_file:
split = "test"
elif "dev" in text_file:
split = "dev"
else:
print("not sure what split is it, cannot save overall file.")
print("dumping all...")
overall_outpath = os.path.join(out_dir, f"dense_ranked_{split}" + ".pkl")
pickle.dump(all_ehrs_ranked, open(overall_outpath, 'wb'))
print("Done.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--text_file', type=str, action='store', required=True,
help='Path to file containing raw texts from EHRs')
parser.add_argument('--ehr_entities', type=str, action='store', required=True,
help='Path to file containing extracted entities for queries')
parser.add_argument('--ids2keep', type=str, action='store', default=None,
help='Path to ehr ids to keep (if we take only subset)')
parser.add_argument('--abstracts', type=str, action='store',
help='Path to pkl file containing id: abstract texts')
parser.add_argument('--out_dir', type=str, action='store', required=True,
help='Provide path to output directory')
parser.add_argument('--outcome', type=str, action='store', required=True, help='Target outcome to predict')
parser.add_argument('--model_name_or_path', type=str, action='store', required=True,
help='Path to pretrained LM to be used')
parser.add_argument('--checkpoint', type=str, action='store', help='Path to checkpoint to load model weights from')
parser.add_argument('--top_k', type=int, action='store', help='Number of top results to rerank', default=1000)
parser.add_argument('--censor_after_year', type=int, action='store', default=2016,
help='censor literature after this year')
parser.add_argument('--cutoff', type=float, action='store', default=0.9,
help='cut entities with probability less than that value')
parser.add_argument('--query_format', type=str, action='store', default='text',
help='Indicate how query should be framed (text/entity)')
parser.add_argument('--query_type', type=str, action='store', default='all',
help='Indicate which entity types to include in query [mesh/all]')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
args = parser.parse_args()
arguments_dict = vars(args)
dense_ranker(**arguments_dict)
| BEEP-main | literature-retrieval/dense-retriever/text_triplet_bireranker.py |
import os
import csv
import math
from nltk import sent_tokenize, word_tokenize
import pickle
class RawTextDataset:
def __init__(self, file, tag_type):
self.data = self.read_raw_text_files(file)
self.label_vocab = {'O': 0, 'B-PROB': 1, 'I-PROB': 2, 'B-TREAT': 3, 'I-TREAT': 4, 'B-TEST': 5, 'I-TEST': 6} \
if tag_type == 'i2b2' \
else {'O': 0, 'I-PAR': 1, 'I-INT': 2, 'I-OUT': 3}
def read_raw_text_files(self, file):
files = {}
if file.endswith(".csv"):
reader = csv.reader(open(file))
next(reader, None)
elif file.endswith(".pkl"):
reader = pickle.load(open(file, "rb"))
reader = [(key, value["text"]) for key, value in reader.items()]
        else:
            raise ValueError("file extension not recognized")
for row in reader:
files[row[0]] = []
text = row[1]
sents = sent_tokenize(text)
for sent in sents:
words = word_tokenize(sent)
files[row[0]].append(words)
return files
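    # Illustrative input/output sketch (column layout beyond the first two columns is an
    # assumption): a .csv input has 'doc_id,text,...' rows after a header, a .pkl input is a
    # dict doc_id -> {"text": raw_text, ...}. The result maps each doc_id to a list of
    # sentences, each a list of word tokens, e.g.
    #   {"1001": [["Patient", "admitted", "with", "sepsis", "."], ...]}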
class i2b2Dataset:
def __init__(self, root_dir):
self.train_data, self.dev_data = self.read_conll_format_file(os.path.join(root_dir, 'train.txt'), split=True)
self.test_data = self.read_conll_format_file(os.path.join(root_dir, 'test.txt'))
self.data = {"train": self.train_data, "dev": self.dev_data, "test": self.test_data}
# Convert labels to integer values
self.init_label_vocab()
self.construct_label_sequences(self.train_data)
self.construct_label_sequences(self.dev_data)
self.construct_label_sequences(self.test_data)
def read_conll_format_file(self, filepath, split=False): # Split train.txt into train and dev data
reader = open(filepath)
ex_id = 0
tokens = []
pico = []
examples = [] # Each example is a dict containing ID, tokens and NER labels
for line in reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if tokens:
examples.append({
'id': ex_id,
'tokens': tokens,
'labels': pico
})
ex_id += 1
tokens = []
pico = []
else:
ex_data = line.split()
tokens.append(ex_data[0])
pico.append(ex_data[-1].rstrip())
# Last example may be left out
if tokens:
examples.append({
'id': ex_id,
'tokens': tokens,
'labels': pico
})
if split:
dev_prop = math.ceil(0.1*len(examples))
return examples[dev_prop:], examples[:dev_prop]
return examples
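    # Illustrative CoNLL-style input (token in the first column, BIO label in the last; any
    # middle columns are ignored):
    #   chest B-PROB
    #   pain  I-PROB
    # which yields examples such as
    #   {'id': 0, 'tokens': ['chest', 'pain', ...], 'labels': ['B-PROB', 'I-PROB', ...]}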
def init_label_vocab(self):
self.label_vocab = {
'O': 0,
'B-PROB': 1,
'I-PROB': 2,
'B-TREAT': 3,
'I-TREAT': 4,
'B-TEST': 5,
'I-TEST': 6
}
def construct_label_sequences(self, examples):
for example in examples:
label_seq = [self.label_vocab[x] for x in example['labels']]
example['gold_seq'] = label_seq
class PICODataset:
def __init__(self, root_dir):
self.train_data = self.read_conll_format_file(os.path.join(root_dir, 'train.txt'))
self.dev_data = self.read_conll_format_file(os.path.join(root_dir, 'dev.txt'))
self.test_data = self.read_conll_format_file(os.path.join(root_dir, 'test.txt'))
self.data = {"train": self.train_data, "dev": self.dev_data, "test": self.test_data}
# Convert labels to integer values
self.init_label_vocab()
self.construct_label_sequences(self.train_data)
self.construct_label_sequences(self.dev_data)
self.construct_label_sequences(self.test_data)
def read_conll_format_file(self, filepath):
reader = open(filepath)
ex_id = 0
tokens = []
pico = []
examples = [] # Each example is a dict containing ID, tokens and pico labels
for line in reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if tokens:
examples.append({
'id': ex_id,
'tokens': tokens,
'labels': pico
})
ex_id += 1
tokens = []
pico = []
else:
ex_data = line.split()
tokens.append(ex_data[0])
pico.append(ex_data[-1].rstrip())
# Last example may be left out
if tokens:
examples.append({
'id': ex_id,
'tokens': tokens,
'labels': pico
})
return examples
def init_label_vocab(self):
self.label_vocab = {
'O': 0,
'I-PAR': 1,
'I-INT': 2,
'I-OUT': 3,
}
def construct_label_sequences(self, examples):
for example in examples:
label_seq = [self.label_vocab[x] for x in example['labels']]
example['gold_seq'] = label_seq
| BEEP-main | literature-retrieval/mention-extraction/data_loader.py |
import torch
import numpy as np
def tokenize_and_align_labels(tokenizer, examples):
tokenized_inputs = tokenizer(
examples,
padding='max_length',
truncation=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
max_length=128,
return_tensors='pt'
)
labels = []
for i, example in enumerate(examples):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set a dummy label ("O") for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(0)
            # For the remaining sub-tokens of a word we also set the label to -100,
            # so only the first sub-token of each word is scored.
else:
label_ids.append(-100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = torch.cuda.LongTensor(labels)
return tokenized_inputs
def batch_and_tokenize_data(tokenizer, examples, batch_size):
batches = []
for i in range(0, len(examples), batch_size):
start = i
end = min(start + batch_size, len(examples))
batch = tokenize_and_align_labels(tokenizer, examples[start:end])
batches.append(batch)
return batches
def extract_entities(sequence, task):
entities = {}
    # i2b2 tags use B-/I- prefixes; PICO tags only use I-*, so a PICO span starts at an
    # I-* tag that does not continue a previous I-* span.
    starts = [i for i, x in enumerate(sequence) if x.startswith('B')] if task == 'i2b2' \
        else [i for i, x in enumerate(sequence) if x.startswith('I')
              and (i == 0 or not sequence[i - 1].startswith('I'))]
ends = []
for idx in starts:
if idx == len(sequence)-1 or not sequence[idx+1].startswith('I'):
ends.append(idx)
continue
cur_idx = idx + 1
while cur_idx < len(sequence) and sequence[cur_idx].startswith('I'):
cur_idx += 1
ends.append(cur_idx-1)
if len(starts) != len(ends):
print('Missing end indices for some predictions!!')
offsets = list(zip(starts, ends))
for offset in offsets:
tag = sequence[offset[0]].split('-')[1]
entities[offset] = tag
return entities
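# Illustrative example with i2b2 tags:
#   extract_entities(['O', 'B-PROB', 'I-PROB', 'O', 'B-TEST'], 'i2b2')
#   -> {(1, 2): 'PROB', (4, 4): 'TEST'}
# With 'pico' tags there is no B- prefix, so a span starts at an I-* tag that does not
# continue a previous I-* span.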
def tag(model, data, label_vocab):
with torch.no_grad():
model.eval()
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
batch = {x:y.cuda() for x,y in data.items()}
outputs = model(**batch)
cur_preds = np.argmax(outputs[1].detach().cpu().numpy(), axis=2)
labels = batch['labels'].cpu().numpy()
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(cur_preds, labels)
]
return true_predictions | BEEP-main | literature-retrieval/mention-extraction/utils.py |
'''
Code for NER and PICO tagging. Model: Pretrained LM + linear layer
Run: python pico_trainer.py --data_dir <DATA DIR> --out_dir <OUTPUT DIR> --model_name_or_path <LM NAME> --task <pico/i2b2> --do_train --do_test
'''
import os
import argparse
import random
import numpy as np
from collections import Counter
import torch
import torch.optim as optim
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
set_seed,
)
from data_loader import PICODataset, i2b2Dataset
def compute_macro_f1(predictions, references, label_list):
results = {}
total_f1 = 0.0
for label in label_list:
gold_count = 0
pred_count = 0
correct_count = 0
for pred_list, ref_list in zip(predictions, references):
for pred, ref in zip(pred_list, ref_list):
if ref == label:
gold_count += 1
if pred == label:
pred_count += 1
if pred == ref:
correct_count += 1
label_prec = correct_count / float(pred_count) if pred_count != 0 else 0.0
label_rec = correct_count / float(gold_count) if gold_count != 0 else 0.0
label_f1 = (2 * label_prec * label_rec) / (label_prec + label_rec) if label_prec != 0 and label_rec != 0 else 0.0
results[label] = label_f1
total_f1 += label_f1
total_f1 /= len(label_list)
results['overall_f1'] = total_f1
return results
def extract_entities(sequence):
entities = {}
starts = [i for i,x in enumerate(sequence) if x.startswith('B')]
ends = []
for idx in starts:
if idx == len(sequence)-1 or not sequence[idx+1].startswith('I'):
ends.append(idx)
continue
cur_idx = idx + 1
while cur_idx < len(sequence) and sequence[cur_idx].startswith('I'):
cur_idx += 1
ends.append(cur_idx-1)
if len(starts) != len(ends):
print('Missing end indices for some predictions!!')
offsets = list(zip(starts, ends))
for offset in offsets:
tag = sequence[offset[0]].split('-')[1]
entities[offset] = tag
return entities
def compute_exact_f1(predictions, references, label_list):
results = {}
pred_count = 0.0
ref_count = 0.0
correct_count = 0.0
num_tags = (len(label_list)-1)/2
per_tag_pred = Counter()
per_tag_ref = Counter()
per_tag_correct = Counter()
for pred_list, ref_list in zip(predictions, references):
pred_entities = extract_entities(pred_list)
ref_entities = extract_entities(ref_list)
pred_count += len(pred_entities)
ref_count += len(ref_entities)
per_tag_pred.update(list(pred_entities.values()))
per_tag_ref.update(list(ref_entities.values()))
matched_spans = set(pred_entities.keys()).intersection(set(ref_entities.keys())) # Find entities that match boundaries exactly
for span in matched_spans:
if pred_entities[span] == ref_entities[span]: # Check that type also matches
correct_count += 1
per_tag_correct.update([pred_entities[span]])
rec = correct_count / ref_count if ref_count != 0 else 0.0
prec = correct_count / pred_count if pred_count != 0 else 0.0
f1 = (2 * prec * rec) / (prec + rec) if prec != 0 and rec != 0 else 0.0
for label in per_tag_ref:
tag_prec = per_tag_correct[label] / float(per_tag_pred[label]) if per_tag_pred[label] != 0 else 0.0
tag_rec = per_tag_correct[label] / float(per_tag_ref[label]) if per_tag_ref[label] != 0 else 0.0
tag_f1 = (2 * tag_prec * tag_rec) / (tag_prec + tag_rec) if tag_rec != 0 and tag_prec != 0 else 0.0
results[label] = tag_f1
results['overall_f1'] = f1
return results
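# Worked example of the exact-match scoring above: with 4 gold spans, 3 predicted spans, and
# 2 predictions matching a gold span on both boundaries and type,
#   precision = 2/3, recall = 2/4, overall_f1 = 2 * (2/3) * (1/2) / ((2/3) + (1/2)) ~= 0.571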
def train(model, train_data, dev_data, out_dir, label_vocab, epochs, lr, task='pico'):
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
print(label_list)
step = 0
prev_dev_loss = 10000
for epoch in range(epochs):
model.train()
epoch_loss = 0.0
for batch in train_data:
optimizer.zero_grad()
batch = {x:y.cuda() for x,y in batch.items()} # Tensors need to be moved to GPU
outputs = model(**batch) # Batch is a dictionary from HF that needs to be unpacked
loss = outputs[0]
epoch_loss += loss.item()
loss.backward()
optimizer.step()
step += 1
if step%1000 == 0:
print('Completed {} training steps'.format(step))
epoch_loss /= len(train_data)
print('Training loss after epoch {}: {}'.format(epoch, epoch_loss))
# Checkpoint model after each epoch anyway, in addition to storing best loss
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': epoch_loss,
}, os.path.join(out_dir, 'checkpoints/checkpoint_{}.pt'.format(epoch)))
dev_loss = test(model, dev_data, label_vocab, epoch=epoch, return_loss=True, task=task)
if dev_loss < prev_dev_loss:
prev_dev_loss = dev_loss
torch.save(model.state_dict(), os.path.join(out_dir, 'best_model.pt'))
scheduler.step(dev_loss)
def test(model, dev_data, label_vocab, epoch=0, return_loss=False, task='pico'):
model.eval()
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
dev_loss = 0.0
model_predictions = []
gold_labels = []
for batch in dev_data:
batch = {x:y.cuda() for x,y in batch.items()}
outputs = model(**batch)
loss = outputs[0]
dev_loss += loss.item()
cur_preds = np.argmax(outputs[1].detach().cpu().numpy(), axis=2)
labels = batch['labels'].cpu().numpy()
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(cur_preds, labels)
]
true_labels = [
[label_list[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(cur_preds, labels)
]
model_predictions += true_predictions
gold_labels += true_labels
dev_loss /= len(dev_data)
print('Validation loss after epoch {}: {}'.format(epoch, dev_loss))
results = {}
if task == 'pico':
results = compute_macro_f1(model_predictions, gold_labels, label_list)
elif task == 'i2b2':
results = compute_exact_f1(model_predictions, gold_labels, label_list)
print('------------------F1 Scores for Epoch {}-------------------'.format(epoch))
print('Overall Macro F1 Score: {}'.format(results['overall_f1']))
for label in results:
if 'overall' in label:
continue
print('F1 Score for {}: {}'.format(label, results[label]))
if return_loss:
return dev_loss
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, action='store', required=True, help='Provide path to data directory')
parser.add_argument('--out_dir', type=str, action='store', required=True, help='Provide path to directory to store outputs')
parser.add_argument('--model_name_or_path', type=str, action='store', required=True, help='Path to pretrained LM to be used')
parser.add_argument('--task', type=str, action='store', required=True, help='Choose whether to do PICO or i2b2 concept tagging')
parser.add_argument('--do_train', action='store_true', default=False, help='Specify if training should be performed')
parser.add_argument('--do_test', action='store_true', default=False, help='Specify if evaluation on test data should be performed')
parser.add_argument('--batch_size', type=int, action='store', default=16, help='Specify batch size')
parser.add_argument('--lr', type=float, action='store', default=2e-5, help='Specify learning rate')
parser.add_argument('--epochs', type=int, action='store', default=20, help='Specify number of epochs')
parser.add_argument('--seed', type=int, action='store', default=42, help='Specify random seed')
args = parser.parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
checkpoint_dir = os.path.join(args.out_dir, 'checkpoints')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
set_seed(args.seed)
dataset = []
if args.task == 'pico':
dataset = PICODataset(args.data_dir)
elif args.task == 'i2b2':
dataset = i2b2Dataset(args.data_dir)
else:
print('Invalid task name provided - choose from pico or i2b2!')
label_vocab = dataset.label_vocab
config = AutoConfig.from_pretrained(
args.model_name_or_path,
num_labels=len(list(label_vocab.keys())),
label2id=label_vocab,
id2label={i: l for l, i in label_vocab.items()},
finetuning_task='ner',
cache_dir='../../cache/',
)
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path,
cache_dir='../../cache',
use_fast=True,
)
model = AutoModelForTokenClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir='../../cache',
)
# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
example_texts = [x['tokens'] for x in examples]
tokenized_inputs = tokenizer(
example_texts,
padding='max_length',
truncation=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
max_length=128,
return_tensors='pt'
)
labels = []
for i, example in enumerate(examples):
label_seq = example['gold_seq']
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label_seq[word_idx])
                # For the remaining sub-tokens of a word, we set the label to -100 so that
                # only the first sub-token of each word contributes to the loss.
else:
label_ids.append(-100)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = torch.cuda.LongTensor(labels)
return tokenized_inputs
def batch_and_tokenize_data(examples, batch_size):
random.shuffle(examples)
batches = []
for i in range(0, len(examples), batch_size):
start = i
end = min(start+batch_size, len(examples))
batch = tokenize_and_align_labels(examples[start:end])
batches.append(batch)
return batches
# Batch data, tokenize and align labels with (subword) tokens
train_dataset = batch_and_tokenize_data(dataset.data["train"], args.batch_size)
dev_dataset = batch_and_tokenize_data(dataset.data["dev"], args.batch_size)
test_dataset = batch_and_tokenize_data(dataset.data["test"], args.batch_size)
model = model.cuda()
if args.do_train:
train(model, train_dataset, dev_dataset, args.out_dir, label_vocab, args.epochs, args.lr, task=args.task)
if args.do_test:
model.load_state_dict(torch.load(os.path.join(args.out_dir, 'best_model.pt')))
test(model, test_dataset, label_vocab, task=args.task)
| BEEP-main | literature-retrieval/mention-extraction/pico_trainer.py |
'''
Code to dump NER or PICO tags for raw text. Model: Pretrained LM + linear layer
Run: python text_tagger.py --data <RAW TEXT CSV/PKL> --out_dir <OUTPUT DIR> --model_name_or_path <LM NAME> --checkpoint <MODEL WEIGHT FILE> --task <pico/i2b2> --outcome <OUTCOME NAME>
'''
import pickle
import os
import argparse
import numpy as np
import torch
from transformers import AutoConfig, AutoModelForTokenClassification, AutoTokenizer, set_seed
from data_loader import RawTextDataset
from utils import extract_entities, batch_and_tokenize_data
def tag(model, data, label_vocab):
with torch.no_grad():
model.eval()
label_list = [x[0] for x in list(sorted(label_vocab.items(), key=lambda x: x[1]))]
batch = {x: y.cuda() for x, y in data.items()}
outputs = model(**batch)
cur_preds = np.argmax(outputs[1].detach().cpu().numpy(), axis=2)
labels = batch['labels'].cpu().numpy()
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(cur_preds, labels)
]
return true_predictions
def run_tagger(data, out_dir, model_name_or_path, checkpoint, task, batch_size, outcome):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print("building dataset (tokenizing sentence and words)...")
dataset = RawTextDataset(data, task)
label_vocab = dataset.label_vocab
print("dataset built.\n")
print("constructing config...")
config = AutoConfig.from_pretrained(
model_name_or_path,
num_labels=len(list(label_vocab.keys())),
label2id=label_vocab,
id2label={i: l for l, i in label_vocab.items()},
finetuning_task='ner',
cache_dir='../../cache',
)
print("config constructed.\n")
print("constructing tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
cache_dir='../../cache',
use_fast=True,
)
print("tokenizer constructed.\n")
print("constructing model...")
model = AutoModelForTokenClassification.from_pretrained(
model_name_or_path,
from_tf=bool(".ckpt" in model_name_or_path),
config=config,
cache_dir='../../cache',
)
print("model constructed.")
print("loading model's state dict...")
model.load_state_dict(torch.load(checkpoint))
print("model's state dict loaded.")
split = ""
if "train" in data:
split = "train"
elif "dev" in data or "val" in data:
split = "dev"
elif "test" in data:
split = "test"
# Grab all sentences from a file, tokenize and align labels with (subword) tokens
model = model.cuda()
mentions = {}
print("extracting mentions...")
for i, file in enumerate(list(dataset.data.keys())):
print('Extracting mentions from file {} ({})'.format(file, i))
text = dataset.data[file]
mentions[file] = {}
mentions[file]['tokenized_text'] = text
test_dataset = batch_and_tokenize_data(tokenizer, text, batch_size)
preds = []
for batch in test_dataset:
preds += tag(model, batch, label_vocab)
sent_idx = 0
for sent, sent_pred in zip(text, preds):
res = extract_entities(sent_pred, task)
sent_entities = [{"mention": sent[start_:(end_ + 1)],
"start_offset": start_, "end_offset": end_, "pred_type": NE}
for (start_, end_), NE in res.items()]
mentions[file][sent_idx] = sent_entities
sent_idx += 1
if i % 20000 == 0 and i != 0:
outpath = os.path.join(out_dir, '{}_{}_mentions_{}.pkl'.format(outcome, split, i))
pickle.dump(mentions, open(outpath, 'wb'))
mentions = {}
if mentions:
if i < 20000:
outpath = os.path.join(out_dir, '{}_{}_mentions.pkl'.format(outcome, split))
else:
outpath = os.path.join(out_dir, '{}_{}_mentions_{}.pkl'.format(outcome, split, i))
pickle.dump(mentions, open(outpath, 'wb'))
print("mentions extracting done.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, action='store', required=True,
help='Provide path to csv file containing raw texts')
parser.add_argument('--out_dir', type=str, action='store', required=True,
help='Provide path to directory to store outputs')
parser.add_argument('--model_name_or_path', type=str, action='store', required=True,
help='Path to pretrained LM to be used')
parser.add_argument('--checkpoint', type=str, action='store', required=True,
help='Path to checkpoint to load model weights from')
parser.add_argument('--task', type=str, action='store', required=True,
help='Choose whether to do PICO or i2b2 concept tagging')
parser.add_argument('--batch_size', type=int, action='store', default=16, help='Specify batch size')
parser.add_argument('--outcome', type=str, action='store', required=True, help='name of the outcome')
args = parser.parse_args()
run_tagger(**vars(args))
| BEEP-main | literature-retrieval/mention-extraction/text_tagger.py |
import sys
import csv
import pickle
import os
admnote_folder = sys.argv[1]
note_texts = {}
for file in os.listdir(admnote_folder):
reader = csv.reader(open(os.path.join(admnote_folder, file)))
next(reader, None)
for row in reader:
note_texts[int(row[0])] = row[1]
pmv_labels = pickle.load(open('pmv_labels.pkl', 'rb'))
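# Assumed structure of pmv_labels.pkl (only these two positions are read below): each note id
# maps to a sequence whose first element is the PMV label and whose last element is the split
# name ('train'/'val'/'test').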
if not os.path.isdir('mechanical_ventilation'):
os.mkdir('mechanical_ventilation')
train_file = open('mechanical_ventilation/pmv_train.csv', 'w')
dev_file = open('mechanical_ventilation/pmv_dev.csv', 'w')
test_file = open('mechanical_ventilation/pmv_test.csv', 'w')
train_writer = csv.writer(train_file)
dev_writer = csv.writer(dev_file)
test_writer = csv.writer(test_file)
train_writer.writerow(['id', 'text', 'label'])
dev_writer.writerow(['id', 'text', 'label'])
test_writer.writerow(['id', 'text', 'label'])
for note in pmv_labels:
if pmv_labels[note][-1] == 'train':
train_writer.writerow([note, note_texts[note], pmv_labels[note][0]])
if pmv_labels[note][-1] == 'val':
dev_writer.writerow([note, note_texts[note], pmv_labels[note][0]])
if pmv_labels[note][-1] == 'test':
test_writer.writerow([note, note_texts[note], pmv_labels[note][0]])
train_file.close()
dev_file.close()
test_file.close()
| BEEP-main | data/generate_pmv_data.py |
import argparse
import json
from copy import deepcopy
from math import ceil
from random import shuffle
from commaqa.inference.utils import LIST_JOINER, EOQ_MARKER, INTERQ_MARKER, ANSWER_MARKER, \
SIMPQ_MARKER
def parse_arguments():
arg_parser = argparse.ArgumentParser(description='Solve a ReModeL dataset using composition')
arg_parser.add_argument('--input_json', type=str, required=True,
help="Input JSON dataset files")
arg_parser.add_argument('--chains', type=str, required=True,
help="Input chains TSV file")
arg_parser.add_argument('--decomp_json', type=str, required=False, help="Output decompositions")
arg_parser.add_argument('--max_examples', type=float, required=False, default=1.0,
help="Maximum number of examples to use. "
"If set to <=1.0, use as fraction.")
return arg_parser.parse_args()
def is_valid_answer(predicted_answer, gold_answer):
if isinstance(gold_answer, list):
gold_answer_str = LIST_JOINER.join(sorted(gold_answer))
else:
gold_answer_str = str(gold_answer)
if isinstance(predicted_answer, list):
predicted_answer_str = LIST_JOINER.join(sorted([str(s) for s in predicted_answer]))
else:
        predicted_answer_str = str(predicted_answer)
# print(predicted_answer_str, gold_answer_str)
return predicted_answer_str == gold_answer_str
def build_train_seqs(question_seq):
question_seq = question_seq.strip() + " " + EOQ_MARKER
train_seqs = [question_seq]
while INTERQ_MARKER in question_seq:
answer_idx = question_seq.rfind(ANSWER_MARKER)
question_seq = question_seq[:answer_idx].strip()
interq_idx = question_seq.rfind(INTERQ_MARKER)
question_seq = question_seq[:interq_idx] + SIMPQ_MARKER + question_seq[
interq_idx + len(INTERQ_MARKER):]
train_seqs.append(question_seq)
return train_seqs
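# Illustrative sketch, assuming the marker values used elsewhere in this repo
# (EOQ_MARKER='[EOQ]', INTERQ_MARKER='QI:', ANSWER_MARKER='A:', SIMPQ_MARKER='QS:'):
# a fully answered chain
#   'QC: q QI: q1 A: ["a1"] QI: q2 A: ["a2"]'
# is unrolled back-to-front into the training sequences
#   ['QC: q QI: q1 A: ["a1"] QI: q2 A: ["a2"] [EOQ]',
#    'QC: q QI: q1 A: ["a1"] QS: q2',
#    'QC: q QS: q1']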
if __name__ == '__main__':
args = parse_arguments()
with open(args.input_json, "r") as input_fp:
input_json = json.load(input_fp)
predictions_per_qid = {}
with open(args.chains, "r") as chains_fp:
for line in chains_fp:
fields = line.strip().split("\t")
qid = fields[0]
if qid not in predictions_per_qid:
predictions_per_qid[qid] = []
predictions_per_qid[qid].append(fields[1:])
decomp_json = []
num_chains_correct_answer = 0
num_questions_correct_chains = 0
num_question_no_chains = 0
num_questions = 0
num_chains = 0
for input_item in input_json:
for qa_pair in input_item["qa_pairs"]:
qid = qa_pair["id"]
num_questions += 1
if qid not in predictions_per_qid:
# print(qid)
num_question_no_chains += 1
continue
found_match = False
for potential_seq in predictions_per_qid[qid]:
num_chains += 1
if is_valid_answer(json.loads(potential_seq[1]), qa_pair["answer"]):
found_match = True
num_chains_correct_answer += 1
train_seqs = build_train_seqs(potential_seq[0])
decomp = deepcopy(qa_pair)
decomp["train_seqs"] = train_seqs
decomp_json.append(decomp)
if found_match:
num_questions_correct_chains += 1
num_questions_with_chains = (num_questions - num_question_no_chains)
print("Num Questions: {}".format(num_questions))
print("Num Questions with no chains: {} ({:.2f}%)".format(
num_question_no_chains, (num_question_no_chains * 100 / num_questions)))
print("Num Questions with chains: {} ({:.2f}%)".format(
num_questions_with_chains, (num_questions_with_chains * 100 / num_questions)))
print("Num Questions with at least one correct chain: {}"
"({:.2f}% of predicted, {:.2f}% of total)".format(
num_questions_correct_chains,
(num_questions_correct_chains * 100 / num_questions_with_chains),
(num_questions_correct_chains * 100 / num_questions)))
print("Num Chains: {}({:.2f} c per predicted, {:.2f} c per total)".format(
num_chains, num_chains / num_questions_with_chains, num_chains / num_questions))
print("Num Chains with correct answer: {}({:.2f}%)".format(
num_chains_correct_answer, (num_chains_correct_answer * 100 / num_chains)))
if args.decomp_json:
# sample examples here as they will be ungrouped
if args.max_examples < 1.0:
shuffle(decomp_json)
decomp_json = decomp_json[:ceil(len(decomp_json) * args.max_examples)]
elif args.max_examples > 1.0:
shuffle(decomp_json)
decomp_json = decomp_json[:args.max_examples]
with open(args.decomp_json, "w") as output_fp:
for decomp in decomp_json:
output_fp.write(json.dumps(decomp) + "\n")
| CommaQA-main | commaqa/dataset/generate_decompositions_from_chains.py |
| CommaQA-main | commaqa/dataset/__init__.py |
|
import argparse
import json
from copy import deepcopy
from math import ceil
from random import shuffle
from commaqa.configs.predicate_language_config import ModelQuestionConfig
from commaqa.dataset.utils import nonempty_answer
from commaqa.execution.operation_executer import OperationExecuter
from commaqa.execution.utils import build_models
def parse_arguments():
arg_parser = argparse.ArgumentParser(description='Solve a ReModeL dataset using composition')
arg_parser.add_argument('--input_json', type=str, required=True,
help="Input JSON dataset files")
arg_parser.add_argument('--pred_json', type=str, required=False, help="Output predictions")
arg_parser.add_argument('--decomp_json', type=str, required=False, help="Output decompositions")
arg_parser.add_argument('--max_examples', type=float, required=False, default=1.0,
help="Maximum number of examples to use. "
"If set to <=1.0, use as fraction.")
return arg_parser.parse_args()
def build_chain(prev_chain, operation, model, question):
return prev_chain + " QS: ({}) [{}] {}".format(operation, model, question)
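# For example (illustrative values), build_chain(" QC: who directed X?", "select", "text",
# "directed(X, ?)") returns " QC: who directed X? QS: (select) [text] directed(X, ?)"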
if __name__ == '__main__':
args = parse_arguments()
with open(args.input_json, "r") as input_fp:
input_json = json.load(input_fp)
pred_json = {}
decomp_json = []
for input_item in input_json:
kb = input_item["kb"]
model_configurations = {}
for model_name, configs in input_item["pred_lang_config"].items():
model_configurations[model_name] = [ModelQuestionConfig(config) for config in configs]
model_lib = build_models(model_configurations, kb)
executor = OperationExecuter(model_lib)
for qa_pair in input_item["qa_pairs"]:
qid = qa_pair["id"]
# use oracle decomposition
curr_assignment = {}
last_answer = ""
train_seqs = []
prev_chain = " QC: " + qa_pair["question"]
for idx, step in enumerate(qa_pair["decomposition"]):
train_seq = build_chain(prev_chain=prev_chain,
operation=step["op"],
model=step["m"],
question=step["q"])
train_seqs.append(train_seq)
answers, facts_used = executor.execute_operation(operation=step["op"],
model=step["m"],
question=step["q"],
assignments=curr_assignment)
last_answer = answers
if not nonempty_answer(answers):
print("no answer!")
print(step, curr_assignment, kb)
break
prev_chain = train_seq.replace(" QS: ", " QI: ") + " A: " + json.dumps(answers)
curr_assignment["#" + str(idx + 1)] = answers
train_seqs.append(prev_chain + " QS: [EOQ]")
decomp = deepcopy(qa_pair)
decomp["train_seqs"] = train_seqs
decomp_json.append(decomp)
if isinstance(last_answer, list):
pred_json[qid] = last_answer
else:
pred_json[qid] = str(last_answer)
if args.pred_json:
with open(args.pred_json, "w") as output_fp:
json.dump(pred_json, output_fp, indent=2)
if args.decomp_json:
# sample examples here as they will be ungrouped
if args.max_examples < 1.0:
shuffle(decomp_json)
decomp_json = decomp_json[:ceil(len(decomp_json) * args.max_examples)]
elif args.max_examples > 1.0:
shuffle(decomp_json)
decomp_json = decomp_json[:args.max_examples]
with open(args.decomp_json, "w") as output_fp:
for decomp in decomp_json:
output_fp.write(json.dumps(decomp) + "\n")
| CommaQA-main | commaqa/dataset/generate_decomposition_predictions.py |
import argparse
import json
import os
import random
import re
import string
from math import ceil
from pathlib import Path
from shutil import copyfile
from typing import List
import _jsonnet
from tqdm import tqdm
from commaqa.configs.dataset_build_config import DatasetBuildConfig
from commaqa.dataset.utils import get_predicate_args, dict_product, nonempty_answer
from commaqa.execution.utils import build_models
def parse_arguments():
arg_parser = argparse.ArgumentParser(description='Build a ReModeL dataset from inputs')
arg_parser.add_argument('--input_json', type=str, required=True,
help="Input JSON configuration files "
"(comma-separated for multiple files)")
arg_parser.add_argument('--output', "-o", type=str, required=True, help="Output folder")
arg_parser.add_argument('--num_groups', type=int, required=False, default=500,
help="Number of example groups to create")
arg_parser.add_argument('--num_examples_per_group', type=int, required=False, default=10,
help="Number of examples per group")
arg_parser.add_argument('--entity_percent', type=float, required=False, default=0.25,
help="Percentage of entities to sample for each group")
return arg_parser.parse_args()
class SubDatasetBuilder:
def __init__(self, configs: List[DatasetBuildConfig]):
self.configs = configs
def build_entities(self, entities, ent_type):
m = re.match("list\((.*)\)", ent_type)
if m:
            # too many possible permutations to enumerate; only build 2 * len(entities[ent_type])
            # randomly sampled list-valued entities of this type
returned_list = []
ent_type = m.group(1)
for i in range(len(entities[ent_type]) * 2):
sample_size = random.choice(range(2, 5))
sampled_ents = random.sample(entities[ent_type], sample_size)
returned_list.append(json.dumps(sampled_ents))
return returned_list
else:
return entities[ent_type]
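        # Illustrative behaviour: for ent_type 'movie' this simply returns entities['movie'];
        # for ent_type 'list(movie)' it returns 2 * len(entities['movie']) JSON-encoded random
        # subsets of 2-4 entities, e.g. '["Inception", "Memento"]' (entity names illustrative).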
def build_sub_dataset(self,
num_entities_per_group=5,
num_groups: int = 10, num_examples_per_group: int = 10):
per_model_dataset = {}
for g in tqdm(range(num_groups)):
config = random.choice(self.configs)
entities = config.entities.subsample(num_entities_per_group)
complete_kb = {}
for pred in config.predicates:
complete_kb[pred.pred_name] = pred.populate_kb(entities)
model_library = build_models(config.pred_lang_config.model_config, complete_kb)
# per_model_qa = {}
# per_model_kb = {}
for model, model_configs in config.pred_lang_config.model_config.items():
all_qa = {}
gold_kb = {}
for model_config in model_configs:
if model_config.init is None:
raise ValueError("Initialization needs to be specified to build the "
"sub-model dataset for {}".format(model_config))
# Add the model-specific kb based on the steps
for step in model_config.steps:
qpred, qargs = get_predicate_args(step.question)
if qpred not in gold_kb:
gold_kb[qpred] = complete_kb[qpred]
context = ""
gold_context = ""
for pred in config.predicates:
context_rep = pred.generate_context(complete_kb)
context += context_rep
if pred.pred_name in gold_kb:
gold_context += context_rep
output_data = {
"all_kb": complete_kb,
"kb": gold_kb,
"context": gold_context,
"all_context": context,
}
# Generate questions
assignment_dict = {}
# Initialize question arguments
for key, ent_type in model_config.init.items():
assignment_dict[key] = self.build_entities(entities, ent_type)
# For each assignment, generate a question
for assignment in dict_product(assignment_dict):
if isinstance(model_config.questions, str):
questions = [model_config.questions]
else:
questions = model_config.questions
# for each question format, generate a question
for question in questions:
source_question = question
for key, val in assignment.items():
question = question.replace(key, val)
answers, facts_used = model_library[model].ask_question(question)
if nonempty_answer(answers):
if source_question not in all_qa:
all_qa[source_question] = []
all_qa[source_question].append({
"question": question,
"answer": answers,
"facts_used": facts_used,
"assignment": assignment,
"config": model_config.to_json(),
"id": "".join(
[random.choice(string.hexdigits) for n in
range(16)]).lower()
})
# subsample questions to equalize #questions per theory
min_size = min([len(qa) for qa in all_qa.values()])
subsampled_questions = []
for qa_per_sourceq in all_qa.values():
subsampled_questions.extend(random.sample(qa_per_sourceq, min_size))
qa = random.sample(subsampled_questions, num_examples_per_group)
output_data["all_qa"] = [qa for qa_per_sourceq in all_qa.values()
for qa in qa_per_sourceq]
output_data["qa_pairs"] = qa
if model not in per_model_dataset:
per_model_dataset[model] = []
per_model_dataset[model].append(output_data)
return per_model_dataset
if __name__ == '__main__':
args = parse_arguments()
dataset_configs = []
counter = 0
for filename in args.input_json.split(","):
counter += 1
output_dir = ""
if args.output.endswith(".json"):
output_dir = os.path.dirname(args.output)
else:
output_dir = args.output
if filename.endswith(".jsonnet"):
data = json.loads(_jsonnet.evaluate_file(filename))
with open(output_dir + "/source{}.json".format(counter), "w") as output_fp:
json.dump(data, output_fp, indent=2)
dataset_config = DatasetBuildConfig(data)
dataset_configs.append(dataset_config)
else:
copyfile(filename, output_dir + "/source{}.json".format(counter))
with open(filename, "r") as input_fp:
input_json = json.load(input_fp)
dataset_config = DatasetBuildConfig(input_json)
dataset_configs.append(dataset_config)
builder = SubDatasetBuilder(dataset_configs)
per_model_dataset = builder.build_sub_dataset(num_groups=args.num_groups,
num_entities_per_group=args.entity_percent,
num_examples_per_group=args.num_examples_per_group)
for model, data in per_model_dataset.items():
num_examples = len(data)
print("Model: {}".format(model))
print("Number of example groups: {}".format(num_examples))
train_ex = ceil(num_examples * 0.8)
dev_ex = ceil(num_examples * 0.1)
test_ex = num_examples - train_ex - dev_ex
print("Train/Dev/Test: {}/{}/{}".format(train_ex, dev_ex, test_ex))
output_dir = args.output + "/" + model
Path(output_dir).mkdir(parents=True, exist_ok=True)
files = [output_dir + "/train.json", output_dir + "/dev.json", output_dir + "/test.json"]
datasets = [data[:train_ex], data[train_ex:train_ex + dev_ex], data[train_ex + dev_ex:]]
for file, dataset in zip(files, datasets):
with open(file, "w") as output_fp:
json.dump(dataset, output_fp, indent=4)
| CommaQA-main | commaqa/dataset/build_submodel_datasets.py |
import itertools
import re
pred_match = re.compile("(.*)\((.*)\)$")
def get_answer_indices(question_str):
return [int(m.group(1)) for m in re.finditer("#(\d)", question_str)]
def get_question_indices(question_str):
return [int(m.group(1)) for m in re.finditer("\$(\d)", question_str)]
def is_question_var(var_name):
return var_name.startswith("$")
def get_predicate_args(predicate_str):
mat = pred_match.match(predicate_str)
if mat is None:
return None, None
predicate = mat.group(1)
pred_args = mat.group(2).split(", ") if " | " not in mat.group(2) else mat.group(2).split(" | ")
return predicate, pred_args
def flatten_list(input_list):
output_list = []
for item in input_list:
if isinstance(item, list):
output_list.extend(flatten_list(item))
else:
output_list.append(item)
return output_list
def align_assignments(target_predicate, source_predicate, source_assignments):
"""
Returns a (map from target_predicate arg name to the assignment in source_assignments),
(map from target_predicate arg name to the source predicate arg)
"""
target_pred, target_args = get_predicate_args(target_predicate)
source_pred, source_args = get_predicate_args(source_predicate)
if target_pred != source_pred:
raise ValueError("Source predicate: {} does not match target predicate: {}".format(
source_predicate, target_predicate
))
if len(target_args) != len(source_args):
raise ValueError("Number of target arguments: {} don't match source arguments: {}".format(
target_args, source_args
))
target_assignment = {}
target_assignment_map = {}
for target_arg, source_arg in zip(target_args, source_args):
if source_arg == "?":
if target_arg != "?":
raise ValueError("Source ({}) and Target ({}) predicates have mismatch"
" on '?'".format(source_predicate, target_predicate))
continue
if source_arg not in source_assignments:
raise ValueError("No assignment for {} in input assignments: {}".format(
source_arg, source_assignments
))
target_assignment[target_arg] = source_assignments[source_arg]
target_assignment_map[target_arg] = source_arg
return target_assignment, target_assignment_map
def dict_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
def nonempty_answer(answer):
if isinstance(answer, list) and len(answer) == 0:
return False
if isinstance(answer, str) and answer == "":
return False
return True
NOANSWER = None
def valid_answer(answer):
return answer is not None
| CommaQA-main | commaqa/dataset/utils.py |
import argparse
import json
import logging
import os
import random
from math import ceil
from random import shuffle
from shutil import copyfile
from typing import List
import _jsonnet
from commaqa.configs.dataset_build_config import DatasetBuildConfig
from commaqa.execution.utils import build_models
logger = logging.getLogger(__name__)
def parse_arguments():
arg_parser = argparse.ArgumentParser(description='Build a CommaQA dataset from inputs')
arg_parser.add_argument('--input_json', type=str, required=True,
help="Input JSON configuration files "
"(comma-separated for multiple files)")
arg_parser.add_argument('--output', "-o", type=str, required=True, help="Output folder")
arg_parser.add_argument('--num_groups', type=int, required=False, default=100,
help="Number of example groups to create")
arg_parser.add_argument('--num_examples_per_group', type=int, required=False, default=10,
help="Number of examples per group")
arg_parser.add_argument('--entity_percent', type=float, required=False, default=1.0,
help="Percentage of entities to sample for each group")
arg_parser.add_argument('--debug', action="store_true", default=False,
help="Enable debug logging")
return arg_parser.parse_args()
class DatasetBuilder:
def __init__(self, configs: List[DatasetBuildConfig]):
self.configs = configs
def build_dataset(self,
num_entities_per_group=5,
num_groups: int = 10, num_examples_per_group: int = 10):
data = []
numqs_per_theory = {}
logger.info("Creating examples with {} questions per group. #Groups: {}. "
"#Entities per group: {}".format(num_examples_per_group, num_groups,
num_entities_per_group))
        # Weighted distribution over input configs so that each config contributes a roughly
        # equal number of examples: every config starts with weight num_groups, and a config's
        # weight is reduced by len(configs) each time it is picked. This yields about
        # num_groups/len(configs) groups per config and biases sampling toward configs that
        # have been used less often so far.
config_distribution = [num_groups for x in self.configs]
group_idx = 0
num_attempts = 0
while group_idx < num_groups:
num_attempts += 1
config_idx = random.choices(range(len(self.configs)), config_distribution)[0]
current_config = self.configs[config_idx]
# sample entities based on the current config
entities = current_config.entities.subsample(num_entities_per_group)
# build a KB based on the entities
complete_kb = {}
complete_kb_fact_map = {}
for pred in current_config.predicates:
complete_kb[pred.pred_name] = pred.populate_kb(entities)
curr_pred_kb_fact_map = pred.generate_kb_fact_map(complete_kb)
for k, v in curr_pred_kb_fact_map.items():
complete_kb_fact_map[k] = v
questions_per_theory = {}
context = " ".join(complete_kb_fact_map.values())
output_data = {
"kb": complete_kb,
"context": context,
"per_fact_context": complete_kb_fact_map,
"pred_lang_config": current_config.pred_lang_config.model_config_as_json()
}
# build questions using KB and language config
model_library = build_models(current_config.pred_lang_config.model_config, complete_kb)
for theory in current_config.theories:
theory_qs = theory.create_questions(entities=entities.entity_type_map,
pred_lang_config=current_config.pred_lang_config,
model_library=model_library)
theory_key = theory.to_str()
if theory_key not in numqs_per_theory:
numqs_per_theory[theory_key] = 0
numqs_per_theory[theory_key] += len(theory_qs)
questions_per_theory[theory_key] = theory_qs
all_questions = [qa for qa_per_theory in questions_per_theory.values()
for qa in qa_per_theory]
if len(all_questions) < num_examples_per_group:
# often happens when a configuration has only one theory, skip print statement
if len(current_config.theories) != 1:
logger.warning("Insufficient examples: {} generated. Sizes:{} KB:\n{}".format(
len(all_questions),
[(tidx, len(final_questions)) for (tidx, final_questions) in
questions_per_theory.items()],
json.dumps(complete_kb, indent=2)
))
logger.debug("Skipping config: {} Total #questions: {}".format(config_idx,
len(all_questions)))
continue
# subsample questions to equalize #questions per theory
min_size = min([len(qa) for qa in questions_per_theory.values()])
subsampled_questions = []
for qa_per_theory in questions_per_theory.values():
subsampled_questions.extend(random.sample(qa_per_theory, min_size))
if len(subsampled_questions) < num_examples_per_group:
logger.warning("Skipping config: {} Sub-sampled questions: {}".format(
config_idx, len(subsampled_questions)))
continue
final_questions = random.sample(subsampled_questions, num_examples_per_group)
output_data["all_qa"] = all_questions
output_data["qa_pairs"] = final_questions
data.append(output_data)
group_idx += 1
# update distribution over configs
config_distribution[config_idx] -= len(self.configs)
if group_idx % 100 == 0:
logger.info("Created {} groups. Attempted: {}".format(group_idx,
num_attempts))
for theory_key, numqs in numqs_per_theory.items():
logger.debug("Theory: <{}> \n NumQs: [{}]".format(theory_key, numqs))
return data
if __name__ == '__main__':
args = parse_arguments()
dataset_configs = []
counter = 0
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
for filename in args.input_json.split(","):
counter += 1
output_dir = ""
# if output is a json file
if args.output.endswith(".json"):
output_dir = os.path.dirname(args.output)
else:
output_dir = args.output
if filename.endswith(".jsonnet"):
data = json.loads(_jsonnet.evaluate_file(filename))
# dump the configuration as a the source file
with open(output_dir + "/source{}.json".format(counter), "w") as output_fp:
json.dump(data, output_fp, indent=2)
dataset_config = DatasetBuildConfig(data)
dataset_configs.append(dataset_config)
else:
# dump the configuration as a the source file
copyfile(filename, output_dir + "/source{}.json".format(counter))
with open(filename, "r") as input_fp:
input_json = json.load(input_fp)
dataset_config = DatasetBuildConfig(input_json)
dataset_configs.append(dataset_config)
builder = DatasetBuilder(dataset_configs)
data = builder.build_dataset(num_groups=args.num_groups,
num_entities_per_group=args.entity_percent,
num_examples_per_group=args.num_examples_per_group)
num_examples = len(data)
print("Number of example groups: {}".format(num_examples))
if args.output.endswith(".json"):
print("Single file output name provided (--output file ends with .json)")
print("Dumping examples into a single file instead of train/dev/test splits")
with open(args.output, "w") as output_fp:
json.dump(data, output_fp, indent=4)
else:
shuffle(data)
train_ex = ceil(num_examples * 0.8)
dev_ex = ceil(num_examples * 0.1)
test_ex = num_examples - train_ex - dev_ex
print("Train/Dev/Test: {}/{}/{}".format(train_ex, dev_ex, test_ex))
files = [args.output + "/train.json", args.output + "/dev.json", args.output + "/test.json"]
datasets = [data[:train_ex], data[train_ex:train_ex + dev_ex], data[train_ex + dev_ex:]]
for file, dataset in zip(files, datasets):
with open(file, "w") as output_fp:
json.dump(dataset, output_fp, indent=4)
| CommaQA-main | commaqa/dataset/build_dataset.py |
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelWithLMHead
from transformers.generation_utils import SampleEncoderDecoderOutput
import logging
logger = logging.getLogger(__name__)
class LMGenerator:
def __init__(self, model_path, device=None,
generation_args={}, encoder_args={}, decoder_args={}):
if device is None:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
self.device = device
self.config = AutoConfig.from_pretrained(model_path)
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.model = AutoModelWithLMHead.from_pretrained(model_path, config=self.config).to(
self.device)
self.generation_args = generation_args
# always generate output with scores
self.generation_args["output_scores"] = True
self.generation_args["return_dict_in_generate"] = True
self.encoder_args = encoder_args
self.decoder_args = decoder_args
def generate_text_sequence(self, input_text):
"""
:param input_text:
:return: returns a sequence of tuples (string, score) where lower score is better
"""
encoded_prompt = self.tokenizer.encode(input_text, **self.encoder_args)
encoded_prompt = encoded_prompt.to(self.device)
generated_dict = self.model.generate(input_ids=encoded_prompt, **self.generation_args)
generated_seqs = generated_dict.sequences
if isinstance(generated_dict, SampleEncoderDecoderOutput):
logger.warning("No scores generated when sampled sequences")
generated_scores = [0] * len(generated_seqs)
else:
generated_scores = generated_dict.sequences_scores.tolist()
if len(generated_seqs.shape) > 2:
generated_seqs.squeeze_()
output_seq_score = []
for generated_sequence_idx, generated_seq in enumerate(generated_seqs):
generated_output = generated_seq.tolist()
text = self.tokenizer.decode(generated_output, **self.decoder_args)
            # negate the (log-prob) score so that the best sequence has the lowest value
output_seq_score.append((text, -generated_scores[generated_sequence_idx]))
# Ensure sorted output
return sorted(output_seq_score, key=lambda x: x[1])
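    # Minimal usage sketch (model path and generation settings are placeholders, not defaults):
    #   gen = LMGenerator("t5-small",
    #                     generation_args={"num_beams": 4, "num_return_sequences": 4,
    #                                      "max_length": 100},
    #                     encoder_args={"return_tensors": "pt"},
    #                     decoder_args={"skip_special_tokens": True})
    #   best_text, best_score = gen.generate_text_sequence("QC: who directed Inception?")[0]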
| CommaQA-main | commaqa/models/generator.py |
import json
import logging
from commaqa.dataset.utils import flatten_list, get_answer_indices, NOANSWER, \
valid_answer
logger = logging.getLogger(__name__)
class OperationExecuter:
def __init__(self, model_library, ignore_input_mismatch=False):
self.model_library = model_library
self.ignore_input_mismatch = ignore_input_mismatch
self.num_calls = 0
def execute_sub_operations(self, answers, operation):
operation_seq = operation.split("_")
for op in operation_seq[1:]:
if op == "flat":
answers = flatten_list(answers)
elif op == "unique":
if not isinstance(answers, list):
raise ValueError("SUBOP: unique can only be applied to list. "
"Input: {}".format(answers))
seen_objs = set()
output_answers = []
for item in answers:
# handle any structure: convert to str
item_str = json.dumps(item)
if item_str not in seen_objs:
output_answers.append(item)
seen_objs.add(item_str)
answers = output_answers
elif op == "keys":
answers = [x[0] for x in answers]
elif op == "values":
answers = [x[1] for x in answers]
else:
raise ValueError("SUBOP: Unknown sub-operation: {}".format(op))
return answers
def execute_select(self, operation, model, question, assignments):
indices = get_answer_indices(question)
for index in indices:
idx_str = "#" + str(index)
if idx_str not in assignments:
raise ValueError("SELECT: Can not perform select operation with input arg: {}"
" No assignments yet!".format(idx_str))
question = question.replace(idx_str, json.dumps(assignments[idx_str]))
answers, facts_used = self.model_library[model].ask_question(question)
if not valid_answer(answers):
return NOANSWER, []
answers = self.execute_sub_operations(answers, operation)
return answers, facts_used
def execute_project(self, operation, model, question, assignments):
indices = get_answer_indices(question)
if len(indices) > 1:
raise ValueError("PROJECT: Can not handle more than one answer idx: {} "
"for project: {}".format(indices, question))
if len(indices) == 0:
raise ValueError("PROJECT: Did not find any indices to project on " + str(question))
idx_str = "#" + str(indices[0])
if idx_str not in assignments:
raise ValueError("PROJECT: Can not perform project operation with input arg: {}"
" No assignments yet!".format(idx_str))
answers = []
facts_used = []
operation_seq = operation.split("_")
first_op = operation_seq[0]
if not isinstance(assignments[idx_str], list):
raise ValueError("PROJECT: Can not perform project operation on a non-list input: {}"
" Operation: {} Question: {}".format(assignments[idx_str],
operation, question))
for item in assignments[idx_str]:
# print(idx_str, item, assignments)
if isinstance(item, list) and len(item) == 2:
item = tuple(item)
if first_op == "projectValues":
# item should be a tuple
if not isinstance(item, tuple):
raise ValueError("PROJECT: Item: {} is not a tuple in assignments: {}. "
"Expected for projectValues".format(item,
assignments[idx_str]))
new_question = question.replace(idx_str, json.dumps(item[1]))
elif first_op == "projectKeys":
# item should be a tuple
if not isinstance(item, tuple):
raise ValueError("PROJECT: Item: {} is not a tuple in assignments: {}. "
"Expected for projectKeys".format(item,
assignments[idx_str]))
new_question = question.replace(idx_str, json.dumps(item[0]))
else:
if not isinstance(item, str):
raise ValueError("PROJECT: Item: {} is not a string in assigments: {}. "
"Expected for project".format(item, assignments[idx_str]))
new_question = question.replace(idx_str, item)
curr_answers, curr_facts = self.model_library[model].ask_question(new_question)
if not valid_answer(curr_answers):
return NOANSWER, []
facts_used.extend(curr_facts)
if first_op == "projectValues":
answers.append((item[0], curr_answers))
elif first_op == "projectKeys":
answers.append((curr_answers, item[1]))
elif first_op == "project":
answers.append((item, curr_answers))
answers = self.execute_sub_operations(answers, operation)
return answers, facts_used
def execute_filter(self, operation, model, question, assignments):
q_answer_indices = get_answer_indices(question)
if len(q_answer_indices) > 1:
# more than one answer index in the question, use the operation to identify the
# answer idx to operate over
op_answer_indices = get_answer_indices(operation)
if len(op_answer_indices) != 1:
raise ValueError("Need one answer idx to be specified in filter operation since "
"multiple specified in the question! "
"Operation: {} Question: {}".format(operation, question))
else:
operation_idx = op_answer_indices[0]
for idx in q_answer_indices:
if idx != operation_idx:
# modify question directly to include the non-operated answer id
idx_str = "#" + str(idx)
if idx_str not in assignments:
raise ValueError(
"FILTER: Can not perform filter operation with input arg: {} "
"No assignments yet!".format(idx_str))
# print(question, idx_str, assignments)
question = question.replace(idx_str, json.dumps(assignments[idx_str]))
elif len(q_answer_indices) == 1:
operation_idx = q_answer_indices[0]
else:
raise ValueError("FILTER: No answer index in question for filter"
"Operation: {} Question: {}".format(operation, question))
idx_str = "#" + str(operation_idx)
if idx_str not in assignments:
raise ValueError("FILTER: Can not perform filter operation with input arg: {}"
" No assignments yet!".format(idx_str))
if not isinstance(assignments[idx_str], list):
raise ValueError("FILTER: Can not perform filter operation on a non-list input: {}"
" Operation: {} Question: {}".format(assignments[idx_str],
operation, question))
answers = []
facts_used = []
operation_seq = operation.split("_")
first_op = operation_seq[0]
for item in assignments[idx_str]:
if isinstance(item, list) and len(item) == 2:
item = tuple(item)
if first_op.startswith("filterValues"):
# item should be a tuple
if not isinstance(item, tuple):
raise ValueError("FILTER: Item: {} is not a tuple in assignments: {}. "
"Expected for filterValues".format(item, assignments[idx_str]))
(key, value) = item
new_question = question.replace(idx_str, json.dumps(value))
elif first_op.startswith("filterKeys"):
if not isinstance(item, tuple):
raise ValueError("FILTER: Item: {} is not a tuple in assignments: {}. "
"Expected for filterKeys".format(item,
assignments[idx_str]))
(key, value) = item
new_question = question.replace(idx_str, json.dumps(key))
else:
if not isinstance(item, str):
raise ValueError("FILTER: Item: {} is not a string in assigments: {}. "
"Expected for filter".format(item, assignments[idx_str]))
new_question = question.replace(idx_str, item)
answer, curr_facts = self.model_library[model].ask_question(new_question)
if not valid_answer(answer):
return NOANSWER, []
if not isinstance(answer, str):
raise ValueError("FILTER: Incorrect question type for filter. Returned answer: {}"
" for question: {}".format(answer, new_question))
answer = answer.lower()
if answer == "yes" or answer == "1" or answer == "true":
answers.append(item)
facts_used.extend(curr_facts)
answers = self.execute_sub_operations(answers, operation)
return answers, facts_used
def execute_operation(self, operation, model, question, assignments):
self.num_calls += 1
if model not in self.model_library:
error_mesg = "Model: {} not found in " \
"model_library: {}".format(model, self.model_library.keys())
if not self.ignore_input_mismatch:
raise ValueError(error_mesg)
else:
logger.debug(error_mesg)
return NOANSWER, []
try:
if operation.startswith("select"):
return self.execute_select(operation, model, question, assignments)
elif operation.startswith("project"):
return self.execute_project(operation, model, question, assignments)
elif operation.startswith("filter"):
return self.execute_filter(operation, model, question, assignments)
else:
logger.debug(
"Can not execute operation: {}. Returning empty list".format(operation))
return NOANSWER, []
except ValueError as e:
if self.ignore_input_mismatch:
logger.debug("Can not execute operation: {} question: {} "
"with assignments: {}".format(operation, question, assignments))
logger.debug(str(e))
return NOANSWER, []
else:
raise e
| CommaQA-main | commaqa/execution/operation_executer.py |
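An illustrative call to `OperationExecuter` (a sketch: `kb_model` stands for any object exposing `ask_question(question) -> (answers, facts_used)`, and the predicate-style question is made up):

executer = OperationExecuter(model_library={"kblookup": kb_model})
answers, facts = executer.execute_operation(
    operation="select_unique",
    model="kblookup",
    question="acted_in(?, #1)",
    assignments={"#1": "Inception"})
# "select" substitutes the value assigned to #1, asks the model, and the
# "_unique" sub-operation then de-duplicates the returned list.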
import json
import logging
import re
from json import JSONDecodeError
from commaqa.execution.model_executer import ModelExecutor
logger = logging.getLogger(__name__)
class MathModel(ModelExecutor):
def __init__(self, **kwargs):
        self.func_regex = {
            r"is_greater\((.+) \| (.+)\)": self.greater_than,
            r"is_smaller\((.+) \| (.+)\)": self.smaller_than,
            r"diff\((.+) \| (.+)\)": self.diff,
            r"belongs_to\((.+) \| (.+)\)": self.belongs_to,
            r"max\((.+)\)": self.max,
            r"min\((.+)\)": self.min,
            r"count\((.+)\)": self.count
        }
super(MathModel, self).__init__(**kwargs)
@staticmethod
def get_number(num):
# can only extract numbers from strings
if isinstance(num, int) or isinstance(num, float):
return num
if not isinstance(num, str):
return None
try:
item = json.loads(num)
except JSONDecodeError:
logger.debug("Could not JSON parse: " + num)
return None
if isinstance(item, list):
if (len(item)) != 1:
logger.debug("List of values instead of single number in {}".format(num))
return None
item = item[0]
if isinstance(item, list):
logger.debug("Could not parse float from list within the list: {}".format(item))
return None
try:
return float(item)
except ValueError:
logger.debug("Could not parse float from: " + item)
return None
def max(self, groups):
if len(groups) != 1:
raise ValueError("Incorrect regex for max. "
"Did not find 1 group: {}".format(groups))
try:
entity = json.loads(groups[0])
if isinstance(entity, list):
numbers = []
for x in entity:
num = MathModel.get_number(x)
if num is None:
if self.ignore_input_mismatch:
logger.debug("Cannot parse as number: {}".format(x))
return None, []
else:
raise ValueError("Cannot parse as number: {} in {}".format(x, entity))
numbers.append(num)
else:
logger.debug("max can only handle list of entities. Arg: " + str(entity))
return None, []
except JSONDecodeError:
logger.error("Could not parse: {}".format(groups[0]))
raise
return max(numbers), []
def min(self, groups):
if len(groups) != 1:
raise ValueError("Incorrect regex for min. "
"Did not find 1 group: {}".format(groups))
try:
entity = json.loads(groups[0])
if isinstance(entity, list):
numbers = []
for x in entity:
num = MathModel.get_number(x)
if num is None:
if self.ignore_input_mismatch:
logger.debug("Cannot parse as number: {}".format(x))
return None, []
else:
raise ValueError("Cannot parse as number: {} in {}".format(x, entity))
numbers.append(num)
else:
logger.debug("min can only handle list of entities. Arg: " + str(entity))
return None, []
except JSONDecodeError:
logger.debug("Could not parse: {}".format(groups[0]))
if self.ignore_input_mismatch:
return None, []
else:
raise
return min(numbers), []
def count(self, groups):
if len(groups) != 1:
raise ValueError("Incorrect regex for max. "
"Did not find 1 group: {}".format(groups))
try:
entity = json.loads(groups[0])
if isinstance(entity, list):
return len(entity), []
else:
logger.debug("count can only handle list of entities. Arg: " + str(entity))
return None, []
except JSONDecodeError:
logger.debug("Could not parse: {}".format(groups[0]))
if self.ignore_input_mismatch:
return None, []
else:
raise
def belongs_to(self, groups):
if len(groups) != 2:
raise ValueError("Incorrect regex for belongs_to. "
"Did not find 2 groups: {}".format(groups))
try:
entity = json.loads(groups[0])
if isinstance(entity, list):
if len(entity) > 1:
logger.debug(
"belongs_to can only handle single entity as 1st arg. Args:" + str(groups))
return None, []
else:
entity = entity[0]
except JSONDecodeError:
entity = groups[0]
try:
ent_list = json.loads(groups[1])
except JSONDecodeError:
logger.debug("Could not JSON parse: " + groups[1])
raise
if not isinstance(ent_list, list):
logger.debug("belongs_to can only handle lists as 2nd arg. Args:" + str(groups))
return None, []
if entity in ent_list:
return "yes", []
else:
return "no", []
def diff(self, groups):
if len(groups) != 2:
raise ValueError("Incorrect regex for diff. "
"Did not find 2 groups: {}".format(groups))
num1 = MathModel.get_number(groups[0])
num2 = MathModel.get_number(groups[1])
if num1 is None or num2 is None:
if self.ignore_input_mismatch:
# can not compare with Nones
return None, []
else:
raise ValueError("Cannot answer diff with {}".format(groups))
if num2 > num1:
return round(num2 - num1, 3), []
else:
return round(num1 - num2, 3), []
def greater_than(self, groups):
if len(groups) != 2:
raise ValueError("Incorrect regex for greater_than. "
"Did not find 2 groups: {}".format(groups))
num1 = MathModel.get_number(groups[0])
num2 = MathModel.get_number(groups[1])
if num1 is None or num2 is None:
if self.ignore_input_mismatch:
# can not compare with Nones
return None, []
else:
raise ValueError("Cannot answer gt with {}".format(groups))
if num1 > num2:
return "yes", []
else:
return "no", []
def smaller_than(self, groups):
if len(groups) != 2:
raise ValueError("Incorrect regex for smaller_than. "
"Did not find 2 groups: {}".format(groups))
num1 = MathModel.get_number(groups[0])
num2 = MathModel.get_number(groups[1])
if num1 is None or num2 is None:
if self.ignore_input_mismatch:
# can not compare with Nones
return None, []
else:
raise ValueError("Cannot answer lt with {}".format(groups))
if num1 < num2:
return "yes", []
else:
return "no", []
def ask_question_predicate(self, question_predicate):
for regex, func in self.func_regex.items():
m = re.match(regex, question_predicate)
if m:
return func(m.groups())
raise ValueError("Could not parse: {}".format(question_predicate))
| CommaQA-main | commaqa/execution/math_model.py |
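A small sketch of how `MathModel.ask_question_predicate` dispatches on its regexes (the constructor arguments mirror `ModelExecutor`; `kblookup` is unused here and passed as None):

math_model = MathModel(predicate_language=[], model_name="math_special", kblookup=None)
print(math_model.ask_question_predicate("max([3, 7, 5])"))        # (7, [])
print(math_model.ask_question_predicate("is_greater(7 | 5)"))     # ('yes', [])
print(math_model.ask_question_predicate('count(["a", "b"])'))     # (2, [])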
MATH_MODEL = "math_special"
KBLOOKUP_MODEL = "kblookup"
| CommaQA-main | commaqa/execution/constants.py |
| CommaQA-main | commaqa/execution/__init__.py |
import logging
import re
from commaqa.configs.utils import execute_steps
from commaqa.dataset.utils import get_predicate_args, align_assignments, get_question_indices, \
valid_answer, NOANSWER
from commaqa.execution.constants import KBLOOKUP_MODEL
from commaqa.execution.operation_executer import OperationExecuter
logger = logging.getLogger(__name__)
class ModelExecutor:
def __init__(self, predicate_language, model_name, kblookup, ignore_input_mismatch=False):
self.predicate_language = predicate_language
self.model_name = model_name
self.kblookup = kblookup
self.ignore_input_mismatch = ignore_input_mismatch
self.num_calls = 0
def find_qpred_assignments(self, input_question, question_definition):
question_re = re.escape(question_definition)
varid_groupid = {}
qindices = get_question_indices(question_definition)
for num in qindices:
question_re = question_re.replace("\\$" + str(num),
"(?P<G" + str(num) + ">.+)")
varid_groupid["$" + str(num)] = "G" + str(num)
qmatch = re.match(question_re, input_question)
if qmatch:
assignments = {}
for varid, groupid in varid_groupid.items():
assignments[varid] = qmatch.group(groupid)
return assignments
return None
def ask_question(self, input_question):
self.num_calls += 1
qpred, qargs = get_predicate_args(input_question)
if qpred is not None:
return self.ask_question_predicate(question_predicate=input_question)
else:
answers, facts_used = None, None
for pred_lang in self.predicate_language:
for question in pred_lang.questions:
assignments = self.find_qpred_assignments(input_question=input_question,
question_definition=question)
if assignments is not None:
new_pred = pred_lang.predicate
for varid, assignment in assignments.items():
new_pred = new_pred.replace(varid, assignment)
answers, facts_used = self.ask_question_predicate(new_pred)
if valid_answer(answers):
# if this is valid answer, return it
return answers, facts_used
# if answers is not None:
# # some match found for the question but no valid answer.
# # Return the last matching answer.
# return answers, facts_used
if not self.ignore_input_mismatch:
raise ValueError("No matching question found for {} "
"in pred_lang:\n{}".format(input_question,
self.predicate_language))
else:
# no matching question. return NOANSWER
return NOANSWER, []
def ask_question_predicate(self, question_predicate):
qpred, qargs = get_predicate_args(question_predicate)
for pred_lang in self.predicate_language:
mpred, margs = get_predicate_args(pred_lang.predicate)
if mpred != qpred:
continue
if pred_lang.steps:
model_library = {KBLOOKUP_MODEL: self.kblookup}
kb_executor = OperationExecuter(model_library)
source_assignments = {x: x for x in qargs}
curr_assignment, assignment_map = align_assignments(
target_predicate=pred_lang.predicate,
source_predicate=question_predicate,
source_assignments=source_assignments
)
assignments = execute_steps(steps=pred_lang.steps,
input_assignments=curr_assignment,
executer=kb_executor,
pred_lang_config=None,
input_model=KBLOOKUP_MODEL)
if assignments:
last_answer = pred_lang.steps[-1].answer
return assignments[last_answer], assignments["facts_used"]
elif assignments is None:
# execution failed, try next predicate
continue
else:
logger.debug("No answer found for question: {}".format(question_predicate))
return [], []
else:
return self.kblookup.ask_question_predicate(question_predicate)
# No match found for predicate
error = "No matching predicate for {}".format(question_predicate)
if self.ignore_input_mismatch:
logger.debug(error)
return NOANSWER, []
else:
raise ValueError(error)
| CommaQA-main | commaqa/execution/model_executer.py |
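A sketch of the template matching performed by `find_qpred_assignments` (it assumes `get_question_indices` returns the $-variable indices of the template, e.g. {1} below; the constructor arguments are placeholders):

executor = ModelExecutor(predicate_language=[], model_name="moviekb", kblookup=None)
assignments = executor.find_qpred_assignments(
    input_question="Who directed Inception?",
    question_definition="Who directed $1?")
# assignments == {"$1": "Inception"}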
import logging
from commaqa.execution.constants import MATH_MODEL
from commaqa.execution.kblookup import KBLookup
from commaqa.execution.math_model import MathModel
from commaqa.execution.model_executer import ModelExecutor
logger = logging.getLogger(__name__)
def build_models(pred_lang_config, complete_kb, ignore_input_mismatch=False):
model_library = {}
kblookup = KBLookup(kb=complete_kb)
for model_name, configs in pred_lang_config.items():
if model_name == MATH_MODEL:
model = MathModel(predicate_language=configs,
model_name=model_name,
kblookup=kblookup,
ignore_input_mismatch=ignore_input_mismatch)
else:
model = ModelExecutor(predicate_language=configs,
model_name=model_name,
kblookup=kblookup,
ignore_input_mismatch=ignore_input_mismatch)
model_library[model_name] = model
return model_library
| CommaQA-main | commaqa/execution/utils.py |
import logging
from commaqa.dataset.utils import get_predicate_args
logger = logging.getLogger(__name__)
class KBLookup:
def __init__(self, kb):
self.kb = kb
def ask_question(self, question_predicate):
return self.ask_question_predicate(question_predicate)
def ask_question_predicate(self, question_predicate):
predicate, pred_args = get_predicate_args(question_predicate)
answers = []
facts_used = []
for fact in self.kb[predicate]:
fact_pred, fact_args = get_predicate_args(fact)
if len(pred_args) != len(fact_args):
raise ValueError(
"Mismatch in specification args {} and fact args {}".format(
pred_args, fact_args
))
mismatch = False
answer = ""
for p, f in zip(pred_args, fact_args):
# KB fact arg doesn't match the predicate arg
if p != "?" and p != f and p != "_":
mismatch = True
# predicate arg is a query, populate answer with fact arg
elif p == "?":
answer = f
# if all args matched, add answer
if not mismatch:
answers.append(answer)
facts_used.append(fact)
if len(answers) == 0:
logger.debug("No matching facts for {}. Facts:\n{}".format(question_predicate,
self.kb[predicate]))
        # If it's a boolean query, use the number of answers
if "?" not in pred_args:
if len(answers) == 0:
return "no", facts_used
else:
return "yes", facts_used
else:
return answers, facts_used
| CommaQA-main | commaqa/execution/kblookup.py |
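A minimal `KBLookup` sketch (it assumes `get_predicate_args` parses "pred(a, b)" into ("pred", ["a", "b"]) and treats "?" as the query slot; the facts are made up):

kb = {"directed_by": ["directed_by(Inception, Nolan)", "directed_by(Dunkirk, Nolan)"]}
lookup = KBLookup(kb=kb)
answers, facts = lookup.ask_question("directed_by(?, Nolan)")
# answers == ["Inception", "Dunkirk"]; facts holds the two matching KB facts
verdict, _ = lookup.ask_question("directed_by(Inception, Nolan)")
# no "?" in the arguments, so the result is "yes"/"no" depending on whether any fact matched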
import random
from copy import deepcopy
from typing import List, Dict
from commaqa.configs.entities_config import EntitiesConfig
from commaqa.dataset.utils import get_predicate_args
class PredicateConfig:
def __init__(self, pred_json):
self.pred_name = pred_json[0]
self.args = pred_json[1]["args"]
self.nary = pred_json[1].get("nary")
self.graph_type = pred_json[1].get("type")
self.language = pred_json[1].get("language")
def populate_chains(self, entity_config: EntitiesConfig) -> List[str]:
if len(self.args) != 2 or self.args[0] != self.args[1]:
raise ValueError("Chains KB can only be created with binary predicates having the same "
"arg types. Change args for {}".format(self.pred_name))
kb = []
entity_list = deepcopy(entity_config.entity_type_map[self.args[0]])
last_entity = None
while len(entity_list) > 0:
if last_entity is None:
last_entity = random.choice(entity_list)
entity_list.remove(last_entity)
next_entity = random.choice(entity_list)
entity_arr = [last_entity, next_entity]
fact = self.pred_name + "(" + ", ".join(entity_arr) + ")"
kb.append(fact)
last_entity = next_entity
entity_list.remove(last_entity)
return kb
def populate_trees(self, entity_config: EntitiesConfig) -> List[str]:
if len(self.args) != 2 or self.args[0] != self.args[1]:
raise ValueError("Trees KB can only be created with binary predicates having the same "
"arg types. Change args for {}".format(self.pred_name))
        if self.nary is None or "1" not in self.nary:
            raise ValueError("Nary needs to be set with at least one index set to 1 to produce "
"a tree structure kb. Pred: {}".format(self.pred_name))
kb = []
entity_list = deepcopy(entity_config.entity_type_map[self.args[0]])
# create the root node
open_entities = random.sample(entity_list, 1)
entity_list.remove(open_entities[0])
unique_idx = self.nary.index("1")
while len(entity_list) > 0:
new_open_entities = []
# for each open node
for open_entity in open_entities:
# select children
if len(entity_list) > 2:
children = random.sample(entity_list, 2)
else:
children = entity_list
# add edge between child and open node
for child in children:
if unique_idx == 1:
entity_arr = [open_entity, child]
else:
entity_arr = [child, open_entity]
# remove child from valid nodes to add
entity_list.remove(child)
# add it to the next set of open nodes
new_open_entities.append(child)
fact = self.pred_name + "(" + ", ".join(entity_arr) + ")"
kb.append(fact)
open_entities = new_open_entities
return kb
def populate_kb(self, entity_config: EntitiesConfig) -> List[str]:
if self.graph_type == "chain":
return self.populate_chains(entity_config)
elif self.graph_type == "tree":
            return self.populate_trees(entity_config)
elif self.graph_type is not None:
raise ValueError("Unknown graph type: {}".format(self.graph_type))
if self.nary is None:
raise ValueError("At least one of nary or type needs to be set for predicate"
" {}".format(self.pred_name))
return self.populate_relations(entity_config)
def populate_relations(self, entity_config: EntitiesConfig) -> List[str]:
kb = set()
arg_counts = []
arg_pos_list = []
for arg in self.args:
if arg not in entity_config.entity_type_map:
raise ValueError("No entity list defined for {}."
"Needed for predicate: {}".format(arg, self.pred_name))
arg_counts.append(len(entity_config.entity_type_map[arg]))
arg_pos_list.append(deepcopy(entity_config.entity_type_map[arg]))
max_attempts = 2 * max(arg_counts)
orig_arg_pos_list = deepcopy(arg_pos_list)
while max_attempts > 0:
entity_arr = []
max_attempts -= 1
for idx in range(len(self.args)):
ent = random.choice(arg_pos_list[idx])
# assume relations can never be reflexive
if ent in entity_arr:
entity_arr = None
break
entity_arr.append(ent)
if entity_arr is None:
continue
for idx, ent in enumerate(entity_arr):
if self.nary[idx] == "1":
arg_pos_list[idx].remove(ent)
if len(arg_pos_list[idx]) == 0:
max_attempts = 0
elif self.nary[idx] == "n":
arg_pos_list[idx].remove(ent)
# once all entities have been used once, reset to the original list
if len(arg_pos_list[idx]) == 0:
arg_pos_list[idx] = deepcopy(orig_arg_pos_list[idx])
fact = self.pred_name + "(" + ", ".join(entity_arr) + ")"
if fact not in kb:
kb.add(fact)
return list(kb)
def generate_kb_fact_map(self, kb: Dict[str, List[str]]) -> Dict[str, str]:
kb_fact_map = {}
for kb_item in kb[self.pred_name]:
if self.language:
pred, args = get_predicate_args(kb_item)
sentence = self.language if isinstance(self.language, str) \
else random.choice(self.language)
for argidx, arg in enumerate(args):
sentence = sentence.replace("$" + str(argidx + 1), arg)
else:
pred_name, fields = get_predicate_args(kb_item)
if len(fields) != 2:
sentence = kb_item
else:
sentence = fields[0] + " " + pred_name + " " + " ".join(fields[1:])
kb_fact_map[kb_item] = sentence + "."
return kb_fact_map
def generate_context(self, kb: Dict[str, List[str]]) -> str:
kb_fact_map = self.generate_kb_fact_map(kb)
return " ".join(kb_fact_map.values())
| CommaQA-main | commaqa/configs/predicate_config.py |
from commaqa.configs.entities_config import EntitiesConfig
from commaqa.configs.predicate_config import PredicateConfig
from commaqa.configs.predicate_language_config import PredicateLanguageConfig
from commaqa.configs.theory_config import TheoryConfig
class DatasetBuildConfig:
def __init__(self, input_json):
self.version = input_json["version"]
self.entities = EntitiesConfig(input_json["entities"])
self.predicates = [PredicateConfig(x) for x in input_json["predicates"].items()]
self.theories = [TheoryConfig(x) for x in input_json["theories"]]
self.pred_lang_config = PredicateLanguageConfig(input_json["predicate_language"])
| CommaQA-main | commaqa/configs/dataset_build_config.py |
import random
from math import ceil
from typing import Dict, Any, List
class EntitiesConfig:
def __init__(self, entities_json: Dict[str, List[str]]):
self.entity_type_map = entities_json
def subsample(self, num_ents):
new_ent_map = {}
for etype, elist in self.entity_type_map.items():
# if fraction passed, sample ratio
if num_ents <= 1:
new_ent_map[etype] = random.sample(elist, ceil(len(elist) * num_ents))
else:
new_ent_map[etype] = random.sample(elist, num_ents)
return EntitiesConfig(new_ent_map)
def __getitem__(self, item: str):
return self.entity_type_map[item]
| CommaQA-main | commaqa/configs/entities_config.py |
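Usage sketch for `EntitiesConfig.subsample`: values at or below 1 are treated as a fraction of each entity list, larger values as an absolute sample size (the entity names are made up):

ents = EntitiesConfig({"movie": ["Inception", "Dunkirk", "Tenet", "Memento"]})
half = ents.subsample(0.5)   # keeps ceil(4 * 0.5) = 2 randomly sampled movies
two = ents.subsample(2)      # keeps exactly 2 randomly sampled movies
print(half["movie"], two["movie"])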
import json
import logging
import random
import string
from typing import Dict, List
from commaqa.configs.step_config import StepConfig
from commaqa.configs.utils import execute_steps
from commaqa.dataset.utils import dict_product, align_assignments, nonempty_answer, is_question_var
from commaqa.execution.model_executer import ModelExecutor
from commaqa.execution.operation_executer import OperationExecuter
logger = logging.getLogger(__name__)
class TheoryConfig:
def __init__(self, theory_json):
self.steps = [StepConfig(x) for x in theory_json["steps"]]
self.questions = theory_json.get("questions")
self.init = theory_json["init"]
def to_json(self):
return {
"steps": [x.to_json() for x in self.steps],
"questions": self.questions,
"init": self.init
}
def to_str(self):
return json.dumps(self.to_json())
def get_possible_assignments(self, entities: Dict[str, List[str]],
model_library: Dict[str, ModelExecutor],
pred_lang_config):
assignment_dict = {}
for key, ent_type in self.init.items():
assignment_dict[key] = entities[ent_type]
possible_assignments = dict_product(assignment_dict)
# assume no duplicates in assignments
possible_assignments = [assignment for assignment in possible_assignments
if len(set(assignment.values())) == len(assignment.values())]
op_executor = OperationExecuter(model_library=model_library)
output_assignments = []
for curr_assignment in possible_assignments:
# print(self.to_json())
new_assignment = execute_steps(steps=self.steps, input_assignments=curr_assignment,
executer=op_executor, pred_lang_config=pred_lang_config,
input_model=None)
if new_assignment:
output_assignments.append(new_assignment)
if len(output_assignments) < 2:
logger.debug("Few assignments: {} found for theory: {} given kb:\n {}".format(
json.dumps(output_assignments, indent=2), self.to_str(),
json.dumps(list(model_library.values())[0].kblookup.kb, indent=2)))
return output_assignments
def create_decompositions(self, pred_lang_config, assignment):
decomposition = []
for step in self.steps:
valid_configs = pred_lang_config.find_valid_configs(step.question)
if len(valid_configs) == 0:
raise ValueError("No predicate config matches {}".format(step.question))
# # model less operation
# model = "N/A"
# print(step.question)
# question = step.question
# for k, v in assignment.items():
# if k.startswith("$"):
# question = question.replace(k, v)
# else:
lang_conf = random.choice(valid_configs)
model = lang_conf.model
question = random.choice(lang_conf.questions)
_, assignment_map = align_assignments(lang_conf.predicate, step.question,
assignment)
for lang_pred_arg, question_pred_arg in assignment_map.items():
if is_question_var(question_pred_arg):
question = question.replace(lang_pred_arg, assignment[question_pred_arg])
else:
# replace the question idx with the appropriate answer idx in the theory
question = question.replace(lang_pred_arg, question_pred_arg)
answer = assignment[step.answer]
decomposition.append({
"m": model,
"q": question,
"a": answer,
"op": step.operation
})
return decomposition
def create_questions(self, entities: Dict[str, List[str]], pred_lang_config, model_library):
possible_assignments = self.get_possible_assignments(entities=entities,
pred_lang_config=pred_lang_config,
model_library=model_library)
qa = []
for assignment in possible_assignments:
decomposition = self.create_decompositions(pred_lang_config=pred_lang_config,
assignment=assignment)
# move facts_used out of the assignment structure
facts_used = list(set(assignment["facts_used"]))
del assignment["facts_used"]
question = random.choice(self.questions)
answer = assignment[self.steps[-1].answer]
for p, f in assignment.items():
if p in question:
question = question.replace(p, f)
if decomposition[-1]["a"] != answer:
raise ValueError("Answer to the last question in decomposition not the same as the "
"final answer!.\n Decomposition:{} \n Question: {} \n Answer: {}"
"".format(decomposition, question, answer))
# ignore questions with no valid answers
if not nonempty_answer(answer):
continue
# ignore questions with too many answers
if isinstance(answer, list) and len(answer) > 5:
continue
qa.append({
"question": question,
"answer": answer,
"assignment": assignment,
"config": self.to_json(),
"decomposition": decomposition,
"facts_used": facts_used,
"id": "".join([random.choice(string.hexdigits) for n in range(16)]).lower()
})
return qa
| CommaQA-main | commaqa/configs/theory_config.py |
| CommaQA-main | commaqa/configs/__init__.py |
import logging
from copy import deepcopy
from typing import List, Dict
from commaqa.configs.predicate_language_config import PredicateLanguageConfig
from commaqa.configs.step_config import StepConfig
from commaqa.dataset.utils import is_question_var, nonempty_answer
from commaqa.execution.operation_executer import OperationExecuter
logger = logging.getLogger(__name__)
def execute_steps(steps: List[StepConfig], input_assignments: Dict[str, str],
executer: OperationExecuter, pred_lang_config: PredicateLanguageConfig = None,
input_model: str = None):
curr_assignment = deepcopy(input_assignments)
if "facts_used" not in curr_assignment:
curr_assignment["facts_used"] = []
for step in steps:
if input_model is None:
model = pred_lang_config.find_model(step.question)
if model is None:
raise ValueError("No model found for {}".format(step.question))
else:
model = input_model
new_question = step.question
for k, v in curr_assignment.items():
            # only replace question variables ($1, $2); answer variables (#1, #2) are handled by the executer
if is_question_var(k):
new_question = new_question.replace(k, v)
answers, curr_facts = executer.execute_operation(operation=step.operation,
model=model,
question=new_question,
assignments=curr_assignment)
if answers is None:
# execution failed
return None
elif nonempty_answer(answers):
curr_assignment[step.answer] = answers
curr_assignment["facts_used"].extend(curr_facts)
else:
logger.debug("Stopped Execution. Empty answer: {}\n"
"Question: {}\n Step: {}\n Assignment: {}".format(
answers, new_question, step.to_json(), curr_assignment))
return {}
return curr_assignment
| CommaQA-main | commaqa/configs/utils.py |
class StepConfig:
def __init__(self, step_json):
self.operation = step_json["operation"]
self.question = step_json["question"]
self.answer = step_json["answer"]
def to_json(self):
return self.__dict__
| CommaQA-main | commaqa/configs/step_config.py |
from commaqa.configs.step_config import StepConfig
from commaqa.dataset.utils import get_predicate_args
class ModelQuestionConfig:
def __init__(self, config_json):
self.steps = [StepConfig(x) for x in
config_json["steps"]] if "steps" in config_json else []
self.questions = config_json.get("questions")
self.init = config_json["init"]
self.model = config_json["model"]
self.predicate = config_json["predicate"]
def to_json(self):
return {
"steps": [x.to_json() for x in self.steps],
"questions": self.questions,
"init": self.init,
"model": self.model,
"predicate": self.predicate
}
class PredicateLanguageConfig:
def __init__(self, pred_lang_config):
# import json
# print(json.dumps(pred_lang_config, indent=2))
self.predicate_config = {}
self.model_config = {}
for predicate, config in pred_lang_config.items():
config["predicate"] = predicate
question_config = ModelQuestionConfig(config)
self.predicate_config[predicate] = question_config
model = config["model"]
if model not in self.model_config:
self.model_config[model] = []
self.model_config[model].append(question_config)
def model_config_as_json(self):
return {model: [config.to_json() for config in configs]
for model, configs in self.model_config.items()}
def find_model(self, question_predicate):
matching_configs = self.find_valid_configs(question_predicate)
if len(matching_configs) == 0:
return None
matching_models = {x.model for x in matching_configs}
if len(matching_models) != 1:
raise ValueError("Unexpected number of matching models: {} for {}. "
"Expected one model".format(matching_models, question_predicate))
return matching_models.pop()
def find_valid_configs(self, question_predicate):
qpred, qargs = get_predicate_args(question_predicate)
matching_configs = []
for key, config in self.predicate_config.items():
config_qpred, config_qargs = get_predicate_args(key)
if config_qpred == qpred:
assert len(qargs) == len(config_qargs), \
"{} {}\n{}".format(qargs, config_qargs, question_predicate)
mismatch = False
for qarg, cqarg in zip(qargs, config_qargs):
if (cqarg == "?") ^ (qarg == "?"):
mismatch = True
if not mismatch:
matching_configs.append(config)
return matching_configs
| CommaQA-main | commaqa/configs/predicate_language_config.py |
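A sketch of one predicate-language entry and how `find_model` resolves it (the predicate, question, and model name are hypothetical; `get_predicate_args` is assumed to split "pred(a, ?)" into its name and argument list):

pred_lang = {
    "directed_by($1, ?)": {
        "model": "moviekb",
        "questions": ["Who directed $1?"],
        "init": {"$1": "movie"}
    }
}
plc = PredicateLanguageConfig(pred_lang)
print(plc.find_model("directed_by(Inception, ?)"))  # "moviekb"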
from typing import Dict
from commaqa.inference.dataset_readers import HotpotQAReader, DatasetReader, DropReader
from commaqa.inference.participant_execution import ExecutionParticipant
from commaqa.inference.participant_qgen import LMGenParticipant, RandomGenParticipant
from commaqa.inference.participant_util import DumpChainsParticipant
MODEL_NAME_CLASS = {
"lmgen": LMGenParticipant,
"randgen": RandomGenParticipant,
"dump_chains": DumpChainsParticipant,
"operation_executer": ExecutionParticipant
}
READER_NAME_CLASS: Dict[str, DatasetReader] = {
"hotpot": HotpotQAReader,
"drop": DropReader
}
| CommaQA-main | commaqa/inference/constants.py |
| CommaQA-main | commaqa/inference/__init__.py |
import logging
import math
import random
import re
from itertools import product, permutations
from commaqa.inference.model_search import ParticipantModel
from commaqa.inference.utils import get_sequence_representation, stem_filter_tokenization, BLANK, \
stop_words_set
from commaqa.models.generator import LMGenerator
logger = logging.getLogger(__name__)
class LMGenParticipant(ParticipantModel):
def __init__(self, scale_by_step=1, add_eos=False, add_prefix="", next_model="execute",
end_state="[EOQ]", **kwargs):
self.scale_by_step = scale_by_step
self.add_eos = add_eos
self.add_prefix = add_prefix
self.next_model = next_model
self.end_state = end_state
self.num_calls = 0
self.lmgen = LMGenerator(**kwargs)
def return_model_calls(self):
return {"lmgen": self.num_calls}
def query(self, state, debug=False):
"""The main function that interfaces with the overall search and
model controller, and manipulates the incoming data.
        :param state: the state of the controller and model flow; its data field
            carries the mutable question/answer sequences
        :type state: commaqa.inference.model_search.SearchState
:rtype: list
:raises: ValueError
"""
## first checks state of `json_input` to figure out how to format things
## the first question
data = state.data
question_seq = data["question_seq"]
answer_seq = data["answer_seq"]
gen_seq = get_sequence_representation(origq=data["query"], question_seq=question_seq,
answer_seq=answer_seq)
if self.add_prefix:
gen_seq = self.add_prefix + gen_seq
if self.add_eos:
gen_seq = gen_seq + "</s>"
if debug: print("<GEN>: %s" % gen_seq)
## eventual output
new_states = []
## go through generated questions
output_seq_scores = self.lmgen.generate_text_sequence(gen_seq)
self.num_calls += 1
observed_outputs = set()
for (output_seq, score) in output_seq_scores:
output = output_seq.strip()
# catch potentially spurious duplicates
if output in observed_outputs:
continue
else:
observed_outputs.add(output)
# copy state
new_state = state.copy()
## add new question to question_seq
new_state.data["question_seq"].append(output)
if output == self.end_state:
new_state.next = self.end_state
else:
new_state.next = self.next_model
# lower is better, same as the scores returned by generate_text_sequence
assert score >= 0, "Score from generation assumed to be +ve. Got: {}! Needs to be " \
"+ve to ensure monotonically increasing scores as expected by the" \
" search.".format(score)
new_state._score += score
new_state.data["score_seq"].append(score)
new_state.data["command_seq"].append("gen")
## mark the last output
new_state.last_output = output
new_states.append(new_state)
##
return new_states
class RandomGenParticipant(ParticipantModel):
def __init__(self, operations_file, model_questions_file, sample_operations, sample_questions,
max_steps=6, next_model="execute", topk_questions=True, end_state="[EOQ]"):
self.operations = self.load_operations(operations_file)
self.model_questions = self.load_model_questions(model_questions_file)
self.sample_operations = sample_operations
self.sample_questions = sample_questions
self.end_state = end_state
self.next_model = next_model
self.max_steps = max_steps
self.num_calls = 0
self.topk_questions = topk_questions
def return_model_calls(self):
return {"randomgen": self.num_calls}
def load_operations(self, operations_file):
with open(operations_file, "r") as input_fp:
ops = [x.strip() for x in input_fp.readlines()]
return ops
def load_model_questions(self, model_questions_file):
model_question_list = {}
with open(model_questions_file, "r") as input_fp:
for line in input_fp:
fields = line.strip().split("\t")
model = fields[0]
if model not in model_question_list:
model_question_list[model] = []
for question in fields[1:]:
question_entities = self.find_question_entities(question)
for q_ent in question_entities:
question = question.replace(q_ent, BLANK)
model_question_list[model].append(question)
# get unique questions
output_model_questions = []
for model_key, question_list in model_question_list.items():
unique_questions = list(set(question_list))
for q in unique_questions:
output_model_questions.append((model_key, q))
print(model_key, q)
logger.info("{} Questions in {} language".format(len(unique_questions),
model_key))
return output_model_questions
def select(self, population, sample_size_or_prop, samplek=True):
if sample_size_or_prop >= 1:
if samplek:
return random.sample(population, k=sample_size_or_prop)
else:
return population[:sample_size_or_prop]
else:
if samplek:
return random.sample(population, k=math.ceil(sample_size_or_prop * len(population)))
else:
return population[:math.ceil(sample_size_or_prop * len(population))]
def build_end_state(self, state):
new_state = state.copy()
output = self.end_state
new_state.data["question_seq"].append(output)
new_state.next = self.end_state
new_state.data["score_seq"].append(0)
new_state.data["command_seq"].append("gen")
## mark the last output
new_state.last_output = output
return new_state
def score_question(self, sub_question, complex_question):
sub_question_tokens = set(stem_filter_tokenization(sub_question))
if len(sub_question_tokens) == 0:
logger.debug("No tokens found in sub_question: {}!!".format(sub_question))
return 0.0
complex_question_tokens = set(stem_filter_tokenization(complex_question))
overlap = sub_question_tokens.intersection(complex_question_tokens)
# only penalized for sub-question length
return len(overlap) / len(sub_question_tokens)
def find_question_entities(self, origq):
entities = []
for m in re.finditer("\\b([A-Z]\w+)", origq):
if m.group(1).lower() not in stop_words_set:
entities.append(m.group(1))
for m in re.finditer("([0-9\.]+)", origq):
entities.append(m.group(1))
return entities
def replace_blanks(self, blanked_str, fillers):
num_blanks = blanked_str.count(BLANK)
output_strings = []
if num_blanks > 0:
filler_permutations = permutations(fillers, num_blanks)
for permutation in filler_permutations:
new_str = blanked_str
for filler_val in permutation:
new_str = new_str.replace(BLANK, filler_val, 1)
output_strings.append(new_str)
else:
output_strings = [blanked_str]
return output_strings
def query(self, state, debug=False):
data = state.data
num_steps = len(data["question_seq"])
# push for one extra step so that all shorter chains have been explored
if num_steps > self.max_steps:
return [self.build_end_state(state)]
origq = data["query"]
answer_strs = []
if num_steps == 0:
# hard-coded to only consider select in the first step
ops = ["select"]
else:
for x in range(num_steps):
answer_strs.append("#" + str(x + 1))
operations_pool = []
for op in self.operations:
operations_pool.extend(self.replace_blanks(op, answer_strs))
ops = self.select(operations_pool, self.sample_operations)
question_entities = self.find_question_entities(origq)
# hack to only use a filler in one of the steps
potential_fillers = question_entities + answer_strs
filler_pool = []
for filler in potential_fillers:
found_match = False
for question in state.data["subquestion_seq"]:
if filler in question:
found_match = True
break
if not found_match:
filler_pool.append(filler)
questions_pool = [(m, newq) for (m, q) in self.model_questions
for newq in self.replace_blanks(q, filler_pool)]
if self.topk_questions:
sorted_model_questions = sorted(questions_pool, reverse=True,
key=lambda x: self.score_question(x[1], origq))
model_questions = self.select(sorted_model_questions, self.sample_questions,
samplek=False)
else:
model_questions = self.select(questions_pool, self.sample_questions, samplek=True)
op_model_qs_prod = product(ops, model_questions)
## eventual output
new_states = []
self.num_calls += 1
for (op, model_qs) in op_model_qs_prod:
(model, question) = model_qs
# no point repeating the exact same question
if question in state.data["subquestion_seq"]:
continue
# copy state
new_state = state.copy()
output = "({}) [{}] {}".format(op, model, question)
## add new question to question_seq
new_state.data["question_seq"].append(output)
new_state.next = self.next_model
new_state.data["score_seq"].append(1)
new_state._score += 1
new_state.data["command_seq"].append("gen")
## mark the last output
new_state.last_output = output
new_states.append(new_state)
##
# if len(data["question_seq"]) > 0:
# new_states.append(self.build_end_state(state))
return new_states
| CommaQA-main | commaqa/inference/participant_qgen.py |
import os
from typing import List, Dict
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
stop_words_set = set(stopwords.words('english'))
QUESTION_MARKER = " Q: "
COMPQ_MARKER = " QC: "
SIMPQ_MARKER = " QS: "
INTERQ_MARKER = " QI: "
ANSWER_MARKER = " A: "
EOQ_MARKER = "[EOQ]"
LIST_JOINER = " + "
BLANK = "__"
WH_WORDS = set(["who", "what", "where", "how", "why", "when", "which"])
def get_sequence_representation(origq: str, question_seq: List[str], answer_seq: List[str]):
ret_seq = COMPQ_MARKER + origq
if len(question_seq) != len(answer_seq):
raise ValueError("Number of generated questions and answers should match before"
"question generation. Qs: {} As: {}".format(question_seq, answer_seq))
for aidx in range(len(answer_seq)):
ret_seq += INTERQ_MARKER
ret_seq += question_seq[aidx]
ret_seq += ANSWER_MARKER + answer_seq[aidx]
ret_seq += SIMPQ_MARKER
return ret_seq
def tokenize_str(input_str):
return word_tokenize(input_str)
def stem_tokens(token_arr):
return [stemmer.stem(token) for token in token_arr]
def filter_stop_tokens(token_arr):
return [token for token in token_arr if token not in stop_words_set]
def stem_filter_tokenization(input_str):
return stem_tokens(filter_stop_tokens(tokenize_str(input_str.lower())))
# functions borrowed from AllenNLP to parse JSONNET with env vars
def get_environment_variables() -> Dict[str, str]:
"""
Wraps `os.environ` to filter out non-encodable values.
"""
return {key: value for key, value in os.environ.items() if _is_encodable(value)}
def _is_encodable(value: str) -> bool:
"""
We need to filter out environment variables that can't
be unicode-encoded to avoid a "surrogates not allowed"
error in jsonnet.
"""
# Idiomatically you'd like to not check the != b""
# but mypy doesn't like that.
return (value == "") or (value.encode("utf-8", "ignore") != b"")
| CommaQA-main | commaqa/inference/utils.py |
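A quick illustration of the sequence representation built for the question generator (the question and answers below are placeholders):

seq = get_sequence_representation(
    origq="Who acted in the movie directed by Nolan?",
    question_seq=["(select) [moviekb] Which movies were directed by Nolan?"],
    answer_seq=['["Inception"]'])
# seq == ' QC: Who acted in the movie directed by Nolan?'
#        ' QI: (select) [moviekb] Which movies were directed by Nolan? A: ["Inception"] QS: '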
import argparse
import json
import logging
import os
import _jsonnet
from commaqa.inference.constants import MODEL_NAME_CLASS, READER_NAME_CLASS
from commaqa.inference.dataset_readers import DatasetReader
from commaqa.inference.model_search import (
ModelController,
BestFirstDecomposer, QuestionGeneratorData)
from commaqa.inference.utils import get_environment_variables
logger = logging.getLogger(__name__)
def parse_arguments():
    arg_parser = argparse.ArgumentParser(description='Run configurable decomposition inference over a QA dataset')
arg_parser.add_argument('--input', type=str, required=True, help="Input QA file")
arg_parser.add_argument('--output', type=str, required=True, help="Output file")
arg_parser.add_argument('--config', type=str, required=True, help="Model configs")
arg_parser.add_argument('--reader', type=str, required=True, help="Dataset reader",
choices=READER_NAME_CLASS.keys())
arg_parser.add_argument('--debug', action='store_true', default=False,
help="Debug output")
arg_parser.add_argument('--demo', action='store_true', default=False,
help="Demo mode")
arg_parser.add_argument('--threads', default=1, type=int,
help="Number of threads (use MP if set to >1)")
return arg_parser.parse_args()
def load_decomposer(config_map):
print("loading participant models (might take a while)...")
model_map = {}
for key, value in config_map["models"].items():
class_name = value.pop("name")
if class_name not in MODEL_NAME_CLASS:
raise ValueError("No class mapped to model name: {} in MODEL_NAME_CLASS:{}".format(
class_name, MODEL_NAME_CLASS))
model = MODEL_NAME_CLASS[class_name](**value)
if key in config_map:
raise ValueError("Overriding key: {} with value: {} using instantiated model of type:"
" {}".format(key, config_map[key], class_name))
config_map[key] = model.query
model_map[key] = model
## instantiating
controller = ModelController(config_map, QuestionGeneratorData)
decomposer = BestFirstDecomposer(controller)
return decomposer, model_map
if __name__ == "__main__":
args = parse_arguments()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
if args.config.endswith(".jsonnet"):
ext_vars = get_environment_variables()
logger.info("Parsing config with external variables: {}".format(ext_vars))
config_map = json.loads(_jsonnet.evaluate_file(args.config, ext_vars=ext_vars))
else:
with open(args.config, "r") as input_fp:
config_map = json.load(input_fp)
decomposer, model_map = load_decomposer(config_map)
reader: DatasetReader = READER_NAME_CLASS[args.reader]()
print("Running decomposer on examples")
qid_answer_chains = []
if args.demo:
while True:
qid = input("QID: ")
question = input("Question: ")
example = {
"qid": qid,
"query": question,
"question": question
}
final_state, other_states = decomposer.find_answer_decomp(example, debug=args.debug)
if final_state is None:
print("FAILED!")
else:
if args.debug:
for other_state in other_states:
data = other_state.data
for q, a, s in zip(data["question_seq"], data["answer_seq"],
data["score_seq"]):
print("Q: {} A: {} S:{}".format(q, a, s), end='\t')
print("Score: " + str(other_state._score))
data = final_state._data
chain = example["question"]
for q, a in zip(data["question_seq"], data["answer_seq"]):
chain += " Q: {} A: {}".format(q, a)
chain += " S: " + str(final_state._score)
print(chain)
else:
if args.threads > 1:
import multiprocessing as mp
mp.set_start_method("spawn")
with mp.Pool(args.threads) as p:
qid_answer_chains = p.map(decomposer.return_qid_prediction,
reader.read_examples(args.input))
else:
for example in reader.read_examples(args.input):
qid_answer_chains.append(
decomposer.return_qid_prediction(example, debug=args.debug))
num_call_metrics = {}
for participant in model_map.values():
for model, num_calls in participant.return_model_calls().items():
print("Number of calls to {}: {}".format(model, num_calls))
num_call_metrics[model] = num_calls
metrics_json = {
"num_calls": num_call_metrics
}
metrics_file = os.path.join(os.path.dirname(args.output), "metrics.json")
with open(metrics_file, "w") as output_fp:
json.dump(metrics_json, output_fp)
predictions = {x[0]: x[1] for x in qid_answer_chains}
with open(args.output, "w") as output_fp:
json.dump(predictions, output_fp)
chains = [x[2] for x in qid_answer_chains]
ext_index = args.output.rfind(".")
chain_tsv = args.output[:ext_index] + "_chains.tsv"
with open(chain_tsv, "w") as output_fp:
for chain in chains:
output_fp.write(chain + "\n")
| CommaQA-main | commaqa/inference/configurable_inference.py |
import copy
import heapq
import json
import logging
class BasicDataInstance(dict):
_REQUIRED_ATTRS = set([])
def __init__(self, input_data):
dict.__init__({})
self.update(input_data)
for item in type(self)._REQUIRED_ATTRS:
if item not in self:
self[item] = []
class QuestionGeneratorData(BasicDataInstance):
_REQUIRED_ATTRS = set([
"question_seq",
"subquestion_seq",
"answer_seq",
"command_seq",
"model_seq",
"operation_seq",
"score_seq",
"para_seq"
])
class ParticipantModel(object):
"""Base model in this case for coordinating different models. Provides a general
class to structure all contributing models (in this case, by defining a single
function `query`, which is the single method that is called for each model).
"""
def query(self, state, debug=False):
"""The main function that interfaces with the overall search and
model controller, and manipulates the incoming data.
:param state: the state of controller and model flow.
        :type state: commaqa.inference.model_search.SearchState
:rtype: list
"""
raise NotImplementedError("Must implement to work inside of controller!")
def return_model_calls(self):
"""
:return: a dict of <model_name, number of calls> made by this participant
"""
raise NotImplementedError("Must implement to work inside of controller!")
class ModelController(object):
"""This class is a `ModelController` that takes multiple (arbitrary)
models and a control specification of how to interface the different
models (which can be thought of as a kind of state graph). For example
"""
def __init__(self, model_list,
data_class=BasicDataInstance):
"""Create an instance of a ComplexModel
:param model_list: a list of models with identifiers and
control flow.
:type model_list: dict
"""
if "start_state" not in model_list:
raise ValueError('Must specify start state')
if "end_state" not in model_list:
raise ValueError('Must specify end state')
self.model_list = model_list
self.data_class = data_class
def execute(self, state, debug=False):
"""Executes a command and query
:param state: a given state in search
:type state: SearchState (defined here)
:returns: a list of output
:rtype: list
"""
if state.next not in self.model_list:
self.logger.error("Can not handle next state: " + state.next)
return []
try:
model_func = self.model_list[state.next]
model_output = model_func(state, debug=debug)
if not isinstance(model_output, list):
return [model_output]
return model_output
except Exception as e:
self.logger.error(e, exc_info=True)
raise ValueError('Error caught during model execution: %s' % e)
def init_data(self, data_instance):
"""Create an initialized version of the data object
        that will get passed around.
:param data_instance: any arbitrary piece of data.
:rtype: self.data_class
"""
return self.data_class(data_instance)
def command_model(self, command):
return "command := %s" % \
(self.model_list[command].__name__)
def key_val(self, key):
return self.model_list["keys"][key]
@property
def start_state(self):
return self.model_list["start_state"]
@property
def end_state(self):
return self.model_list["end_state"]
@property
def logger(self):
"""Returns a logger instance
"""
level = '.'.join([__name__, type(self).__name__])
return logging.getLogger(level)
## utility class for controlling and recording search state
class SearchState(object):
"""Tracks and records the state of a given search.
"""
def __init__(self, json_data,
command,
score=0.0,
last_output='UNKNOWN',
):
"""Keep track of different stages in the state
        :param json_data: a basic JSON representation of the data
"""
self._data = json_data
self._score = score
self._next = command
self._last_output = last_output
def copy(self):
"""Does a deep copy of the state
:returns: new search state
"""
new_data = copy.deepcopy(self._data)
new_score = copy.deepcopy(self._score)
new_next = copy.deepcopy(self._next)
return SearchState(
new_data,
new_next,
new_score,
last_output="UNKNOWN",
)
@property
def last_output(self):
return self._last_output
@last_output.setter
def last_output(self, new_output):
self._last_output = new_output
## important to implement to work
## with the heap datastructures
def __lt__(self, other):
if self.score < other.score:
return True
return False
def __eq__(self, other):
if self.score == other.score:
return True
return False
@property
def data(self):
return self._data
@property
def score(self):
return self._score
@property
def next(self):
return self._next
@next.setter
def next(self, value):
self._next = value
@data.setter
def data(self, value):
self._data = value
## string method (especially for debugging)
def __str__(self):
return "[OUTPUT] val=%s [SCORE] %s" % (self._last_output,
str(self._score))
## THE BASIC SEARCH STRATEGIES (largely from the other code)
class QuestionSearchBase(object):
def __init__(self, model_controller):
"""Create a `QuestionDecomposer instance`
:param model_ensemble: a collection of models with control instructions
"""
self.controller = model_controller
def find_answer_decomp(self, json_input, debug=False):
"""Main question decomposition function
:param json_input: the input to all of the models.
"""
raise NotImplementedError
@classmethod
def from_config(cls, config):
"""Load a model from configuration
:param config: the global configuration
"""
pass
def return_qid_prediction(self, example, debug=False):
final_state, other_states = self.find_answer_decomp(example, debug=debug)
if final_state is None:
print(example["question"] + " FAILED!")
chain = example["qid"] + "\t" + example["question"]
return (example["qid"], "", chain)
else:
data = final_state._data
chain = example["qid"] + "\t" + example["question"]
for m, q, a in zip(data["model_seq"], data["question_seq"], data["answer_seq"]):
chain += "\tQ: ({}) {} A: {}".format(m, q, a)
chain += "\tS: " + str(final_state._score)
print(chain)
final_answer = data["answer_seq"][-1]
try:
json_answer = json.loads(final_answer)
# use this only if list (ignore numbers, etc)
if isinstance(json_answer, list):
final_answer = json_answer
except ValueError:
# Not a valid json ignore
pass
return (example["qid"], final_answer, chain)
class BestFirstDecomposer(QuestionSearchBase):
def find_answer_decomp(self, json_input, debug=False):
"""Run the question decomposer. The main function here is to use
the controller to pass around inputs to the different models, then
        keep track of the search state and terminate when the shortest path
has been found.
:param json_input: some input to the model
"""
## start state of controller : e.g., generate
start_command = self.controller.start_state
start_data = self.controller.init_data(json_input)
## min-heap
heap = []
init_input = json_input["question"] if json_input["question"] else "UNKNOWN"
if debug: print("[START QUERY] : %s" % init_input)
init_state = SearchState(start_data, ## initial input
start_command, ## starting point
score=0.0, ## starting score
)
## push it to heap
heapq.heappush(heap, init_state)
max_step = 0
## todo : add constraints on search (e.g., beam sizes, etc..)
## start the main search
while True:
if len(heap) == 0:
if debug: print("[FAILED]: %s" % init_input)
return None, []
## pop from heap
current_state = heapq.heappop(heap)
if debug:
print("[MIN_STATE] command=%s" % (current_state.next))
## end state
if current_state.next == self.controller.end_state:
if debug: print("[TERMINATED]\n%s" % current_state)
return current_state, heap
## generate output and new stated
for new_state in self.controller.execute(current_state, debug=debug):
## debug view
if debug: print("\t%s" % new_state)
## push onto heap
heapq.heappush(heap, new_state)
| CommaQA-main | commaqa/inference/model_search.py |
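A minimal wiring sketch for the controller and best-first decomposer (the participant objects are hypothetical; each state name maps to a participant's `query` method, mirroring `load_decomposer` in configurable_inference.py):

config_map = {
    "start_state": "gen",
    "end_state": "[EOQ]",
    "gen": question_gen_participant.query,    # e.g. an LMGenParticipant
    "execute": execution_participant.query,   # e.g. an ExecutionParticipant
}
controller = ModelController(config_map, data_class=QuestionGeneratorData)
decomposer = BestFirstDecomposer(controller)
example = {"qid": "q1", "query": "Who acted in ...?", "question": "Who acted in ...?"}
qid, answer, chain = decomposer.return_qid_prediction(example)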
import json
import logging
import re
from commaqa.configs.predicate_language_config import ModelQuestionConfig
from commaqa.dataset.utils import valid_answer, nonempty_answer
from commaqa.execution.operation_executer import OperationExecuter
from commaqa.execution.utils import build_models
from commaqa.inference.model_search import ParticipantModel
logger = logging.getLogger(__name__)
class ExecutionParticipant(ParticipantModel):
def __init__(self, remodel_file, next_model="gen", skip_empty_answers=False):
self.next_model = next_model
self.skip_empty_answers = skip_empty_answers
self.per_model_calls = {"executer": 0, "op_executer": 0}
if remodel_file:
with open(remodel_file, "r") as input_fp:
input_json = json.load(input_fp)
self.kb_lang_groups = []
self.qid_to_kb_lang_idx = {}
for input_item in input_json:
kb = input_item["kb"]
pred_lang = input_item["pred_lang_config"]
idx = len(self.kb_lang_groups)
self.kb_lang_groups.append((kb, pred_lang))
for qa_pair in input_item["qa_pairs"]:
qid = qa_pair["id"]
self.qid_to_kb_lang_idx[qid] = idx
        self.operation_regex = re.compile(r"\((.+)\) \[([^\]]+)\] (.*)")
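        # Illustrative format only (inferred from the regex above and the "#<idx>"
        # placeholders used in query(), not from official CommaQA docs): a
        # decomposition step is expected to look like
        #   "(select) [table_model] Who directed #1?"
        # where group(1) is the operation, group(2) the model to route to, group(3)
        # the sub-question, and "#1", "#2", ... refer to answers of earlier steps.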
def return_model_calls(self):
return self.per_model_calls
def query(self, state, debug=False):
"""The main function that interfaces with the overall search and
model controller, and manipulates the incoming data.
:param state: the state of controller and model flow.
        :type state: commaqa.inference.model_search.SearchState
:rtype: list
"""
## the data
data = state._data
self.per_model_calls["executer"] += 1
step_model_key = "executer_step{}".format(len(data["question_seq"]))
if step_model_key not in self.per_model_calls:
self.per_model_calls[step_model_key] = 0
self.per_model_calls[step_model_key] += 1
question = data["question_seq"][-1]
qid = data["qid"]
(kb, pred_lang) = self.kb_lang_groups[self.qid_to_kb_lang_idx[qid]]
model_configurations = {}
for model_name, configs in pred_lang.items():
model_configurations[model_name] = [ModelQuestionConfig(config) for config in configs]
model_lib = build_models(model_configurations, kb, ignore_input_mismatch=True)
### run the model (as before)
if debug: print("<OPERATION>: %s, qid=%s" % (question, qid))
m = self.operation_regex.match(question)
if m is None:
logger.debug("No match for {}".format(question))
return []
assignment = {}
for ans_idx, ans in enumerate(data["answer_seq"]):
assignment["#" + str(ans_idx + 1)] = json.loads(ans)
executer = OperationExecuter(model_library=model_lib, ignore_input_mismatch=True)
answers, facts_used = executer.execute_operation(operation=m.group(1),
model=m.group(2),
question=m.group(3),
assignments=assignment)
for model_name, model in model_lib.items():
if model_name not in self.per_model_calls:
self.per_model_calls[model_name] = 0
self.per_model_calls[model_name] += model.num_calls
self.per_model_calls["op_executer"] += executer.num_calls
if not valid_answer(answers):
logger.debug("Invalid answer for qid: {} question: {} chain: {}!".format(
qid, question, ", ".join(data["question_seq"])))
return []
if self.skip_empty_answers and not nonempty_answer(answers):
logger.debug("Empty answer for qid: {} question: {} chain: {}!".format(
qid, question, ", ".join(data["question_seq"])))
return []
# copy state
new_state = state.copy()
## add answer
new_state.data["answer_seq"].append(json.dumps(answers))
new_state.data["para_seq"].append("")
new_state.data["command_seq"].append("qa")
new_state.data["model_seq"].append(m.group(2))
new_state.data["operation_seq"].append(m.group(1))
new_state.data["subquestion_seq"].append(m.group(3))
## change output
new_state.last_output = answers
new_state.next = self.next_model
return [new_state]
| CommaQA-main | commaqa/inference/participant_execution.py |
from commaqa.inference.model_search import ParticipantModel
from commaqa.inference.utils import get_sequence_representation
class DumpChainsParticipant(ParticipantModel):
def __init__(self, output_file, next_model="gen"):
self.output_file = output_file
self.next_model = next_model
self.num_calls = 0
def return_model_calls(self):
return {"dumpchains": self.num_calls}
def dump_chain(self, state):
data = state.data
origq = data["query"]
qchain = data["question_seq"]
achain = data["answer_seq"]
sequence = get_sequence_representation(origq=origq, question_seq=qchain, answer_seq=achain)
ans = achain[-1]
with open(self.output_file, 'a') as chains_fp:
chains_fp.write(data["qid"] + "\t" + sequence + "\t" + ans + "\n")
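    # Note: each dumped line is tab-separated as "<qid>\t<sequence>\t<final answer>";
    # the exact layout of the middle field comes from get_sequence_representation,
    # which is defined elsewhere and not shown here.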
def query(self, state, debug=False):
self.num_calls += 1
if len(state.data["question_seq"]) > 0:
self.dump_chain(state)
new_state = state.copy()
new_state.next = self.next_model
return new_state
| CommaQA-main | commaqa/inference/participant_util.py |
import json
class DatasetReader:
def read_examples(self, file):
        raise NotImplementedError("read_examples not implemented by " + self.__class__.__name__)
class HotpotQAReader(DatasetReader):
def read_examples(self, file):
with open(file, 'r') as input_fp:
input_json = json.load(input_fp)
for entry in input_json:
yield {
"qid": entry["_id"],
"query": entry["question"],
# metadata
"answer": entry["answer"],
"question": entry["question"],
"type": entry.get("type", ""),
"level": entry.get("level", "")
}
def format_drop_answer(answer_json):
if answer_json["number"]:
return answer_json["number"]
if len(answer_json["spans"]):
return answer_json["spans"]
# only date possible
date_json = answer_json["date"]
if not (date_json["day"] or date_json["month"] or date_json["year"]):
print("Number, Span or Date not set in {}".format(answer_json))
return None
return date_json["day"] + "-" + date_json["month"] + "-" + date_json["year"]
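# Illustrative behaviour of format_drop_answer (field names follow the code above;
# the concrete values are hypothetical, not taken from the DROP release):
#   {"number": "3", "spans": [], "date": {...}}                          -> "3"
#   {"number": "", "spans": ["Tom", "Ann"], "date": {...}}               -> ["Tom", "Ann"]
#   {"number": "", "spans": [], "date": {"day": "5", "month": "May", "year": "1921"}}
#                                                                        -> "5-May-1921"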
class DropReader(DatasetReader):
def read_examples(self, file):
with open(file, 'r') as input_fp:
input_json = json.load(input_fp)
for paraid, item in input_json.items():
para = item["passage"]
for qa_pair in item["qa_pairs"]:
question = qa_pair["question"]
qid = qa_pair["query_id"]
answer = format_drop_answer(qa_pair["answer"])
yield {
"qid": qid,
"query": question,
# metadata
"answer": answer,
"question": question
}
| CommaQA-main | commaqa/inference/dataset_readers.py |
#!/usr/bin/python
"""
Official DROP evaluation script obtained from
https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py
"""
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union, Optional
import json
import argparse
import string
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
# From here through _normalize_answer was originally copied from:
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
# Then cleaned up and modified a bit.
def _remove_articles(text: str) -> str:
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def _white_space_fix(text: str) -> str:
return " ".join(text.split())
EXCLUDE = set(string.punctuation)
def _remove_punc(text: str) -> str:
if not _is_number(text):
return "".join(ch for ch in text if ch not in EXCLUDE)
else:
return text
def _lower(text: str) -> str:
return text.lower()
def _tokenize(text: str) -> List[str]:
return re.split(" |-", text)
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token)))))
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = " ".join(parts).strip()
return normalized
def _is_number(text: str) -> bool:
try:
float(text)
return True
except ValueError:
return False
def _normalize_number(text: str) -> str:
if _is_number(text):
return str(float(text))
else:
return text
def _answer_to_bags(
answer: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[List[str], List[Set[str]]]:
if isinstance(answer, (list, tuple)):
raw_spans = answer
else:
raw_spans = [answer]
normalized_spans: List[str] = []
token_bags = []
for raw_span in raw_spans:
normalized_span = _normalize_answer(raw_span)
normalized_spans.append(normalized_span)
token_bags.append(set(normalized_span.split()))
return normalized_spans, token_bags
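# Quick sanity check of the normalization above (hand-derived from this code, not
# an official test case):
#   _answer_to_bags("The Eiffel Tower") -> (["eiffel tower"], [{"eiffel", "tower"}])
#   _answer_to_bags(["25", "cents"])    -> (["25.0", "cents"], [{"25.0"}, {"cents"}])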
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
"""
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
"""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float:
intersection = len(gold_bag.intersection(predicted_bag))
if not predicted_bag:
precision = 1.0
else:
precision = intersection / float(len(predicted_bag))
if not gold_bag:
recall = 1.0
else:
recall = intersection / float(len(gold_bag))
f1 = (
(2 * precision * recall) / (precision + recall)
if not (precision == 0.0 and recall == 0.0)
else 0.0
)
return f1
def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bool:
gold_numbers = set()
predicted_numbers = set()
for word in gold_bag:
if _is_number(word):
gold_numbers.add(word)
for word in predicted_bag:
if _is_number(word):
predicted_numbers.add(word)
if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
return True
return False
def get_metrics(
predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[float, float]:
"""
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(gold_bags[0]):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
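# Hand-worked examples of get_metrics (derived from the functions above, not from
# the official DROP test suite):
#   get_metrics("25", ["25.0"])             -> (1.0, 1.0)   # number normalization
#   get_metrics("the Bears", ["Bears"])     -> (1.0, 1.0)   # article "the" removed
#   get_metrics("Chicago Bears", ["Bears"]) -> (0.0, 0.67)  # partial token overlap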
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
"""
if "number" in answer and answer["number"]:
return tuple([str(answer["number"])]), "number"
elif "spans" in answer and answer["spans"]:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
elif "date" in answer:
return (
tuple(
[
"{0} {1} {2}".format(
answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]
)
]
),
"date",
)
else:
raise ValueError(
f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}"
)
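# Illustrative mapping for answer_json_to_strings (values hypothetical; the key
# structure follows the code above):
#   {"number": "7", "spans": [], "date": {...}}        -> (("7",), "number")
#   {"number": "", "spans": ["Bears"], "date": {...}}  -> (("Bears",), "span")
#   {"number": "", "spans": [], "date": {"day": "5", "month": "May", "year": "1921"}}
#                                                      -> (("5 May 1921",), "date")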
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any]
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations (note that these are somewhat deep in the JSON for the
gold annotations, but must be top-level keys in the predicted answers).
The ``annotations`` are assumed to have the format of the dev set in the DROP data release.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string
(or list of strings) that is the answer.
"""
instance_exact_match = []
instance_f1 = []
# for each type as well
type_to_em: Dict[str, List[float]] = defaultdict(list)
type_to_f1: Dict[str, List[float]] = defaultdict(list)
for _, annotation in annotations.items():
for qa_pair in annotation["qa_pairs"]:
query_id = qa_pair["query_id"]
max_em_score = 0.0
max_f1_score = 0.0
max_type = None
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
candidate_answers = [qa_pair["answer"]]
if "validated_answers" in qa_pair and qa_pair["validated_answers"]:
candidate_answers += qa_pair["validated_answers"]
for answer in candidate_answers:
gold_answer, gold_type = answer_json_to_strings(answer)
em_score, f1_score = get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
if max_em_score == em_score and max_f1_score == f1_score:
max_type = gold_type
else:
print("Missing prediction for question: {}".format(query_id))
if qa_pair and qa_pair["answer"]:
max_type = answer_json_to_strings(qa_pair["answer"])[1]
else:
max_type = "number"
max_em_score = 0.0
max_f1_score = 0.0
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
type_to_em[max_type].append(max_em_score)
type_to_f1[max_type].append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
print("----")
total = np.sum([len(v) for v in type_to_em.values()])
for typ in sorted(type_to_em.keys()):
print(
"{0}: {1} ({2:.2f}%)".format(
typ, len(type_to_em[typ]), 100.0 * len(type_to_em[typ]) / total
)
)
print(" Exact-match accuracy {0:.3f}".format(100.0 * np.mean(type_to_em[typ])))
print(" F1 score {0:.3f}".format(100.0 * np.mean(type_to_f1[typ])))
return global_em, global_f1
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the
gold file. Both files must be json formatted and must have query_id keys, which are used to
match predictions to gold annotations. The gold file is assumed to have the format of the dev
set in the DROP data release. The prediction file must be a JSON dictionary keyed by query id,
where the value is either a JSON dictionary with an "answer" key, or just a string (or list of
strings) that is the answer. Writes a json with global_em and global_f1 metrics to file at
the specified output path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="evaluate on drop dataset")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="drop_dataset_test.gold.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
args = parser.parse_args()
    evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path)
| CommaQA-main | scripts/drop_eval.py |
import json
import sys
def evaluate(answer_file, prediction_file):
answer_by_id = {}
for line in open(answer_file).readlines():
struct = json.loads(line)
answer_by_id[struct["id"]] = struct
prediction_by_id = {}
for line in open(prediction_file).readlines():
struct = json.loads(line)
prediction_by_id[struct["id"]] = struct
answer_count = len(answer_by_id)
prediction_count = len(prediction_by_id)
if answer_count != prediction_count:
print(
f"Prediction count ({prediction_count}) doesn't match answer count ({answer_count})"
)
sys.exit(1)
total = 0
correct = 0
total_start = 0
correct_start = 0
total_end = 0
correct_end = 0
story_prediction_map = {}
for answer in answer_by_id.values():
answer_id = answer["id"]
prediction = prediction_by_id.get(answer_id, None)
if not prediction:
print(f"Prediction for id {answer_id} missing")
sys.exit(1)
hypothesis = answer["query"]
story = answer["story"]
answer_label = answer["label"]
prediction_label = prediction["label"]
if story not in story_prediction_map:
story_prediction_map[story] = []
total += 1
if answer_label == prediction_label:
correct += 1
story_prediction_map[story].append(True)
else:
story_prediction_map[story].append(False)
if "starts before" in hypothesis or "starts after" in hypothesis:
total_start += 1
if answer_label == prediction_label:
correct_start += 1
else:
total_end += 1
if answer_label == prediction_label:
correct_end += 1
s_total = 0
s_correct = 0
for key in story_prediction_map:
s_total += 1
cv = True
for v in story_prediction_map[key]:
cv = cv and v
if cv:
s_correct += 1
total_acc = float(correct) / float(total)
start_acc = float(correct_start) / float(total_start)
end_acc = float(correct_end) / float(total_end)
story_em = float(s_correct) / float(s_total)
return total_acc, start_acc, end_acc, story_em
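# Illustrative input lines (field names follow the code above; values are hypothetical):
#   answer file line:     {"id": "q1", "query": "... starts before ...", "story": "s1", "label": "..."}
#   prediction file line: {"id": "q1", "label": "..."}
# total_acc is per-question accuracy, start_acc/end_acc split questions by whether the
# hypothesis mentions "starts before"/"starts after", and story_em requires every
# question of a story to be predicted correctly.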
def main():
import argparse
parser = argparse.ArgumentParser(
description="Evaluate leaderboard predictions for questions."
)
parser.add_argument(
"--question_answers",
"-qa",
help="Filename of the question answers to read.",
required=True,
)
parser.add_argument(
"--predictions",
"-p",
help="Filename of the leaderboard predictions",
required=True,
)
parser.add_argument(
"--output", "-o", help="Output results to this file.", required=True
)
args = parser.parse_args()
total_acc, start_acc, end_acc, story_em = evaluate(
args.question_answers, args.predictions
)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(
json.dumps(
{
"total_acc": total_acc,
"start_acc": start_acc,
"end_acc": end_acc,
"story_em": story_em,
}
)
)
if __name__ == "__main__":
main()
| aristo-leaderboard-master | tracie/evaluator/evaluator.py |
import os
import evaluator
import unittest
import tempfile
import typing
class TestAccuracy(unittest.TestCase):
def test_EverythingCorrect(self):
qa = {"P1": "E", "P2": "N", "P3": "N"}
p = {"P1": "E", "P2": "N", "P3": "N"}
self.assertEqual(3.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_EverythingWrong(self):
qa = {"P1": "E", "P2": "N", "P3": "N"}
p = {"P1": "N", "P2": "E", "P3": "E"}
self.assertEqual(0.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_MixedResults(self):
qa = {"P1": "E", "P2": "N", "P3": "N"}
p = {"P1": "E", "P2": "N", "P3": "E"}
self.assertEqual(2.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_ExtraPredictions(self):
qa = {"P1": "E", "P2": "N", "P3": "N"}
p = {"P1": "E", "P2": "N", "P3": "N", "PExtra": "E"}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_EXTRA)
def test_MissingPredictions(self):
qa = {"P1": "E", "P2": "N", "P3": "N"}
p = {"P1": "E", "P2": "N"}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTION_MISSING)
def temp_file_with_contents(lines: typing.List[str]) -> str:
t = tempfile.NamedTemporaryFile(mode='wt', delete=False)
t.writelines(lines)
t.close()
return t.name
class TestReadAnswers(unittest.TestCase):
def test_ReadAnswers(self):
t = temp_file_with_contents([
'{"id": "P1", "gold_label": "E"}\n',
'{"id": "P2", "gold_label": "N"}\n',
'{"id": "P3", "gold_label": "N"}\n',
])
answers = evaluator.read_answers(t)
os.remove(t)
self.assertEqual(answers, {"P1": "E", "P2": "N", "P3": "N"})
def test_ReadAnswersEmpty(self):
t = temp_file_with_contents([])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersCorrupted(self):
t = temp_file_with_contents(['this is not json'])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersRepeated(self):
t = temp_file_with_contents([
'{"id": "P1", "gold_label": "E"}\n',
'{"id": "P1", "gold_label": "N"}\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
class TestReadPredictions(unittest.TestCase):
def test_ReadPredictions(self):
t = temp_file_with_contents([
'P1,E\n',
'"P2",N\n',
])
predictions = evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(predictions, {
"P1": "E",
"P2": "N",
})
def test_ReadPredictionsMissingColumn(self):
t = temp_file_with_contents([
'P1,E\n',
'"P2"\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsRepeated(self):
t = temp_file_with_contents([
'P1,E\n',
'P1,N\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedBadKey(self):
t = temp_file_with_contents([
'P1,X\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyKey(self):
t = temp_file_with_contents([
',N\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | scitail/evaluator/test_evaluator.py |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
VALID_PREDICTION_VALUES = ['E', 'N']
def calculate_accuracy(answers: Dict[str, str], predictions: Dict[str, str]) -> float:
score = 0.0
for entailment_pair_id, answer in answers.items():
try:
predictions_for_q = predictions[entailment_pair_id]
except KeyError:
logging.error("Missing prediction for entailment pair '%s'.", entailment_pair_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
if answer in predictions_for_q:
score += 1
del predictions[entailment_pair_id]
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
return score / len(answers)
def read_answers(filename: str) -> Dict[str, str]:
answers = {} # type: Dict[str, str]
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
for line in f:
line = line.strip()
try:
record = json.loads(line)
except ValueError as e:
logging.error("Error while reading file %s: %s", filename, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
entailment_pair_id = record["id"]
answer = record["gold_label"]
if entailment_pair_id in answers:
logging.error("Key %s repeated in %s", entailment_pair_id, filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
answers[entailment_pair_id] = answer
if len(answers) == 0:
logging.error("No answers found in file %s", filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return answers
def read_predictions(filename: str) -> Dict[str, str]:
predictions = {} # type: Dict[str, str]
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
entailment_pair_id = row[0]
prediction = row[1]
except IndexError as e:
logging.error("Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if entailment_pair_id in predictions:
logging.error("Key %s repeated in file %s on line %d", entailment_pair_id, filename,
reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if entailment_pair_id == "":
logging.error("Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
# prediction cannot be empty string
if prediction == "":
logging.error("Key %s has empty string for prediction in file %s on line %d",
entailment_pair_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
# predictions must be part of the controlled vocabulary
if prediction not in VALID_PREDICTION_VALUES:
logging.error("Key %s has invalid prediction in file %s on line %d",
entailment_pair_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
predictions[entailment_pair_id] = prediction
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
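# Illustrative prediction rows accepted above (two CSV columns: pair id and a label
# from VALID_PREDICTION_VALUES; the ids here are hypothetical):
#   P100,E
#   P101,N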
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for SciTail sentence pairs.')
parser.add_argument(
'--answers', '-a',
help='Filename of the answers to read. Expects a JSONL file with documents that have fields "id" and '
'"gold_label".',
required=True)
parser.add_argument(
'--predictions', '-p',
help="Filename of the leaderboard predictions, in CSV format.",
required=True)
parser.add_argument(
'--output', '-o',
help='Output results to this file.')
args = parser.parse_args()
answers = read_answers(args.answers)
predictions = read_predictions(args.predictions)
accuracy = calculate_accuracy(answers, predictions)
if args.output:
print("Writing results to file: %s" % args.output)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(json.dumps({"accuracy": accuracy}))
else:
print("accuracy:", accuracy)
if __name__ == '__main__':
main()
| aristo-leaderboard-master | scitail/evaluator/evaluator.py |
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/__init__.py |
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/evaluator/__init__.py |
import json
import random
import sys
from allennlp_reasoning_explainqa.common.constants import CORRECT_OPTION_TAG
from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import (
F1MeasureCustomRetrievalEval,
)
from allennlp_reasoning_explainqa.training.metrics.explanation_eval import (
ExplanationEval,
)
# Sets random seed to a nothing-up-my-sleeve number so that we have
# deterministic evaluation scores.
random.seed(12345)
def evaluate(prediction_filename, label_filename):
chainid_to_label = json.load(open(label_filename, "r"))
chain_count = len(chainid_to_label)
predictions_lines = open(prediction_filename, "r").readlines()
predictions = [json.loads(row) for row in predictions_lines]
prediction_count = len(predictions)
if chain_count != prediction_count:
print(
f"Label file {label_filename} has {chain_count} chains, but prediction file {prediction_filename} has {prediction_count} predictions. These must be equal."
)
sys.exit(1)
f1eval = F1MeasureCustomRetrievalEval(pos_label=1)
explanation_eval = ExplanationEval()
chain_ids_covered = []
cnt = 0
for row in predictions:
assert "score" in row, "Prediction should contain field score"
assert "chain_id" in row, "Prediction should contain field chain_id"
score = row["score"]
chain_id = row["chain_id"]
qid = chain_id.strip().split("_")[0]
print("qid,chain_id,score = ", qid, chain_id, score)
gtlabel = chainid_to_label[chain_id]
f1eval(int(gtlabel), score)
explanation_eval(qid, CORRECT_OPTION_TAG, int(gtlabel), score)
chain_ids_covered.append(chain_id)
cnt += 1
assert len(chain_ids_covered) == len(
chainid_to_label
), "Found {} chains but expected {} chains".format(
len(chain_ids_covered), len(chainid_to_label)
)
binclf_performance = f1eval.get_metric(reset=True)
print("f1.get_metric() = ", binclf_performance)
explanation_performance = explanation_eval.get_metric(reset=True)
print("explanation_eval.get_metric() = ", explanation_performance)
final_metrics = {
"auc_roc": binclf_performance["auc_roc"],
"explainP1": explanation_performance["explainP1"],
"explainNDCG": explanation_performance["explainNDCG"],
}
print("=" * 32)
print(": auc_roc = ", binclf_performance["auc_roc"])
print(": P1 = ", explanation_performance["explainP1"])
print(": explainNDCG = ", explanation_performance["explainNDCG"])
print("=" * 32)
return final_metrics
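# Illustrative prediction line consumed by evaluate() (values hypothetical; the
# chain_id is assumed to start with the question id followed by an underscore, as
# implied by the split("_") above):
#   {"chain_id": "3N7QF_1", "score": 0.87}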
if __name__ == "__main__":
prediction_filename = sys.argv[1]
label_filename = sys.argv[2]
metrics_filename = sys.argv[3]
print(
f"Evaluating prediction file {prediction_filename} with label file {label_filename}"
)
metrics = evaluate(prediction_filename, label_filename)
print(f"Writing final metrics to file: {metrics_filename}")
json.dump(metrics, open(metrics_filename, "w"))
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/evaluator/evaluator.py |
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/__init__.py |
from allennlp_reasoning_explainqa.training.metrics.confusion_matrix import *
from allennlp_reasoning_explainqa.training.metrics.explanation_eval import *
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/metrics/__init__.py |
import random
from collections import Counter
import numpy as np
from allennlp_reasoning_explainqa.common.constants import *
def dcg_score(y_true, y_score, k=10, gains="exponential"):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
DCG @k : float
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
if gains == "exponential":
gains = 2 ** y_true - 1
elif gains == "linear":
gains = y_true
else:
raise ValueError("Invalid gains option.")
# highest rank is 1 so +2 instead of +1
discounts = np.log2(np.arange(len(y_true)) + 2)
# print("gains,discounts = ", gains,discounts)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
NDCG @k : float
"""
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return actual / best
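# Hand-worked example with linear gains (not from the original test suite):
#   y_true = [1, 0, 1], y_score = [0.9, 0.8, 0.7]
#   DCG  = 1/log2(2) + 0/log2(3) + 1/log2(4)  = 1.5
#   IDCG = 1/log2(2) + 1/log2(3) + 0/log2(4) ~= 1.631
#   ndcg_score(y_true, y_score, gains="linear") ~= 0.92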
class ExplanationEval:
def __init__(self, pos_label=1, neg_label=0) -> None:
self._predictions = {}
self._id_count = 0
self._pos_label = pos_label
self._neg_label = neg_label
self._labels = [pos_label, neg_label]
def __call__(self, ques_id, choice_type, ground_truth_label, score):
"""
Parameters
----------
value : ``float``
The value to average.
"""
if choice_type in CORRECT_OPTION_TAG_LIST:
assert ground_truth_label in self._labels, "Label not known"
if ques_id not in self._predictions:
self._predictions[ques_id] = []
self._id_count += 1
self._predictions[ques_id].append(
{"score": score, "ground_truth": ground_truth_label}
)
def get_metric(self, reset: bool = False):
if reset:
print(
"explain_eval: Counter(len(vals)) : ",
Counter([len(val) for val in self._predictions.values()]),
)
ret = {
"explainP1": [],
"explainP1_normalized": [],
"explainP2": [],
"explainP5": [],
"explainNDCG": [],
}
total_label_counts = {"label_" + str(k): 0 for k in self._labels}
for id, vals in self._predictions.items():
random.shuffle(
vals
        )  # hack to avoid inflated scores when ties happen to put the correct chains first
vals = sorted(
vals, key=lambda x: -x["score"]
) # sort by decreasing order of score
cnt_pos_flag = 0
y_true = [val["ground_truth"] for val in vals]
y_score = [val["score"] for val in vals]
total_true = sum(y_true)
if total_true > 0:
ndcg = ndcg_score(y_true, y_score, k=10, gains="linear")
else:
ndcg = 0
ret["explainNDCG"].append(ndcg)
ndcg_numerator = 0.0
ndcg_denominator = 0.0
discount = 1.0
discount_den = 1.0
            for j, val in enumerate(
                vals
            ):  # TODO: what if the number of items is less than 5? -- will affect R@5
                if val["ground_truth"] == self._pos_label:
                    cnt_pos_flag = (
                        1  # since we just want to know whether it is there or not
                    )
ndcg_numerator += discount * 1.0
                    # The denominator represents the maximum possible gain: whenever we
                    # encounter a positive, the denominator should increase. Since labels
                    # are 0/1, this is simple here and no sorting is needed.
# cnt_pos += 1
ndcg_denominator += discount_den * 1.0
discount_den *= 0.5
labelk = self._pos_label
else:
labelk = self._neg_label
total_label_counts["label_" + str(labelk)] += 1
if j == 0:
ret["explainP1"].append(cnt_pos_flag)
if j == 1:
ret["explainP2"].append(cnt_pos_flag)
if j == 4:
ret["explainP5"].append(cnt_pos_flag)
discount *= 0.5
if cnt_pos_flag > 0:
ret["explainP1_normalized"].append(ret["explainP1"][-1])
assert ndcg_numerator <= ndcg_denominator # sanity check
self.ret = {
k: {"items": len(lst), "score": np.mean(lst)} for k, lst in ret.items()
}
return_metric = {}
for k, lst in ret.items():
return_metric[k + "_items"] = len(lst)
if len(lst) > 0:
return_metric[k] = np.mean(lst)
return_metric.update(total_label_counts)
if reset:
self.reset()
return return_metric
def reset(self):
self._predictions = {}
# self._gt = {}
self._id_count = 0
self.ret = {}
def __str__(self):
return str(self.ret)
if __name__ == "__main__":
explain_eval = ExplanationEval()
dummy1 = [[1, 1, 1.5], [1, 1, 1.0], [1, 0, 0.9]] # perfect ranking
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
# {'explainP1_items': 1, 'explainP1': 1.0, 'explainP1_normalized_items': 1, 'explainP1_normalized': 1.0,
# 'explainP2_items': 1, 'explainP2': 1.0, 'explainP5_items': 0, 'explainNDCG_items': 1, 'explainNDCG': 1.0,
# 'explainNDCG_exp_items': 1, 'explainNDCG_exp': 1.0, 'label_1': 2, 'label_0': 1}
dummy1 = [
[1, 1, 1.5],
[1, 1, 1.0],
[1, 0, 0.9], # perfect ranking
[2, 0, 1.5],
[2, 0, 1.0],
[2, 1, 0.9], # completely opposite ranking
]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 1, 1.0], [1, 1, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 1, 1.0], [1, 1, 1.0], [1, 0, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 1, 1.01], [1, 1, 1.01]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.02], [1, 1, 1.01], [1, 1, 1.01]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 0, 1.0], [1, 1, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 0, 1.0], [1, 0, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [
[1, 1, 1.0],
[1, 1, 1.0],
]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
# env PYTHONPATH=. python allennlp_reasoning_explainqa/training/metrics/explanation_eval.py
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/metrics/explanation_eval.py |
import numpy as np
import sklearn.metrics
from sklearn.metrics import roc_curve
class F1MeasureCustomRetrievalEval:
def __init__(self, pos_label=1) -> None:
self._predictions = []
self._gt = []
self._pos_label = pos_label
self._probs = []
def __call__(self, label, score):
"""
Parameters
----------
predictions : ``torch.Tensor``, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : ``torch.Tensor``, required.
A tensor of integer class label of shape (batch_size, ...). It must be the same
shape as the ``predictions`` tensor without the ``num_classes`` dimension.
mask: ``torch.Tensor``, optional (default = None).
A masking tensor the same size as ``gold_labels``.
"""
self._gt.append(label)
self._probs.append(score)
def get_metric(self, reset: bool = False, given_thresh=None): # -> Dict[str,Float]:
probs = np.array(self._probs)
gt = np.array(self._gt)
threshold_max = None
f1_score_given_thresh = None
if reset and len(probs) > 0:
fpr, tpr, thresholds = roc_curve(gt, probs)
f1_scores = []
for thresh in thresholds:
f1_scores.append(
sklearn.metrics.f1_score(
gt, [1 if m > thresh else 0 for m in probs]
)
)
f1_scores = np.array(f1_scores)
f1_scores_max = np.max(f1_scores)
threshold_max = thresholds[np.argmax(f1_scores)]
auc_roc = sklearn.metrics.roc_auc_score(gt, probs)
if given_thresh is not None:
f1_score_given_thresh = sklearn.metrics.f1_score(
gt, [1 if m > given_thresh else 0 for m in probs]
)
else:
auc_roc = 0
f1_scores_max = 0
if reset:
self.reset()
return {
"auc_roc": auc_roc,
"f1_scores_max": f1_scores_max,
"threshold_max": threshold_max,
"f1_score_given_thresh": f1_score_given_thresh,
}
def reset(self):
self._gt = []
self._probs = []
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/training/metrics/confusion_matrix.py |
CORRECT_OPTION_TAG = "correct_option"
INCORRECT_OPTION_TAG = "incorrect_option"
CORRECT_OPTION_GOLD_TAG = "gold"
CORRECT_OPTION_TAG_LIST = [CORRECT_OPTION_TAG, CORRECT_OPTION_GOLD_TAG]
ALL_OPTION_TAG_LIST = [
CORRECT_OPTION_TAG,
CORRECT_OPTION_GOLD_TAG,
INCORRECT_OPTION_TAG,
]
| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/common/constants.py |