python_code | repo_name | file_path
---|---|---|
"""Slightly modified subclass of the AllenNLP conll2003 dataset reader.
Allows pruning negative sentences given a percent value and a limiting
by a max length
"""
from typing import Dict, Sequence, Iterable, List
import itertools
import logging
logging.basicConfig(level=logging.ERROR)
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.dataset_readers import Conll2003DatasetReader
from random import randint
from collections import defaultdict
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _is_divider(line: str) -> bool:
empty_line = line.strip() == ''
if empty_line:
return True
else:
first_token = line.split()[0]
if first_token == "-DOCSTART-": # pylint: disable=simplifiable-if-statement
return True
else:
return False
_VALID_LABELS = {'ner', 'pos', 'chunk'}
@DatasetReader.register("rcc-ner")
class RccNerDatasetReader(Conll2003DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
WORD POS-TAG CHUNK-TAG NER-TAG
with a blank line indicating the end of each sentence
and '-DOCSTART- -X- -X- O' indicating the end of each article,
and converts it into a ``Dataset`` suitable for sequence tagging.
percent_negatives: ``int``, optional (default=``100``)
The percentage of negative sentences (sentences whose NER tags are all ``O``) to include.
cutoff_sentence_length: ``int``, optional (default=``30``)
The maximum number of tokens a sentence may have in order to be included.
"""
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
tag_label: str = "ner",
feature_labels: Sequence[str] = (),
lazy: bool = False,
percent_negatives: int = 100,
cutoff_sentence_length: int = 30,
coding_scheme: str = "IOB1",
filter_sections: bool = False) -> None:
super().__init__(token_indexers, tag_label, feature_labels, lazy, coding_scheme)
self.percent_negatives = percent_negatives
self.cutoff_sentence_length = cutoff_sentence_length
self.filter_sections = filter_sections
def _is_title(self, tokens: List[Token], pos_tags: List[str], i: int):
if pos_tags[i] == "PROPN":
return True
if len(tokens) <= 2:
return True
if i + 1 < len(tokens):
if tokens[i+1].text == ":" or pos_tags[i+1] in ["PROPN", "NUM"]:
return True
def _is_sentence(self, tokens: List[Token], pos_tags: List[str]):
if len(tokens) < 3:
return False
if "NOUN" not in pos_tags and "VERB" not in pos_tags:
return False
pos_counts = defaultdict(int)
for pos_tag in pos_tags:
pos_counts[pos_tag] += 1
if (pos_counts["NUM"] + pos_counts["SYM"] + pos_counts["PUNCT"]) > 0.4*len(pos_tags) and pos_counts["VERB"] == 0:
return False
return True
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
start = False
stop = False
start_words_single = ["abstract",
"introduction",
"methods",
"data",
"method",
"intro",
"background",
"keywords"]
end_words_single = ["references",
"acknowledgements"]
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
# Group into alternative divider / sentence chunks.
instance_count = 0
yielded = False
for is_divider, lines in itertools.groupby(data_file, _is_divider):
# Ignore the divider chunks, so that `lines` corresponds to the words
# of a single sentence.
if not is_divider:
instance_count += 1
fields = [line.strip().split() for line in lines]
# unzipping trick returns tuples, but our Fields need lists
tokens, pos_tags, chunk_tags, ner_tags = [list(field) for field in zip(*fields)]
# negative sentence
if len(set(ner_tags)) == 1 and ner_tags[0] == 'O':
if randint(0, 100) > self.percent_negatives:
continue
# TextField requires ``Token`` objects
tokens = [Token(token) for token in tokens]
for i, (token, pos_tag) in enumerate(zip(tokens, pos_tags)):
if token.text.lower() in start_words_single and self._is_title(tokens, pos_tags, i):
start = True
if instance_count >= 75:
start = True
if start and instance_count >= 150 and token.text.lower() in end_words_single and self._is_title(tokens, pos_tags, i):
stop = True
if self.filter_sections:
if not start:
continue
if stop:
break
# print(tokens)
# print(pos_tags)
# print(self._is_sentence(tokens, pos_tags))
# input('e.')
# if not self._is_sentence(tokens, pos_tags):
# continue
if self.cutoff_sentence_length and len(tokens) < self.cutoff_sentence_length:
yielded = True
yield self.text_to_instance(tokens, pos_tags, chunk_tags, ner_tags)
if not yielded:
yield self.text_to_instance([Token("The")], ["Det"], ["O"], ["O"])
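# Example usage (illustrative; the CoNLL path and parameter values below are hypothetical):
#   reader = RccNerDatasetReader(percent_negatives=50, cutoff_sentence_length=30, filter_sections=True)
#   for instance in reader.read("path/to/publication_extraction.conll"):
#       ...  # each instance carries the tokens plus pos/chunk/ner tag fields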
| coleridge-rich-context-ai2-master | project/ner_rcc/rcc_ner.py |
"""This script splits up the papers such that the datasets in train, dev, and test are disjoint.
Note: It assumes that the old splits and their file structure remain the same as before"""
import os
import json
from collections import defaultdict
import numpy as np
import random
def build_dataset_id_to_papers(old_train_path, old_dev_path, old_test_path):
"""Creates a dictionary mapping dataset id to a list of papers that reference that dataset
according to the labels.
@param old_train_path: path to the old train split folder
@param old_dev_path: path to the old dev split folder
@param old_test_path: path to the old test split folder
"""
train_citations_path = os.path.join(old_train_path, "data_set_citations.json")
dev_citations_path = os.path.join(old_dev_path, "data_set_citations.json")
test_citations_path = os.path.join(old_test_path, "data_set_citations.json")
dataset_id_to_papers = defaultdict(list)
with open(train_citations_path) as fp:
train_citations_json = json.load(fp)
with open(dev_citations_path) as fp:
dev_citations_json = json.load(fp)
with open(test_citations_path) as fp:
test_citations_json = json.load(fp)
citations_json = train_citations_json + dev_citations_json + test_citations_json
for citation in citations_json:
dataset_id = citation["data_set_id"]
publication_id = citation["publication_id"]
dataset_id_to_papers[dataset_id].append(publication_id)
return dataset_id_to_papers
def get_old_splits(old_train_path, old_dev_path, old_test_path):
"""Returns the set of papers in the original train, dev, and test splits
respectively
@param old_train_path: path to the old train split folder
@param old_dev_path: path to the old dev split folder
@param old_test_path: path to the old test split folder
"""
train_publications_path = os.path.join(old_train_path, "publications.json")
dev_publications_path = os.path.join(old_dev_path, "publications.json")
test_publications_path = os.path.join(old_test_path, "publications.json")
with open(train_publications_path) as fp:
train_publications_json = json.load(fp)
with open(dev_publications_path) as fp:
dev_publications_json = json.load(fp)
with open(test_publications_path) as fp:
test_publications_json = json.load(fp)
train_papers = set()
dev_papers = set()
test_papers = set()
for publication in train_publications_json:
publication_id = publication["publication_id"]
train_papers.add(publication_id)
for publication in dev_publications_json:
publication_id = publication["publication_id"]
dev_papers.add(publication_id)
for publication in test_publications_json:
publication_id = publication["publication_id"]
test_papers.add(publication_id)
print()
print("Original splits:")
print("Num train papers:", len(train_papers))
print("Num dev papers:", len(dev_papers))
print("Num test papers:", len(test_papers))
assert len(train_papers) + len(dev_papers) + len(test_papers) == 5100, "There should be exactly 5100 papers in the old splits"
return train_papers, dev_papers, test_papers
def create_splits(dataset_id_with_papers_sorted):
"""Returns the set of papers in the new splits for train, dev, and test
respectively
@param dataset_id_with_papers_sorted: a sorted list of (dataset id, list of papers) tuples
"""
train_papers = set()
dev_papers = set()
test_papers = set()
all_papers = set()
for dataset_id, papers in dataset_id_with_papers_sorted:
all_papers.update(papers)
# take any datasets that appear in many papers as training data
if len(papers) > 20:
train_papers.update(papers)
continue
# if any of the current dataset's papers are already in train, dev, or test,
# put all of this dataset's papers in that split
if any(paper in train_papers for paper in papers):
train_papers.update(papers)
elif any(paper in dev_papers for paper in papers):
dev_papers.update(papers)
elif any(paper in test_papers for paper in papers):
test_papers.update(papers)
else:
# randomly assign this dataset's papers to train, dev, or test
random_flip = random.randint(0, 100)
if random_flip <= 70:
train_papers.update(papers)
elif random_flip <= 85:
dev_papers.update(papers)
else:
test_papers.update(papers)
# resolve conflicts by preferring dev over train, and test over dev
train_papers = train_papers - dev_papers
train_papers = train_papers - test_papers
dev_papers = dev_papers - test_papers
print()
print("New splits:")
print("Num train papers:", len(train_papers))
print("Num dev papers:", len(dev_papers))
print("Num test papers:", len(test_papers))
assert len(train_papers) + len(dev_papers) + len(test_papers) == 2550, "There should be exactly 2550 papers with datasets in them"
return train_papers, dev_papers, test_papers
def write_split(old_data_path,
new_split_path,
new_papers_path,
old_train_papers,
old_dev_papers,
old_test_papers,
new_papers):
"""Writes a concatenated conll file for a given data fold
@param old_data_path: path to the old data folder
@param new_split_path: path to write the concatenated conll file to
@param new_papers_path: path to write the list of papers in the fold to
@param old_train_papers: set of papers in the old train set
@param old_dev_papers: set of papers in the old dev set
@param old_test_papers: set of papers in the old test set
@param new_papers: set of papers in the new fold
"""
old_train_conll_path = os.path.join(old_data_path, "train", "ner-conll")
old_dev_conll_path = os.path.join(old_data_path, "dev", "ner-conll")
old_test_conll_path = os.path.join(old_data_path, "test", "ner-conll")
with open(new_split_path, "w") as new_split_fp,\
open(new_papers_path, "w") as new_papers_fp:
for paper in new_papers:
if paper in old_train_papers:
base_conll_path = old_train_conll_path
elif paper in old_dev_papers:
base_conll_path = old_dev_conll_path
elif paper in old_test_papers:
base_conll_path = old_test_conll_path
else:
raise Exception("Paper {} was not found in old train, dev, or test".format(paper))
old_conll_file_path = os.path.join(base_conll_path, str(paper) + "_extraction.conll")
with open(old_conll_file_path) as old_conll_fp:
new_split_fp.write(old_conll_fp.read())
new_papers_fp.write(str(paper))
new_papers_fp.write('\n')
def write_splits(old_data_path,
new_splits_base_path,
old_train_papers,
old_dev_papers,
old_test_papers,
new_train_papers,
new_dev_papers,
new_test_papers):
"""Writes concatenated conll files for train, dev, and test based on the new splits
@param old_data_path: path to the old data folder
@param new_splits_base_path: path to the folder where the concatenated files will be written
@param old_train_papers: set of papers in the old train set
@param old_dev_papers: set of papers in the old dev set
@param old_test_papers: set of papers in the old test set
@param new_train_papers: set of papers in the new train fold
@param new_dev_papers: set of papers in the new dev fold
@param new_test_papers: set of papers in the new test fold
"""
train_concat_path = os.path.join(new_splits_base_path, "train_concat.conll")
dev_concat_path = os.path.join(new_splits_base_path, "dev_concat.conll")
test_concat_path = os.path.join(new_splits_base_path, "test_concat.conll")
train_papers_path = os.path.join(new_splits_base_path, "train_papers.txt")
dev_papers_path = os.path.join(new_splits_base_path, "dev_papers.txt")
test_papers_path = os.path.join(new_splits_base_path, "test_papers.txt")
write_split(old_data_path, train_concat_path, train_papers_path, old_train_papers, old_dev_papers, old_test_papers, new_train_papers)
write_split(old_data_path, dev_concat_path, dev_papers_path, old_train_papers, old_dev_papers, old_test_papers, new_dev_papers)
write_split(old_data_path, test_concat_path, test_papers_path, old_train_papers, old_dev_papers, old_test_papers, new_test_papers)
def main():
old_data_path = os.path.abspath(os.path.join("project", "data"))
old_train_path = os.path.join(old_data_path, "train")
old_dev_path = os.path.join(old_data_path, "dev")
old_test_path = os.path.join(old_data_path, "test")
old_train_papers, old_dev_papers, old_test_papers = get_old_splits(old_train_path, old_dev_path, old_test_path)
dataset_id_to_papers = build_dataset_id_to_papers(old_train_path, old_dev_path, old_test_path)
dataset_id_with_papers_sorted = sorted([(k, v) for k, v in dataset_id_to_papers.items()], key=lambda x: len(x[1]))
new_train_papers, new_dev_papers, new_test_papers = create_splits(dataset_id_with_papers_sorted)
new_splits_base_path = os.path.abspath(os.path.join("project", "ner_retraining", "data"))
write_splits(old_data_path,
new_splits_base_path,
old_train_papers,
old_dev_papers,
old_test_papers,
new_train_papers,
new_dev_papers,
new_test_papers)
if __name__ == "__main__":
main() | coleridge-rich-context-ai2-master | project/ner_retraining/create_splits.py |
import sys
import os
import argparse
sys.path.append(os.path.abspath(os.path.join("project")))
import ner_model
import json
def main(conll_path, output_path, model_path):
ner = ner_model.NerModel(conll_path, model_path)
citations_list = ner.predict_from_publication_list()
with open(output_path, "w") as fp:
json.dump(citations_list, fp)
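# Example invocation (illustrative; the paths are hypothetical):
#   python project/ner_retraining/generate_ner_output.py \
#       --conll_path data/test_concat.conll \
#       --output_path data/ner_output.json \
#       --model_path data/ner_model.tar.gz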
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--conll_path',
)
parser.add_argument(
'--output_path',
)
parser.add_argument(
'--model_path'
)
args = parser.parse_args()
main(args.conll_path,
args.output_path,
args.model_path) | coleridge-rich-context-ai2-master | project/ner_retraining/generate_ner_output.py |
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('classifier')
class ClassifierPredictor(Predictor):
"""
Predictor for the :class:`~allennlp.models.text_classification.Seq2Seq` model.
"""
def predict(self, title: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.
Parameters
----------
question : ``str``
A question about the content in the supplied paragraph. The question must be answerable by a
span in the paragraph.
passage : ``str``
A paragraph of information relevant to the question.
Returns
-------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json({"title" : title})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like ``{"title": "..."}``.
"""
title = json_dict["title"]
return self._dataset_reader.text_to_instance(text=title)
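# Example usage (illustrative; the archive path is hypothetical):
#   archive = load_archive("model_logs/l0_model.tar.gz")
#   predictor = Predictor.from_archive(archive, "classifier")
#   prediction = predictor.predict(title="Estimating poverty rates from household survey data")
#   prediction["label_probs"]  # per-field probabilities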
| coleridge-rich-context-ai2-master | project/field_classifier/predictor.py |
from allennlp.models.archival import load_archive
from allennlp.service.predictors import Predictor
from field_classifier.classifier import Classifier
from field_classifier.predictor import ClassifierPredictor
from field_classifier.textcat import TextCatReader
import os
import json
import numpy as np
l0_archive = load_archive(
os.path.abspath(os.path.join("data", "model_logs", "l0_model.tar.gz"))
)
l0_predictor = Predictor.from_archive(l0_archive, 'classifier')
l1_archive = load_archive(
os.path.abspath(os.path.join("data", "model_logs", "l1_model.tar.gz"))
)
l1_predictor = Predictor.from_archive(l1_archive, 'classifier')
test_pubs = [{"title": "this is a test", "publication_id": 1}]
clf_output = []
l0_label_map = l0_archive.model.vocab.get_index_to_token_vocabulary("labels")
l1_label_map = l1_archive.model.vocab.get_index_to_token_vocabulary("labels")
for test_pub in test_pubs:
l0_prediction = l0_predictor.predict_json({"title": test_pub['title']})
l1_prediction = l1_predictor.predict_json({"title": test_pub['title']})
pred = {}
pred['publication_id'] = test_pub['publication_id']
l0_score = np.max(l0_prediction['label_probs'])
l1_score = np.max(l1_prediction['label_probs'])
l0_field = l0_label_map[np.argmax(l0_prediction['label_probs'])]
l1_field = l1_label_map[np.argmax(l1_prediction['label_probs'])]
if l1_score > 0.4:
output_score = "{}:{}".format(l0_score, l1_score)
output_field = "{}:{}".format(l0_field, l1_field)
else:
output_score = "{}".format(l0_score)
output_field = "{}".format(l0_field)
pred['score'] = output_score
pred['research_field'] = output_field
clf_output.append(pred)
print(clf_output)
| coleridge-rich-context-ai2-master | project/field_classifier/eval_field_classifier.py |
from typing import Any, Dict, List, Optional
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import (get_final_encoder_states, get_text_field_mask,
masked_max, masked_mean)
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("seq2seq_classifier")
class Classifier(Model):
"""
This ``Model`` implements a classifier with a seq2seq encoder of text.
See allennlp.modules.seq2seq_encoders for available encoders.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
Used to encode the text.
aggregations : ``List[str]``
The pooling operations used to aggregate the encoder output; supported values are
``"meanpool"``, ``"maxpool"``, and ``"final_state"``, and the resulting vectors are concatenated.
output_feedforward : ``FeedForward``
Used to prepare the text for prediction.
classification_layer : ``FeedForward``
This feedforward network computes the output logits.
dropout : ``float``, optional (default=0.5)
Dropout probability to use.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
aggregations: List[str],
output_feedforward: FeedForward,
classification_layer: FeedForward,
dropout: float = 0.5,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
if dropout:
self.dropout = torch.nn.Dropout(dropout)
else:
self.dropout = None
self._encoder = encoder
self._aggregations = aggregations
self._output_feedforward = output_feedforward
self._classification_layer = classification_layer
self._num_labels = vocab.get_vocab_size(namespace="labels")
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None # pylint:disable=unused-argument
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
tokens : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Metadata to persist
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing
unnormalized log probabilities of the label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing
probabilities of the label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text = self._text_field_embedder(tokens)
mask = get_text_field_mask(tokens).float()
encoder_output = self._encoder(embedded_text, mask)
encoded_repr = []
for aggregation in self._aggregations:
if aggregation == "meanpool":
broadcast_mask = mask.unsqueeze(-1).float()
context_vectors = encoder_output * broadcast_mask
encoded_text = masked_mean(context_vectors,
broadcast_mask,
dim=1,
keepdim=False)
elif aggregation == 'maxpool':
broadcast_mask = mask.unsqueeze(-1).float()
context_vectors = encoder_output * broadcast_mask
encoded_text = masked_max(context_vectors,
broadcast_mask,
dim=1)
elif aggregation == 'final_state':
is_bi = self._encoder.is_bidirectional()
encoded_text = get_final_encoder_states(encoder_output,
mask,
is_bi)
encoded_repr.append(encoded_text)
encoded_repr = torch.cat(encoded_repr, 1)
if self.dropout:
encoded_repr = self.dropout(encoded_repr)
output_hidden = self._output_feedforward(encoded_repr)
label_logits = self._classification_layer(output_hidden)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits, "label_probs": label_probs}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {'accuracy': self._accuracy.get_metric(reset)}
| coleridge-rich-context-ai2-master | project/field_classifier/classifier.py |
coleridge-rich-context-ai2-master | project/field_classifier/__init__.py |
|
import json
import logging
from typing import Dict, List
import numpy as np
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import (Field, LabelField, ListField, MetadataField,
TextField)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
from overrides import overrides
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("semisupervised_text_classification_json")
class TextCatReader(DatasetReader):
"""
Reads tokens and their labels from a labeled text classification dataset.
Expects a "tokens" field and a "category" field in JSON format.
The output of ``read`` is a list of ``Instance`` s with the fields:
tokens: ``TextField`` and
label: ``LabelField``
Parameters
----------
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text.
See :class:`TokenIndexer`.
tokenizer : ``Tokenizer``, optional (default = ``WordTokenizer()``)
Tokenizer to use to split the input text into words or other kinds of tokens.
segment_sentences: ``bool``, optional (default = ``False``)
If True, we will first segment the text into sentences using SpaCy and then tokenize words.
Necessary for some models that require pre-segmentation of sentences,
like the Hierarchical Attention Network.
sequence_length: ``int``, optional (default = ``None``)
If specified, will truncate tokens to specified maximum length.
ignore_labels: ``bool``, optional (default = ``False``)
If specified, will ignore labels when reading data, useful for semi-supervised textcat
skip_label_indexing: ``bool``, optional (default = ``False``)
Whether or not to skip label indexing. You might want to skip label indexing if your
labels are numbers, so the dataset reader doesn't re-number them starting from 0.
lazy : ``bool``, optional, (default = ``False``)
Whether or not instances can be read lazily.
"""
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
tokenizer: Tokenizer = None,
unrestricted_tokenizer: Tokenizer = None,
segment_sentences: bool = False,
sequence_length: int = None,
ignore_labels: bool = False,
skip_label_indexing: bool = False,
sample: int = None,
unlabeled_data_path: str = None,
lazy: bool = False) -> None:
super().__init__(lazy=lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._unrestricted_tokenizer = unrestricted_tokenizer
self._sample = sample
self._segment_sentences = segment_sentences
self._sequence_length = sequence_length
self._ignore_labels = ignore_labels
self._skip_label_indexing = skip_label_indexing
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self._unlabeled_data_path = unlabeled_data_path
if self._segment_sentences:
self._sentence_segmenter = SpacySentenceSplitter()
def _reservoir_sampling(self, file_):
"""
reservoir sampling for reading random lines from file without loading
entire file into memory
See here for explanation of algorithm:
https://stackoverflow.com/questions/35680236/select-100-random-lines-from-a-file-with-a-1-million-which-cant-be-read-into-me
Parameters
----------
file_ : an open file object (or any other iterable of lines)
The sample size is taken from ``self._sample``.
Returns
-------
result : `List[str]` - sample lines of file
"""
file_iterator = iter(file_)
try:
result = [next(file_iterator) for _ in range(self._sample)]
except StopIteration:
raise ValueError("Sample larger than population")
for index, item in enumerate(file_iterator, start=self._sample):
# `index` is the 0-based position of `item` in the file; draw uniformly from
# [0, index] so the item is kept with probability self._sample / (index + 1).
sample_index = np.random.randint(0, index + 1)
if sample_index < self._sample:
result[sample_index] = item
np.random.shuffle(result)
return result
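# Net effect: every line of the file ends up in `result` with equal probability
# self._sample / N, where N is the total number of lines, without ever holding
# more than self._sample lines in memory.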
@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
if self._sample is not None:
lines = [(item, False) for item in self._reservoir_sampling(data_file)]
else:
lines = [(item, True) for item in data_file.readlines()]
if self._unlabeled_data_path:
with open(cached_path(self._unlabeled_data_path)) as data_file:
lines += [(item, False) for item in data_file.readlines()]
for line, is_labeled in lines:
items = json.loads(line)
text = items["tokens"]
label = str(items['category'])
instance = self.text_to_instance(text=text, label=label, is_labeled=is_labeled)
if instance is not None:
yield instance
def _truncate(self, tokens):
"""
truncate a set of tokens using the provided sequence length
"""
if len(tokens) > self._sequence_length:
tokens = tokens[:self._sequence_length]
return tokens
@overrides
def text_to_instance(self, text: str, label: str = None, is_labeled: bool = False) -> Instance: # type: ignore
"""
Parameters
----------
text : ``str``, required.
The text to classify
label : ``str``, optional (default = None)
The label for this text.
Returns
-------
An ``Instance`` containing the following fields:
tokens : ``TextField``
The tokens in the sentence or phrase.
label : ``LabelField``
The label of the sentence or phrase.
"""
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
if self._segment_sentences:
sentences: List[Field] = []
sentence_splits = self._sentence_segmenter.split_sentences(text)
for sentence in sentence_splits:
word_tokens = self._tokenizer.tokenize(sentence)
if self._sequence_length is not None:
word_tokens = self._truncate(word_tokens)
sentences.append(TextField(word_tokens, self._token_indexers))
fields['tokens'] = ListField(sentences)
else:
tokens = self._tokenizer.tokenize(text)
if self._sequence_length is not None:
tokens = self._truncate(tokens)
fields['tokens'] = TextField(tokens, self._token_indexers)
if self._unrestricted_tokenizer:
unrestricted_tokens = self._unrestricted_tokenizer.tokenize(text)
if self._sequence_length is not None:
unrestricted_tokens = self._truncate(unrestricted_tokens)
fields['filtered_tokens'] = TextField(unrestricted_tokens, self._token_indexers)
# TODO: Document 'default' unsupervised label as pre-condition.
if label is not None:
fields['label'] = LabelField(label, skip_indexing=self._skip_label_indexing)
fields['metadata'] = MetadataField({"is_labeled": is_labeled})
return Instance(fields)
| coleridge-rich-context-ai2-master | project/field_classifier/textcat.py |
coleridge-rich-context-ai2-master | project/s2base/__init__.py |
|
import spacy
class SciSpaCyParser(object):
def __init__(self):
self.nlp = spacy.load('en_scispacy_core_web_sm')
def remove_new_lines(self, text):
"""Used to preprocess away new lines in the middle of words. This function
is intended to be called on a raw string before it is passed through a
spaCy pipeline
@param text: a string of text to be processed
"""
text = text.replace("-\n\n", "")
text = text.replace("- \n\n", "")
text = text.replace("-\n", "")
text = text.replace("- \n", "")
return text
def preprocess_text(self, text):
"""Function to preprocess text before passing it on to spacy
@param text: the raw text to process
"""
text = self.remove_new_lines(text)
return text
def postprocess_doc(self, doc):
"""Function to postprocess a doc before returning it for use.
This post processing could be done by converting the doc to an array,
processing out what you don't want, and then converting the array back
to a doc.
@param doc: a spacy processed doc
"""
return doc
def scispacy_create_doc(self, text):
"""Function to use SciSpaCy instead of spaCy. Intended usage is to replace
instances of `nlp = spacy.load("<model_name>")` with `nlp = scispacy_create_doc`
@param text: the text to be processed into a spacy doc
"""
text = self.preprocess_text(text)
doc = self.nlp(text)
doc = self.postprocess_doc(doc)
return doc
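# Example usage (illustrative):
#   parser = SciSpaCyParser()
#   doc = parser.scispacy_create_doc("Text extracted from a PDF, possibly con-\ntaining hyphenated line breaks.")
#   tokens = [token.text for token in doc]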
| coleridge-rich-context-ai2-master | project/s2base/scispacy_util.py |
import argparse
import itertools
import ir_datasets
import abnirml
_logger = ir_datasets.log.easy()
def flush(fout, qid, docs):
scored_docs = [(score, did) for did, score in docs.items()]
for i, (score, did) in enumerate(sorted(scored_docs, reverse=True)):
fout.write(f'{qid} 0 {did} {i+1} {score} run\n')
fout.flush()
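# Each call emits standard TREC run lines of the form
# "<query_id> 0 <doc_id> <rank> <score> run", e.g. (values illustrative):
#   1037798 0 7067032 1 14.27 run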
def iter_scores(scoreddocs, scorer, query_lookup, doc_store):
it1, it2 = itertools.tee(scoreddocs)
def score_input_iter():
for record in it1:
yield query_lookup[record.query_id], doc_store.get(record.doc_id, 'text')
score_iter = scorer.score_iter(score_input_iter())
for record, score in zip(it2, score_iter):
yield record.query_id, record.doc_id, score
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dataset')
parser.add_argument('scorer')
parser.add_argument('output')
args = parser.parse_args()
dataset = ir_datasets.load(args.dataset)
scorer = abnirml.SCORERS[args.scorer] # remove cacher
query_lookup = {q.query_id: q.text for q in dataset.queries_iter()}
doc_store = dataset.docs_store()
last_qid = None
doc_scores = None
with open(args.output, 'wt') as fout:
results = {}
for qid, did, score in iter_scores(dataset.scoreddocs_iter(), scorer, query_lookup, doc_store):
if qid not in results:
results[qid] = {}
results[qid][did] = score
for qid, doc_scores in results.items():
flush(fout, qid, doc_scores)
if __name__ == '__main__':
main()
| abnirml-master | abnirml/rerank.py |
import os
import atexit
import shutil
import tempfile
from contextlib import contextmanager
import threading
import pyterrier as pt
import ir_datasets
_logger = ir_datasets.log.easy()
class _JavaInterface:
def __init__(self):
self._autoclass = None
self.cast = None
self.JavaException = None
self._defs = {}
self._cache = {}
self._jars = []
self._log_listeners = []
def register(self, jars=None, defs=None):
if jars is not None:
if self._autoclass is None:
for jar in jars:
self._jars.append(jar)
else:
raise RuntimeError('Cannot add JAR after jnius has been initialized.')
if defs is not None:
for n, path in defs.items():
self._defs[n] = path
def add_log_listener(self, func):
self._log_listeners.insert(0, func)
@contextmanager
def listen_java_log(self, func):
self._log_listeners.insert(0, func)
yield
self._log_listeners.pop(0)
def __getattr__(self, key):
if self._autoclass is None:
self.initialize()
if key not in self._cache:
self._cache[key] = self._autoclass(self._defs[key])
return self._cache[key]
def initialize(self):
if not pt.started():
with _logger.duration('java init'):
log_fifo, l4j12_file, l4j24_file = self._init_java_logger_interface()
import jnius_config
jnius_config.set_classpath(*self._jars, os.path.dirname(l4j12_file))
jnius_config.add_options(f'-Dlog4j.configuration={l4j12_file}')
jnius_config.add_options(f'-Dlog4j.configurationFile={l4j24_file}')
pt.init()
from jnius import autoclass, cast, JavaException
self._autoclass = autoclass
self.cast = cast
self.JavaException = JavaException
for key, path in self._defs.items():
self._cache[key] = self._autoclass(path)
def _java_log_listen(self, fifo):
while True:
with open(fifo) as f:
buf = ''
for line in f:
if line.rstrip() == '':
buf = buf.rstrip()
for listener in self._log_listeners:
result = listener(buf)
if result == False:
break
buf = ''
else:
buf += line
def _init_java_logger_interface(self):
base_tmp = tempfile.mkdtemp()
atexit.register(shutil.rmtree, base_tmp)
l4j12_config_file = os.path.join(base_tmp, 'log4j.properties')
l4j24_config_file = os.path.join(base_tmp, 'log4j24.xml')
log_fifo = os.path.join(base_tmp, 'log_interface.fifo')
os.mkfifo(log_fifo)
log_thread = threading.Thread(target=self._java_log_listen, args=(log_fifo,), daemon=True)
log_thread.start()
with open(l4j12_config_file, 'wt') as f:
f.write(f'''
log4j.rootLogger=fifo
log4j.appender.fifo=org.apache.log4j.FileAppender
log4j.appender.fifo.fileName={log_fifo}
log4j.appender.fifo.layout=org.apache.log4j.PatternLayout
log4j.appender.fifo.layout.ConversionPattern=%p %c [%t] %m%n%n
''')
with open(l4j24_config_file, 'wt') as f:
f.write(f'''<?xml version="1.0" encoding="UTF-8"?>
<Configuration>
<Appenders>
<File name="LogFifo" fileName="{log_fifo}">
<PatternLayout>
<Pattern>%p %c [%t] %m%n%n</Pattern>
</PatternLayout>
</File>
</Appenders>
<Loggers>
<Root level="all">
<AppenderRef ref="LogFifo"/>
</Root>
</Loggers>
</Configuration>
''')
return log_fifo, l4j12_config_file, l4j24_config_file
J = _JavaInterface()
J.register(defs=dict(
# Core
System='java.lang.System',
Array='java.lang.reflect.Array',
Arrays='java.util.Arrays',
ArrayList='java.util.ArrayList',
Integer='java.lang.Integer',
Float='java.lang.Float',
File='java.io.File',
Thread='java.lang.Thread',
StringReader='java.io.StringReader',
))
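# Example usage (illustrative; jnius/PyTerrier must be installed and any required JARs registered):
#   J.register(defs=dict(HashMap='java.util.HashMap'))
#   m = J.HashMap()          # first attribute access triggers J.initialize()
#   m.put('key', 'value')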
| abnirml-master | abnirml/java.py |
import ir_datasets
_logger = ir_datasets.log.easy()
_logger.logger().setLevel(20) # INFO
from . import datasets
from . import probes
from . import scorers
from . import indices
from . import eval as ev
from . import util
from pathlib import Path
import pyterrier as pt
from abnirml.java import J
J.initialize()
ProbeExperiment = ev.ProbeExperiment
Probe = probes.Probe
Scorer = scorers.Scorer
NeuralScorer = scorers.NeuralScorer
PyTerrierScorer = scorers.PyTerrierScorer
CachedScorer = scorers.CachedScorer
JflegProbe = probes.JflegProbe
trecdl19 = indices.PtIndexWrapper(ir_datasets.load('msmarco-passage/trec-dl-2019'))
trecdl19_index = trecdl19.docs_ptindex()
antiquetest = indices.PtIndexWrapper(ir_datasets.load('antique/test'))
base_path = Path.home()/'.abnirml'
if not base_path.exists():
base_path.mkdir(parents=True, exist_ok=True)
if not (base_path/'cache'/'scorers').exists():
(base_path/'cache'/'scorers').mkdir(parents=True, exist_ok=True)
if not (base_path/'cache'/'probes').exists():
(base_path/'cache'/'probes').mkdir(parents=True, exist_ok=True)
PROBES = {
'CV-DL19-Rel-Len': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Rel, var=probes.const_var.Len),
'CV-DL19-Rel-Tf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Rel, var=probes.const_var.Tf),
'CV-DL19-Rel-SumTf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Rel, var=probes.const_var.SumTf),
'CV-DL19-Rel-Overlap': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Rel, var=probes.const_var.Overlap),
'CV-DL19-Len-Rel': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Len, var=probes.const_var.Rel),
'CV-DL19-Len-Tf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Len, var=probes.const_var.Tf),
'CV-DL19-Len-SumTf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Len, var=probes.const_var.SumTf),
'CV-DL19-Len-Overlap': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Len, var=probes.const_var.Overlap),
'CV-DL19-Tf-Rel': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Tf, var=probes.const_var.Rel),
'CV-DL19-Tf-Len': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Tf, var=probes.const_var.Len),
'CV-DL19-Tf-SumTf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Tf, var=probes.const_var.SumTf),
'CV-DL19-Tf-Overlap': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Tf, var=probes.const_var.Overlap),
'CV-DL19-SumTf-Rel': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.SumTf, var=probes.const_var.Rel),
'CV-DL19-SumTf-Len': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.SumTf, var=probes.const_var.Len),
'CV-DL19-SumTf-Tf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.SumTf, var=probes.const_var.Tf),
'CV-DL19-SumTf-Overlap': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.SumTf, var=probes.const_var.Overlap),
'CV-DL19-Overlap-Rel': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Overlap, var=probes.const_var.Rel),
'CV-DL19-Overlap-Len': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Overlap, var=probes.const_var.Len),
'CV-DL19-Overlap-Tf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Overlap, var=probes.const_var.Tf),
'CV-DL19-Overlap-SumTf': probes.const_var.ConstVarQrelsProbe(trecdl19, const=probes.const_var.Overlap, var=probes.const_var.SumTf),
'TR-DL19-ShufWords': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWords()),
'TR-DL19-ShufSents': probes.transform.TransformProbe(trecdl19, probes.transform.ShufSents()),
'TR-DL19-ReverseSents': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseSents()),
'TR-DL19-ReverseWords': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseWords()),
'TR-DL19-ShufWordsKeepSents': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepSents()),
'TR-DL19-ShufWordsKeepSentsAndNPs': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepSentsAndNPs()),
'TR-DL19-ShufWordsKeepNPs': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepNPs()),
'TR-DL19-ShufNPSlots': probes.transform.TransformProbe(trecdl19, probes.transform.ShufNPSlots()),
'TR-DL19-ShufPrepositions': probes.transform.TransformProbe(trecdl19, probes.transform.ShufPrepositions()),
'TR-DL19-ReverseNPSlots': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseNPSlots()),
'TR-DL19-SwapNumNPSlots2': probes.transform.TransformProbe(trecdl19, probes.transform.SwapNumNPSlots2()),
'TR-DL19-CaseFold': probes.transform.TransformProbe(trecdl19, probes.transform.CaseFold()),
'TR-DL19-Lemma': probes.transform.TransformProbe(trecdl19, probes.transform.Lemmatize()),
'TR-DL19-DelPunct': probes.transform.TransformProbe(trecdl19, probes.transform.DelPunct()),
'TR-DL19-DelSent-start': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('start')),
'TR-DL19-DelSent-end': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('end')),
'TR-DL19-DelSent-rand': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('rand')),
'TR-DL19-AddSent-start': probes.transform.TransformProbe(trecdl19, probes.transform.AddSent('start')),
'TR-DL19-AddSent-end': probes.transform.TransformProbe(trecdl19, probes.transform.AddSent('end')),
'TR-DL19-DocTTTTTQuery': probes.transform.TransformProbe(trecdl19, probes.transform.DocTTTTTQuery()),
'TR-DL19-Query': probes.transform.TransformProbe(trecdl19, probes.transform.Query()),
'TR-DL19-Typo': probes.transform.TransformProbe(trecdl19, probes.transform.Typo()),
'TR-DL19-Typo-nostops': probes.transform.TransformProbe(trecdl19, probes.transform.Typo(no_stops=True)),
'TR-DL19-DelStops': probes.transform.TransformProbe(trecdl19, probes.transform.RmStops()),
'TR-DL19-DelStops-DelPunct': probes.transform.TransformProbe(trecdl19, probes.transform.Multi([probes.transform.RmStops(), probes.transform.DelPunct()])),
'TR-DL19-nrel-ShufWords': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWords(), rel_range=(0, 1)),
'TR-DL19-nrel-ShufSents': probes.transform.TransformProbe(trecdl19, probes.transform.ShufSents(), rel_range=(0, 1)),
'TR-DL19-nrel-ReverseSents': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseSents(), rel_range=(0, 1)),
'TR-DL19-nrel-ReverseWords': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseWords(), rel_range=(0, 1)),
'TR-DL19-nrel-ShufWordsKeepSents': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepSents(), rel_range=(0, 1)),
'TR-DL19-nrel-ShufWordsKeepSentsAndNPs': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepSentsAndNPs(), rel_range=(0, 1)),
'TR-DL19-nrel-ShufWordsKeepNPs': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepNPs(), rel_range=(0, 1)),
'TR-DL19-nrel-ShufNPSlots': probes.transform.TransformProbe(trecdl19, probes.transform.ShufNPSlots(), rel_range=(0, 1)),
'TR-DL19-nrel-ShufPrepositions': probes.transform.TransformProbe(trecdl19, probes.transform.ShufPrepositions(), rel_range=(0, 1)),
'TR-DL19-nrel-ReverseNPSlots': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseNPSlots(), rel_range=(0, 1)),
'TR-DL19-nrel-SwapNumNPSlots2': probes.transform.TransformProbe(trecdl19, probes.transform.SwapNumNPSlots2(), rel_range=(0, 1)),
'TR-DL19-nrel-CaseFold': probes.transform.TransformProbe(trecdl19, probes.transform.CaseFold(), rel_range=(0, 1)),
'TR-DL19-nrel-Lemma': probes.transform.TransformProbe(trecdl19, probes.transform.Lemmatize(), rel_range=(0, 1)),
'TR-DL19-nrel-DelPunct': probes.transform.TransformProbe(trecdl19, probes.transform.DelPunct(), rel_range=(0, 1)),
'TR-DL19-nrel-DelSent-start': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('start'), rel_range=(0, 1)),
'TR-DL19-nrel-DelSent-end': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('end'), rel_range=(0, 1)),
'TR-DL19-nrel-DelSent-rand': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('rand'), rel_range=(0, 1)),
'TR-DL19-nrel-AddSent-start': probes.transform.TransformProbe(trecdl19, probes.transform.AddSent('start'), rel_range=(0, 1)),
'TR-DL19-nrel-AddSent-end': probes.transform.TransformProbe(trecdl19, probes.transform.AddSent('end'), rel_range=(0, 1)),
'TR-DL19-nrel-DocTTTTTQuery': probes.transform.TransformProbe(trecdl19, probes.transform.DocTTTTTQuery(), rel_range=(0, 1)),
'TR-DL19-nrel-Query': probes.transform.TransformProbe(trecdl19, probes.transform.Query(), rel_range=(0, 1)),
'TR-DL19-nrel-Typo': probes.transform.TransformProbe(trecdl19, probes.transform.Typo(), rel_range=(0, 1)),
'TR-DL19-nrel-DelStops': probes.transform.TransformProbe(trecdl19, probes.transform.RmStops(), rel_range=(0, 1)),
'TR-DL19-nrel-DelStops-DelPunct': probes.transform.TransformProbe(trecdl19, probes.transform.Multi([probes.transform.RmStops(), probes.transform.DelPunct()]), rel_range=(0, 1)),
'TR-DL19-rel-ShufWords': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWords(), rel_range=(2, 3)),
'TR-DL19-rel-ShufSents': probes.transform.TransformProbe(trecdl19, probes.transform.ShufSents(), rel_range=(2, 3)),
'TR-DL19-rel-ReverseSents': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseSents(), rel_range=(2, 3)),
'TR-DL19-rel-ReverseWords': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseWords(), rel_range=(2, 3)),
'TR-DL19-rel-ShufWordsKeepSents': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepSents(), rel_range=(2, 3)),
'TR-DL19-rel-ShufWordsKeepSentsAndNPs': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepSentsAndNPs(), rel_range=(2, 3)),
'TR-DL19-rel-ShufWordsKeepNPs': probes.transform.TransformProbe(trecdl19, probes.transform.ShufWordsKeepNPs(), rel_range=(2, 3)),
'TR-DL19-rel-ShufNPSlots': probes.transform.TransformProbe(trecdl19, probes.transform.ShufNPSlots(), rel_range=(2, 3)),
'TR-DL19-rel-ShufPrepositions': probes.transform.TransformProbe(trecdl19, probes.transform.ShufPrepositions(), rel_range=(2, 3)),
'TR-DL19-rel-ReverseNPSlots': probes.transform.TransformProbe(trecdl19, probes.transform.ReverseNPSlots(), rel_range=(2, 3)),
'TR-DL19-rel-SwapNumNPSlots2': probes.transform.TransformProbe(trecdl19, probes.transform.SwapNumNPSlots2(), rel_range=(2, 3)),
'TR-DL19-rel-CaseFold': probes.transform.TransformProbe(trecdl19, probes.transform.CaseFold(), rel_range=(2, 3)),
'TR-DL19-rel-Lemma': probes.transform.TransformProbe(trecdl19, probes.transform.Lemmatize(), rel_range=(2, 3)),
'TR-DL19-rel-DelPunct': probes.transform.TransformProbe(trecdl19, probes.transform.DelPunct(), rel_range=(2, 3)),
'TR-DL19-rel-DelSent-start': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('start'), rel_range=(2, 3)),
'TR-DL19-rel-DelSent-end': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('end'), rel_range=(2, 3)),
'TR-DL19-rel-DelSent-rand': probes.transform.TransformProbe(trecdl19, probes.transform.DelSent('rand'), rel_range=(2, 3)),
'TR-DL19-rel-AddSent-start': probes.transform.TransformProbe(trecdl19, probes.transform.AddSent('start'), rel_range=(2, 3)),
'TR-DL19-rel-AddSent-end': probes.transform.TransformProbe(trecdl19, probes.transform.AddSent('end'), rel_range=(2, 3)),
'TR-DL19-rel-DocTTTTTQuery': probes.transform.TransformProbe(trecdl19, probes.transform.DocTTTTTQuery(), rel_range=(2, 3)),
'TR-DL19-rel-Query': probes.transform.TransformProbe(trecdl19, probes.transform.Query(), rel_range=(2, 3)),
'TR-DL19-rel-Typo': probes.transform.TransformProbe(trecdl19, probes.transform.Typo(), rel_range=(2, 3)),
'TR-DL19-rel-DelStops': probes.transform.TransformProbe(trecdl19, probes.transform.RmStops(), rel_range=(2, 3)),
'TR-DL19-rel-DelStops-DelPunct': probes.transform.TransformProbe(trecdl19, probes.transform.Multi([probes.transform.RmStops(), probes.transform.DelPunct()]), rel_range=(2, 3)),
'CV-ANT-Rel-Len': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Rel, var=probes.const_var.Len),
'CV-ANT-Rel-Tf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Rel, var=probes.const_var.Tf),
'CV-ANT-Rel-SumTf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Rel, var=probes.const_var.SumTf),
'CV-ANT-Rel-Overlap': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Rel, var=probes.const_var.Overlap),
'CV-ANT-Len-Rel': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Len, var=probes.const_var.Rel),
'CV-ANT-Len-Tf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Len, var=probes.const_var.Tf),
'CV-ANT-Len-SumTf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Len, var=probes.const_var.SumTf),
'CV-ANT-Len-Overlap': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Len, var=probes.const_var.Overlap),
'CV-ANT-Tf-Rel': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Tf, var=probes.const_var.Rel),
'CV-ANT-Tf-Len': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Tf, var=probes.const_var.Len),
'CV-ANT-Tf-SumTf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Tf, var=probes.const_var.SumTf),
'CV-ANT-Tf-Overlap': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Tf, var=probes.const_var.Overlap),
'CV-ANT-SumTf-Rel': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.SumTf, var=probes.const_var.Rel),
'CV-ANT-SumTf-Len': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.SumTf, var=probes.const_var.Len),
'CV-ANT-SumTf-Tf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.SumTf, var=probes.const_var.Tf),
'CV-ANT-SumTf-Overlap': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.SumTf, var=probes.const_var.Overlap),
'CV-ANT-Overlap-Rel': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Overlap, var=probes.const_var.Rel),
'CV-ANT-Overlap-Len': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Overlap, var=probes.const_var.Len),
'CV-ANT-Overlap-Tf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Overlap, var=probes.const_var.Tf),
'CV-ANT-Overlap-SumTf': probes.const_var.ConstVarQrelsProbe(antiquetest, const=probes.const_var.Overlap, var=probes.const_var.SumTf),
'TR-ANT-ShufWords': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWords()),
'TR-ANT-ShufSents': probes.transform.TransformProbe(antiquetest, probes.transform.ShufSents()),
'TR-ANT-ReverseSents': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseSents()),
'TR-ANT-ReverseWords': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseWords()),
'TR-ANT-ShufWordsKeepSents': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepSents()),
'TR-ANT-ShufWordsKeepSentsAndNPs': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepSentsAndNPs()),
'TR-ANT-ShufWordsKeepNPs': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepNPs()),
'TR-ANT-ShufNPSlots': probes.transform.TransformProbe(antiquetest, probes.transform.ShufNPSlots()),
'TR-ANT-ShufPrepositions': probes.transform.TransformProbe(antiquetest, probes.transform.ShufPrepositions()),
'TR-ANT-ReverseNPSlots': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseNPSlots()),
'TR-ANT-SwapNumNPSlots2': probes.transform.TransformProbe(antiquetest, probes.transform.SwapNumNPSlots2()),
'TR-ANT-CaseFold': probes.transform.TransformProbe(antiquetest, probes.transform.CaseFold()),
'TR-ANT-Lemma': probes.transform.TransformProbe(antiquetest, probes.transform.Lemmatize()),
'TR-ANT-DelPunct': probes.transform.TransformProbe(antiquetest, probes.transform.DelPunct()),
'TR-ANT-DelSent-start': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('start')),
'TR-ANT-DelSent-end': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('end')),
'TR-ANT-DelSent-rand': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('rand')),
'TR-ANT-AddSent-start': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('start')),
'TR-ANT-AddSent-end': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('end')),
'TR-ANT-AddSent1-end': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('end', rel=1)),
'TR-ANT-DocTTTTTQuery': probes.transform.TransformProbe(antiquetest, probes.transform.DocTTTTTQuery()),
'TR-ANT-Query': probes.transform.TransformProbe(antiquetest, probes.transform.Query()),
'TR-ANT-Typo': probes.transform.TransformProbe(antiquetest, probes.transform.Typo()),
'TR-ANT-Typo-nostops': probes.transform.TransformProbe(antiquetest, probes.transform.Typo(no_stops=True)),
'TR-ANT-DelStops': probes.transform.TransformProbe(antiquetest, probes.transform.RmStops()),
'TR-ANT-DelStops-DelPunct': probes.transform.TransformProbe(antiquetest, probes.transform.Multi([probes.transform.RmStops(), probes.transform.DelPunct()])),
'TR-ANT-nrel-ShufWords': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWords(), rel_range=(0, 1)),
'TR-ANT-nrel-ShufSents': probes.transform.TransformProbe(antiquetest, probes.transform.ShufSents(), rel_range=(0, 1)),
'TR-ANT-nrel-ReverseSents': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseSents(), rel_range=(0, 1)),
'TR-ANT-nrel-ReverseWords': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseWords(), rel_range=(0, 1)),
'TR-ANT-nrel-ShufWordsKeepSents': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepSents(), rel_range=(0, 1)),
'TR-ANT-nrel-ShufWordsKeepSentsAndNPs': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepSentsAndNPs(), rel_range=(0, 1)),
'TR-ANT-nrel-ShufWordsKeepNPs': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepNPs(), rel_range=(0, 1)),
'TR-ANT-nrel-ShufNPSlots': probes.transform.TransformProbe(antiquetest, probes.transform.ShufNPSlots(), rel_range=(0, 1)),
'TR-ANT-nrel-ShufPrepositions': probes.transform.TransformProbe(antiquetest, probes.transform.ShufPrepositions(), rel_range=(0, 1)),
'TR-ANT-nrel-ReverseNPSlots': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseNPSlots(), rel_range=(0, 1)),
'TR-ANT-nrel-SwapNumNPSlots2': probes.transform.TransformProbe(antiquetest, probes.transform.SwapNumNPSlots2(), rel_range=(0, 1)),
'TR-ANT-nrel-CaseFold': probes.transform.TransformProbe(antiquetest, probes.transform.CaseFold(), rel_range=(0, 1)),
'TR-ANT-nrel-Lemma': probes.transform.TransformProbe(antiquetest, probes.transform.Lemmatize(), rel_range=(0, 1)),
'TR-ANT-nrel-DelPunct': probes.transform.TransformProbe(antiquetest, probes.transform.DelPunct(), rel_range=(0, 1)),
'TR-ANT-nrel-DelSent-start': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('start'), rel_range=(0, 1)),
'TR-ANT-nrel-DelSent-end': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('end'), rel_range=(0, 1)),
'TR-ANT-nrel-DelSent-rand': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('rand'), rel_range=(0, 1)),
'TR-ANT-nrel-AddSent-start': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('start'), rel_range=(0, 1)),
'TR-ANT-nrel-AddSent-end': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('end'), rel_range=(0, 1)),
'TR-ANT-nrel-DocTTTTTQuery': probes.transform.TransformProbe(antiquetest, probes.transform.DocTTTTTQuery(), rel_range=(0, 1)),
'TR-ANT-nrel-Query': probes.transform.TransformProbe(antiquetest, probes.transform.Query(), rel_range=(0, 1)),
'TR-ANT-nrel-Typo': probes.transform.TransformProbe(antiquetest, probes.transform.Typo(), rel_range=(0, 1)),
'TR-ANT-nrel-DelStops': probes.transform.TransformProbe(antiquetest, probes.transform.RmStops(), rel_range=(0, 1)),
'TR-ANT-nrel-DelStops-DelPunct': probes.transform.TransformProbe(antiquetest, probes.transform.Multi([probes.transform.RmStops(), probes.transform.DelPunct()]), rel_range=(0, 1)),
'TR-ANT-rel-ShufWords': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWords(), rel_range=(2, 3)),
'TR-ANT-rel-ShufSents': probes.transform.TransformProbe(antiquetest, probes.transform.ShufSents(), rel_range=(2, 3)),
'TR-ANT-rel-ReverseSents': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseSents(), rel_range=(2, 3)),
'TR-ANT-rel-ReverseWords': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseWords(), rel_range=(2, 3)),
'TR-ANT-rel-ShufWordsKeepSents': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepSents(), rel_range=(2, 3)),
'TR-ANT-rel-ShufWordsKeepSentsAndNPs': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepSentsAndNPs(), rel_range=(2, 3)),
'TR-ANT-rel-ShufWordsKeepNPs': probes.transform.TransformProbe(antiquetest, probes.transform.ShufWordsKeepNPs(), rel_range=(2, 3)),
'TR-ANT-rel-ShufNPSlots': probes.transform.TransformProbe(antiquetest, probes.transform.ShufNPSlots(), rel_range=(2, 3)),
'TR-ANT-rel-ShufPrepositions': probes.transform.TransformProbe(antiquetest, probes.transform.ShufPrepositions(), rel_range=(2, 3)),
'TR-ANT-rel-ReverseNPSlots': probes.transform.TransformProbe(antiquetest, probes.transform.ReverseNPSlots(), rel_range=(2, 3)),
'TR-ANT-rel-SwapNumNPSlots2': probes.transform.TransformProbe(antiquetest, probes.transform.SwapNumNPSlots2(), rel_range=(2, 3)),
'TR-ANT-rel-CaseFold': probes.transform.TransformProbe(antiquetest, probes.transform.CaseFold(), rel_range=(2, 3)),
'TR-ANT-rel-Lemma': probes.transform.TransformProbe(antiquetest, probes.transform.Lemmatize(), rel_range=(2, 3)),
'TR-ANT-rel-DelPunct': probes.transform.TransformProbe(antiquetest, probes.transform.DelPunct(), rel_range=(2, 3)),
'TR-ANT-rel-DelSent-start': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('start'), rel_range=(2, 3)),
'TR-ANT-rel-DelSent-end': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('end'), rel_range=(2, 3)),
'TR-ANT-rel-DelSent-rand': probes.transform.TransformProbe(antiquetest, probes.transform.DelSent('rand'), rel_range=(2, 3)),
'TR-ANT-rel-AddSent-start': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('start'), rel_range=(2, 3)),
'TR-ANT-rel-AddSent-end': probes.transform.TransformProbe(antiquetest, probes.transform.AddSent('end'), rel_range=(2, 3)),
'TR-ANT-rel-DocTTTTTQuery': probes.transform.TransformProbe(antiquetest, probes.transform.DocTTTTTQuery(), rel_range=(2, 3)),
'TR-ANT-rel-Query': probes.transform.TransformProbe(antiquetest, probes.transform.Query(), rel_range=(2, 3)),
'TR-ANT-rel-Typo': probes.transform.TransformProbe(antiquetest, probes.transform.Typo(), rel_range=(2, 3)),
'TR-ANT-rel-DelStops': probes.transform.TransformProbe(antiquetest, probes.transform.RmStops(), rel_range=(2, 3)),
'TR-ANT-rel-DelStops-DelPunct': probes.transform.TransformProbe(antiquetest, probes.transform.Multi([probes.transform.RmStops(), probes.transform.DelPunct()]), rel_range=(2, 3)),
'DS-Jfleg': probes.JflegProbe(),
'DS-Jfleg-sp': probes.JflegProbe('abnirml:jfleg/sp'),
'DS-Gyafc': probes.GyafcProbe(),
'DS-Gyafc-family': probes.GyafcProbe(genre_filter='Family_Relationships'),
'DS-Gyafc-enter': probes.GyafcProbe(genre_filter='Entertainment_Music'),
'DS-CnnDmProbe-cnn': probes.CnnDmProbe(source='cnn'),
'DS-CnnDmProbe-dm': probes.CnnDmProbe(source='dm'),
'DS-XSum': probes.XSumProbe(),
'DS-Bias': probes.BiasProbe(),
'DS-Paraphrase-mspc': probes.ParaphraseProbe('abnirml:mspc'),
'DS-Factuality-nq-tertok-train-PERSON': probes.FactualityProbe('dpr-w100/natural-questions/train', valid_entities=('PERSON',), tokenizer=trecdl19_index),
'DS-Factuality-nq-tertok-train-GPE': probes.FactualityProbe('dpr-w100/natural-questions/train', valid_entities=('GPE',), tokenizer=trecdl19_index),
'DS-Factuality-nq-tertok-train-LOC': probes.FactualityProbe('dpr-w100/natural-questions/train', valid_entities=('LOC',), tokenizer=trecdl19_index),
'DS-Factuality-nq-tertok-train-NORP': probes.FactualityProbe('dpr-w100/natural-questions/train', valid_entities=('NORP',), tokenizer=trecdl19_index),
'DS-Factuality-nq-tertok-train-ORG': probes.FactualityProbe('dpr-w100/natural-questions/train', valid_entities=('ORG',), tokenizer=trecdl19_index),
'DS-Simplification-wikiturk': probes.SimplificationProbe('abnirml:wikiturk'),
}
PROBES = {k: probes.CachedProbe(v, base_path/'cache'/'probes'/f'{k}.cache') for k, v in PROBES.items()}
def _monot5():
import pyterrier_t5
return pyterrier_t5.MonoT5ReRanker(batch_size=64, verbose=False)
def _monot5_large():
import pyterrier_t5
return pyterrier_t5.MonoT5ReRanker(model='castorini/monot5-large-msmarco', batch_size=32, verbose=False)
def _colbert():
from pyterrier_colbert.ranking import ColBERTFactory
pt_colbert = ColBERTFactory("http://www.dcs.gla.ac.uk/~craigm/colbert.dnn.zip", None, None)
return pt_colbert.text_scorer()
def _wmd():
from pyterrier_gensim import WmdScorer
import logging
logging.getLogger('gensim').level = 999
return WmdScorer("glove-wiki-gigaword-100", verbose=False)
def _ance():
import pyterrier_ance
return pyterrier_ance.ANCEReRanker('ance_checkpoint')
def _sbert_bi(model_name):
def wrapped():
import pyterrier_sbert
return pyterrier_sbert.SbertBiScorer(model_name)
return wrapped
def _onir(f, gpu=True):
def wrapped():
import onir_pt
return onir_pt.reranker.from_checkpoint(f, config={'verbose': False, 'batch_size': 64, 'gpu': gpu})
return wrapped
# NOTE: below the delta values are assigned using the median values from abnirml.eval.topk_diffs
SCORERS = {
'BM25-dl19': scorers.TerrierBM25(index=trecdl19.docs_ptindex()),
'T5': scorers.PyTerrierScorer('T5-pt', _monot5, batch_size=64),
'T5-l': scorers.PyTerrierScorer('T5-l-pt', _monot5_large, batch_size=32),
'VBERT': scorers.VanillaBERT(weight_path="/home/sean/data/abnirml/vbert.p"),
'EPIC': scorers.PyTerrierScorer('epic42', _onir('/home/sean/data/onir/model_checkpoints/epic.42.tar.gz'), batch_size=16),
'ColBERT': scorers.PyTerrierScorer('ColBERT-pt', _colbert),
'ANCE': scorers.PyTerrierScorer('ANCE-pt', _ance),
'WMD': scorers.PyTerrierScorer('WMD-pt', _wmd),
'SBERT': scorers.PyTerrierScorer('SBERT-bi-pt', _sbert_bi("sentence-transformers/distilbert-multilingual-nli-stsb-quora-ranking")),
'KNRM': scorers.PyTerrierScorer('knrm', _onir('/home/sean/data/onir/model_checkpoints/knrm.msmarco.new.tar.gz', gpu=False), batch_size=64),
'ConvKNRM': scorers.PyTerrierScorer('convknrm', _onir('/home/sean/data/onir/model_checkpoints/convknrm.msmarco.tar.gz', gpu=False), batch_size=64),
'S2': scorers.S2(),
'DocTTTTTQuery-BM25-dl19': scorers.DocTTTTTQuery(scorers.TerrierBM25(index=trecdl19.docs_ptindex())),
}
SCORERS = {k: scorers.CachedScorer(v, base_path/'cache'/'scorers'/f'{k}.cache') for k, v in SCORERS.items()}
# Tuned on top TREC DL'19
DELTAS = {
'BM25-dl19': 0.35495386740345936,
'T5': 0.0038296878337860107,
'T5-l': 0.004211690276861191,
'VBERT': 0.04632759094238281,
'EPIC': 0.2732887268066406,
'ColBERT': 0.15594482421875,
'ANCE': 0.109130859375,
'WMD': 0.00643495,
'SBERT': 0.0011723,
'KNRM': 0.15517234802246094,
'ConvKNRM': 0.3034172058105469,
'S2': 0.0,
'DocTTTTTQuery-BM25-dl19': 0.3584156381449404,
}
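# Illustrative sketch (added comment, not part of the original code): the DELTAS above
# act as minimum score differences before a probe pair counts as a preference. The exact
# logic lives in the evaluation module; roughly, assuming a scorer name `name` and a probe
# pair (doc_a, doc_b) for the same query:
#   diff = score(query, doc_a) - score(query, doc_b)
#   outcome = 'a' if diff > DELTAS[name] else ('b' if diff < -DELTAS[name] else 'tie')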
| abnirml-master | abnirml/__init__.py |
import traceback
import os
import fcntl
from fnmatch import fnmatch
import json
import ir_datasets
import abnirml
_logger = ir_datasets.log.easy()
class Locker:
def __init__(self, file):
self.file = file
self.fp = None
def __enter__ (self):
self.fp = open(self.file, 'w')
fcntl.flock(self.fp.fileno(), fcntl.LOCK_EX)
def __exit__(self, exc_type, exc_val, exc_tb):
fcntl.flock(self.fp.fileno(), fcntl.LOCK_UN)
self.fp.close()
def main_cache_probes(args):
for aname, axiom in abnirml.PROBES.items():
if args.probe is not None and not any(fnmatch(aname, p) for p in args.probe):
continue
if not axiom.cache_exists():
with _logger.duration(aname):
for _ in axiom.pairs_iter():
pass
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--probe', nargs='+')
parser.add_argument('--scorer', nargs='+')
parser.add_argument('--cache_probes', action='store_true')
args = parser.parse_args()
if args.cache_probes:
return main_cache_probes(args)
cache = {}
if os.path.exists('results.jsonl'):
with open('results.jsonl', 'r+t') as result_file:
for line in result_file:
if not line.startswith('#'):
record = json.loads(line)
cache[record['probe'], record['scorer']] = record
for sname, scorer in abnirml.SCORERS.items():
if args.scorer is not None and not any(fnmatch(sname, s) for s in args.scorer):
continue
for aname, axiom in abnirml.PROBES.items():
if args.probe is not None and not any(fnmatch(aname, p) for p in args.probe):
continue
key = (aname, sname)
if key not in cache:
with _logger.duration(key):
try:
result = abnirml.ev.ProbeExperiment(scorer, axiom)
result['probe'] = aname
result['scorer'] = sname
_logger.info(key)
_logger.info(result)
cache[key] = result
with Locker('results.jsonl.lock'):
with open('results.jsonl', 'at') as result_file:
json.dump(result, result_file)
result_file.write('\n')
except Exception as ex:
traceback.print_exc()
if __name__ == '__main__':
main()
| abnirml-master | abnirml/__main__.py |
import os
import shutil
import json
from pytools import memoize_method
import pyterrier
import ir_datasets
from abnirml.java import J
_logger = ir_datasets.log.easy()
class TerrierIndex:
def __init__(self, path):
self._path = path
def path(self):
return self._path
def _index_ref(self):
return pyterrier.IndexRef.of(os.path.join(self.path(), 'data.properties'))
@memoize_method
def _index(self):
J.initialize()
return pyterrier.IndexFactory.of(self._index_ref())
def tokenize(self, text, include_stops=True):
J.initialize()
pyterrier.index.run_autoclass()
tokenizer = pyterrier.index.Tokeniser.getTokeniser()
stemmer = J._autoclass("org.terrier.terms.PorterStemmer")()
toks = tokenizer.tokenise(J._autoclass("java.io.StringReader")(text))
if not include_stops:
stopwords = J._autoclass("org.terrier.terms.Stopwords")(None)
result = []
while toks.hasNext():
tok = toks.next()
if not include_stops and stopwords.isStopword(tok):
continue
if tok is not None: # for some reason, always ends in None
result.append(stemmer.stem(tok))
return result
def did_lookup(self, internal_id):
meta_index = self._index().getMetaIndex()
return meta_index.getItem('docno', internal_id)
def internal_id_lookup(self, did):
meta_index = self._index().getMetaIndex()
return meta_index.getDocument('docno', did)
def built(self):
return os.path.exists(os.path.join(self.path(), 'config.json'))
def build(self, doc_iter, field):
J.initialize()
path = self.path()
path_exists = os.path.exists(os.path.join(path, 'config.json'))
if not path_exists:
tmp_path = path + '.tmp'
# TODO: handle multiple fields
def _doc_iter():
dids = set()
for doc in doc_iter:
if doc.doc_id in dids:
_logger.warn(f'did {doc.doc_id} already encountered. Ignoring this occurrence.')
else:
dids.add(doc.doc_id)
if field is None:
doc_dict = dict(docno=doc.doc_id, **dict(zip(doc._fields[1:], doc[1:])))
else:
doc_dict = {'docno': doc.doc_id, field: doc[doc._fields.index(field)]}
yield doc_dict
indexer = pyterrier.IterDictIndexer(tmp_path)
indexer.setProperties(**{'indexer.meta.reverse.keys': 'docno'})
with _logger.duration('indexing'):
indexer.index(_doc_iter())
with open(os.path.join(tmp_path, 'config.json'), 'wt') as f:
json.dump({}, f)
if path_exists:
_logger.warn('removing existing index')
shutil.rmtree(path)
os.rename(tmp_path, path)
def idf(self, term):
idx = self._index()
lex = idx.getLexicon()
if term in lex:
return 1 / (lex[term].getDocumentFrequency() + 1)
return 1 / (idx.getCollectionStatistics().getNumberOfDocuments() + 1)
def doc(self, did):
return TerrierDoc(self._index(), self.internal_id_lookup(did), did)
def parse_query(self, query, field=None):
if isinstance(query, str):
result = self.parse_query({'or': query}, field)
else:
result = self._parse_query(query, field)
if result.strip() in ('()', ''):
result = 'a' # dummy query
return f'applypipeline:off {result}'
def _parse_query(self, query, field=None):
if isinstance(query, str):
return self.tokenize(query)
if isinstance(query, list):
return [self._parse_query(q, field) for q in query]
if 'or' in query:
result = []
for subq in self._parse_query(query['or'], field=query.get('field', field)):
if not isinstance(subq, list):
subq = [subq]
for q in subq:
result.append(q)
result = ' '.join(result)
result = f'({result})'
if 'weight' in query:
result = f'{result}^{query["weight"]}'
return result
if 'and' in query:
result = []
for subq in self._parse_query(query['and'], field=query.get('field', field)):
if not isinstance(subq, list):
subq = [subq]
for q in subq:
result.append(f'+{q}')
result = ' '.join(result)
result = f'({result})'
if 'weight' in query:
result = f'{result}^{query["weight"]}'
return result
if 'terms' in query:
result = []
for subq in self._parse_query(query['terms'], field=query.get('field', field)):
if not isinstance(subq, list):
subq = [subq]
for q in subq:
result.append(q)
result = ' '.join(result)
result = f'({result})'
if 'weight' in query:
result = f'{result}^{query["weight"]}'
return result
if 'seq' in query:
result = []
for subq in self._parse_query(query['seq'], field=query.get('field', field)):
if not isinstance(subq, list):
subq = [subq]
for q in subq:
result.append(q)
result = ' '.join(result)
result = f'"{result}"'
if 'slop' in query:
result = f'{result}~{query["slop"]}'
if 'weight' in query:
result = f'{result}^{query["weight"]}'
return result
raise ValueError(query)
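# Example (illustrative only, added comment): parse_query accepts either a plain string or
# a nested dict of 'or'/'and'/'terms'/'seq' clauses with optional 'weight'/'slop'. Assuming
# Porter stemming, a call such as
#   idx.parse_query({'and': ['neural ranking', {'seq': 'query expansion', 'slop': 2}]})
# produces a Terrier query string roughly of the form
#   'applypipeline:off (+neural +rank +"queri expans"~2)'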
class TerrierDoc:
def __init__(self, terrier_index, internal_id, did):
self.did = did
self.internal_id = internal_id
self.terrier_index = terrier_index
def __len__(self):
if self.internal_id == -1: # not found
return 0
doi = self.terrier_index.getDocumentIndex()
return doi.getDocumentLength(self.internal_id)
def tfs(self, terms=None):
di = self.terrier_index.getDirectIndex()
doi = self.terrier_index.getDocumentIndex()
lex = self.terrier_index.getLexicon()
if terms is not None:
term_id_map = {}
for term in terms:
if term in lex:
term_id_map[lex[term].getTermId()] = term
result = {term: 0 for term in terms}
else:
term_id_map = {}
result = {}
try:
for posting in di.getPostings(doi.getDocumentEntry(self.internal_id)):
termid = posting.getId()
if terms is not None:
if termid in term_id_map:
result[term_id_map[termid]] = posting.getFrequency()
else:
lee = lex.getLexiconEntry(termid)
result[lee.getKey()] = posting.getFrequency()
except J.JavaException as e:
_logger.warn(f'Unable to get tfs for did={self.did}: {e}')
return result
class PtIndexWrapper:
def __new__(cls, dataset, *args, **kwargs):
# don't re-wrap
if hasattr(dataset, '_ptindex'):
return dataset
result = super().__new__(cls)
result.__init__(dataset, *args, **kwargs)
return result
def __init__(self, dataset, field=None): # None means all
self._dataset = dataset
self._ptindex = None
self._field = field
def __getattr__(self, attr):
return getattr(self._dataset, attr)
def docs_ptindex(self):
if self._ptindex is None:
if self._field:
idx = TerrierIndex(f'{self._dataset.docs_path()}.terrier.{self._field}')
else:
idx = TerrierIndex(f'{self._dataset.docs_path()}.terrier')
if not idx.built():
doc_iter = self._dataset.docs_iter()
doc_iter = _logger.pbar(doc_iter, 'building terrier index')
idx.build(doc_iter, self._field)
self._ptindex = idx
return self._ptindex
| abnirml-master | abnirml/indices/terrier.py |
from .terrier import TerrierIndex, PtIndexWrapper
| abnirml-master | abnirml/indices/__init__.py |
import random
import spacy
import ir_datasets
import abnirml
from .base import Probe
class BiasProbe(Probe):
def __init__(self, dataset='abnirml:nbias', doc_field='text', query_inferer=None):
self.dataset = ir_datasets.load(dataset)
self.doc_field = doc_field
self.query_inferer = query_inferer or abnirml.util.CommonNounChunk()
def pairs_iter(self):
for doc in self.dataset.docs_iter():
doc_a = doc.neutral_text
doc_b = doc.biased_text
if doc_a != doc_b:
for query in self.query_inferer.infer_queries(doc_a, doc_b):
yield [
{'query_text': query, 'doc_text': doc_a},
{'query_text': query, 'doc_text': doc_b},
]
| abnirml-master | abnirml/probes/bias.py |
import itertools
from .base import Probe
class ConstVar:
def __init__(self, axiom, epsilon=0):
self.axiom = axiom
self.epsilon = epsilon
def score(self, query, doc_id, rel):
raise NotImplementedError
def is_const(self, a, b):
return abs(a - b) <= self.epsilon
def is_var(self, a, b):
return a - self.epsilon > b
def sort_key(self, val):
return val
class Len(ConstVar):
def __init__(self, axiom, epsilon=0):
super().__init__(axiom, epsilon)
self.index = axiom.dataset.docs_ptindex()
def score(self, query, doc_id, rel):
return len(self.index.doc(doc_id))
class Tf(ConstVar):
def __init__(self, axiom, epsilon=0):
super().__init__(axiom, epsilon)
self.index = axiom.dataset.docs_ptindex()
self._prev_query = None
self._prev_qtoks = None
def score(self, query, doc_id, rel):
if query['query_text'] != self._prev_query:
self._prev_query = query['query_text']
self._prev_qtoks = self.index.tokenize(self._prev_query)
return self.index.doc(doc_id).tfs(self._prev_qtoks)
def is_const(self, a, b):
return all(abs(a.get(t, 0) - b.get(t, 0)) <= self.epsilon for t in a.keys() | b.keys())
def is_var(self, a, b):
# all are at least as large, and one is strictly larger
return all(a.get(t, 0) - self.epsilon >= b.get(t, 0) for t in a.keys() | b.keys()) and \
any(a.get(t, 0) - self.epsilon > b.get(t, 0) for t in a.keys() | b.keys())
def sort_key(self, val):
return sum(val.values())
class SumTf(ConstVar):
def __init__(self, axiom, epsilon=0):
super().__init__(axiom, epsilon)
self.index = axiom.dataset.docs_ptindex()
self._prev_query = None
self._prev_qtoks = None
def score(self, query, doc_id, rel):
if query['query_text'] != self._prev_query:
self._prev_query = query['query_text']
self._prev_qtoks = self.index.tokenize(self._prev_query)
return sum(self.index.doc(doc_id).tfs(self._prev_qtoks).values())
class Overlap(ConstVar):
def __init__(self, axiom, epsilon=0):
super().__init__(axiom, epsilon)
self.index = axiom.dataset.docs_ptindex()
self._prev_query = None
self._prev_qtoks = None
def score(self, query, doc_id, rel):
if query['query_text'] != self._prev_query:
self._prev_query = query['query_text']
self._prev_qtoks = self.index.tokenize(self._prev_query)
doc = self.index.doc(doc_id)
tfs = doc.tfs(self._prev_qtoks)
if len(doc) > 0:
return sum(tfs.values()) / len(doc)
return 0
def is_const(self, a, b):
if a == 0. or b == 0.:
return False # don't do 0s
return super().is_const(a, b)
def is_var(self, a, b):
if a == 0. or b == 0.:
return False # don't do 0s
return super().is_var(a, b)
class Rel(ConstVar):
def score(self, query, doc_id, rel):
return rel
class ConstVarQrelsProbe(Probe):
def __init__(self, dataset, const, var, const_epsilon=0, var_epsilon=0, query_field='text', doc_field='text'):
self.dataset = dataset
self.constant = const
self.variable = var
self.const_epsilon = const_epsilon
self.var_epsilon = var_epsilon
self.query_field = query_field
self.doc_field = doc_field
def pairs_iter(self):
qrels = self.dataset.qrels_dict()
docstore = self.dataset.docs_store()
const = self.constant(self, self.const_epsilon)
var = self.variable(self, self.var_epsilon)
query_field_idx = self.dataset.queries_cls()._fields.index(self.query_field)
queries_namespace = self.dataset.queries_namespace()
docs_namespace = self.dataset.docs_namespace()
for query in self.dataset.queries_iter():
docs = []
query = {'query_id': query.query_id, 'query_text': query[query_field_idx]}
for doc_id, rel in qrels.get(query['query_id'], {}).items():
docs.append({
'id': doc_id,
'const': const.score(query, doc_id, rel),
'var': var.score(query, doc_id, rel),
})
docs = sorted(docs, key=lambda x: const.sort_key(x['const']))
for i, doc_a in enumerate(docs):
docs_gt = itertools.takewhile(lambda doc_b: const.is_const(doc_a['const'], doc_b['const']), docs[i+1:])
docs_lt = itertools.takewhile(lambda doc_b: const.is_const(doc_a['const'], doc_b['const']), docs[i-1::-1])
for doc_b in itertools.chain(docs_gt, docs_lt):
if var.is_var(doc_a['var'], doc_b['var']):
yield [
dict(**query, doc_id=doc_a['id'], doc_text=docstore.get(doc_a['id'], self.doc_field)),
dict(**query, doc_id=doc_b['id'], doc_text=docstore.get(doc_b['id'], self.doc_field)),
]
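# Illustrative usage (added comment; assumes the dataset is wrapped with PtIndexWrapper so
# that docs_ptindex() is available): pair judged documents that hold one property constant
# while varying another, e.g. the same relevance label but noticeably longer text:
#   probe = ConstVarQrelsProbe(dataset, const=Rel, var=Len, var_epsilon=5)
#   for doc_a, doc_b in probe.pairs_iter():
#       ...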
| abnirml-master | abnirml/probes/const_var.py |
import random
import spacy
import ir_datasets
import abnirml
from .base import Probe
class JflegProbe(Probe):
def __init__(self, source='abnirml:jfleg', query_inferer=None):
self.dataset = ir_datasets.load(source)
self.query_inferer = query_inferer or abnirml.util.CommonNounChunk()
def pairs_iter(self):
for doc in self.dataset.docs_iter():
text_a = doc.nonfluent
for text_b in doc.fluents:
for query in self.query_inferer.infer_queries(text_a, text_b):
yield [
{'query_text': query, 'doc_text': text_a},
{'query_text': query, 'doc_text': text_b},
]
| abnirml-master | abnirml/probes/fluency.py |
from . import base
from .base import Probe
from . import const_var
from . import transform
from .bias import BiasProbe
from .fluency import JflegProbe
from .formality import GyafcProbe
from .factuality import FactualityProbe
from .summarization import XSumProbe, CnnDmProbe
from .paraphrase import ParaphraseProbe
from .simplification import SimplificationProbe
from .nlaug import NLAugProbe
from .cached import CachedProbe
| abnirml-master | abnirml/probes/__init__.py |
import os
import gzip
import pickle
import lz4.frame
from pathlib import Path
from .base import Probe
import ir_datasets
class CachedProbe(Probe):
def __init__(self, probe, path):
self.probe = probe
self.path = Path(path)
def pair_symmetry(self):
return self.probe.pair_symmetry()
def pairs_iter(self):
if not self.path.exists():
with ir_datasets.util.finialized_file(self.path, 'wb') as f:
with lz4.frame.LZ4FrameFile(f, 'wb') as f:
for pair in self.probe.pairs_iter():
pickle.dump(pair, f)
yield pair
else:
with lz4.frame.LZ4FrameFile(self.path, 'rb') as f:
while f.peek():
yield pickle.load(f)
def cache_exists(self):
return os.path.exists(self.path) or os.path.exists(f'{self.path}.tmp')
| abnirml-master | abnirml/probes/cached.py |
import spacy
import ir_datasets
from .base import Probe
class GyafcProbe(Probe):
def __init__(self, spacy_model='en_core_web_sm', genre_filter=None, yahoo_l6_dataset=None, gyafc_dataset=None):
self.spacy_model = spacy_model
self.genre_filter = genre_filter
self.yahoo_l6_dataset = yahoo_l6_dataset or ir_datasets.load('abnirml:yahoo-l6')
self.gyafc_dataset = gyafc_dataset or ir_datasets.load('abnirml:gyafc')
def pairs_iter(self):
l6_docstore = self.yahoo_l6_dataset.docs_store()
nlp = spacy.load(self.spacy_model, disable=["parser"])
for gyafc_doc in self.gyafc_dataset.docs_iter():
if gyafc_doc.mapped_l6_id == '_':
# '_' indicates no match was found
continue
if self.genre_filter is not None and gyafc_doc.genre != self.genre_filter:
# ignore this genre
continue
src_question = l6_docstore.get(gyafc_doc.mapped_l6_id, 'subject')
src_question_lemmas = {t.lemma_.lower() for t in nlp(src_question) if not t.is_stop and not t.is_punct and not t.like_num and str(t).strip() != ''}
formal_lemmas = {t.lemma_ for t in nlp(gyafc_doc.formal)}
informal_lemmas = {t.lemma_ for t in nlp(gyafc_doc.informal)}
if not src_question_lemmas & formal_lemmas & informal_lemmas:
# no overlapping terms among the query and the 2 documents
continue
yield [
{'query_text': src_question, 'doc_text': gyafc_doc.formal},
{'query_text': src_question, 'doc_text': gyafc_doc.informal},
]
| abnirml-master | abnirml/probes/formality.py |
from .base import Probe
class NLAugProbe(Probe):
def __init__(self, dataset, generator, rel_range=None, query_field='text', doc_field='text'):
self.dataset = dataset
self.generator = generator
if rel_range is not None:
if isinstance(rel_range, (tuple, list)):
assert len(rel_range) == 2
else:
rel_range = (rel_range, rel_range)
self.rel_range = rel_range
self.query_field = query_field
self.doc_field = doc_field
def pairs_iter(self):
qrels = self.dataset.qrels_dict()
docstore = self.dataset.docs_store()
query_field_idx = self.dataset.queries_cls()._fields.index(self.query_field)
for query in self.dataset.queries_iter():
query = {'query_id': query.query_id, 'query_text': query[query_field_idx]}
these_qrels = qrels.get(query['query_id'], {})
for doc_id, rel in these_qrels.items():
if self.rel_range is None or self.rel_range[0] <= rel <= self.rel_range[1]:
dtext_a = docstore.get(doc_id, self.doc_field)
sample_a = dict(**query, doc_id=doc_id, doc_text=dtext_a)
for generated_text in self.generator.generate(dtext_a):
if dtext_a != generated_text:
sample_b = dict(**query, doc_text=generated_text)
yield [sample_a, sample_b]
| abnirml-master | abnirml/probes/nlaug.py |
import re
import random
import spacy
import ir_datasets
import abnirml
from .base import Probe
_logger = ir_datasets.log.easy()
class FactualityProbe(Probe):
def __init__(self, dataset='dpr-w100/natural-questions/dev', spacy_model='en_core_web_sm', random_seed=42, valid_entities=('PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART', 'LAW', 'LANGUAGE'), tokenizer=None):
self.dataset = ir_datasets.load(dataset)
self.spacy_model = spacy_model
self.random_seed = random_seed
self.valid_entities = valid_entities
self.tokenizer = tokenizer
def pairs_iter(self):
nlp = spacy.load(self.spacy_model, disable=["parser"])
docs_store = self.dataset.docs_store()
queries = [q for q in self.dataset.queries_iter() if len(q.answers) == 1]
query_map = {q.query_id: q for q in queries}
query_answer_parsed = {q.query_id: nlp(q.answers[0]) for q in _logger.pbar(queries, desc='parsing answers')}
qids_by_tok_count_ent = {}
for qid, qtok in query_answer_parsed.items():
qlen = len(self.tokenizer.tokenize(str(qtok), include_stops=False)) if self.tokenizer else len(qtok)
ent = [e for e in qtok.ents if e.label_ in self.valid_entities]
if len(ent) > 0:
ent = ent[0].label_ if len(ent) > 0 else ''
key = (qlen, ent)
qids_by_tok_count_ent.setdefault(key, []).append(qid)
query_answer_parsed[qid] = key
else:
query_answer_parsed[qid] = None
print({k: len(qids_by_tok_count_ent[k]) for k in qids_by_tok_count_ent})
qrels = self.dataset.qrels_dict()
for query in queries:
if not query_answer_parsed[query.query_id]:
continue
these_pos_dids = [d for d, s in qrels[query.query_id].items() if s > 0]
these_pos_docs = docs_store.get_many(these_pos_dids)
for did in sorted(these_pos_dids):
doc = these_pos_docs[did]
answer_matcher = ' ?'.join([re.escape(a) for a in query.answers[0].split(' ')])
if not re.search(answer_matcher, doc.text, flags=re.IGNORECASE):
_logger.info(f'answer not found in text qid={query.query_id} did={did}')
continue
if any(re.search(r'\b'+re.escape(t)+r'\b', query.text, flags=re.IGNORECASE) for t in query.answers[0].split(' ')):
# this filter is a bit aggressive, but needs to be safe to prevent messing up "A or B"-like questions (where the answer is then either A or B).
continue
rng = random.Random(repr((query.query_id, did, self.random_seed)))
new_qid = rng.choice(qids_by_tok_count_ent[query_answer_parsed[query.query_id]])
new_answer = query_map[new_qid].answers[0]
new_text = re.sub(answer_matcher, new_answer, doc.text, flags=re.IGNORECASE)
if new_text != doc.text: # also handles if this qid is selected
yield [
{'query_text': query.text, 'doc_text': doc.text},
{'query_text': query.text, 'doc_text': new_text},
]
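# Summary of the construction above (descriptive comment added for clarity): for a
# natural-questions query whose single answer appears verbatim in a relevant passage,
# the answer span is swapped with the answer of another query that has the same token
# length and entity type (e.g. one PERSON name for another), yielding a factually
# perturbed variant of the passage as the second element of the pair.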
| abnirml-master | abnirml/probes/factuality.py |
import re
import itertools
import random
import string
import spacy
from ..scorers import doctttttquery
from .base import Probe
from abnirml.java import J
class TransformProbe(Probe):
def __init__(self, dataset, transform, rel_range=None, query_field='text', doc_field='text'):
self.dataset = dataset
if rel_range is not None:
if isinstance(rel_range, (tuple, list)):
assert len(rel_range) == 2
else:
rel_range = (rel_range, rel_range)
self.rel_range = rel_range
self.query_field = query_field
self.doc_field = doc_field
self.transform = transform
def pairs_iter(self):
qrels = self.dataset.qrels_dict()
docstore = self.dataset.docs_store()
query_field_idx = self.dataset.queries_cls()._fields.index(self.query_field)
with self.transform:
for query in self.dataset.queries_iter():
query = {'query_id': query.query_id, 'query_text': query[query_field_idx]}
these_qrels = qrels.get(query['query_id'], {})
for doc_id, rel in these_qrels.items():
if self.rel_range is None or self.rel_range[0] <= rel <= self.rel_range[1]:
dtext_a = docstore.get(doc_id, self.doc_field)
sample_a = dict(**query, doc_id=doc_id, doc_text=dtext_a)
sample_b = self.transform.transform(sample_a, these_qrels, docstore, self.doc_field)
if sample_b is not None:
yield [sample_a, sample_b]
class CtxtManager:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class Transform(CtxtManager):
def __init__(self):
super().__init__()
self.record = None
self.qrels = None
self.docstore = None
self.doc_field = None
def transform(self, sample, qrels, docstore, doc_field):
dtext_a = sample['doc_text']
self.record = sample
self.qrels = qrels
self.docstore = docstore
self.doc_field = doc_field
dtext_b = self.transform_text(dtext_a)
self.record = None
self.qrels = None
self.docstore = None
self.doc_field = None
if dtext_b and dtext_a != dtext_b:
return {**sample, 'doc_id': None, 'doc_text': dtext_b}
return None
def transform_text(self, text):
raise NotImplementedError
class SpacyMixin(CtxtManager):
def __init__(self, **kwargs):
super().__init__()
self.spacy_model = kwargs.get('spacy_model', 'en_core_web_sm')
self.nlp = None
def __enter__(self):
super().__enter__()
self.nlp = spacy.load(self.spacy_model)
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
self.nlp = None
class RandomMixin(CtxtManager):
def __init__(self, **kwargs):
super().__init__()
self.random_seed = kwargs.get('random_seed', 42)
self.rng = None
def __enter__(self):
super().__enter__()
self.rng = random.Random(self.random_seed)
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
self.rng = None
class CaseFold(Transform):
def transform_text(self, text):
return text.lower()
class DelPunct(Transform):
def __init__(self):
super().__init__()
self.trans_punct = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
def transform_text(self, text):
return text.translate(self.trans_punct)
class DelSent(Transform, SpacyMixin, RandomMixin):
def __init__(self, position='rand', **kwargs):
super().__init__(**kwargs)
self.position = position
def transform_text(self, text):
sents = list(self.nlp(text).sents)
if len(sents) > 1: # don't remove if only 1 sentence
if self.position == 'start':
sents = sents[1:]
elif self.position == 'end':
sents = sents[:-1]
elif self.position == 'rand':
pos = self.rng.randrange(len(sents))
sents = sents[:pos] + sents[pos+1:]
else:
raise ValueError()
return ' '.join(str(s) for s in sents)
return None
class AddSent(Transform, SpacyMixin, RandomMixin):
def __init__(self, position='start', rel=0, **kwargs):
super().__init__(**kwargs)
self.position = position
self.rel = rel
def transform_text(self, text):
doc_candidates = [did for did, score in self.qrels.items() if score == self.rel]
if doc_candidates:
doc_id = self.rng.choice(doc_candidates)
dtext = self.docstore.get(doc_id, self.doc_field)
sents = list(self.nlp(dtext).sents)
sent = self.rng.choice(sents)
if self.position == 'start':
text = f'{sent} {text}'
elif self.position == 'end':
text = f'{text} {sent}'
else:
raise ValueError()
return text
return None
class Lemmatize(Transform, SpacyMixin):
def transform_text(self, text):
dtext_b = [t.lemma_ if not t.is_stop else t for t in self.nlp(text)]
return ' '.join(str(s) for s in dtext_b)
class ShufWords(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
dtoks = [str(t) for t in self.nlp(text)]
self.rng.shuffle(dtoks)
return ' '.join(str(s) for s in dtoks)
class ShufWordsKeepSents(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
self.rng = random.Random(str(self.random_seed) + text)
dsents = []
for sent in self.nlp(text).sents:
sent_toks = [str(s) for s in sent[:-1]]
self.rng.shuffle(sent_toks)
sent_toks = sent_toks + [str(sent[-1])]
dsents.append(' '.join(sent_toks))
return ' '.join(dsents)
class ShufWordsKeepSentsAndNPs(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
self.rng = random.Random(str(self.random_seed) + text)
dsents = []
parsed_text = self.nlp(text)
noun_chunks = list(parsed_text.noun_chunks)
noun_chunk_idxs = set(itertools.chain(*(range(c.start, c.end) for c in noun_chunks)))
for sent in parsed_text.sents:
these_noun_chunks = [str(c) for c in noun_chunks if c.start >= sent.start and c.end <= sent.end]
these_non_noun_chunks = [str(parsed_text[i]) for i in range(sent.start, sent.end - 1) if i not in noun_chunk_idxs]
sent_toks = these_noun_chunks + these_non_noun_chunks
self.rng.shuffle(sent_toks)
sent_toks = sent_toks + [str(sent[-1])]
dsents.append(' '.join(sent_toks))
return ' '.join(dsents)
class ShufWordsKeepNPs(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
self.rng = random.Random(str(self.random_seed) + text)
parsed_text = self.nlp(text)
noun_chunks = list(parsed_text.noun_chunks)
noun_chunk_idxs = set(itertools.chain(*(range(c.start, c.end) for c in noun_chunks)))
noun_chunks = [str(c) for c in noun_chunks]
non_noun_chunks = [str(t) for i, t in enumerate(parsed_text) if i not in noun_chunk_idxs]
toks = noun_chunks + non_noun_chunks
self.rng.shuffle(toks)
return ' '.join(toks)
class ShufNPSlots(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
self.rng = random.Random(str(self.random_seed) + text)
parsed_text = self.nlp(text)
noun_chunks = list(parsed_text.noun_chunks)
noun_chunk_idxs = {}
for i, np in enumerate(noun_chunks):
for j in range(np.start, np.end):
noun_chunk_idxs[j] = i
chunks = []
i = 0
while i < len(parsed_text):
if i in noun_chunk_idxs:
chunks.append(noun_chunk_idxs[i])
i = noun_chunks[noun_chunk_idxs[i]].end
else:
chunks.append(str(parsed_text[i]))
i += 1
self.rng.shuffle(noun_chunks)
toks = []
for chunk in chunks:
if isinstance(chunk, int):
toks.append(str(noun_chunks[chunk]))
else:
toks.append(chunk)
return ' '.join(toks)
class ShufPrepositions(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
self.rng = random.Random(str(self.random_seed) + text)
parsed_text = self.nlp(text)
preps = list(t for t in parsed_text if t.pos_ == 'ADP')
prep_idxs = {}
for i, prep in enumerate(preps):
prep_idxs[prep.i] = i  # use the token index (prep.i), matching the position check below
chunks = []
i = 0
while i < len(parsed_text):
if i in prep_idxs:
chunks.append(prep_idxs[i])
else:
chunks.append(str(parsed_text[i]))
i += 1
self.rng.shuffle(preps)
toks = []
for chunk in chunks:
if isinstance(chunk, int):
toks.append(str(preps[chunk]))
else:
toks.append(chunk)
return ' '.join(toks)
class SwapNumNPSlots2(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
self.rng = random.Random(str(self.random_seed) + text)
parsed_text = self.nlp(text)
num_swaps = len(list(parsed_text.noun_chunks))
toks = [str(t) for t in parsed_text]
new_toks = [str(t) for t in parsed_text]
positions = self.rng.sample(range(len(toks)), k=num_swaps)
shuf_positions = list(positions)
self.rng.shuffle(shuf_positions)
for old, new in zip(positions, shuf_positions):
new_toks[new] = toks[old]
return ' '.join(new_toks)
class ReverseNPSlots(Transform, SpacyMixin):
def transform_text(self, text):
parsed_text = self.nlp(text)
noun_chunks = list(parsed_text.noun_chunks)
noun_chunk_idxs = {}
for i, np in enumerate(noun_chunks):
for j in range(np.start, np.end):
noun_chunk_idxs[j] = i
chunks = []
i = 0
while i < len(parsed_text):
if i in noun_chunk_idxs:
chunks.append(noun_chunk_idxs[i])
i = noun_chunks[noun_chunk_idxs[i]].end
else:
chunks.append(str(parsed_text[i]))
i += 1
noun_chunks = list(reversed(noun_chunks))
toks = []
for chunk in chunks:
if isinstance(chunk, int):
toks.append(str(noun_chunks[chunk]))
else:
toks.append(chunk)
return ' '.join(toks)
class ShufSents(Transform, SpacyMixin, RandomMixin):
def transform_text(self, text):
dsents = [str(s) for s in self.nlp(text).sents]
self.rng.shuffle(dsents)
dtext_b = ' '.join(str(s) for s in dsents)
return dtext_b
class ReverseSents(Transform, SpacyMixin):
def transform_text(self, text):
dsents = [str(s) for s in self.nlp(text).sents]
dtext_b = ' '.join(str(s) for s in reversed(dsents))
return dtext_b
class ReverseWords(Transform, SpacyMixin):
def transform_text(self, text):
dtext_b = [str(s) for s in reversed(self.nlp(text))]
return ' '.join(dtext_b)
class Typo(Transform, RandomMixin):
def __init__(self, no_stops=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self._typo_list = None
self._typo_regex = None
self._no_stops = no_stops
def typo_list(self):
if self._typo_list is None:
self._typo_list = {}
if self._no_stops:
J.initialize()
stopwords = J._autoclass("org.terrier.terms.Stopwords")(None)
for line in open('etc/wiki_typos.tsv'):
typo, corrects = line.rstrip().split('\t')
corrects = corrects.split(', ')
for correct in corrects:
if self._no_stops and stopwords.isStopword(correct.lower()):
continue
if correct not in self._typo_list:
self._typo_list[correct] = []
self._typo_list[correct].append(typo)
self._typo_regex = '|'.join(re.escape(c) for c in self._typo_list)
self._typo_regex = re.compile(f'\\b({self._typo_regex})\\b')
return self._typo_list, self._typo_regex
def transform_text(self, text):
typos, regex = self.typo_list()
match = regex.search(text)
while match:
typo_candidates = typos[match.group(1)]
if len(typo_candidates) > 1:
typo = self.rng.choice(typo_candidates)
else:
typo = typo_candidates[0]
text = text[:match.start()] + typo + text[match.end():]
match = regex.search(text)
return text
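# Note on the typo list format (inferred from the loader above, added comment): each line
# of etc/wiki_typos.tsv is expected to be '<typo>\t<correct1>, <correct2>, ...'; e.g. a
# hypothetical line 'teh\tthe' would cause occurrences of 'the' to be replaced with 'teh'.
# The file itself ships with the repository and is not reproduced here.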
class DocTTTTTQuery(Transform):
def __init__(self, count=4):
super().__init__()
self.model = None
self.count = count
def transform_text(self, text):
if self.model is None:
self.model = doctttttquery.DocTTTTTQueryModel(count=self.count)
exp = self.model.expand_document(text)
return f'{text} {exp}'
class Query(Transform):
def transform_text(self, text):
return self.record['query_text']
class PrependQuery(Transform):
def transform_text(self, text):
return self.record['query_text'] + ' ' + text
class RmStops(Transform, SpacyMixin):
def __init__(self, source='terrier'):
super().__init__()
if source == 'terrier':
self.STOPS = set(x.strip() for x in open('etc/terrier-stops.txt'))
else:
raise ValueError(source)
def transform_text(self, text):
terms = [str(t) for t in self.nlp(text) if str(t).lower() not in self.STOPS]
return ' '.join(terms)
class Multi(Transform):
def __init__(self, transforms):
self.transforms = transforms
def __enter__(self):
for t in self.transforms:
t.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
for t in self.transforms:
t.__exit__(exc_type, exc_val, exc_tb)
def transform_text(self, text):
for t in self.transforms:
if text:
text = t.transform_text(text)
return text
| abnirml-master | abnirml/probes/transform.py |
import ir_datasets
import os
import hashlib
from glob import glob
import spacy
from .base import Probe
class XSumProbe(Probe):
def __init__(self, spacy_model='en_core_web_sm', dataset='abnirml:xsum'):
super().__init__()
self.spacy_model = spacy_model
self.dataset = ir_datasets.load(dataset)
def pairs_iter(self):
nlp = spacy.load(self.spacy_model, disable=["parser"])
for doc in self.dataset.docs_iter():
m = hashlib.sha256(f'{doc.doc_id}.summary'.encode())
if m.digest()[0] % 10 != 0:
continue # Sample 10%
title_lemmas = {t.lemma_.lower() for t in nlp(doc.title) if not t.is_stop and not t.is_punct and not t.like_num and str(t).strip() != ''}
summary_lemmas = {t.lemma_ for t in nlp(doc.first_sentence)}
content_lemmas = {t.lemma_ for t in nlp(doc.rest_body)}
if not (title_lemmas & summary_lemmas & content_lemmas):
# no overlapping terms among the query and the 2 documents
continue
if doc.title and doc.rest_body and doc.first_sentence:
yield [
{'query_text': doc.title, 'doc_text': doc.first_sentence},
{'query_text': doc.title, 'doc_text': doc.rest_body},
]
class CnnDmProbe(Probe):
def __init__(self, spacy_model='en_core_web_sm', source='both', dataset='abnirml:cnn_dailymail'):
super().__init__()
assert source in ('both', 'cnn', 'dm')
self.spacy_model = spacy_model
self.source = source
self.dataset = ir_datasets.load(dataset)
def pairs_iter(self):
nlp = spacy.load(self.spacy_model, disable=["parser"])
for doc in self.dataset.docs_iter():
if self.source == 'cnn' and not doc.doc_id.startswith('cnn:'):
continue
if self.source == 'dm' and not doc.doc_id.startswith('dailymail:'):
continue
m = hashlib.sha256(doc.doc_id.split(':')[1].encode())
if m.digest()[0] % 10 != 0:
continue # Sample 10%
title_lemmas = {t.lemma_.lower() for t in nlp(doc.title) if not t.is_stop and not t.is_punct and not t.like_num and str(t).strip() != ''}
summary_lemmas = {t.lemma_ for t in nlp(doc.summary)}
content_lemmas = {t.lemma_ for t in nlp(doc.body)}
if not (title_lemmas & summary_lemmas & content_lemmas):
# no overlapping terms among the query and the 2 documents
continue
yield [
{'query_text': doc.title, 'doc_text': doc.summary},
{'query_text': doc.title, 'doc_text': doc.body},
]
| abnirml-master | abnirml/probes/summarization.py |
import random
import spacy
import ir_datasets
import abnirml
from .base import Probe
class SimplificationProbe(Probe):
def __init__(self, dataset='abnirml:wikiturk', query_inferer=None):
self.dataset = ir_datasets.load(dataset)
self.query_inferer = query_inferer or abnirml.util.CommonNounChunk()
def pairs_iter(self):
for doc in self.dataset.docs_iter():
for simp in doc.simplifications:
for query in self.query_inferer.infer_queries(doc.source, simp):
yield [
{'query_text': query, 'doc_text': doc.source},
{'query_text': query, 'doc_text': simp},
]
| abnirml-master | abnirml/probes/simplification.py |
import random
import spacy
import ir_datasets
import abnirml
from .base import Probe
class ParaphraseProbe(Probe):
def __init__(self, dataset='abnirml:mspc', doc_field='text', paraphrase_label=True, query_inferer=None):
self.dataset = ir_datasets.load(dataset)
self.doc_field = doc_field
self.paraphrase_label = paraphrase_label
self.query_inferer = query_inferer or abnirml.util.CommonNounChunk()
def pair_symmetry(self):
return 'symmetric'
def pairs_iter(self):
docstore = self.dataset.docs_store()
for docpair in self.dataset.docpairs_iter():
if docpair.paraphrase != self.paraphrase_label:
continue
doc_a = getattr(docstore.get(docpair.doc_id_a), self.doc_field)
doc_b = getattr(docstore.get(docpair.doc_id_b), self.doc_field)
if doc_a != doc_b:
for query in self.query_inferer.infer_queries(doc_a, doc_b):
yield [
{'query_text': query, 'doc_text': doc_a},
{'query_text': query, 'doc_text': doc_b},
]
| abnirml-master | abnirml/probes/paraphrase.py |
class Probe:
def pair_symmetry(self):
return 'asymmetric' # most probes are asymmetric
def pairs_iter(self):
raise NotImplementedError
| abnirml-master | abnirml/probes/base.py |
import ir_datasets
import random
import spacy
__all__ = ['QueryInferer', 'CommonNounChunk', 'SelectAll', 'RandomSelector']
class QueryInferer:
def infer_queries(self, text_a, text_b):
raise NotImplementedError()
class CommonNounChunk(QueryInferer):
def __init__(self, spacy_model='en_core_web_sm', min_noun_chunk_len=5, selector=None):
self.nlp = ir_datasets.util.Lazy(lambda: spacy.load(spacy_model))
self.min_noun_chunk_len = min_noun_chunk_len
self.selector = selector if selector is not None else RandomSelector()
def infer_queries(self, text_a, text_b):
parsed_a = self.nlp()(text_a)
parsed_b = self.nlp()(text_b)
noun_chunks_a = set(str(c).lower() for c in parsed_a.noun_chunks if len(str(c)) > self.min_noun_chunk_len)
noun_chunks_b = set(str(c).lower() for c in parsed_b.noun_chunks if len(str(c)) > self.min_noun_chunk_len)
candidates = noun_chunks_a & noun_chunks_b
return self.selector.select(candidates, text_a, text_b)
class SelectAll:
def select(self, candidates, text_a, text_b):
return candidates
class RandomSelector:
def __init__(self, random_seed=42):
self.random_seed = random_seed
def select(self, candidates, text_a, text_b):
if candidates:
rng = random.Random(repr((text_a, text_b, self.random_seed)))
return [rng.choice(sorted(candidates))]
return []
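# Illustrative usage (added comment): given two versions of the same passage, infer a
# shared query from their overlapping noun chunks, e.g.
#   inferer = CommonNounChunk()
#   queries = inferer.infer_queries("The tallest building in Paris ...",
#                                   "The tallest building of Paris ...")
# With the default RandomSelector this returns at most one noun chunk (here likely
# "the tallest building"), which the probes use as the query text.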
| abnirml-master | abnirml/util/query_inference.py |
from .query_inference import *
| abnirml-master | abnirml/util/__init__.py |
from collections import namedtuple
import io
import contextlib
import ir_datasets
from . import DownloadConfig
from ir_datasets import Dataset
NAME = 'abnirml:jfleg'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
JflegDoc = namedtuple('JflegDoc', ['doc_id', 'nonfluent', 'fluents'])
class JflegDocs(ir_datasets.formats.BaseDocs):
def __init__(self, src_dlc, refs_dlc):
self.src_dlc = src_dlc
self.refs_dlc = refs_dlc
def docs_iter(self):
with contextlib.ExitStack() as ctxt:
src = io.TextIOWrapper(ctxt.enter_context(self.src_dlc.stream()))
refs = [io.TextIOWrapper(ctxt.enter_context(r.stream())) for r in self.refs_dlc]
for i, items in enumerate(zip(src, *refs)):
nonfluent, *fluents = items
yield JflegDoc(str(i), nonfluent, tuple(fluents))
def docs_cls(self):
return JflegDoc
SUBSETS = {}
SUBSETS['dev'] = JflegDocs(dlc['dev/src'], [dlc['dev/ref0'], dlc['dev/ref1'], dlc['dev/ref2'], dlc['dev/ref3']])
SUBSETS['dev/sp'] = JflegDocs(dlc['dev/src.sp'], [dlc['dev/ref0'], dlc['dev/ref1'], dlc['dev/ref2'], dlc['dev/ref3']])
SUBSETS['test'] = JflegDocs(dlc['test/src'], [dlc['test/ref0'], dlc['test/ref1'], dlc['test/ref2'], dlc['test/ref3']])
SUBSETS['test/sp'] = JflegDocs(dlc['test/src.sp'], [dlc['test/ref0'], dlc['test/ref1'], dlc['test/ref2'], dlc['test/ref3']])
SUBSETS['all'] = ir_datasets.datasets.base.Concat(SUBSETS['dev'], SUBSETS['test'])
SUBSETS['sp'] = ir_datasets.datasets.base.Concat(SUBSETS['dev/sp'], SUBSETS['test/sp'])
for s_name, subset in SUBSETS.items():
if s_name == 'all':
ir_datasets.registry.register(NAME, Dataset(subset))
else:
ir_datasets.registry.register(f'{NAME}/{s_name}', Dataset(subset))
| abnirml-master | abnirml/datasets/jfleg.py |
from typing import NamedTuple, Tuple
import io
import contextlib
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from . import DownloadConfig
from ir_datasets import Dataset
# Text simplification dataset from <https://github.com/cocoxu/simplification>
# Wei Xu and Courtney Napoles and Ellie Pavlick and Quanze Chen and Chris Callison-Burch.
# Optimizing Statistical Machine Translation for Text Simplification. TACL 2016.
NAME = 'abnirml:wikiturk'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
class WikiTurkDoc(NamedTuple):
doc_id: str
source: str
simplifications: Tuple[str, ...]
class WikiTurkDocs(ir_datasets.formats.BaseDocs):
def __init__(self, source_dlc, simp_dlcs, did_prefix):
self._source_dlc = source_dlc
self._simp_dlcs = simp_dlcs
self._did_prefix = did_prefix
@ir_datasets.util.use_docstore
def docs_iter(self):
with contextlib.ExitStack() as stack:
src = io.TextIOWrapper(stack.enter_context(self._source_dlc.stream()))
simps = [io.TextIOWrapper(stack.enter_context(s.stream())) for s in self._simp_dlcs]
for i, texts in enumerate(zip(src, *simps)):
texts = [t.strip().replace('-lrb-', '(').replace('-rrb-', ')') for t in texts]
yield WikiTurkDoc(f'{self._did_prefix}{i}', texts[0], tuple(texts[1:]))
def docs_cls(self):
return WikiTurkDoc
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{ir_datasets.util.home_path()/NAME}/docs.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
)
def docs_count(self):
return self.docs_store().count()
def docs_namespace(self):
return NAME
def docs_lang(self):
return 'en'
SUBSETS = {}
SUBSETS['test'] = ir_datasets.datasets.base.Dataset(
WikiTurkDocs(dlc['test.norm'], [dlc[f'test.{i}'] for i in range(8)], 'test'),
)
SUBSETS['tune'] = ir_datasets.datasets.base.Dataset(
WikiTurkDocs(dlc['tune.norm'], [dlc[f'tune.{i}'] for i in range(8)], 'tune'),
)
SUBSETS[''] = ir_datasets.datasets.base.Concat(SUBSETS['test'], SUBSETS['tune'])
for s_name, subset in SUBSETS.items():
if s_name == '':
ir_datasets.registry.register(NAME, Dataset(subset))
else:
ir_datasets.registry.register(f'{NAME}/{s_name}', Dataset(subset))
| abnirml-master | abnirml/datasets/wikiturk.py |
import os
import yaml
from ir_datasets.util.download import _DownloadConfig
DownloadConfig = _DownloadConfig(contents=yaml.load(open('./abnirml/etc/downloads.yaml'), Loader=yaml.BaseLoader))
from . import cnn_dailymail
from . import gyafc
from . import jfleg
from . import mspc
from . import nbias
from . import wikiturk
from . import yahoo_l6
from . import xsum
| abnirml-master | abnirml/datasets/__init__.py |
import tarfile
import io
import os
from collections import namedtuple
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from ir_datasets.util import ZipExtract
from . import DownloadConfig
from ir_datasets import Dataset
NAME = 'abnirml:xsum'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
XSumDoc = namedtuple('XSumDoc', ['doc_id', 'url', 'title', 'first_sentence', 'rest_body'])
class XSumDocs(ir_datasets.formats.BaseDocs):
def __init__(self, source_dlc):
self.source_dlc = source_dlc
def docs_iter(self):
return iter(self.docs_store())
def _docs_iter(self):
with self.source_dlc.stream() as stream, \
tarfile.open(fileobj=stream, mode='r|gz') as tarf:
for record in tarf:
if record.path.endswith('.summary'):
doc_id = record.path.split('/')[-1].split('.')[0]
text = tarf.extractfile(record).read().decode()
parts = text.split('[SN]')
yield XSumDoc(doc_id, parts[2].strip(), parts[4].strip(), parts[6].strip(), parts[8].strip())
def docs_path(self):
return self.source_dlc.path()
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{ir_datasets.util.home_path()/NAME}/docs.pklz4',
init_iter_fn=self._docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
)
def docs_cls(self):
return XSumDoc
XSUM_DATASET = Dataset(XSumDocs(dlc['docs']))
ir_datasets.registry.register(NAME, XSUM_DATASET)
| abnirml-master | abnirml/datasets/xsum.py |
from collections import namedtuple
import io
import ir_datasets
from ir_datasets.util import GzipExtract, LocalDownload
from ir_datasets.indices import PickleLz4FullStore
from . import DownloadConfig
from ir_datasets import Dataset
NAME = 'abnirml:yahoo-l6'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
YahooL6Doc = namedtuple('YahooL6Doc', ['doc_id', 'type', 'subject', 'content'])
class YahooL6Docs(ir_datasets.formats.BaseDocs):
def __init__(self, dlcs):
self.dlcs = dlcs
@ir_datasets.util.use_docstore
def docs_iter(self):
doc = ''
bs4 = ir_datasets.lazy_libs.bs4()
for streamer in self.dlcs:
with streamer.stream() as stream:
for line in io.TextIOWrapper(stream):
if doc or '<vespaadd>' in line:
doc += line
if '</vespaadd>' in line:
soup = bs4.BeautifulSoup(doc, 'lxml')
topic = soup.find('document')
did = topic.find('uri').get_text()
content = topic.find('content')
yield YahooL6Doc(
f'{did}-q',
'question',
topic.find('subject').get_text().replace('<br />', '\n'),
content.get_text().replace('<br />', '\n') if content else '')
for i, nba in enumerate(topic.find_all('answer_item')):
yield YahooL6Doc(
f'{did}-a{i}',
'answer',
'',
nba.get_text().replace('<br />', '\n'))
doc = ''
def docs_path(self):
return self.dlcs[0].path()
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{ir_datasets.util.home_path()/NAME}/docs.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
)
def docs_cls(self):
return YahooL6Doc
base_path = ir_datasets.util.home_path() / 'yahoo-l6'
L6_DATASET = Dataset(YahooL6Docs([
GzipExtract(dlc['docs1']),
GzipExtract(dlc['docs2']),
]))
ir_datasets.registry.register(NAME, L6_DATASET)
| abnirml-master | abnirml/datasets/yahoo_l6.py |
import io
import os
from collections import namedtuple
import ir_datasets
from ir_datasets.util import ZipExtract
from ir_datasets import Dataset
from . import DownloadConfig
NAME = 'abnirml:nbias'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
NBiasDoc = namedtuple('NBiasDoc', ['doc_id', 'biased_text', 'neutral_text'])
class NBiasDocs(ir_datasets.formats.BaseDocs):
def __init__(self, source_dlc):
self.source_dlc = source_dlc
def docs_iter(self):
with self.source_dlc.stream() as s:
for line in io.TextIOWrapper(s):
cols = line.split('\t')
yield NBiasDoc(cols[0], cols[3], cols[4])
def docs_path(self):
return self.source_dlc.path()
def docs_cls(self):
return NBiasDoc
NBIAS_DATASET = Dataset(NBiasDocs(ZipExtract(dlc['docs'], 'bias_data/WNC/biased.full')))
ir_datasets.registry.register(NAME, NBIAS_DATASET)
| abnirml-master | abnirml/datasets/nbias.py |
import os
from collections import namedtuple
import ir_datasets
from ir_datasets.util import ZipExtractCache
from . import DownloadConfig
from ir_datasets import Dataset
NAME = 'abnirml:gyafc'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
GyafcDoc = namedtuple('GyafcDoc', ['doc_id', 'genre', 'split', 'is_orig_formal', 'formal', 'informal', 'mapped_l6_id'])
class GyafcDocs(ir_datasets.formats.BaseDocs):
def __init__(self, source_dlc):
self.source_dlc = source_dlc
def docs_iter(self):
base = self.source_dlc.path()
for src, ref in [('informal', 'formal'), ('formal', 'informal')]:
for genre in ['Entertainment_Music', 'Family_Relationships']:
for split in ['tune', 'test']:
for iref in range(4):
# TODO: etc/gyafc qid mappings... Should this be stored somewhere it can be
# downloaded independently of the abnirml repo?
with open(base/'GYAFC_Corpus'/genre/split/src, 'rt') as s, \
open(base/'GYAFC_Corpus'/genre/split/f'{ref}.ref{iref}', 'rt') as r, \
open(os.path.join('etc', 'gyafc', genre, split, f'{src}.qids'), 'rt') as qids:
for i, (s_line, r_line, qid) in enumerate(zip(s, r, qids)):
s_line = s_line.rstrip()
r_line = r_line.rstrip()
qid = qid.rstrip()
yield GyafcDoc(
f'{src}-{ref}-{genre}-{split}-{i}-{iref}',
genre,
split,
src == 'formal',
s_line if src == 'formal' else r_line,
r_line if src == 'formal' else s_line,
qid,
)
def docs_path(self):
return self.source_dlc.path()
def docs_cls(self):
return GyafcDoc
GYAFC_DATASET = Dataset(GyafcDocs(ZipExtractCache(dlc['docs'], BASE_PATH / 'GYAFC_Corpus')))
ir_datasets.registry.register(NAME, GYAFC_DATASET)
| abnirml-master | abnirml/datasets/gyafc.py |
import tarfile
import io
import os
from collections import namedtuple
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from ir_datasets.util import ZipExtract
from ir_datasets import Dataset
from . import DownloadConfig
NAME = 'abnirml:cnn_dailymail'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
CnnDailyMailDoc = namedtuple('CnnDailyMailDoc', ['doc_id', 'title', 'summary', 'body'])
class CnnDailyMailDocs(ir_datasets.formats.BaseDocs):
def __init__(self, cnn_stories_dlc, dailymail_stories_dlc, cnn_titles_dlc, dailymail_titles_dlc):
self.cnn_stories_dlc = cnn_stories_dlc
self.dailymail_stories_dlc = dailymail_stories_dlc
self.cnn_titles_dlc = cnn_titles_dlc
self.dailymail_titles_dlc = dailymail_titles_dlc
def docs_iter(self):
return iter(self.docs_store())
def _docs_iter(self):
for prefix, stories_dlc, titles_dlc in [('cnn:', self.cnn_stories_dlc, self.cnn_titles_dlc), ('dailymail:', self.dailymail_stories_dlc, self.dailymail_titles_dlc)]:
titles_byid = {}
with titles_dlc.stream() as stream:
for line in io.TextIOWrapper(stream):
doc_id, title = line.split('\t')
titles_byid[doc_id] = title.strip()
with stories_dlc.stream() as stream, \
tarfile.open(fileobj=stream, mode='r|gz') as tarf:
for record in tarf:
if record.path.endswith('.story'):
doc_id = record.path.split('/')[-1].split('.')[0]
title = titles_byid[doc_id]
doc_id = prefix + doc_id
text = tarf.extractfile(record).read().decode()
parts = text.split('@highlight')
summary = '\n'.join(parts[1:])
yield CnnDailyMailDoc(doc_id, title, summary, parts[0].strip())
def docs_path(self):
# multiple source archives; follow the convention used elsewhere and return the first one
return self.cnn_stories_dlc.path()
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{ir_datasets.util.home_path()/NAME}/docs.pklz4',
init_iter_fn=self._docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
)
def docs_cls(self):
return CnnDailyMailDoc
CNN_DAILYMAIL_DATASET = Dataset(CnnDailyMailDocs(
dlc['cnn_stories'],
dlc['dailymail_stories'],
dlc['cnn_titles'],
dlc['dailymail_titles'],
))
ir_datasets.registry.register(NAME, CNN_DAILYMAIL_DATASET)
| abnirml-master | abnirml/datasets/cnn_dailymail.py |
from collections import namedtuple
from typing import NamedTuple
import io
import contextlib
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from . import DownloadConfig
from ir_datasets import Dataset
NAME = 'abnirml:mspc'
BASE_PATH = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, BASE_PATH)
class MspcDoc(NamedTuple):
doc_id: str
text: str
author: str
url: str
agency: str
date: str
web_date: str
class MspcDocpair(NamedTuple):
doc_id_a: str
doc_id_b: str
paraphrase: bool
class MspcDocs(ir_datasets.formats.BaseDocs):
def __init__(self, dlc):
self.dlc = dlc
@ir_datasets.util.use_docstore
def docs_iter(self):
with self.dlc.stream() as stream:
for i, line in enumerate(io.TextIOWrapper(stream)):
if i == 0:
continue # header
cols = line.rstrip().split('\t')
assert len(cols) == 7
yield MspcDoc(*cols)
def docs_cls(self):
return MspcDoc
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{ir_datasets.util.home_path()/NAME}/docs.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
)
def docs_count(self):
return self.docs_store().count()
def docs_namespace(self):
return NAME
def docs_lang(self):
return 'en'
class MspcDocpairs(ir_datasets.formats.BaseDocPairs):
def __init__(self, dlcs):
self.dlcs = dlcs
def docpairs_iter(self):
for dlc in self.dlcs:
with dlc.stream() as stream:
for i, line in enumerate(io.TextIOWrapper(stream)):
if i == 0:
continue # header
cols = line.rstrip().split('\t')
assert len(cols) == 5, cols
yield MspcDocpair(cols[1], cols[2], cols[0] == '1')
def docpairs_cls(self):
return MspcDocpair
SUBSETS = {}
docs = MspcDocs(dlc['sentences'])
SUBSETS[''] = ir_datasets.datasets.base.Dataset(docs, MspcDocpairs([dlc['pairs/train'], dlc['pairs/test']]))
SUBSETS['train'] = ir_datasets.datasets.base.Dataset(docs, MspcDocpairs([dlc['pairs/train']]))
SUBSETS['test'] = ir_datasets.datasets.base.Dataset(docs, MspcDocpairs([dlc['pairs/test']]))
for s_name, subset in SUBSETS.items():
if s_name == '':
ir_datasets.registry.register(NAME, Dataset(subset))
else:
ir_datasets.registry.register(f'{NAME}/{s_name}', Dataset(subset))
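# Illustrative usage sketch (not called anywhere in this repo): iterates the paraphrase
# pairs of the 'train' subset defined above and resolves each pair against the docs
# store. Assumes the MSPC source files referenced by the DownloadConfig are available.
def _example_iter_paraphrase_pairs():
    train = SUBSETS['train']
    store = train.docs_store()
    for pair in train.docpairs_iter():
        doc_a, doc_b = store.get(pair.doc_id_a), store.get(pair.doc_id_b)
        yield pair.paraphrase, doc_a.text, doc_b.text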
| abnirml-master | abnirml/datasets/mspc.py |
import pyterrier
import pandas as pd
from abnirml.java import J
from .base import Scorer
class _TerrierRetriever(Scorer):
def __init__(self, name, index, batch_size=100):
super().__init__(name)
self.index = index
self.batch_size = batch_size
def batch_score(self, queries, texts):
J.initialize()
ti = self.index
retr = pyterrier.batchretrieve.TextScorer(
background_index=ti._index(),
controls=self._controls(),
properties=self._properties(),
num_results=len(queries))
df = []
qid_map = {}
doc_map = {}
for q, t in zip(queries, texts):
q = ti.parse_query(q)
if q not in qid_map:
qid_map[q] = len(qid_map)
if t not in doc_map:
doc_map[t] = len(doc_map)
df.append((str(qid_map[q]), q, str(doc_map[t]), t))
df = pd.DataFrame(df, columns=['qid', 'query', 'docno', 'body'], dtype=str)
run = retr.transform(df)
result = []
for tup in df.itertuples():
r = run[(run.qid == tup.qid) & (run.docno == tup.docno)]['score']
if len(r) > 0:
result.append(list(r)[0])
else:
result.append(0.)
return result
def score_iter(self, it):
batch_size = self.batch_size
q_batch, t_batch = [], []
for query, text in it:
q_batch.append(query)
t_batch.append(text)
if len(q_batch) >= batch_size:
scores = self.batch_score(q_batch, t_batch)
yield from scores
q_batch, t_batch = [], []
if q_batch:
scores = self.batch_score(q_batch, t_batch)
yield from scores
def _controls(self):
return {}
def _properties(self):
return {}
class TerrierBM25(_TerrierRetriever):
def __init__(self, index, k1=1.2, b=0.8, batch_size=100, delta=0.0):
super().__init__("TerrierBM25", index, batch_size)
self.k1 = k1
self.b = b
self._delta = delta
def delta(self):
return self._delta
def _controls(self):
return {
'wmodel': 'BM25',
'c': str(self.b)
}
def _properties(self):
return {
'bm25.k_1': str(self.k1)
}
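# Illustrative usage sketch (not called anywhere in this repo): scores (query, text)
# pairs with the BM25 scorer above. `index` is a placeholder for the index helper this
# package builds elsewhere; batch_score above assumes it exposes _index() and
# parse_query().
def _example_bm25_scores(index, pairs):
    scorer = TerrierBM25(index)
    return list(scorer.score_iter(pairs))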
| abnirml-master | abnirml/scorers/terrier.py |
import hashlib
import gzip
import torch
import transformers
import ir_datasets
from .base import Scorer
from .cached import SimpleFsCache
_logger = ir_datasets.log.easy()
class DocTTTTTQuery(Scorer):
def __init__(self, scorer, count=4, delta=0):
super().__init__(None)
self.scorer = scorer
self.count = count
self.model = None
self.cache = None
self._delta = delta
def delta(self):
return self._delta
def score_iter(self, it):
self.build()
def iter_exp_docs():
for qtext, dtext in it:
dexp = self.model.expand_document(dtext)
dtext = f'{dtext} {dexp}'
yield qtext, dtext
return self.scorer.score_iter(iter_exp_docs())
def build(self):
if self.model is None:
with _logger.duration('loading T5 model'):
self.model = DocTTTTTQueryModel(count=self.count)
class DocTTTTTQueryModel:
def __init__(self, tok_base='t5-base', model_base='castorini/doc2query-t5-base-msmarco', count=4):
super().__init__()
self.tokenizer = transformers.T5Tokenizer.from_pretrained(tok_base)
self.config = transformers.T5Config.from_pretrained('t5-base')
self.model = transformers.T5ForConditionalGeneration.from_pretrained(model_base, config=self.config)
if torch.cuda.is_available():
self.model.cuda()
self.count = count
self.cache = SimpleFsCache('cache/doctttttquery.cache', gzip.open)
def expand_document(self, doc_text):
key = hashlib.md5(doc_text.encode()).digest()
if key not in self.cache:
expansions = []
expansions.append("")
doc_text += ' </s>'
input_ids = self.tokenizer.encode(doc_text, return_tensors='pt')
if torch.cuda.is_available():
input_ids = input_ids.cuda()
input_ids = input_ids[:, :1024]
outputs = self.model.generate(
input_ids=input_ids,
max_length=64,
do_sample=True,
top_k=10,
num_return_sequences=self.count)
for i in range(self.count):
exp = self.tokenizer.decode(outputs[i], skip_special_tokens=True)
expansions.append(exp)
self.cache[key] = ' '.join(expansions)
return self.cache[key]
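# Illustrative composition sketch (not called anywhere in this repo): DocTTTTTQuery
# expands each document with generated queries before delegating to an underlying
# scorer. `base_scorer` stands in for any Scorer from this package (e.g. a BM25 or
# BERT scorer); it is an assumption of the example, not something defined here.
def _example_score_with_expansion(base_scorer, pairs):
    expanded_scorer = DocTTTTTQuery(base_scorer, count=4)
    # `pairs` is an iterable of (query_text, doc_text) tuples
    return list(expanded_scorer.score_iter(pairs))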
| abnirml-master | abnirml/scorers/doctttttquery.py |
import os
import re
import pickle
import hashlib
from pathlib import Path
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from nltk import word_tokenize
import ir_datasets
from .base import NeuralScorer
_logger = ir_datasets.log.easy()
class ConvKNRM(NeuralScorer):
def __init__(self, weight_path, batch_size=8):
super().__init__(None, batch_size)
self.weight_path = weight_path
def delta(self):
return 0.434799 # 50th percentile delta from msmarco dev
def build(self):
if self.model is None:
with _logger.duration('loading ConvKNRM model'):
self.model = ConvKNRMModel()
self.model.load_state_dict(torch.load(self.weight_path, map_location=torch.device('cpu') ), strict=False)
if torch.cuda.is_available():
self.model.cuda()
class KNRM(NeuralScorer):
def __init__(self, weight_path, batch_size=8):
super().__init__(None, batch_size)
self.weight_path = weight_path
def delta(self):
return 0.31069 # 50th percentile delta from msmarco dev
def build(self):
if self.model is None:
with _logger.duration('loading KNRM model'):
self.model = KNRMModel()
self.model.load_state_dict(torch.load(self.weight_path, map_location=torch.device('cpu') ), strict=False)
if torch.cuda.is_available():
self.model.cuda()
class ConvKNRMModel(torch.nn.Module):
"""
Implementation of the ConvKNRM model from:
> Zhuyun Dai, Chenyan Xiong, Jamie Callan, and Zhiyuan Liu. 2018. Convolutional Neural
> Networks for Soft-Matching N-Grams in Ad-hoc Search. In WSDM.
"""
def __init__(self):
super().__init__()
self.tokenizer = Tokenizer()
self.embed = self.tokenizer.encoder()
self.simmat = InteractionMatrix()
self.padding, self.convs = nn.ModuleList(), nn.ModuleList()
for conv_size in range(1, 4):
if conv_size > 1:
self.padding.append(nn.ConstantPad1d((0, conv_size-1), 0))
else:
self.padding.append(nn.Sequential()) # identity
self.convs.append(nn.ModuleList())
self.convs[-1].append(nn.Conv1d(self.embed.dim(), 128, conv_size))
self.kernels = RbfKernelBank.from_strs('-0.9,-0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9,1.0', '0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.001', dim=1, requires_grad=True)
channels = 3 ** 2
self.combine = nn.Linear(self.kernels.count() * channels, 1)
def forward(self, query_text, doc_text):
inputs = self.tokenizer.tokenize_queries_docs(query_text, doc_text)
enc = self.embed.enc_query_doc(**inputs)
a_embed, b_embed = enc['query'], enc['doc']
a_reps, b_reps = [], []
for pad, conv in zip(self.padding, self.convs):
a_reps.append(conv[0](pad(a_embed.permute(0, 2, 1))).permute(0, 2, 1))
b_reps.append(conv[0](pad(b_embed.permute(0, 2, 1))).permute(0, 2, 1))
simmats = []
for a_rep in a_reps:
for b_rep in b_reps:
simmats.append(self.simmat(a_rep, b_rep, inputs['query_tok'], inputs['doc_tok']))
simmats = torch.cat(simmats, dim=1)
mask = (simmats != 0.).unsqueeze(1) # which cells are not padding?
kernels = self.kernels(simmats)
kernels[~mask.expand(kernels.shape)] = 0. # zero out padding
BATCH, KERNELS, VIEWS, QLEN, DLEN = kernels.shape
kernels = kernels.reshape(BATCH, KERNELS * VIEWS, QLEN, DLEN)
result = kernels.sum(dim=3) # sum over document
simmats = simmats.reshape(BATCH, 1, VIEWS, QLEN, DLEN) \
.expand(BATCH, KERNELS, VIEWS, QLEN, DLEN) \
.reshape(BATCH, KERNELS * VIEWS, QLEN, DLEN)
result = torch.where(mask[:,:,0].sum(dim=-1) != 0, (result + 1e-6).log(), torch.zeros_like(result))
result = result.sum(dim=2) # sum over query terms
result = self.combine(result)
return result.flatten()
class KNRMModel(nn.Module):
def __init__(self):
super().__init__()
self.tokenizer = Tokenizer()
self.encoder = self.tokenizer.encoder()
self.simmat = InteractionMatrix()
self.kernels = RbfKernelBank.from_strs('-0.9,-0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9,1.0', '0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.001', dim=1, requires_grad=True)
self.combine = nn.Linear(self.kernels.count(), 1)
def forward(self, query_text, doc_text):
inputs = self.tokenizer.tokenize_queries_docs(query_text, doc_text)
simmat = self.simmat.encode_query_doc(self.encoder, **inputs)
kernel_scores = self.kernel_pool(simmat)
result = self.combine(kernel_scores) # linear combination over kernels
return result.flatten()
def kernel_pool(self, simmat):
mask = (simmat != 0.).unsqueeze(1) # which cells are not padding?
kernels = self.kernels(simmat)
kernels[~mask.expand(kernels.shape)] = 0. # zero out padding
BATCH, KERNELS, VIEWS, QLEN, DLEN = kernels.shape
kernels = kernels.reshape(BATCH, KERNELS * VIEWS, QLEN, DLEN)
result = kernels.sum(dim=3) # sum over document
simmat = simmat.reshape(BATCH, 1, VIEWS, QLEN, DLEN) \
.expand(BATCH, KERNELS, VIEWS, QLEN, DLEN) \
.reshape(BATCH, KERNELS * VIEWS, QLEN, DLEN)
result = torch.where(mask[:,0].sum(dim=3) != 0, (result + 1e-6).log(), torch.zeros_like(result))
result = result.sum(dim=2) # sum over query terms
return result
class RbfKernelBank(nn.Module):
def __init__(self, mus=None, sigmas=None, dim=0, requires_grad=True):
super().__init__()
self.mus = nn.Parameter(torch.tensor(mus), requires_grad=requires_grad)
self.sigmas = nn.Parameter(torch.tensor(sigmas), requires_grad=requires_grad)
self.dim = dim
    def forward(self, data):
        # insert a singleton axis at `self.dim` so each similarity value broadcasts
        # against one kernel per (mu, sigma) pair
        shape = list(data.shape)
        shape.insert(self.dim, 1)
        data = data.reshape(*shape)
        shape = [1]*len(data.shape)
        shape[self.dim] = -1
        mus, sigmas = self.mus.reshape(*shape), self.sigmas.reshape(*shape)
        # Gaussian (RBF) kernel: exp(-(x - mu)^2 / (2 * sigma^2)) for each kernel
        adj = data - mus
        return torch.exp(-0.5 * adj * adj / sigmas / sigmas)
def count(self):
return self.mus.shape[0]
@staticmethod
def from_strs(mus='-0.9,-0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9,1.0', \
sigmas='0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.001', dim=-1, requires_grad=True):
mus = [float(x) for x in mus.split(',')]
sigmas = [float(x) for x in sigmas.split(',')]
return RbfKernelBank(mus, sigmas, dim=dim, requires_grad=requires_grad)
@staticmethod
def evenly_spaced(count=11, sigma=0.1, rng=(-1, 1), dim=-1, requires_grad=True):
mus = [x.item() for x in torch.linspace(rng[0], rng[1], steps=count)]
sigmas = [sigma for _ in mus]
return RbfKernelBank(mus, sigmas, dim=dim, requires_grad=requires_grad)
def binmat(a, b, padding=None):
BAT, A, B = a.shape[0], a.shape[1], b.shape[1]
a = a.reshape(BAT, A, 1)
b = b.reshape(BAT, 1, B)
result = (a == b)
if padding is not None:
result = result & (a != padding) & (b != padding)
return result.float()
def cos_simmat(a, b, amask=None, bmask=None):
BAT, A, B = a.shape[0], a.shape[1], b.shape[1]
a_denom = a.norm(p=2, dim=2).reshape(BAT, A, 1) + 1e-9 # avoid 0div
b_denom = b.norm(p=2, dim=2).reshape(BAT, 1, B) + 1e-9 # avoid 0div
result = a.bmm(b.permute(0, 2, 1)) / (a_denom * b_denom)
if amask is not None:
result = result * amask.reshape(BAT, A, 1)
if bmask is not None:
result = result * bmask.reshape(BAT, 1, B)
return result
class InteractionMatrix(nn.Module):
def __init__(self, padding=-1):
super().__init__()
self.padding = padding
def forward(self, a_embed, b_embed, a_tok, b_tok):
wrap_list = lambda x: x if isinstance(x, list) else [x]
a_embed = wrap_list(a_embed)
b_embed = wrap_list(b_embed)
BAT, A, B = a_embed[0].shape[0], a_embed[0].shape[1], b_embed[0].shape[1]
simmats = []
for a_emb, b_emb in zip(a_embed, b_embed):
if a_emb.dtype is torch.long and len(a_emb.shape) == 2 and \
b_emb.dtype is torch.long and len(b_emb.shape) == 2:
# binary matrix
simmats.append(binmat(a_emb, b_emb, padding=self.padding))
else:
# cosine similarity matrix
a_mask = (a_tok.reshape(BAT, A, 1) != self.padding).float()
b_mask = (b_tok.reshape(BAT, 1, B) != self.padding).float()
simmats.append(cos_simmat(a_emb, b_emb, a_mask, b_mask))
return torch.stack(simmats, dim=1)
def encode_query_doc(self, encoder, **inputs):
enc = encoder.enc_query_doc(**inputs)
return self(enc['query'], enc['doc'], inputs['query_tok'], inputs['doc_tok'])
class Tokenizer:
def __init__(self):
with open('bing.pkl', 'rb') as f:
self._terms, self._weights = pickle.load(f)
self._term2idx = {t: i for i, t in enumerate(self._terms)}
self._hashspace = 1000
random = np.random.RandomState(42)
hash_weights = random.normal(scale=0.5, size=(self._hashspace, self._weights.shape[1]))
self._weights = np.concatenate([self._weights, hash_weights])
def tokenize(self, text):
"""
        Meant to be overridden to provide vocab-specific tokenization when necessary,
        e.g., BERT's WordPiece tokenization (see the example subclass below)
"""
text = text.lower()
text = re.sub(r'[^a-z0-9]', ' ', text)
return text.split()
def id2tok(self, idx):
return self._terms[idx]
def tokenize_queries_docs(self, queries, docs):
queries = [[self.tok2id(t) for t in self.tokenize(q)] for q in queries]
docs = [[self.tok2id(t) for t in self.tokenize(d)] for d in docs]
query_len = [len(q) for q in queries]
max_q = max(query_len)
queries = [q + [-1] * (max_q - len(q)) for q in queries]
doc_len = [len(d) for d in docs]
max_d = max(doc_len)
docs = [d + [-1] * (max_d - len(d)) for d in docs]
result = {'query_tok': torch.tensor(queries), 'query_len': torch.tensor(query_len),
'doc_tok': torch.tensor(docs), 'doc_len': torch.tensor(doc_len)}
if torch.cuda.is_available():
result = {k: v.cuda() for k, v in result.items()}
return result
def lexicon_size(self) -> int:
return len(self._terms)
def tok2id(self, tok):
try:
return self._term2idx[tok]
except KeyError:
# NOTE: use md5 hash (or similar) here because hash() is not consistent across runs
item = tok.encode()
item_hash = int(hashlib.md5(item).hexdigest(), 16)
item_hash_pos = item_hash % self._hashspace
return len(self._terms) + item_hash_pos
def encoder(self):
return WordvecEncoder(self)
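# Minimal sketch of the kind of subclass the `tokenize` docstring above has in mind:
# swapping in a different tokenization rule while reusing the vocabulary and the
# hashing-based OOV handling. The whitespace rule below is only an illustration, not
# a tokenizer used anywhere in this repo.
class _ExampleWhitespaceTokenizer(Tokenizer):
    def tokenize(self, text):
        # lowercase and split on whitespace only (no punctuation stripping)
        return text.lower().split()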
class WordvecEncoder(nn.Module):
def __init__(self, vocabulary):
super().__init__()
matrix = vocabulary._weights
self.size = matrix.shape[1]
matrix = np.concatenate([np.zeros((1, self.size)), matrix]) # add padding record (-1)
self.embed = nn.Embedding.from_pretrained(torch.from_numpy(matrix.astype(np.float32)))
def forward(self, toks, lens=None):
# lens ignored
return self.embed(toks + 1) # +1 to handle padding at position -1
def enc_query_doc(self, query_tok, query_len, doc_tok, doc_len):
return {
'query': self.forward(query_tok, query_len),
'doc': self.forward(doc_tok, doc_len),
}
def dim(self):
return self.embed.weight.shape[1]
| abnirml-master | abnirml/scorers/conv_knrm.py |
# import os
# import torch
# import torch.nn.functional as F
# from transformers import BertTokenizerFast, BertForNextSentencePrediction
# from pytorch_transformers.modeling_bert import BertForPreTraining, BertPreTrainedModel, BertEmbeddings, BertEncoder, BertPreTrainingHeads
# from .base import NeuralScorer
# class EPIC(NeuralScorer):
# def __init__(self, weight_path=None, batch_size=8):
# super().__init__("EPIC", batch_size)
# self.weight_path = weight_path
# def build(self):
# if self.model is None:
# self.model = EpicModel()
# weight_path = self.weight_path
# if os.path.exists(weight_path):
# self.model.load_state_dict(torch.load(weight_path, map_location=torch.device('cpu')))
# if torch.cuda.is_available():
# self.model = self.model.cuda()
# def delta(self):
# return 0.294334 # 50th percentile delta from msmarco dev
# class EpicModel(torch.nn.Module):
# """
# Implementation of the EPIC model from:
# > Sean MacAvaney, Franco Maria Nardini, Raffaele Perego, Nicola Tonellotto,
# > Nazli Goharian, and Ophir Frieder. 2020. Expansion via Prediction of Importance with
# > Contextualization. In SIGIR.
# """
# def __init__(self):
# super().__init__()
# self.encoder = SepBertEncoder()
# self.query_salience = torch.nn.Linear(768, 1)
# self.doc_salience = torch.nn.Linear(768, 1)
# self.activ = lambda x: (1. + F.softplus(x)).log()
# self._nil = torch.nn.Parameter(torch.zeros(1))
# self.doc_quality = torch.nn.Linear(768, 1)
# def forward(self, query_text, doc_text):
# encoded = self.encoder.enc_query_doc(query_text, doc_text)
# query_vecs = self.query_full_vector(encoded['query'], encoded['query_len'], encoded['query_tok'])
# doc_vecs, _ = self.doc_full_vector(encoded['doc'], encoded['doc_len'], encoded['doc_cls'])
# return self.similarity(query_vecs, doc_vecs)
# def doc_full_vector(self, doc_tok_reps, doc_len, doc_cls):
# tok_salience = self.doc_salience(doc_tok_reps)
# tok_salience = self.activ(tok_salience)
# exp_raw = self.encoder.bert.cls.predictions(doc_tok_reps)
# mask = lens2mask(doc_len, exp_raw.shape[1])
# exp = self.activ(exp_raw)
# exp = exp * tok_salience * mask.unsqueeze(2).float()
# exp, _ = exp.max(dim=1)
# qlty = torch.sigmoid(self.doc_quality(doc_cls))
# exp = qlty * exp
# qlty = qlty.reshape(doc_cls.shape[0])
# return exp, qlty
# def query_full_vector(self, query_tok_reps, query_len, query_tok, dense=True):
# tok_salience = self._query_salience(query_tok_reps, query_len, query_tok)
# idx0 = torch.arange(tok_salience.shape[0], device=tok_salience.device).reshape(tok_salience.shape[0], 1).expand(tok_salience.shape[0], tok_salience.shape[1]).flatten()
# idx1 = query_tok.flatten()
# idx1[idx1 == -1] = 0
# s = torch.Size([query_tok.shape[0], self.encoder.lexicon_size()])
# result = torch.sparse.FloatTensor(torch.stack((idx0, idx1)), tok_salience.flatten(), s)
# if dense:
# result = result.to_dense()
# return result
# def _query_salience(self, query_tok_reps, query_len, query_tok):
# inputs = query_tok_reps
# tok_salience = self.query_salience(inputs)
# tok_salience = self.activ(tok_salience).squeeze(2)
# mask = lens2mask(query_len, query_tok.shape[1])
# tok_salience = tok_salience * mask.float()
# return tok_salience
# def similarity(self, query_vecs, doc_vecs):
# return (query_vecs * doc_vecs).sum(dim=1)
# class SepBertEncoder(torch.nn.Module):
# def __init__(self):
# super().__init__()
# self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
# self.bert = CustomBertModelWrapper.from_pretrained('bert-base-uncased')
# self.CLS = self.tokenizer.convert_tokens_to_ids('[CLS]')
# self.SEP = self.tokenizer.convert_tokens_to_ids('[SEP]')
# self.bert.set_trainable(True)
# def lexicon_size(self):
# return self.tokenizer._tokenizer.get_vocab_size()
# def enc_query_doc(self, query_text, doc_text):
# enc_query = self.tokenizer.batch_encode_plus(query_text, return_tensors='pt', padding=True, truncation=True)
# enc_query = {k: v[:, :500] for k, v in enc_query.items()}
# if torch.cuda.is_available():
# enc_query = {k: v.cuda() for k, v in enc_query.items()}
# result_query = self.bert(**enc_query)
# query_tok = enc_query['input_ids'][:, 1:]
# query_tok[query_tok < 1000] = -1
# enc_doc = self.tokenizer.batch_encode_plus(doc_text, return_tensors='pt', padding=True, truncation=True)
# enc_doc = {k: v[:, :500] for k, v in enc_doc.items()}
# if torch.cuda.is_available():
# enc_doc = {k: v.cuda() for k, v in enc_doc.items()}
# enc_doc['token_type_ids'][:, :] = 1
# result_doc = self.bert(**enc_doc)
# doc_tok = enc_doc['input_ids'][:, 1:]
# doc_tok[doc_tok < 1000] = -1
# return {
# 'query': result_query[-1][:, 1:],
# 'query_cls': result_query[-1][:, 0],
# 'query_tok': query_tok,
# 'query_len': enc_query['attention_mask'].sum(dim=1) - 2,
# 'doc': result_doc[-1][:, 1:],
# 'doc_cls': result_doc[-1][:, 0],
# 'doc_tok': doc_tok,
# 'doc_len': enc_doc['attention_mask'].sum(dim=1) - 2,
# }
# class CustomBertModelWrapper(BertForPreTraining):
# def __init__(self, config, depth=None):
# config.output_hidden_states = True
# super().__init__(config)
# self.bert = CustomBertModel(config, depth) # replace with custom model
# def forward(self, input_ids, token_type_ids, attention_mask):
# return self.bert(input_ids, token_type_ids, attention_mask)
# @classmethod
# def from_pretrained(cls, *args, **kwargs):
# result = super().from_pretrained(*args, **kwargs)
# if result.bert.depth is not None:
# # limit the depth by cutting out layers it doesn't need to calculate
# result.bert.encoder.layer = result.bert.encoder.layer[:result.bert.depth]
# else:
# result.depth = len(result.bert.encoder.layer)
# return result
# def set_trainable(self, trainable):
# for param in self.parameters():
# param.requires_grad = trainable
# class CustomBertModel(BertPreTrainedModel):
# """
# Based on pytorch_pretrained_bert.BertModel, but with some extra goodies:
# - depth: number of layers to run in BERT, where 0 is the raw embeddings, and -1 is all
# available layers
# """
# def __init__(self, config, depth=None):
# super(CustomBertModel, self).__init__(config)
# self.depth = depth
# self.embeddings = BertEmbeddings(config)
# self.encoder = BertEncoder(config)
# self.cls = BertPreTrainingHeads(config)
# self.apply(self.init_weights)
# def forward(self, input_ids, token_type_ids, attention_mask):
# """
# Based on pytorch_pretrained_bert.BertModel
# """
# embedding_output = self.embeddings(input_ids, token_type_ids)
# if self.depth == 0:
# return [embedding_output]
# return self.forward_from_layer(embedding_output, attention_mask)
# def forward_from_layer(self, embedding_output, attention_mask):
# extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
# extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# head_mask = [None] * self.config.num_hidden_layers
# _, encoded_layers = self.encoder(embedding_output, extended_attention_mask, head_mask)
# return list(encoded_layers)
# def lens2mask(lens, size):
# mask = []
# for l in lens.cpu():
# l = l.item()
# mask.append(([1] * l) + ([0] * (size - l)))
# return torch.tensor(mask, device=lens.device).long()
| abnirml-master | abnirml/scorers/epic.py |
import torch
import torch.nn.functional as F
import transformers
import ir_datasets
from .base import NeuralScorer
_logger = ir_datasets.log.easy()
class Seq2seqT5(NeuralScorer):
"""
From:
> Rodrigo Nogueira, Zhiying Jiang, and Jimmy Lin. Document Ranking with a Pretrained
> Sequence-to-Sequence Model. arxiv 2020.
"""
def __init__(self, model_base='t5-large', tok_base='t5-large', batch_size=8):
super().__init__("VanillaT5", batch_size)
self.model_base = model_base
self.tok_base = tok_base
def delta(self):
return 0.00237513 # 50th percentile delta from msmarco dev
def build(self):
if self.model is None:
with _logger.duration('loading T5 model'):
self.model = Seq2seqT5Model(self.model_base, self.tok_base)
if torch.cuda.is_available():
self.model.cuda()
class Seq2seqT5Model(torch.nn.Module):
def __init__(self, model_base, tok_base):
super().__init__()
self.tokenizer = transformers.T5Tokenizer.from_pretrained(tok_base)
if model_base.startswith('/'):
self.model = transformers.T5ForConditionalGeneration.from_pretrained(None, config=model_base.replace('pytorch_model.bin', 'config.json'), state_dict=torch.load(model_base, map_location=torch.device('cpu') ))
else:
self.model = transformers.T5ForConditionalGeneration.from_pretrained(model_base)
self.REL = self.tokenizer.encode('true')[0]
self.NREL = self.tokenizer.encode('false')[0]
def forward(self, **inputs):
enc = self.tokenizer.batch_encode_plus([f'Query: {q} Document: {d} Relevant:' for q, d in zip(inputs['query_text'], inputs['doc_text'])], return_tensors='pt', pad_to_max_length=True)
enc['decoder_input_ids'] = torch.full(
(len(inputs['query_text']), 1),
self.model.config.decoder_start_token_id,
dtype=torch.long
)
for field in list(enc):
enc[field] = enc[field][:, :512] # crop to 512 (max length)
if torch.cuda.is_available():
enc = {k: v.cuda() for k, v in enc.items()}
result, _, _ = self.model(**enc)
result = result[:, 0, (self.REL, self.NREL)]
return F.log_softmax(result, dim=1)[:, 0]
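# Illustrative usage sketch (not executed on import): scoring one query/document pair
# with the sequence-to-sequence scorer above. In practice `model_base` would point at
# a relevance-tuned T5 checkpoint; the default 't5-large' weights are fetched by
# transformers on first use. The query/document strings are arbitrary examples.
def _example_score_pair():
    scorer = Seq2seqT5()
    pairs = [('what is information retrieval', 'Information retrieval is the task of finding relevant documents.')]
    return list(scorer.score_iter(pairs))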
| abnirml-master | abnirml/scorers/seq2seq_t5.py |
from . import base
from .base import PyTerrierScorer, Scorer, NeuralScorer
from .seq2seq_t5 import Seq2seqT5
from .terrier import TerrierBM25
from .cached import CachedScorer
from .vanilla_bert import VanillaBERT
from .doctttttquery import DocTTTTTQuery
from .conv_knrm import ConvKNRM, KNRM
from .s2 import S2
| abnirml-master | abnirml/scorers/__init__.py |
from pathlib import Path
import gzip
import zlib
import itertools
import hashlib
import pickle
import ir_datasets
_logger = ir_datasets.log.easy()
class SimpleFsCache:
def __init__(self, path, open_fn=open):
self._memcache = {}
if Path(path).exists():
with open_fn(path, 'r+b') as f:
underlying_f = f.fileobj if isinstance(f, gzip.GzipFile) else f
last_valid_pos = None
while True:
try:
pos = underlying_f.tell()
key, value = pickle.load(f), pickle.load(f)
if isinstance(key, list) and isinstance(value, list):
for k, v in zip(key, value):
self._memcache[k] = v
else:
self._memcache[key] = value
last_valid_pos = pos
except EOFError:
break # done reading cache
except zlib.error as e:
# hmmmm, problem decoding.
underlying_f.seek(last_valid_pos)
underlying_f.truncate()
                        _logger.warn(f'gzip error loading: {repr(e)}. Truncating to last valid position in file.')
break
_logger.info(f'loaded cache with {len(self._memcache)} items')
self._file = open_fn(path, 'ab')
def __contains__(self, key):
return key in self._memcache
def __getitem__(self, key):
return self._memcache[key]
def __setitem__(self, key, value):
        # avoid bloat by re-saving the same value (common when things are batched)
if key not in self._memcache or self._memcache[key] != value:
pickle.dump(key, self._file)
pickle.dump(value, self._file)
self._file.flush()
self._memcache[key] = value
class CachedScorer:
def __init__(self, scorer, cache_path, hashfn=hashlib.md5):
self._scorer = scorer
self._cache_path = Path(cache_path)
self._cache = None
self._hashfn = hashfn
def delta(self):
return self._scorer.delta()
def cache(self):
if self._cache is None:
self._cache = SimpleFsCache(self._cache_path)
return self._cache
def score_iter(self, it):
cache = self.cache()
will_be_cached = set() # items that will enter the cache. This may not seem like it does anything, but due to batched scoring, it does
def cache_lookup_iter(it):
for qtext, dtext in it:
key = (qtext, dtext)
key = self._hashfn(repr(key).encode()).digest()
if key in cache or key in will_be_cached:
yield qtext, dtext, key, False # should not score
else:
will_be_cached.add(key)
yield qtext, dtext, key, True # should score
it = cache_lookup_iter(it)
it1, it2 = itertools.tee(it, 2)
def to_be_scored_iter(it):
for qtext, dtext, _, should_score in it:
if should_score:
yield qtext, dtext
it2 = self._scorer.score_iter(to_be_scored_iter(it2))
for _, _, key, should_score in it1:
if should_score:
score = next(it2, StopIteration)
if score is StopIteration:
break
cache[key] = score
will_be_cached.discard(key) # it's been cached!
yield cache[key]
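# Illustrative usage sketch (not called anywhere in this repo): CachedScorer memoises
# scores on disk, so repeated (query, document) pairs skip the underlying scorer.
# `base_scorer` and the cache path below are placeholders assumed by the example.
def _example_cached_scoring(base_scorer, pairs):
    scorer = CachedScorer(base_scorer, 'cache/example_scorer.cache')
    return list(scorer.score_iter(pairs))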
| abnirml-master | abnirml/scorers/cached.py |
import os
import math
import torch
import torch.nn.functional as F
from transformers import BertTokenizerFast, BertForNextSentencePrediction
from pytorch_transformers.modeling_bert import BertForPreTraining, BertPreTrainedModel, BertEmbeddings, BertEncoder, BertPreTrainingHeads
from .base import NeuralScorer
class VanillaBERT(NeuralScorer):
def __init__(self, model_base='bert-base-uncased', weight_path=None, batch_size=8, outputs=2, delta=0.):
super().__init__("VanillaBERT", batch_size)
self.model_base = model_base
self.weight_path = weight_path
self.outputs = outputs
self._delta = delta
def delta(self):
return self._delta
def build(self):
if self.model is None:
self.model = VanillaBERTModel(self.model_base, self.outputs)
if self.weight_path is not None:
self.model.load_state_dict(torch.load(self.weight_path, map_location=torch.device('cpu')))
if torch.cuda.is_available():
self.model.cuda()
class UntunedBERT(NeuralScorer):
def build(self):
if self.model is None:
self.model = UntunedBERTModel()
self.model.cuda()
class UntunedBERTModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
self.model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
def forward(self, query_text, doc_text):
enc = self.tokenizer.batch_encode_plus(list(zip(query_text, doc_text)), return_tensors='pt', padding=True, truncation=True)
enc = {k: v[:, :500] for k, v in enc.items()}
if torch.cuda.is_available():
enc = {k: v.cuda() for k, v in enc.items()}
result, = self.model(**enc)
return F.log_softmax(result, dim=1)[:, 0]
class VanillaBERTModel(torch.nn.Module):
def __init__(self, bert_base, outputs=2):
super().__init__()
self.encoder = JointBertEncoder(bert_base)
self.ranker = torch.nn.Linear(1024 if 'large' in bert_base else 768, outputs)
def forward(self, **inputs):
pooled_output = self.encoder.enc_query_doc(**inputs)['cls']
result = self.ranker(pooled_output)
return result[:, 0]
class JointBertEncoder(torch.nn.Module):
def __init__(self, bert_base):
super().__init__()
self.tokenizer = BertTokenizerFast.from_pretrained(bert_base)
self.bert = CustomBertModelWrapper.from_pretrained(bert_base)
self.CLS = self.tokenizer.convert_tokens_to_ids('[CLS]')
self.SEP = self.tokenizer.convert_tokens_to_ids('[SEP]')
self.bert.set_trainable(True)
def enc_query_doc(self, **inputs):
enc = self.tokenizer.batch_encode_plus(list(zip(inputs['query_text'], inputs['doc_text'])), return_tensors='pt', padding=True, truncation=True)
enc = {k: v[:, :500] for k, v in enc.items()}
if torch.cuda.is_available():
enc = {k: v.cuda() for k, v in enc.items()}
result = self.bert(**enc)
return {
'cls': result[-1][:, 0]
}
def subbatch(toks, maxlen):
_, DLEN = toks.shape[:2]
SUBBATCH = math.ceil(DLEN / maxlen)
    S = math.ceil(DLEN / SUBBATCH) if SUBBATCH > 0 else 0 # minimize the subbatch size given the number of subbatches
stack = []
if SUBBATCH == 1:
return toks, SUBBATCH
else:
for s in range(SUBBATCH):
stack.append(toks[:, s*S:(s+1)*S])
if stack[-1].shape[1] != S:
nulls = torch.zeros_like(toks[:, :S - stack[-1].shape[1]])
stack[-1] = torch.cat([stack[-1], nulls], dim=1)
        return torch.cat(stack, dim=0), SUBBATCH
def un_subbatch(embed, toks, maxlen):
BATCH, DLEN = toks.shape[:2]
SUBBATCH = math.ceil(DLEN / maxlen)
if SUBBATCH == 1:
return embed
else:
embed_stack = []
for b in range(SUBBATCH):
embed_stack.append(embed[b*BATCH:(b+1)*BATCH])
embed = torch.cat(embed_stack, dim=1)
embed = embed[:, :DLEN]
return embed
def lens2mask(lens, size):
mask = []
for l in lens.cpu():
l = l.item()
mask.append(([1] * l) + ([0] * (size - l)))
return torch.tensor(mask, device=lens.device).long()
class CustomBertModelWrapper(BertForPreTraining):
def __init__(self, config, depth=None):
config.output_hidden_states = True
super().__init__(config)
self.bert = CustomBertModel(config, depth) # replace with custom model
def forward(self, input_ids, token_type_ids, attention_mask):
return self.bert(input_ids, token_type_ids, attention_mask)
@classmethod
def from_pretrained(cls, *args, **kwargs):
result = super().from_pretrained(*args, **kwargs)
if result.bert.depth is not None:
# limit the depth by cutting out layers it doesn't need to calculate
result.bert.encoder.layer = result.bert.encoder.layer[:result.bert.depth]
else:
result.depth = len(result.bert.encoder.layer)
return result
def set_trainable(self, trainable):
for param in self.parameters():
param.requires_grad = trainable
class CustomBertModel(BertPreTrainedModel):
"""
Based on pytorch_pretrained_bert.BertModel, but with some extra goodies:
- depth: number of layers to run in BERT, where 0 is the raw embeddings, and -1 is all
available layers
"""
def __init__(self, config, depth=None):
super(CustomBertModel, self).__init__(config)
self.depth = depth
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.cls = BertPreTrainingHeads(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask):
"""
Based on pytorch_pretrained_bert.BertModel
"""
embedding_output = self.embeddings(input_ids, token_type_ids)
if self.depth == 0:
return [embedding_output]
return self.forward_from_layer(embedding_output, attention_mask)
def forward_from_layer(self, embedding_output, attention_mask):
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
_, encoded_layers = self.encoder(embedding_output, extended_attention_mask, head_mask)
return list(encoded_layers)
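# Illustrative usage sketch (not executed on import): the scorer above wraps a
# fine-tuned BERT re-ranker; 'path/to/weights.pt' is a placeholder for a checkpoint
# produced elsewhere, not a file shipped with this repo.
def _example_vanilla_bert_scores(pairs, weight_path='path/to/weights.pt'):
    scorer = VanillaBERT(weight_path=weight_path)
    return list(scorer.score_iter(pairs))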
| abnirml-master | abnirml/scorers/vanilla_bert.py |
from typing import Dict, List, Tuple
import pickle
import datetime
from collections import Counter
import re
import numpy as np
import pandas as pd
import kenlm
from nlpre import unidecoder
from nltk.util import ngrams
from blingfire import text_to_words
import ir_datasets
from .base import NeuralScorer
class S2(NeuralScorer):
def __init__(self, delta=0.0):
super().__init__(None)
self._delta = delta
def delta(self):
return self._delta
def build(self):
if self.model is None:
self.model = S2Ranker()
class S2Ranker:
def __init__(self):
super().__init__()
#with open("/net/nfs.corp/s2-research/search_eval/search_click_ranker/trained_ranker_model_minimal.pickle", "rb") as model:
with open("/home/sean/data/s2/trained_ranker_model_minimal.pickle", "rb") as model:
self.model = pickle.load(model)
def __call__(self, **inputs):
query_texts, doc_texts = inputs['query_text'], inputs['doc_text']
features = featureize(query_texts, doc_texts)
return self.model.predict(features)
def featureize(query_texts, doc_texts):
feats = []
for query, doc in zip(query_texts, doc_texts):
candidate = {}
candidate["paper_year"] = 2020
candidate["n_citations"] = 0
candidate["n_key_citations"] = 0
candidate["paper_title_cleaned"] = fix_text(doc)
candidate["paper_abstract_cleaned"] = candidate["paper_title_cleaned"]
candidate["paper_venue_cleaned"] = fix_text('arxiv')
candidate["author_name"] = []
feats.append(make_features(query, candidate))
return np.array(feats)
def prepare_candidate(hit: dict) -> Dict:
    """Convert a dict to a format that features.py understands.
    Note: this function and ``rerank_and_dedupe_candidates`` below appear to be remnants
    of the original S2 ranking service; they reference helpers (``dedupe_by_id``,
    ``build_par_candidates``, ``F_POOL``, ``get_predictions``) that are not defined in
    this module and are not used by ``S2Ranker`` above.
    """
candidate = {}
source = hit["_source"]
author_names = [author["name"] for author in source.get("authors", [])]
year = source.get("year", None)
return candidate
def rerank_and_dedupe_candidates(query_string: str, raw_hits: List[dict], raw_total_hits: int) -> Tuple[int, List]:
"""Method that dedupes and reranks a candidate result set"""
deduped_candidates = dedupe_by_id(raw_hits)
par_candidates = build_par_candidates(query_string, deduped_candidates)
featurized_candidates = np.array(F_POOL.starmap(make_features, par_candidates))
predictions = get_predictions(featurized_candidates)
adjusted_scores = posthoc_score_adjust(predictions, featurized_candidates, query_string)
scores_argsort = np.argsort(adjusted_scores)[::-1]
reranked_candidates = []
for _, index in enumerate(scores_argsort):
reranked_candidates.append(deduped_candidates[index])
# When we dedupe the candidates we want to subtract them from the total
# hit count in the response
adjusted_total_hits = raw_total_hits - (len(raw_hits) - len(deduped_candidates))
return (adjusted_total_hits, reranked_candidates)
now = datetime.datetime.now()
gorc_lm = ir_datasets.util.Lazy(lambda: kenlm.Model('/home/sean/data/s2/gorc_lm.binary'))
authors_lm = ir_datasets.util.Lazy(lambda: kenlm.Model('/home/sean/data/s2/authors_lm.binary'))
venues_lm = ir_datasets.util.Lazy(lambda: kenlm.Model('/home/sean/data/s2/venues_lm.binary'))
def nanwrapper(f, x):
"""numpy freaks out if you pass an empty arrays
to many of its functions (like min or max).
This wrapper just returns a nan in that case.
"""
if len(x) == 0:
return np.nan
else:
return f(x)
def remove_unigrams(s, st):
return " ".join([i for i in s.split(" ") if i not in st])
def make_feature_names_and_constraints():
feats = ["abstract_is_available", "paper_year_is_in_query"]
# for lightgbm, 1 means positively monotonic, -1 means negatively monotonic and 0 means non-constraint
constraints = ["1", "1"]
# features just for title, abstract, venue
for field in ["title", "abstract", "venue"]:
feats.extend(
[
f"{field}_frac_of_query_matched_in_text", # total fraction of the query that was matched in text
f"{field}_mean_of_log_probs", # statistics of the log-probs
f"{field}_sum_of_log_probs*match_lens",
]
)
constraints.extend(
["1", "-1", "-1",]
)
# features for author field only
feats.extend(
[
"sum_matched_authors_len_divided_by_query_len", # total amount of (fractional wrt query) matched authors
"max_matched_authors_len_divided_by_query_len", # largest (fractional wrt query) author match
"author_match_distance_from_ends", # how far the author matches are from the front/back of the author list
]
)
constraints.extend(
["1", "1", "-1",]
)
feats.extend(
[
"paper_oldness",
"paper_n_citations", # no need for log due to decision trees
"paper_n_key_citations",
"paper_n_citations_divided_by_oldness",
]
)
# note: DO NOT change the paper_oldness constraint to -1
# if you do, then seminal papers will stop being on top.
constraints.extend(["0", "1", "1", "1"])
feats.extend(
[
"fraction_of_unquoted_query_matched_across_all_fields",
"sum_log_prob_of_unquoted_unmatched_unigrams",
"fraction_of_quoted_query_matched_across_all_fields",
"sum_log_prob_of_quoted_unmatched_unigrams",
]
)
constraints.extend(["1", "1", "1", "1"])
return np.array(feats), ",".join(constraints)
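# Illustrative sketch only (the training code is not part of this repo): the constraint
# string built above is the kind of value LightGBM accepts via its `monotone_constraints`
# parameter, which is the assumed use of these constraints when the pickled ranker loaded
# by S2Ranker was trained.
def _example_monotonic_ranker():
    import lightgbm as lgb
    _, constraints = make_feature_names_and_constraints()
    return lgb.LGBMRanker(monotone_constraints=[int(c) for c in constraints.split(',')])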
def make_features(query, result_paper, max_q_len=128, max_field_len=1024):
# the language model should have the beginning and end of sentences turned off
lm_dict = {
"title_abstract": lambda s: gorc_lm().score(s, eos=False, bos=False),
"author": lambda s: authors_lm().score(s, eos=False, bos=False),
"venue": lambda s: venues_lm().score(s, eos=False, bos=False),
}
# apply the language model in the field as necessary
def lm_score(s, which_lm="title"):
if "title" in which_lm or "abstract" in which_lm:
return lm_dict["title_abstract"](s)
elif "venue" in which_lm:
return lm_dict["venue"](s)
elif "author" in which_lm:
return lm_dict["author"](s)
elif "max" in which_lm:
return np.max(
[
lm_dict["title_abstract"](s),
lm_dict["venue"](s),
lm_dict["author"](s),
]
)
try:
year = int(result_paper["paper_year"])
year = np.minimum(now.year, year) # papers can't be from the future.
except:
year = np.nan
if result_paper["author_name"] is None:
authors = []
else:
authors = result_paper["author_name"]
# fix the text and separate out quoted and unquoted
query = str(query)
q = fix_text(query)[:max_q_len]
q_quoted = [i for i in extract_from_between_quotations(q) if len(i) > 0]
q_split_on_quotes = [i.strip() for i in q.split('"') if len(i.strip()) > 0]
q_unquoted = [
i.strip() for i in q_split_on_quotes if i not in q_quoted and len(i.strip()) > 0
]
q_unquoted_split_set = set(" ".join(q_unquoted).split())
q_quoted_split_set = set(" ".join(q_quoted).split())
q_split_set = q_unquoted_split_set | q_quoted_split_set
q_split_set -= STOPWORDS
# we will find out how much of a match we have *across* fields
unquoted_matched_across_fields = []
quoted_matched_across_fields = []
# overall features for the paper and query
q_quoted_len = np.sum([len(i) for i in q_quoted]) # total length of quoted snippets
q_unquoted_len = np.sum(
[len(i) for i in q_unquoted]
) # total length of non-quoted snippets
q_len = q_unquoted_len + q_quoted_len
# if there's no query left at this point, we return NaNs
# which the model natively supports
if q_len == 0:
return [np.nan] * len(FEATURE_NAMES)
# testing whether a year is somewhere in the query and making year-based features
if re.search(
"\d{4}", q
): # if year is in query, the feature is whether the paper year appears in the query
year_feat = str(year) in q_split_set
else: # if year isn't in the query, we don't care about matching
year_feat = np.nan
feats = [
result_paper["paper_abstract_cleaned"] is not None
and len(result_paper["paper_abstract_cleaned"]) > 1,
year_feat, # whether the year appears anywhere in the (split) query
]
# if year is matched, add it to the matched_across_all_fields but remove from query
# so it doesn't get matched in author/title/venue/abstract later
if np.any([str(year) in i for i in q_quoted]):
quoted_matched_across_fields.append(str(year))
if np.any([str(year) in i for i in q_unquoted]):
unquoted_matched_across_fields.append(str(year))
# if year is matched, we don't need to match it again, so removing
if year_feat is True and len(q_split_set) > 1:
q_split_set.remove(str(year))
# later we will filter some features based on nonsensical unigrams in the query
# this is the log probability lower-bound for sensible unigrams
log_prob_nonsense = lm_score("qwertyuiop", "max")
# features title, abstract, venue
title_and_venue_matches = set()
title_and_abstract_matches = set()
for field in [
"paper_title_cleaned",
"paper_abstract_cleaned",
"paper_venue_cleaned",
]:
if result_paper[field] is not None:
text = result_paper[field][:max_field_len]
else:
text = ""
text_len = len(text)
# unquoted matches
(
unquoted_match_spans,
unquoted_match_text,
unquoted_longest_starting_ngram,
) = find_query_ngrams_in_text(q_unquoted, text, quotes=False)
unquoted_matched_across_fields.extend(unquoted_match_text)
unquoted_match_len = len(unquoted_match_spans)
# quoted matches
(
quoted_match_spans,
quoted_match_text,
quoted_longest_starting_ngram,
) = find_query_ngrams_in_text(q_quoted, text, quotes=True)
quoted_matched_across_fields.extend(quoted_match_text)
quoted_match_len = len(quoted_match_text)
# now we (a) combine the quoted and unquoted results
match_spans = unquoted_match_spans + quoted_match_spans
match_text = unquoted_match_text + quoted_match_text
# and (b) take the set of the results
# while excluding sub-ngrams if longer ngrams are found
# e.g. if we already have 'sentiment analysis', then 'sentiment' is excluded
match_spans_set = []
match_text_set = []
for t, s in sorted(zip(match_text, match_spans), key=lambda s: len(s[0]))[::-1]:
if t not in match_text_set and ~np.any([t in i for i in match_text_set]):
match_spans_set.append(s)
match_text_set.append(t)
# remove venue results if they already entirely appeared
if "venue" in field:
text_unigram_len = len(text.split(" "))
match_spans_set_filtered = []
match_text_set_filtered = []
for sp, tx in zip(match_spans_set, match_text_set):
tx_unigrams = set(tx.split(" "))
# already matched all of these unigrams in title or abstract
condition_1 = (
tx_unigrams.intersection(title_and_abstract_matches) == tx_unigrams
)
# and matched too little of the venue text
condition_2 = len(tx_unigrams) / text_unigram_len <= 2 / 3
if not (condition_1 and condition_2):
match_spans_set_filtered.append(sp)
match_text_set_filtered.append(tx)
match_spans_set = match_spans_set_filtered
match_text_set = match_text_set_filtered
# match_text_set but unigrams
matched_text_unigrams = set()
for i in match_text_set:
i_split = i.split()
matched_text_unigrams.update(i_split)
if "title" in field or "venue" in field:
title_and_venue_matches.update(i_split)
if "title" in field or "abstract" in field:
title_and_abstract_matches.update(i_split)
if (
len(match_text_set) > 0 and text_len > 0
): # if any matches and the text has any length
# log probabilities of the scores
if "venue" in field:
lm_probs = [lm_score(match, "venue") for match in match_text_set]
else:
lm_probs = [lm_score(match, "max") for match in match_text_set]
# match character lengths
match_lens = [len(i) for i in match_text_set]
# match word lens
match_word_lens = [len(i.split()) for i in match_text_set]
# we have one feature that takes into account repetition of matches
match_text_counter = Counter(match_text)
match_spans_len_normed = np.log1p(list(match_text_counter.values())).sum()
# remove stopwords from unigrams
matched_text_unigrams -= STOPWORDS
feats.extend(
[
len(q_split_set.intersection(matched_text_unigrams))
/ np.maximum(
len(q_split_set), 1
), # total fraction of the query that was matched in text
np.nanmean(lm_probs), # average log-prob of the matches
np.nansum(
np.array(lm_probs) * np.array(match_word_lens)
), # sum of log-prob of matches times word-lengths
]
)
else:
# if we have no matches, then the features are deterministically 0
feats.extend([0, 0, 0])
# features for author field only
# note: we aren't using citation info
# because we don't know which author we are matching
# in the case of multiple authors with the same name
q_auth = fix_author_text(query)[:max_q_len]
q_quoted_auth = extract_from_between_quotations(q_auth)
q_split_on_quotes = [i.strip() for i in q_auth.split('"') if len(i.strip()) > 0]
q_unquoted_auth = [i for i in q_split_on_quotes if i not in q_quoted_auth]
# remove any unigrams that we already matched in title or venue
# but not abstract since citations are included there
# note: not sure if this make sense for quotes, but keeping it for those now
q_quoted_auth = [remove_unigrams(i, title_and_venue_matches) for i in q_quoted_auth]
q_unquoted_auth = [
remove_unigrams(i, title_and_venue_matches) for i in q_unquoted_auth
]
unquoted_match_lens = [] # normalized author matches
quoted_match_lens = [] # quoted author matches
match_fracs = []
for paper_author in authors:
len_author = len(paper_author)
if len_author > 0:
# higher weight for the last name
paper_author_weights = np.ones(len_author)
len_last_name = len(paper_author.split(" ")[-1])
paper_author_weights[
-len_last_name:
] *= 10 # last name is ten times more important to match
paper_author_weights /= paper_author_weights.sum()
#
for quotes_flag, q_loop in zip(
[False, True], [q_unquoted_auth, q_quoted_auth]
):
matched_spans, match_text, _ = find_query_ngrams_in_text(
q_loop,
paper_author,
quotes=quotes_flag,
len_filter=0,
remove_stopwords=True, # only removes entire matches that are stopwords. too bad for people named 'the' or 'less'
use_word_boundaries=False,
)
if len(matched_spans) > 0:
matched_text_joined = " ".join(match_text)
# edge case: single character matches are not good
if len(matched_text_joined) == 1:
matched_text_joined = ""
weight = np.sum(
[paper_author_weights[i:j].sum() for i, j in matched_spans]
)
match_frac = np.minimum((len(matched_text_joined) / q_len), 1)
match_fracs.append(match_frac)
if quotes_flag:
quoted_match_lens.append(match_frac * weight)
quoted_matched_across_fields.append(matched_text_joined)
else:
unquoted_match_lens.append(match_frac * weight)
unquoted_matched_across_fields.append(matched_text_joined)
else:
if quotes_flag:
quoted_match_lens.append(0)
else:
unquoted_match_lens.append(0)
    # since we ran this separately (per author) for quoted and unquoted, we want to avoid potential double counting
match_lens_max = np.maximum(unquoted_match_lens, quoted_match_lens)
nonzero_inds = np.flatnonzero(match_lens_max)
# the closest index to the ends of author lists
if len(nonzero_inds) == 0:
author_ind_feature = np.nan
else:
author_ind_feature = np.minimum(
nonzero_inds[0], len(authors) - 1 - nonzero_inds[-1]
)
feats.extend(
[
np.nansum(match_lens_max), # total amount of (weighted) matched authors
nanwrapper(np.nanmax, match_lens_max), # largest (weighted) author match
author_ind_feature, # penalizing matches that are far away from ends of author list
]
)
# oldness and citations
feats.extend(
[
now.year - year, # oldness (could be nan if year is missing)
result_paper["n_citations"], # no need for log due to decision trees
result_paper["n_key_citations"],
np.nan
if np.isnan(year)
else result_paper["n_citations"] / (now.year - year + 1),
]
)
# special features for how much of the unquoted query was matched/unmatched across all fields
q_unquoted_split_set -= STOPWORDS
if len(q_unquoted_split_set) > 0:
matched_split_set = set()
for i in unquoted_matched_across_fields:
matched_split_set.update(i.split())
# making sure stopwords aren't an issue
matched_split_set -= STOPWORDS
# fraction of the unquery matched
numerator = len(q_unquoted_split_set.intersection(matched_split_set))
feats.append(numerator / np.maximum(len(q_unquoted_split_set), 1))
# the log-prob of the unmatched unquotes
unmatched_unquoted = q_unquoted_split_set - matched_split_set
log_probs_unmatched_unquoted = [lm_score(i, "max") for i in unmatched_unquoted]
feats.append(
np.nansum(
[i for i in log_probs_unmatched_unquoted if i > log_prob_nonsense]
)
)
else:
feats.extend([np.nan, np.nan])
# special features for how much of the quoted query was matched/unmatched across all fields
if len(q_quoted) > 0:
numerator = len(set(" ".join(quoted_matched_across_fields).split()))
feats.append(numerator / len(q_quoted_split_set))
# the log-prob of the unmatched quotes
unmatched_quoted = set(q_quoted) - set(quoted_matched_across_fields)
feats.append(np.nansum([lm_score(i, "max") for i in unmatched_quoted]))
else:
feats.extend([np.nan, np.nan])
return feats
# get globals to use for posthoc_score_adjust
FEATURE_NAMES, FEATURE_CONSTRAINTS = make_feature_names_and_constraints()
feature_names = list(FEATURE_NAMES)
quotes_feat_ind = feature_names.index(
"fraction_of_quoted_query_matched_across_all_fields"
)
year_match_ind = feature_names.index("paper_year_is_in_query")
author_match_ind = feature_names.index("max_matched_authors_len_divided_by_query_len")
matched_all_ind = feature_names.index(
"fraction_of_unquoted_query_matched_across_all_fields"
)
title_match_ind = feature_names.index("title_frac_of_query_matched_in_text")
abstract_match_ind = feature_names.index("abstract_frac_of_query_matched_in_text")
venue_match_ind = feature_names.index("venue_frac_of_query_matched_in_text")
def posthoc_score_adjust(scores, X, query=None):
if query is None:
query_len = 100
else:
query_len = len(str(query).split(" "))
# need to modify scores if there are any quote matches
# this ensures quoted-matching results are on top
quotes_frac_found = X[:, quotes_feat_ind]
has_quotes_to_match = ~np.isnan(quotes_frac_found)
scores[has_quotes_to_match] += 1000000 * quotes_frac_found[has_quotes_to_match]
# if there is a year match, we want to boost that a lot
year_match = np.isclose(X[:, year_match_ind], 1.0)
scores += 100000 * year_match
# full author matches if the query is long enough
if query_len > 1:
full_author_match = np.isclose(X[:, author_match_ind], 1.0)
scores += 100000 * full_author_match
# then those with all ngrams matched anywhere
matched_all_flag = np.isclose(X[:, matched_all_ind], 1.0)
scores += 1000 * matched_all_flag
# need to heavily penalize those with 0 percent ngram match
matched_none_flag = np.isclose(X[:, matched_all_ind], 0.0)
scores -= 1000 * matched_none_flag
# find the most common match appearance pattern and upweight those
if query_len > 1:
if '"' in query:
qualifying_for_cutoff = (
np.isclose(X[:, quotes_feat_ind], 1.0) & matched_all_flag
)
else:
qualifying_for_cutoff = matched_all_flag
scores_argsort = np.argsort(scores)[::-1]
where_zeros = np.where(qualifying_for_cutoff[scores_argsort] == 0)
if len(where_zeros[0]) > 0:
top_cutoff = where_zeros[0][0]
if top_cutoff > 1:
top_inds = scores_argsort[:top_cutoff]
pattern_of_matches = (
1000
* (
(X[top_inds, title_match_ind] > 0)
| (X[top_inds, abstract_match_ind] > 0)
)
+ 100 * (X[top_inds, author_match_ind] > 0)
+ 10 * (X[top_inds, venue_match_ind] > 0)
+ year_match[top_inds]
)
most_common_pattern = Counter(pattern_of_matches).most_common()[0][0]
# print(Counter(pattern_of_matches).most_common())
# don't do this if title/abstract matches are the most common
                # because the error is usually not irrelevant matches in author/venue
# but usually irrelevant matches in title + abstract
if most_common_pattern != 1000:
scores[top_inds[pattern_of_matches == most_common_pattern]] += 1e8
return scores
unidecode = unidecoder()
regex_translation_table = str.maketrans("", "", r"^$.\+*?{}[]()|")
def extract_from_between_quotations(text):
"""Get everything that's in double quotes
"""
results = re.findall('"([^"]*)"', text)
return [i.strip() for i in results]
def remove_single_non_alphanumerics(text):
"""Removes any single characters that are not
alphanumerics and not important punctuation.
"""
text = re.sub(r"\B[^\w\"\s]\B", "", text)
return standardize_whitespace_length(text)
def replace_special_whitespace_chars(text: str) -> str:
"""It's annoying to deal with nonbreaking whitespace chars like u'xa0'
or other whitespace chars. Let's replace all of them with the standard
char before doing any other processing."""
text = re.sub(r"\s", " ", text)
return text
def standardize_whitespace_length(text: str) -> str:
"""Tokenization is problematic when there are extra-long whitespaces.
Make them all a single character in length.
Also remove any whitespaces at beginning/end of a string"""
return re.sub(r" +", " ", text).strip()
def fix_text(s):
"""General purpose text fixing using nlpre package
and then tokenizing with blingfire
"""
if pd.isnull(s):
return ""
s = unidecode(s)
# fix cases when quotes are repeated
s = re.sub('"+', '"', s)
# dashes make quote matching difficult
s = re.sub("-", " ", s)
s = replace_special_whitespace_chars(s)
# tokenize
s = text_to_words(s).lower().strip()
# note: removing single non-alphanumerics
# means that we will match ngrams that are
# usually separate by e.g. commas in the text
# this will improve # of matches but also
# surface false positives
return remove_single_non_alphanumerics(s)
def fix_author_text(s):
"""Author text gets special treatment.
No de-dashing, no tokenization, and
replace periods by white space.
"""
if pd.isnull(s):
return ""
s = unidecode(s)
# fix cases when quotes are repeated
s = re.sub('"+', '"', s)
# no periods as those make author first letter matching hard
s = re.sub(r"\.", " ", s)
s = replace_special_whitespace_chars(s)
s = standardize_whitespace_length(s)
return text_to_words(s).lower().strip()
def find_query_ngrams_in_text(
q,
t,
quotes=False,
len_filter=1,
remove_stopwords=True,
use_word_boundaries=True,
max_ngram_len=7,
):
"""A function to find instances of ngrams of query q
inside text t. Finds all possible ngrams and returns their
character-level span.
Note: because of the greedy match this function can miss
some matches when there's repetition in the query, but
this is likely rare enough that we can ignore it
Arguments:
q {str} -- query
t {str} -- text
Returns:
match_spans -- a list of span tuples
match_text_tokenized -- a list of matched tokens
"""
longest_starting_ngram = ""
if len(q) == 0 or len(t) == 0:
return [], [], longest_starting_ngram
if type(q[0]) is not str or type(t) is not str:
return [], [], longest_starting_ngram
q = [standardize_whitespace_length(i.translate(regex_translation_table)) for i in q]
q = [i for i in q if len(i) > 0]
t = standardize_whitespace_length(t.translate(regex_translation_table))
# if not between quotes, we get all ngrams
if quotes is False:
match_spans = []
match_text_tokenized = []
for q_sub in q:
q_split = q_sub.split()
n_grams = []
longest_ngram = np.minimum(max_ngram_len, len(q_split))
for i in range(int(longest_ngram), 0, -1):
n_grams += [
" ".join(ngram).replace("|", "\|") for ngram in ngrams(q_split, i)
]
for i in n_grams:
if t.startswith(i) and len(i) > len(longest_starting_ngram):
longest_starting_ngram = i
if use_word_boundaries:
matches = list(
re.finditer("|".join(["\\b" + i + "\\b" for i in n_grams]), t)
)
else:
matches = list(re.finditer("|".join(n_grams), t))
match_spans.extend(
[i.span() for i in matches if i.span()[1] - i.span()[0] > len_filter]
)
match_text_tokenized.extend(
[i.group() for i in matches if i.span()[1] - i.span()[0] > len_filter]
)
# now we remove any of the results if the entire matched ngram is just a stopword
if remove_stopwords:
match_spans = [
span
for i, span in enumerate(match_spans)
if match_text_tokenized[i] not in STOPWORDS
]
match_text_tokenized = [
text for text in match_text_tokenized if text not in STOPWORDS
]
# now matches for the between-quotes texts
# we only care about exact matches
else:
match_spans = []
match_text_tokenized = []
for q_sub in q:
if use_word_boundaries:
matches = list(re.finditer("\\b" + q_sub + "\\b", t))
else:
matches = list(re.finditer(q_sub, t))
if t.startswith(q_sub) and len(q_sub) > len(longest_starting_ngram):
longest_starting_ngram = q_sub
match_spans.extend([i.span() for i in matches])
match_text_tokenized.extend([i.group() for i in matches])
return match_spans, match_text_tokenized, longest_starting_ngram
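# Small illustrative call (not used elsewhere): for an unquoted query the function
# returns character spans and the matched n-gram text, preferring the longest n-grams
# it can find (e.g. "neural ranking" is matched as one bigram rather than two unigrams).
def _example_ngram_match():
    return find_query_ngrams_in_text(['neural ranking'], 'a neural ranking model', quotes=False)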
STOPWORDS = set(
[
"i",
"me",
"my",
"myself",
"we",
"our",
"ours",
"ourselves",
"you",
"you're",
"you've",
"you'll",
"you'd",
"your",
"yours",
"yourself",
"yourselves",
"he",
"him",
"his",
"himself",
"she",
"she's",
"her",
"hers",
"herself",
"it",
"it's",
"its",
"itself",
"they",
"them",
"their",
"theirs",
"themselves",
"what",
"which",
"who",
"whom",
"this",
"that",
"that'll",
"these",
"those",
"am",
"is",
"are",
"was",
"were",
"be",
"been",
"being",
"have",
"has",
"had",
"having",
"do",
"does",
"did",
"doing",
"a",
"an",
"the",
"and",
"but",
"if",
"or",
"because",
"as",
"until",
"while",
"of",
"at",
"by",
"for",
"with",
"about",
"against",
"between",
"into",
"through",
"during",
"before",
"after",
"above",
"below",
"to",
"from",
"up",
"down",
"in",
"out",
"on",
"off",
"over",
"under",
"again",
"further",
"then",
"once",
"here",
"there",
"when",
"where",
"why",
"how",
"all",
"any",
"both",
"each",
"few",
"more",
"most",
"other",
"some",
"such",
"no",
"nor",
"not",
"only",
"own",
"same",
"so",
"than",
"too",
"very",
"s",
"t",
"can",
"will",
"just",
"don",
"don't",
"should",
"should've",
"now",
"d",
"ll",
"m",
"o",
"re",
"ve",
"y",
"ain",
"aren",
"aren't",
"couldn",
"couldn't",
"didn",
"didn't",
"doesn",
"doesn't",
"hadn",
"hadn't",
"hasn",
"hasn't",
"haven",
"haven't",
"isn",
"isn't",
"ma",
"mightn",
"mightn't",
"mustn",
"mustn't",
"needn",
"needn't",
"shan",
"shan't",
"shouldn",
"shouldn't",
"wasn",
"wasn't",
"weren",
"weren't",
"won",
"won't",
"wouldn",
"wouldn't",
]
)
| abnirml-master | abnirml/scorers/s2.py |
import torch
import pandas as pd
import pyterrier as pt
class Scorer:
def __init__(self, name):
self.name = name
    def delta(self, context=None):
        # `context` is accepted (and ignored here) so callers such as AxiomEvaluator,
        # which pass the pair's context, work against the base interface.
        return 0.
def score_iter(self, it):
raise NotImplementedError
class NeuralScorer(Scorer):
def __init__(self, name, batch_size=8):
super().__init__(name)
self.batch_size = batch_size
self.model = None
def build(self):
pass
def score(self, query, doc):
return self.batch_score([query], [doc])[0]
def batch_score(self, queries, texts):
self.build()
with torch.no_grad():
return self.model(query_text=queries, doc_text=texts).tolist()
def score_iter(self, it):
batch_size = self.batch_size
q_batch, d_batch = [], []
for query, doc in it:
q_batch.append(query)
d_batch.append(doc)
if len(q_batch) >= batch_size:
scores = self.batch_score(q_batch, d_batch)
yield from scores
q_batch, d_batch = [], []
if q_batch:
scores = self.batch_score(q_batch, d_batch)
yield from scores
class PyTerrierScorer(Scorer):
def __init__(self, name, transformerfn=None, delta=0., batch_size=None):
if transformerfn is None:
name, transformerfn = "", name
super().__init__(name)
self.transformerfn = transformerfn
self.transformer = None
        if batch_size is None:
            if hasattr(transformerfn, 'batch_size'):
                batch_size = transformerfn.batch_size
            else:
                batch_size = 64
self.batch_size = batch_size
self._delta = delta
    def delta(self, context=None):
        return self._delta
def score_iter(self, it):
if not pt.started():
pt.init()
batch_size = self.batch_size
q_batch, d_batch = [], []
for query, doc in it:
q_batch.append(query)
d_batch.append(doc)
if len(q_batch) >= batch_size:
scores = self.batch_score(q_batch, d_batch)
yield from scores
q_batch, d_batch = [], []
if q_batch:
scores = self.batch_score(q_batch, d_batch)
yield from scores
def batch_score(self, queries, texts):
ids = [str(i) for i in range(len(queries))]
df = pd.DataFrame({'qid': ids, 'docno': ids, 'query': queries, 'text': texts})
if self.transformer is None:
if hasattr(self.transformerfn, 'transform'):
self.transformer = self.transformerfn
else:
self.transformer = self.transformerfn()
return self.transformer(df)['score'].tolist()
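# Usage sketch (illustrative, not part of the original module): any concrete scorer only has to
# implement score_iter over (query_text, doc_text) pairs. The class and values below are hypothetical.
#
#   class TermOverlapScorer(Scorer):
#       def score_iter(self, it):
#           for query, doc in it:
#               yield float(len(set(query.split()) & set(doc.split())))
#
#   scorer = TermOverlapScorer("term-overlap")
#   list(scorer.score_iter([("neural ranking", "ranking with neural networks")]))  # -> [2.0]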
| abnirml-master | abnirml/scorers/base.py |
import json
import itertools
import hashlib
import scipy
import ir_datasets
_logger = ir_datasets.log.easy()
class AxiomEvaluator:
def __init__(self, scorer, axiom, hash_fn=hashlib.md5, epsilon=1e-6):
self.scorer = scorer
self.axiom = axiom
self.hash_fn = hash_fn
self.epsilon = epsilon
def eval(self):
records, it2 = itertools.tee(self.axiom.axiom_pairs_iter(), 2)
def flat_records_iter(it):
            for rec in it:
                for sample in rec['samples']:
                    yield sample['query_text'], sample['doc_text']
score_iter = self.scorer.score_iter(flat_records_iter(it2))
axiom_scores = []
axiom_hash = self.hash_fn()
pos, neg, neu = 0, 0, 0
for i, record in enumerate(_logger.pbar(records, desc='axiom pairs')):
assert len(record['samples']) == 2
# Keep a hash of the query & doc texts used for this axiom. This is used for verifying
# that other runs of the axiom yield the same records.
js = [[i, r['query_text'], r['doc_text']] for r in record['samples']]
axiom_hash.update(json.dumps(js).encode())
            scores = [s for _, s in zip(record['samples'], score_iter)]
axiom_score = scores[0] - scores[1]
axiom_scores.append(axiom_score)
delta = self.scorer.delta(record['context'])
if axiom_score > delta:
pos += 1
elif axiom_score < -delta:
neg += 1
else:
neu += 1
return {
'pos': pos,
'neg': neg,
'neu': neu,
'score': (pos - neg) / len(axiom_scores) if len(axiom_scores) > 0 else 0,
'count': len(axiom_scores),
'mean_diff': sum(axiom_scores) / len(axiom_scores) if len(axiom_scores) > 0 else 0,
'median_diff': sorted(axiom_scores)[len(axiom_scores) // 2] if len(axiom_scores) > 0 else 0,
'p_val': scipy.stats.ttest_1samp(axiom_scores, 0)[1] if len(axiom_scores) > 0 else 0,
'hash': axiom_hash.hexdigest(),
}
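# Shape of each record expected from axiom.axiom_pairs_iter(), as inferred from the code above
# (field values are illustrative):
#
#   {
#       "context": {...},                         # passed to scorer.delta(...)
#       "samples": [
#           {"query_text": "q", "doc_text": "document expected to score higher"},
#           {"query_text": "q", "doc_text": "document expected to score lower"},
#       ],
#   }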
| abnirml-master | abnirml/eval/axiom_eval.py |
from .axiom_eval import AxiomEvaluator
from .probe_experiment import ProbeExperiment, ProbeDist, rerank, topk_diffs, CutoffCompare
| abnirml-master | abnirml/eval/__init__.py |
import json
import itertools
import hashlib
import math
import scipy
from collections import Counter
import ir_datasets
import abnirml
import seaborn as sns
import matplotlib.pyplot as plt
_logger = ir_datasets.log.easy()
def _asymmetric_probe_scorer(score_a, score_b):
return score_a - score_b
def _symmetric_probe_scorer(score_a, score_b):
return abs(score_a - score_b)
def _prober(scorer, probe):
if hasattr(scorer, 'transform') and not hasattr(scorer, 'score_iter'):
scorer = abnirml.PyTerrierScorer(scorer) # allow pyterrier transformers
probe_scorer = {
'asymmetric': _asymmetric_probe_scorer,
'symmetric': _symmetric_probe_scorer,
}[probe.pair_symmetry()]
records, it2 = itertools.tee(probe.pairs_iter(), 2)
def flat_records_iter(it):
for record in it:
assert len(record) == 2
for rec in record:
yield rec['query_text'], rec['doc_text']
score_iter = scorer.score_iter(flat_records_iter(it2))
probe_scores = []
probe_hash = hashlib.md5()
    for i, record in enumerate(_logger.pbar(records, desc='probe pairs')):
# Keep a hash of the query & doc texts used for this axiom. This is used for verifying
# that other runs of the axiom yield the exact same records.
probe_hash.update(json.dumps([[i, r['query_text'], r['doc_text']] for r in record]).encode())
scores = [s for r, s in zip(record, score_iter)]
probe_score = probe_scorer(scores[0], scores[1])
probe_scores.append(probe_score)
return {
'scores': probe_scores,
'hash': probe_hash.hexdigest(),
}
def ProbeExperiment(scorer, probe, delta=0.):
probe_info = _prober(scorer, probe)
probe_scores = probe_info['scores']
pos = sum(1 for s in probe_scores if s > delta)
neg = sum(1 for s in probe_scores if s < -delta)
neu = sum(1 for s in probe_scores if -delta <= s <= delta)
return {
'pos': pos,
'neg': neg,
'neu': neu,
'score': (pos - neg) / len(probe_scores) if len(probe_scores) > 0 else 0,
'count': len(probe_scores),
'mean_diff': sum(probe_scores) / len(probe_scores) if len(probe_scores) > 0 else 0,
'median_diff': sorted(probe_scores)[len(probe_scores) // 2] if len(probe_scores) > 0 else 0,
'p_val': scipy.stats.ttest_1samp(probe_scores, 0)[1] if len(probe_scores) > 0 else 0,
'hash': probe_info['hash'],
}
def ProbeDist(scorer, probes):
all_scores = []
for probe in probes:
probe_info = _prober(scorer, probe)
sns.kdeplot(probe_info['scores'])
all_scores.append(probe_info['scores'])
return scipy.stats.mannwhitneyu(all_scores[0], all_scores[1])
def rerank(scorer, dataset):
docs = dataset.docs_store()
queries = {q.query_id: q for q in dataset.queries_iter()}
records = []
inputs = []
for s in dataset.scoreddocs_iter():
records.append(s)
inputs.append((queries[s.query_id].text, docs.get(s.doc_id).text))
score_iter = scorer.score_iter(inputs)
results = {}
for record, score in zip(_logger.pbar(records, desc='calculating scores'), score_iter):
if record.query_id not in results:
results[record.query_id] = {}
results[record.query_id][record.doc_id] = score
return results
def topk_diffs(scorer, dataset, k=10):
diffs = []
for scoreddocs in rerank(scorer, dataset).values():
scoreddocs = Counter(scoreddocs)
topk = list(scoreddocs.most_common(k))
for (_, s0), (_, s1) in zip(topk, topk[1:]):
diffs.append(s0 - s1)
return diffs
def CutoffCompare(probe, scorers_cutoffs):
mn, mx = 1, -1
for n, scorer, cutoffs in scorers_cutoffs:
probe_info = abnirml.eval.probe_experiment._prober(scorer, probe)
x, y = [], []
for i in range(100):
            if i == 99:
                cutoff = cutoffs[-1]
            elif i == 0:
                cutoff = 0
            else:
                cutoff = cutoffs[math.floor(len(cutoffs) * i / 100)]
pos = sum(1 for x in probe_info['scores'] if x > cutoff)
neg = sum(1 for x in probe_info['scores'] if x < -cutoff)
x.append(100 - i)
y.append((pos - neg) / len(probe_info['scores']))
plt.plot(x, y, label=n)
mn = min(mn, *y)
mx = max(mx, *y)
mx = 0.1 * math.ceil(mx/0.1)
mn = 0.1 * math.floor(mn/0.1)
plt.ylim(mn, mx)
plt.axhline(0, c='#444444', ls=':')
plt.axvline(50, c='#444444', ls=':')
plt.xlabel('Percentile')
plt.ylabel('Score')
plt.legend()
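# Usage sketch (names are hypothetical): ProbeExperiment summarizes, for a scorer and a probe,
# how often the first element of each pair outscores the second by more than `delta`.
#
#   scorer = abnirml.PyTerrierScorer("BM25", my_bm25_transformer)   # my_bm25_transformer is assumed
#   result = ProbeExperiment(scorer, my_probe, delta=0.01)          # my_probe is assumed
#   print(result["score"], result["pos"], result["neg"], result["p_val"])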
| abnirml-master | abnirml/eval/probe_experiment.py |
import openai
import argparse
import json
import csv
import random
import os
from annoy import AnnoyIndex
from sklearn.metrics import accuracy_score
from utils import chunks, read_lines
from baselines.utils import gpt3_completion, write_items
from sentence_transformers import SentenceTransformer
from sklearn.neighbors import KNeighborsClassifier
import tqdm
def record_to_gpt3_instance(record, is_test=False):
context = record['context'].strip()
if not (context.endswith("?") or context.endswith(".")):
context = context + "."
if context.endswith("."):
context = "Is it true that {}?".format(context.lower()[:-1])
situation_text = "Question: {}".format(context)
if is_test:
return '\n'.join(
[situation_text,
"Answer:"
]
)
else:
return '\n'.join(
[
situation_text,
"Answer: {}.".format(record['answer']),
"##"
]
)
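# For example, a record like {"context": "lakes are freshwater", "answer": "yes"} renders as the
# training-style block
#   Question: Is it true that lakes are freshwater?
#   Answer: yes.
#   ##
# while is_test=True produces only the "Question: ... / Answer:" stub for GPT-3 to complete.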
DAVINCI_INSTRUCTION = "Given a question, classify it into one of two categories: Yes or No.\n\n"
def main(train_file, test_file, output_file, gpt3_version, few_shot_strategy, num_random_tries,
annoy_index_file, num_generations, num_few_shot_examples_per_group, do_eval, sample, randomize_prompt_examples, seed):
random.seed(seed)
train_records = [json.loads(r) for r in read_lines(train_file)]
test_records = [json.loads(r) for r in read_lines(test_file)]
sampled_test_set = random.sample(test_records, sample)
if few_shot_strategy.startswith("random"):
if few_shot_strategy == "random":
few_shot_records = random.sample(train_records, num_few_shot_examples_per_group)
else:
grouped_by_field = {}
for _train_record in train_records:
if few_shot_strategy == "random_per_relation":
group_key = _train_record['metadata']['relational_prompt']
elif few_shot_strategy == "random_per_label":
group_key = _train_record['answer']
elif few_shot_strategy == "random_pair_for_target_relation":
group_key = _train_record['metadata']['relational_prompt'] \
+ "~" \
+ _train_record['answer']
else:
raise Exception("Incorrect strategy")
if group_key not in grouped_by_field:
grouped_by_field[group_key] = []
grouped_by_field[group_key].append(_train_record)
few_shot_records = []
for k, v in grouped_by_field.items():
few_shot_records.extend(
random.sample(v, num_few_shot_examples_per_group))
random.shuffle(few_shot_records)
few_shot_text_instances = [record_to_gpt3_instance(_inst) for _inst in few_shot_records]
few_shot_examples_for_gpt3 = '\n'.join(few_shot_text_instances)
for idx, _tr in enumerate(tqdm.tqdm(sampled_test_set)):
if randomize_prompt_examples:
random.shuffle(few_shot_records)
few_shot_text_instances = [record_to_gpt3_instance(_inst) for _inst in few_shot_records]
few_shot_examples_for_gpt3 = '\n'.join(few_shot_text_instances)
if few_shot_strategy == "random_pair_for_target_relation":
## This is a bit hacky. It overwrites the previously populated prompt
target_relation_positive_examples = random.sample(
grouped_by_field[_tr['metadata']['relational_prompt'] + "~" + "yes"],
num_few_shot_examples_per_group
)
target_relation_negative_examples = random.sample(
grouped_by_field[_tr['metadata']['relational_prompt'] + "~" + "no"],
num_few_shot_examples_per_group
)
few_shot_text_instances = \
[record_to_gpt3_instance(_inst) for _inst in
target_relation_positive_examples + target_relation_negative_examples
]
few_shot_examples_for_gpt3 = '\n'.join(few_shot_text_instances)
# fixed_prompt = "Question: Do authors ever think of a title after they finish writing their books?\nAnswer: yes.\n##\nQuestion: A play performed in a school is smaller than a play performed on Broadway\nAnswer: yes.\n##\nQuestion: Mexico has Mexican food.\nAnswer: yes.\n##\nQuestion: Are there usually only a few people who teach in a school, relative to the number of students?\nAnswer: yes.\n##\nQuestion: Martin Luther King Jr. was a catholic priest known for his involvement in civil rights movement\nAnswer: no.\n##\nQuestion: frank is only a male name\nAnswer: no.\n##\nQuestion: japan can be the host of the summer Olympics this year\nAnswer: yes.\n##\nQuestion: Colorado has four vowels.\nAnswer: yes.\n##\nQuestion: Before justice is served, a trial has to be done. There is no other way to get justice\nAnswer: no.\n##\nQuestion: Almost all lakes are freshwater, other than a few saltwater ones, like the Great Salt Lake in Utah.\nAnswer: yes.\n##"
gpt3_prompt = few_shot_examples_for_gpt3 + "\n" + record_to_gpt3_instance(_tr,
is_test=True)
if gpt3_version == "davinci-instruct-beta":
gpt3_prompt = DAVINCI_INSTRUCTION + gpt3_prompt
if idx < 5:
print("******* Example {}".format(idx))
print("Prompt:")
print(gpt3_prompt)
print("\n\n")
gpt3_predictions = [g['text'].strip() for g in
gpt3_completion(gpt3_prompt, gpt3_version, max_tokens=1,
temperature=0.0, logprobs=1, echo=False,
num_outputs=num_generations, top_p=1,
best_of=num_generations)['choices']]
_tr["gpt3_prediction"] = gpt3_predictions
_tr['gpt3_prompt'] = gpt3_prompt
_tr['engine'] = gpt3_version
write_items([json.dumps(r) for r in sampled_test_set], output_file)
elif few_shot_strategy == "knn":
annoy_index = AnnoyIndex(768, "dot")
model = SentenceTransformer('bert-base-nli-mean-tokens')
if annoy_index_file is not None and os.path.exists(annoy_index_file):
annoy_index.load(annoy_index_file)
else:
all_embeddings = []
for batch in tqdm.tqdm(chunks(train_records, 8)):
sentences = [b['context'] for b in batch]
sentence_embeddings = model.encode(sentences)
all_embeddings.extend(sentence_embeddings)
print(len(all_embeddings))
for idx, emb in enumerate(all_embeddings):
annoy_index.add_item(idx, emb)
annoy_index.build(10)
if annoy_index_file is not None:
annoy_index.save(annoy_index_file)
for idx, tr in enumerate(tqdm.tqdm(sampled_test_set)):
test_emb = model.encode([tr['context']])[0]
nns = annoy_index.get_nns_by_vector(test_emb, num_few_shot_examples_per_group)
few_shot_records = [train_records[nn] for nn in nns]
few_shot_records.reverse()
few_shot_text_instances = [record_to_gpt3_instance(_inst) for _inst in few_shot_records]
few_shot_examples_for_gpt3 = '\n'.join(few_shot_text_instances)
gpt3_prompt = few_shot_examples_for_gpt3 + "\n" + record_to_gpt3_instance(tr,
is_test=True)
if gpt3_version == "davinci-instruct-beta":
gpt3_prompt = DAVINCI_INSTRUCTION + gpt3_prompt
if idx < 5:
print("******* Example {}".format(idx))
print("Example: {}".format(idx))
print("Prompt:")
print(gpt3_prompt)
print("\n\n\n")
gpt3_predictions = [g['text'].strip() for g in
gpt3_completion(gpt3_prompt, gpt3_version, max_tokens=10,
temperature=0.0, logprobs=1, echo=False,
num_outputs=num_generations, top_p=1,
best_of=num_generations)['choices']]
tr["gpt3_prediction"] = gpt3_predictions
tr['gpt3_prompt'] = gpt3_prompt
tr['engine'] = gpt3_version
write_items([json.dumps(r) for r in sampled_test_set], output_file)
if do_eval:
acc = accuracy_score([r['answer'] for r in sampled_test_set],
[r['gpt3_prediction'][0] for r in sampled_test_set]
)
print("Accuracy = {}".format(acc))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        description='GPT-3 few-shot baseline: prompts GPT-3 with training examples and records its predictions')
# Required Parameters
parser.add_argument('--train_file', type=str, help='Location of test file', default=None)
parser.add_argument('--test_file', type=str, help='Location of test file', default=None)
parser.add_argument('--output_file', type=str, help='File to output', default=None)
parser.add_argument('--annoy_index_file', type=str, help='ANN Index File', default=None)
    parser.add_argument('--gpt3_version', type=str, help='GPT-3 engine to use (e.g. davinci)', default="davinci")
parser.add_argument('--few_shot_strategy', type=str, help='Strategy for few shot learning',
default="random_per_label")
    parser.add_argument('--num_random_tries', type=int, help='No. of random prompt samplings to try',
                        default=1)
parser.add_argument('--num_generations', type=int, help='No. of gpt3 generations', default=1)
parser.add_argument('--num_few_shot_examples_per_group', type=int,
help='No. of few shot examples in prompt', default=1)
parser.add_argument('--sample', type=int, help='No. of test samples to try on', default=10)
parser.add_argument('--do_eval', action="store_true", dest="do_eval")
parser.add_argument('--no_eval', action="store_false", dest="do_eval")
parser.add_argument('--randomize_prompt_examples', action="store_true", dest="randomize_prompt_examples")
parser.add_argument('--fixed_prompt_examples', action="store_false", dest="randomize_prompt_examples")
parser.add_argument('--seed', type=int, default=31555)
args = parser.parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(
args.train_file,
args.test_file,
args.output_file,
args.gpt3_version,
args.few_shot_strategy,
args.num_random_tries,
args.annoy_index_file,
args.num_generations,
args.num_few_shot_examples_per_group,
args.do_eval,
args.sample,
args.randomize_prompt_examples,
args.seed
)
| csqa2-master | baselines/gpt3.py |
csqa2-master | baselines/__init__.py |
|
import openai
import os
import time
import sys
from typing import List
openai.api_key = os.environ["OPENAI_API_KEY"]
def gpt3_completion(prompt, model_name, max_tokens, temperature, logprobs, echo, num_outputs, top_p, best_of):
# call GPT-3 API until result is provided and then return it
response = None
received = False
# prevent over 600 requests per minute
while not received:
try:
response = openai.Completion.create(
engine=model_name,
prompt=prompt,
max_tokens=max_tokens,
temperature=temperature,
logprobs=logprobs,
echo=echo,
stop=[".", "\n"],
n=num_outputs,
top_p=top_p,
best_of=best_of)
received = True
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError: # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(0.15)
return response
def write_items(items: List[str], output_file):
with open(output_file, 'w') as f:
for concept in items:
f.write(concept + "\n")
f.close()
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
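# e.g. list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]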
def read_lines(input_file: str) -> List[str]:
lines = []
with open(input_file, "rb") as f:
for l in f:
lines.append(l.decode().strip())
return lines | csqa2-master | baselines/utils.py |
import argparse
import os
import sys
import logging
from typing import Tuple
from gevent.pywsgi import WSGIServer
from flask import Flask, Response, request, jsonify
from app.ui import create_ui
from app.utils import StackdriverJsonFormatter
from werkzeug.middleware.proxy_fix import ProxyFix
from datetime import date
def start():
"""
Starts up a HTTP server attached to the provider port, and optionally
in development mode (which is ideal for local development but unideal
for production use).
"""
parser = argparse.ArgumentParser(description='Starts your application\'s HTTP server.')
parser.add_argument(
'--port',
'-p',
help='The port to listen on',
default=8000
)
parser.add_argument(
'--prod',
help=
'If specified the server is started in production mode, where ' +
'the server isn\'t restarted as changes to the source code occur.',
action='store_true'
)
args = parser.parse_args()
# Locally we don't specify any handlers, which causes `basicConfig` to set
# up one for us that writes human readable messages.
handlers = None
# If we're in production we setup a handler that writes JSON log messages
# in a format that Google likes.
if args.prod:
json_handler = logging.StreamHandler()
json_handler.setFormatter(StackdriverJsonFormatter())
handlers = [ json_handler ]
logging.basicConfig(
level=os.environ.get('LOG_LEVEL', default=logging.INFO),
handlers=handlers
)
logger = logging.getLogger()
logger.debug("AHOY! Let's get this boat out to water...")
app = Flask("app")
@app.template_filter()
def formatted_date(d: date) -> str:
return d.strftime("%b %-d, %Y")
# Bind the API functionality to our application. You can add additional
# API endpoints by editing api.py.
logger.debug("Starting: init API...")
app.register_blueprint(create_ui(), url_prefix='/')
logger.debug("Complete: init API...")
# In production we use a HTTP server appropriate for production.
if args.prod:
logger.debug("Starting: gevent.WSGIServer...")
# There are two proxies -- the one that's run as a sibling of this process, and
# the Ingress controller that runs on the cluster.
# See: https://skiff.allenai.org/templates.html
num_proxies = 2
proxied_app = ProxyFix(app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies,
x_port=num_proxies)
http_server = WSGIServer(('0.0.0.0', args.port), proxied_app, log=logger,
error_log=logger)
app.logger.info(f'Server listening at http://0.0.0.0:{args.port}')
http_server.serve_forever()
else:
logger.debug("Starting: Flask development server...")
num_proxies = 1
proxied_app = ProxyFix(app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies,
x_port=num_proxies)
app.run(host='0.0.0.0', port=args.port)
if __name__ == '__main__':
start()
| allennlp-gallery-main | app/start.py |
from flask import Blueprint, render_template
from .projects import load_all_projects, Project
from typing import Optional
from werkzeug.exceptions import NotFound
from markdown import markdown
def create_ui() -> Blueprint:
app = Blueprint("app", __name__)
@app.app_template_filter()
def md_to_html(md: str) -> str:
return markdown(md, extensions=['tables'], output_format='html')
@app.app_template_filter()
def newlines_to_spaces(s: str) -> str:
return s.replace("\n", " ")
default_title = "AllenNLP Project Gallery"
default_description = (
"A list of publicly available, state-of-the-art paper implementations built with "
"AllenNLP. The featured projects are developed both by the Allen Institute for AI and "
"the larger community."
)
@app.route("/")
def index():
projects = sorted(load_all_projects(), key=lambda p : p.config.submission_date)
projects.reverse()
return render_template("index.html", projects=projects, title=default_title,
description=default_description)
@app.route("/add-project")
def add_project():
title = f"Add Your Project — {default_title}"
description = (
"We welcome and encourage submissions from the community. Accepted submissions "
"are displayed in the Gallery and available for public use."
)
return render_template("add_project.html", title=title, description=description)
@app.route("/project/<string:project_id>")
def project_details(project_id: str):
projects = load_all_projects()
project: Optional[Project] = None
for m in projects:
if m.id == project_id:
project = m
break
if project is None:
raise NotFound(f"No project with id {project_id}.")
title = f"{project.config.title} — {default_title}"
description = project.description
return render_template("project_details.html", project=project, title=title,
description=description)
return app
| allennlp-gallery-main | app/app/ui.py |
allennlp-gallery-main | app/app/__init__.py |
|
from pythonjsonlogger import jsonlogger
class StackdriverJsonFormatter(jsonlogger.JsonFormatter):
"""
Custom log JSON log formatter that adds the severity member, allowing
end users to filter logs by the level of the log message emitted.
TODO: Parse request logs and add fields for each request element (user-agent
processing time, etc)
TODO:Add a timestamp that's used in place of Stackdriver's records (which
reflect the time the log was written to Stackdriver, I think).
"""
def add_fields(self, log_record, record, message_dict):
super(StackdriverJsonFormatter, self).add_fields(log_record, record,
message_dict)
log_record['severity'] = record.levelname
| allennlp-gallery-main | app/app/utils.py |
import json
from dataclasses import dataclass, field
from typing import Optional, List, Set
from datetime import date, datetime
from pathlib import Path
from os import listdir
from logging import getLogger
@dataclass(frozen=True)
class Author:
name: str
affiliation: Optional[str] = None
email: Optional[str] = None
photo_url: Optional[str] = None
twitter: Optional[str] = None
s2_author_page: Optional[str] = None
google_scholar_author_page: Optional[str] = None
def initials(self) -> str:
parts = [n[0] for n in self.name.split(" ")]
if len(parts) < 2:
return parts[0]
return "".join(parts[0] + parts[-1])
@staticmethod
def from_dict(obj: dict) -> "Author":
return Author(
obj["name"],
obj.get("affiliation"),
obj.get("email"),
obj.get("photo_url"),
obj.get("twitter"),
obj.get("s2_author_page"),
obj.get("google_scholar_author_page"),
)
@dataclass(frozen=True)
class Dataset:
name: str
link: str
@staticmethod
def from_dict(obj: dict) -> "Dataset":
return Dataset(obj["name"], obj["link"])
@dataclass(frozen=True)
class Paper:
title: str
link: str
@staticmethod
def from_dict(obj: dict) -> "Paper":
return Paper(obj["title"], obj["link"])
@dataclass(frozen=True)
class ProjectConfig:
title: str
authors: List[Author]
submission_date: date
github_link: str
allennlp_version: str
datasets: List[Dataset] = field(default_factory=list)
tags: List[str] = field(default_factory=list)
supported_languages: List[str] = field(default_factory=list)
papers: List[Paper] = field(default_factory=list)
demo_link: Optional[str] = None
def affiliations(self) -> Set[str]:
return {
author.affiliation
for author in self.authors
if author.affiliation is not None
}
@staticmethod
def from_dict(obj: dict) -> "ProjectConfig":
return ProjectConfig(
obj["title"],
[Author.from_dict(a) for a in obj["authors"]],
datetime.strptime(obj["submission_date"], "%Y-%m-%d").date(),
obj["github_link"],
obj["allennlp_version"],
[Dataset.from_dict(d) for d in obj.get("datasets", [])],
obj.get("tags", []),
obj.get("supported_languages", []),
[Paper.from_dict(p) for p in obj.get("papers", [])],
obj.get("demo_link"),
)
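# A minimal config.json accepted by ProjectConfig.from_dict (values are illustrative; datasets,
# tags, supported_languages, papers and demo_link are optional):
#
#   {
#     "title": "Example Project",
#     "authors": [{"name": "Jane Doe", "affiliation": "AI2"}],
#     "submission_date": "2021-05-01",
#     "github_link": "https://github.com/example/example-project",
#     "allennlp_version": "2.5.0"
#   }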
@dataclass(frozen=True)
class Project:
id: str
config: ProjectConfig
description: str
@staticmethod
def from_dict(id: str, config: dict, description: str) -> "Project":
return Project(id, ProjectConfig.from_dict(config), description)
def load_all_projects() -> List[Project]:
logger = getLogger(__name__)
projects = []
projects_dir = Path(Path(__file__) / ".." / "projects").resolve()
for mid in listdir(projects_dir):
path = projects_dir / mid
        if not path.is_dir():
continue
try:
logger.info(f"Loading config from {path}")
with open(path / "config.json") as cf:
with open(path / "description.md") as df:
projects.append(Project.from_dict(mid, json.load(cf), df.read()))
except FileNotFoundError as err:
logger.error(f"Project '{mid}' Skipped: {err}")
continue
return projects
if __name__ == "__main__":
from logging import basicConfig, INFO
basicConfig(level=INFO)
load_all_projects()
| allennlp-gallery-main | app/app/projects.py |
# -*- coding: utf-8 -*-
import requests
import time
import math
import signal
def is_ok(url: str) -> bool:
"""
Returns True if the provided URL responds with a 2XX when fetched via
a HTTP GET request.
"""
try:
resp = requests.get(url)
except:
return False
return True if math.floor(resp.status_code / 100) == 2 else False
def scan():
"""
Broadcasts the availability of the proxy's HTTP server once both the
API and UI are ready for traffic.
This script exists solely to ease confusion locally, as both Flask and
the HTTP server bundled with `create-react-app` output logs telling the
user about the ports they're bound to (even though they're inaccessible).
"""
print("")
print("⚓️ Ahoy!")
print("")
print(
"Your application is starting and will be available at " +
"http://localhost:8080 when it's ready."
)
print("")
# If someone tries to cancel the `docker-compose up` invocation, docker
# will send a SIGTERM to the program. We need to handle this and set a
# value that allows the loop to be broken.
term = False
def handle_interrupt(signal_number, stack_frame):
        # `term` is a local of scan(), so it must be rebound with nonlocal (global would not work here)
        nonlocal term
term = True
signal.signal(signal.SIGTERM, handle_interrupt)
last_check = time.perf_counter()
is_app_live = False
while (is_app_live != True):
if term is True:
break
# We don't use `time.sleep()`, as that'd prevent us from being able
# to break the loop quickly in the event of a SIGTERM.
now = time.perf_counter()
if (now - last_check >= 5):
last_check = now
if not is_app_live:
is_app_live = is_ok("http://app:8000")
if is_app_live:
print("")
print("✨ Your local environment is ready:")
print("")
print(" http://localhost:8080")
print("")
print("⛵️ Smooth sailing!")
print("")
if __name__ == "__main__":
scan()
| allennlp-gallery-main | sonar/ping.py |
'''
This script is the source code for a project that
Field Cady and Oren Etzioni are working on.
'''
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from scipy import stats
import pys2 # library internal to Allen Institute
# The field we use to tell rank paper importance
CITATION_COUNT_FIELD = "estimated citation count"
# Pull down table of AI papers from Redshift, and add on columns for
# the final US/China heuristics and the cutoffs for levels of how much
# a paper is cited.
df = pys2._evaluate_redshift_query('select * from ai_papers_any_author_table where yr<2019 and yr>1980')
df["citation_count"] = df[CITATION_COUNT_FIELD].astype(int)
df['china'] = df.dotcn.astype(bool) | df.dothk.astype(bool) \
| df.china_name.astype(bool) | df.china_language.astype(bool) \
| df.china_city.astype(bool)
df['us'] = df.dotedu.astype(bool) | df.dotedu.astype(bool)
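# (x - x) + x.quantile(q) broadcasts each year's citation-count quantile back onto every row of
# that year's group, so each paper row carries the cutoff for its publication year.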
df['top_half_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x-x)+x.quantile(0.5))
df['top_tenth_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x-x)+x.quantile(0.9))
df['top_twentieth_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x-x)+x.quantile(0.95))
df['top_hundredth_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x-x)+x.quantile(0.99))
df['top_halfpercent_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x-x)+x.quantile(0.995))
#
# Plot all figures
#
# Number of China papers vs their market share in the bottom 50% and top 10%
plt.close()
sums = df.groupby('yr').china.sum()
ax1 = sums.plot(label="# Papers", color='b')
ax1.set_xlabel(''); ax1.set_ylabel('# Papers')
ax2 = ax1.twinx()
df[df.citation_count>df.top_tenth_cutoff].groupby('yr').china.mean().plot(
label='Top 10%', ax=ax2, color='g', style='--')
df[df.citation_count<=df.top_half_cutoff].groupby('yr').china.mean().plot(
label='Bottom Half', ax=ax2, color='r', style='--')
ax2.set_xlabel(''); ax2.set_ylabel('Market Shares')
plt.title("Chinas Drop was in Bad Papers")
plt.minorticks_on()
plt.legend()
plt.savefig('chinas_drop_vs_market_share.jpg')
# Raw number of papers
plt.close()
df.groupby('yr').china.sum().plot(label='China')
df.groupby('yr').us.sum().plot(label='US')
plt.title('All AI Papers')
plt.legend(); plt.xlabel(''); plt.ylabel('# Papers')
plt.minorticks_on()
plt.savefig('all_papers.jpg')
# Market share for different levels of citation
cutoffcol_title_pairs = [
('top_half_cutoff', 'Top 50% of AI Papers'),
('top_twentieth_cutoff', 'Share of Papers in the Top 10% '),
('top_halfpercent_cutoff', 'Share of Papers in the Top 1%')
]
for cutoffcol, title in cutoffcol_title_pairs:
print(title)
# Create time series for each country
china_ts = df[df.citation_count>df[cutoffcol]].groupby('yr').china.mean()
us_ts = df[df.citation_count>df[cutoffcol]].groupby('yr').us.mean()
# fit lines to last 4 years
china_slope, china_intercept, r_value, p_value, std_err = stats.linregress([2015, 2016, 2017, 2018],china_ts[-4:])
us_slope, us_intercept, r_value, p_value, std_err = stats.linregress([2015, 2016, 2017, 2018],us_ts[-4:])
intercept_year = (china_intercept-us_intercept) / (us_slope-china_slope)
# Compute interpolations to plot
fit_years = pd.Series(range(2014, 2026), index=range(2014, 2026))
china_fit = fit_years*china_slope+china_intercept
us_fit = fit_years*us_slope+us_intercept
# Save a CSV
pd.DataFrame({'China': china_ts, 'US':us_ts,
'China Fit': china_fit, 'US Fit': us_fit}).to_csv(title+'.csv')
# Plot
plt.close()
china_ts.plot(label='China')
us_ts.plot(label='US')
china_fit.plot(style='--', label='China Fit')
us_fit.plot(style='--', label='US Fit')
plt.title(title + ' : Intercept in ' + str(int(intercept_year)))
plt.legend(); plt.xlabel(''); plt.ylabel('Market Share')
plt.minorticks_on()
plt.savefig(title + '.jpg')
| china_ai-master | main.py |
from setuptools import find_packages, setup
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp whilst setting up.
VERSION = {} # type: ignore
with open("allennlp_server/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="allennlp-server",
version=VERSION["VERSION"],
description="Simple demo server for AllenNLP models and training config builder.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Flake8",
"Framework :: Flask",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp simple demo server serve models configuration file NLP deep learning machine reading",
url="https://github.com/allenai/allennlp-server",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"test_fixtures",
"test_fixtures.*",
"benchmarks",
"benchmarks.*",
]
),
install_requires=[
"allennlp>=2.0,<3.0",
"allennlp_models>=2.0,<3.0",
"flask>=1.0.2",
"flask-cors>=3.0.7",
"gevent>=1.3.6",
],
include_package_data=True,
python_requires=">=3.6.1",
zip_safe=False,
)
| allennlp-server-master | setup.py |
import os
_MAJOR = "1"
_MINOR = "0"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("ALLENNLP_SERVER_VERSION_SUFFIX", "")
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| allennlp-server-master | allennlp_server/version.py |
import allennlp_server.commands
| allennlp-server-master | allennlp_server/__init__.py |
from allennlp_server.commands.server_simple import SimpleServer
| allennlp-server-master | allennlp_server/commands/__init__.py |
"""
A `Flask <https://palletsprojects.com/p/flask/>`_ server for serving predictions
from a single AllenNLP model. It also includes a very, very bare-bones
web front-end for exploring predictions (or you can provide your own).
$ allennlp serve --help
usage: allennlp serve [-h] --archive-path ARCHIVE_PATH --predictor PREDICTOR
[--weights-file WEIGHTS_FILE]
[--cuda-device CUDA_DEVICE] [-o OVERRIDES]
[--static-dir STATIC_DIR] [--title TITLE]
[--field-name FIELD_NAME] [--host HOST] [-p PORT]
[--include-package INCLUDE_PACKAGE]
Serve up a simple model.
optional arguments:
-h, --help show this help message and exit
--archive-path ARCHIVE_PATH
path to trained archive file
--predictor PREDICTOR
name of predictor
--cuda-device CUDA_DEVICE
id of GPU to use (if any) (default = -1)
-o OVERRIDES, --overrides OVERRIDES
a JSON structure used to override the experiment
configuration
--static-dir STATIC_DIR
serve index.html from this directory
--title TITLE change the default page title (default = AllenNLP
Demo)
--field-name FIELD_NAME
field names to include in the demo
--host HOST interface to serve the demo on (default = 127.0.0.1)
-p PORT, --port PORT port to serve the demo on (default = 8000)
--include-package INCLUDE_PACKAGE
additional packages to include
"""
import argparse
import json
import logging
import os
import sys
from string import Template
from typing import List, Callable, Optional, Any, Iterable, Dict
from allennlp.commands import Subcommand
from allennlp.common import JsonDict
from allennlp.common.checks import check_for_gpu
from allennlp.predictors import Predictor
from flask import Flask, request, Response, jsonify, send_file, send_from_directory
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
logger = logging.getLogger(__name__)
class ServerError(Exception):
def __init__(
self, message: str, status_code: int = 400, payload: Optional[Iterable[Any]] = None
) -> None:
super().__init__(self)
self.message = message
self.status_code = status_code
self.payload = payload
def to_dict(self) -> Dict[Any, Any]:
error_dict = dict(self.payload or ())
error_dict["message"] = self.message
return error_dict
def make_app(
predictor: Predictor,
field_names: List[str] = None,
static_dir: str = None,
sanitizer: Callable[[JsonDict], JsonDict] = None,
title: str = "AllenNLP Demo",
) -> Flask:
"""
Creates a Flask app that serves up the provided ``Predictor``
along with a front-end for interacting with it.
If you want to use the built-in bare-bones HTML, you must provide the
field names for the inputs (which will be used both as labels
and as the keys in the JSON that gets sent to the predictor).
If you would rather create your own HTML, call it index.html
and provide its directory as ``static_dir``. In that case you
don't need to supply the field names -- that information should
be implicit in your demo site. (Probably the easiest thing to do
is just start with the bare-bones HTML and modify it.)
In addition, if you want somehow transform the JSON prediction
(e.g. by removing probabilities or logits)
you can do that by passing in a ``sanitizer`` function.
"""
if static_dir is not None:
static_dir = os.path.abspath(static_dir)
if not os.path.exists(static_dir):
logger.error("app directory %s does not exist, aborting", static_dir)
sys.exit(-1)
elif static_dir is None and field_names is None:
print(
"Neither build_dir nor field_names passed. Demo won't render on this port.\n"
"You must use nodejs + react app to interact with the server."
)
app = Flask(__name__)
@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response:
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/")
def index() -> Response:
if static_dir is not None:
return send_file(os.path.join(static_dir, "index.html"))
else:
html = _html(title, field_names or [])
return Response(response=html, status=200)
@app.route("/predict", methods=["POST", "OPTIONS"])
def predict() -> Response:
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_json(data)
if sanitizer is not None:
prediction = sanitizer(prediction)
log_blob = {"inputs": data, "outputs": prediction}
logger.info("prediction: %s", json.dumps(log_blob))
return jsonify(prediction)
@app.route("/predict_batch", methods=["POST", "OPTIONS"])
def predict_batch() -> Response:
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_batch_json(data)
if sanitizer is not None:
prediction = [sanitizer(p) for p in prediction]
return jsonify(prediction)
@app.route("/<path:path>")
def static_proxy(path: str) -> Response:
if static_dir is not None:
return send_from_directory(static_dir, path)
else:
raise ServerError("static_dir not specified", 404)
return app
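# Example request against a running server (assumes the default host/port and a model whose
# predictor takes "passage" and "question" fields; adjust the JSON keys to your field names):
#
#   curl -X POST http://127.0.0.1:8000/predict \
#        -H 'Content-Type: application/json' \
#        -d '{"passage": "...", "question": "..."}'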
def _get_predictor(args: argparse.Namespace) -> Predictor:
check_for_gpu(args.cuda_device)
return Predictor.from_path(
args.archive_path,
predictor_name=args.predictor,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
@Subcommand.register("serve")
class SimpleServer(Subcommand):
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Serve up a simple model."""
subparser = parser.add_parser(
self.name,
description=description,
help="Serve up a simple model.",
)
subparser.add_argument(
"--archive-path",
type=str,
required=True,
help="path to trained archive file",
)
subparser.add_argument("--predictor", type=str, help="registered name of predictor")
subparser.add_argument(
"--cuda-device", type=int, default=-1, help="id of GPU to use (if any)"
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help="a JSON structure used to override the experiment configuration",
)
subparser.add_argument(
"--static-dir", type=str, help="serve index.html from this directory"
)
subparser.add_argument(
"--title",
type=str,
help="change the default page title",
default="AllenNLP Demo",
)
subparser.add_argument(
"--field-name",
type=str,
action="append",
dest="field_names",
metavar="FIELD_NAME",
help="field names to include in the demo",
)
subparser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="interface to serve the demo on",
)
subparser.add_argument(
"-p", "--port", type=int, default=8000, help="port to serve the demo on"
)
subparser.set_defaults(func=serve)
return subparser
def serve(args: argparse.Namespace) -> None:
predictor = _get_predictor(args)
app = make_app(
predictor=predictor,
field_names=args.field_names,
static_dir=args.static_dir,
title=args.title,
)
CORS(app)
http_server = WSGIServer((args.host, args.port), app)
print(f"Model loaded, serving demo on http://{args.host}:{args.port}")
http_server.serve_forever()
#
# HTML and Templates for the default bare-bones app are below
#
_PAGE_TEMPLATE = Template(
"""
<html>
<head>
<title>
$title
</title>
<style>
$css
</style>
</head>
<body>
<div class="pane-container">
<div class="pane model">
<div class="pane__left model__input">
<div class="model__content">
<h2><span>$title</span></h2>
<div class="model__content">
$inputs
<div class="form__field form__field--btn">
<button type="button" class="btn btn--icon-disclosure" onclick="predict()">
Predict
</button>
</div>
</div>
</div>
</div>
<div class="pane__right model__output model__output--empty">
<div class="pane__thumb"></div>
<div class="model__content">
<div id="output" class="output">
<div class="placeholder">
<div class="placeholder__content">
<p>Run model to view results</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</body>
<script>
function predict() {
var quotedFieldList = $qfl;
var data = {};
quotedFieldList.forEach(function(fieldName) {
data[fieldName] = document.getElementById("input-" + fieldName).value;
})
var xhr = new XMLHttpRequest();
xhr.open('POST', 'predict');
xhr.setRequestHeader('Content-Type', 'application/json');
xhr.onload = function() {
if (xhr.status == 200) {
// If you want a more impressive visualization than just
// outputting the raw JSON, change this part of the code.
var htmlResults = "<pre>" + JSON.stringify(JSON.parse(xhr.responseText), null, 2) + "</pre>";
document.getElementById("output").innerHTML = htmlResults;
}
};
xhr.send(JSON.stringify(data));
}
</script>
</html>
"""
)
_SINGLE_INPUT_TEMPLATE = Template(
"""
<div class="form__field">
<label for="input-$field_name">$field_name</label>
<input type="text" id="input-$field_name" type="text" required value placeholder="input goes here">
</div>
"""
)
_CSS = """
body,
html {
min-width: 48em;
background: #f9fafc;
font-size: 16px
}
* {
font-family: sans-serif;
color: #232323
}
section {
background: #fff
}
code,
code span,
pre,
.output {
font-family: 'Roboto Mono', monospace!important
}
code {
background: #f6f8fa
}
li,
p,
td,
th {
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
font-size: 1.125em;
line-height: 1.5em;
margin: 1.2em 0
}
pre {
margin: 2em 0
}
h1,
h2 {
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
font-weight: 300
}
h2 {
font-size: 2em;
color: rgba(35, 35, 35, .75)
}
img {
max-width: 100%
}
hr {
display: block;
border: none;
height: .375em;
background: #f6f8fa
}
blockquote,
hr {
margin: 2.4em 0
}
.btn {
text-decoration: none;
cursor: pointer;
text-transform: uppercase;
font-size: 1em;
margin: 0;
-moz-appearance: none;
-webkit-appearance: none;
border: none;
color: #fff!important;
display: block;
background: #2085bc;
padding: .9375em 3.625em;
-webkit-transition: background-color .2s ease, opacity .2s ease;
transition: background-color .2s ease, opacity .2s ease
}
.btn.btn--blue {
background: #2085bc
}
.btn:focus,
.btn:hover {
background: #40affd;
outline: 0
}
.btn:focus {
box-shadow: 0 0 1.25em rgba(50, 50, 150, .05)
}
.btn:active {
opacity: .66;
background: #2085bc;
-webkit-transition-duration: 0s;
transition-duration: 0s
}
.btn:disabled,
.btn:disabled:active,
.btn:disabled:hover {
cursor: default;
background: #d0dae3
}
form {
display: block
}
.form__field {
-webkit-transition: margin .2s ease;
transition: margin .2s ease
}
.form__field+.form__field {
margin-top: 2.5em
}
.form__field label {
display: block;
font-weight: 600;
font-size: 1.125em
}
.form__field label+* {
margin-top: 1.25em
}
.form__field input[type=text],
.form__field textarea {
-moz-appearance: none;
-webkit-appearance: none;
width: 100%;
font-size: 1em;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
padding: .8125em 1.125em;
color: #232323;
border: .125em solid #d4dce2;
display: block;
box-sizing: border-box;
-webkit-transition: background-color .2s ease, color .2s ease, border-color .2s ease, opacity .2s ease;
transition: background-color .2s ease, color .2s ease, border-color .2s ease, opacity .2s ease
}
.form__field input[type=text]::-webkit-input-placeholder,
.form__field textarea::-webkit-input-placeholder {
color: #b4b4b4
}
.form__field input[type=text]:-moz-placeholder,
.form__field textarea:-moz-placeholder {
color: #b4b4b4
}
.form__field input[type=text]::-moz-placeholder,
.form__field textarea::-moz-placeholder {
color: #b4b4b4
}
.form__field input[type=text]:-ms-input-placeholder,
.form__field textarea:-ms-input-placeholder {
color: #b4b4b4
}
.form__field input[type=text]:focus,
.form__field textarea:focus {
outline: 0;
border-color: #63a7d4;
box-shadow: 0 0 1.25em rgba(50, 50, 150, .05)
}
.form__field textarea {
resize: vertical;
min-height: 8.25em
}
.form__field .btn {
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
-webkit-touch-callout: none
}
.form__field--btn {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: row;
-ms-flex-direction: row;
-webkit-box-orient: horizontal;
-webkit-box-direction: normal;
flex-direction: row;
-webkit-justify-content: flex-end;
-ms-justify-content: flex-end;
-webkit-box-pack: end;
-ms-flex-pack: end;
justify-content: flex-end
}
@media screen and (max-height:760px) {
.form__instructions {
margin: 1.875em 0 1.125em
}
.form__field:not(.form__field--btn)+.form__field:not(.form__field--btn) {
margin-top: 1.25em
}
}
body,
html {
width: 100%;
height: 100%;
margin: 0;
padding: 0;
font-family: 'Source Sans Pro', sans-serif
}
h1 {
font-weight: 300
}
.model__output {
background: #fff
}
.model__output.model__output--empty {
background: 0 0
}
.placeholder {
width: 100%;
height: 100%;
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-align-items: center;
-ms-flex-align: center;
-webkit-box-align: center;
align-items: center;
-webkit-justify-content: center;
-ms-justify-content: center;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
-webkit-touch-callout: none;
cursor: default
}
.placeholder .placeholder__content {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: column;
-ms-flex-direction: column;
-webkit-box-orient: vertical;
-webkit-box-direction: normal;
flex-direction: column;
-webkit-align-items: center;
-ms-flex-align: center;
-webkit-box-align: center;
align-items: center;
text-align: center
}
.placeholder svg {
display: block
}
.placeholder svg.placeholder__empty,
.placeholder svg.placeholder__error {
width: 6em;
height: 3.625em;
fill: #e1e5ea;
margin-bottom: 2em
}
.placeholder svg.placeholder__error {
width: 4.4375em;
height: 4em
}
.placeholder p {
font-size: 1em;
margin: 0;
padding: 0;
color: #9aa8b2
}
.placeholder svg.placeholder__working {
width: 3.4375em;
height: 3.4375em;
-webkit-animation: working 1s infinite linear;
animation: working 1s infinite linear
}
@-webkit-keyframes working {
0% {
-webkit-transform: rotate(0deg)
}
100% {
-webkit-transform: rotate(360deg)
}
}
@keyframes working {
0% {
-webkit-transform: rotate(0deg);
-ms-transform: rotate(0deg);
transform: rotate(0deg)
}
100% {
-webkit-transform: rotate(360deg);
-ms-transform: rotate(360deg);
transform: rotate(360deg)
}
}
.model__content {
padding: 1.875em 2.5em;
margin: auto;
-webkit-transition: padding .2s ease;
transition: padding .2s ease
}
.model__content:not(.model__content--srl-output) {
max-width: 61.25em
}
.model__content h2 {
margin: 0;
padding: 0;
font-size: 1em
}
.model__content h2 span {
font-size: 2em;
color: rgba(35, 35, 35, .75)
}
.model__content h2 .tooltip,
.model__content h2 span {
vertical-align: top
}
.model__content h2 span+.tooltip {
margin-left: .4375em
}
.model__content>h2:first-child {
margin: -.25em 0 0 -.03125em
}
.model__content__summary {
font-size: 1em;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
padding: 1.25em;
background: #f6f8fa
}
@media screen and (min-height:800px) {
.model__content {
padding-top: 4.6vh;
padding-bottom: 4.6vh
}
}
.pane-container {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: column;
-ms-flex-direction: column;
-webkit-box-orient: vertical;
-webkit-box-direction: normal;
flex-direction: column;
height: 100%
}
.pane {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: row;
-ms-flex-direction: row;
-webkit-box-orient: horizontal;
-webkit-box-direction: normal;
flex-direction: row;
position: relative;
-webkit-box-flex: 2;
-webkit-flex: 2;
-ms-flex: 2;
flex: 2;
height: auto;
min-height: 100%;
min-height: 34.375em
}
.pane__left,
.pane__right {
width: 100%;
height: 100%;
-webkit-align-self: stretch;
-ms-flex-item-align: stretch;
align-self: stretch;
min-width: 24em;
min-height: 34.375em
}
.pane__left {
height: auto;
min-height: 100%
}
.pane__right {
width: 100%;
overflow: auto;
height: auto;
min-height: 100%
}
.pane__right .model__content.model__content--srl-output {
display: inline-block;
margin: auto
}
.pane__thumb {
height: auto;
min-height: 100%;
margin-left: -.625em;
position: absolute;
width: 1.25em
}
.pane__thumb:after {
display: block;
position: absolute;
height: 100%;
top: 0;
content: "";
width: .25em;
background: #e1e5ea;
left: .5em
}
"""
def _html(title: str, field_names: List[str]) -> str:
"""
Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model.
"""
inputs = "".join(
_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name) for field_name in field_names
)
quoted_field_names = (f"'{field_name}'" for field_name in field_names)
quoted_field_list = f"[{','.join(quoted_field_names)}]"
return _PAGE_TEMPLATE.substitute(title=title, css=_CSS, inputs=inputs, qfl=quoted_field_list)
| allennlp-server-master | allennlp_server/commands/server_simple.py |
allennlp-server-master | tests/__init__.py |
|
allennlp-server-master | tests/commands/__init__.py |
|
import importlib
import io
import json
import os
import sys
from contextlib import redirect_stdout
import flask.testing
from allennlp.commands import main
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import JsonDict
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_server.commands.server_simple import make_app
def post_json(client: flask.testing.FlaskClient, endpoint: str, data: JsonDict) -> flask.Response:
return client.post(endpoint, content_type="application/json", data=json.dumps(data))
PAYLOAD = {
"passage": """
The Matrix is a 1999 science fiction action film written and directed by The Wachowskis,
starring Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano.""",
"question": """Who stars in the matrix?""",
}
class TestSimpleServer(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
importlib.import_module("allennlp_models.rc")
archive = load_archive("tests/fixtures/bidaf/model.tar.gz")
self.bidaf_predictor = Predictor.from_archive(
archive, "allennlp_models.rc.ReadingComprehensionPredictor"
)
def teardown_method(self):
super().teardown_method()
try:
os.remove("access.log")
os.remove("error.log")
except FileNotFoundError:
pass
def test_standard_model(self):
app = make_app(predictor=self.bidaf_predictor, field_names=["passage", "question"])
app.testing = True
client = app.test_client()
# First test the HTML
response = client.get("/")
data = response.get_data()
assert b"passage" in data
assert b"question" in data
# Now test the backend
response = post_json(client, "/predict", PAYLOAD)
data = json.loads(response.get_data())
assert "best_span_str" in data
assert "span_start_logits" in data
# Test the batch predictor
batch_size = 8
response = post_json(client, "/predict_batch", [PAYLOAD] * batch_size)
data_list = json.loads(response.get_data())
assert len(data_list) == batch_size
for data in data_list:
assert "best_span_str" in data
assert "span_start_logits" in data
def test_subcommand_plugin_is_available(self):
# Test originally copied from
# `allennlp.tests.commands.main_test.TestMain.test_subcommand_plugin_is_available`.
sys.argv = ["allennlp"]
with io.StringIO() as buf, redirect_stdout(buf):
main()
output = buf.getvalue()
assert " serve" in output
def test_sanitizer(self):
def sanitize(result: JsonDict) -> JsonDict:
return {key: value for key, value in result.items() if key.startswith("best_span")}
app = make_app(
predictor=self.bidaf_predictor,
field_names=["passage", "question"],
sanitizer=sanitize,
)
app.testing = True
client = app.test_client()
response = post_json(client, "/predict", PAYLOAD)
data = json.loads(response.get_data())
assert "best_span_str" in data
assert "span_start_logits" not in data
batch_size = 8
response = post_json(client, "/predict_batch", [PAYLOAD] * batch_size)
data_list = json.loads(response.get_data())
assert len(data_list) == batch_size
for data in data_list:
assert "best_span_str" in data
assert "span_start_logits" not in data
def test_static_dir(self):
html = """<html><body>THIS IS A STATIC SITE</body></html>"""
jpg = """something about a jpg"""
with open(os.path.join(self.TEST_DIR, "index.html"), "w") as f:
f.write(html)
with open(os.path.join(self.TEST_DIR, "jpg.txt"), "w") as f:
f.write(jpg)
app = make_app(predictor=self.bidaf_predictor, static_dir=self.TEST_DIR)
app.testing = True
client = app.test_client()
response = client.get("/")
data = response.get_data().decode("utf-8")
assert data == html
response = client.get("jpg.txt")
data = response.get_data().decode("utf-8")
assert data == jpg
| allennlp-server-master | tests/commands/server_simple_test.py |
#!/usr/bin/env python3
import argparse
from typing import Dict
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("version_type", choices=["stable", "latest", "current"])
parser.add_argument("--minimal", action="store_true", default=False)
parser.add_argument("--as-range", action="store_true", default=False)
return parser.parse_args()
def post_process(version: str, minimal: bool = False, as_range: bool = False):
assert not (minimal and as_range)
if version.startswith("v"):
version = version[1:]
if as_range:
major, minor, *_ = version.split(".")
return f">={version},<{major}.{int(minor)+1}"
return version if minimal else f"v{version}"
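# e.g. post_process("v1.2.3") == "v1.2.3", post_process("v1.2.3", minimal=True) == "1.2.3",
# and post_process("v1.2.3", as_range=True) == ">=1.2.3,<1.3"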
def get_current_version() -> str:
VERSION: Dict[str, str] = {}
with open("allennlp_server/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
return VERSION["VERSION"]
def get_latest_version() -> str:
# Import this here so this requirements isn't mandatory when we just want to
# call `get_current_version`.
import requests
resp = requests.get("https://api.github.com/repos/allenai/allennlp-server/tags")
return resp.json()[0]["name"]
def get_stable_version() -> str:
import requests
resp = requests.get("https://api.github.com/repos/allenai/allennlp-server/releases/latest")
return resp.json()["tag_name"]
def main() -> None:
opts = parse_args()
if opts.version_type == "stable":
print(post_process(get_stable_version(), opts.minimal, opts.as_range))
elif opts.version_type == "latest":
print(post_process(get_latest_version(), opts.minimal, opts.as_range))
elif opts.version_type == "current":
print(post_process(get_current_version(), opts.minimal, opts.as_range))
else:
raise NotImplementedError
if __name__ == "__main__":
main()
| allennlp-server-master | scripts/get_version.py |
import json
import pickle
import logging
from collections import defaultdict
from typing import Any, Dict, List, Iterable, Text
from overrides import overrides
import torch
from allennlp.data.fields import (
MetadataField,
TextField,
IndexField,
ListField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("p3_jsonl")
class P3ClusterReader(DatasetReader):
def __init__(
self,
model_name: str = "google/t5-xl-lm-adapt",
max_query_length: int = 512,
max_answer_length: int = 256,
return_original_input: bool = False,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
self.return_original_input = return_original_input
self._transformer_model_name = model_name
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(model_name)
}
self._max_query_length = max_query_length
self._max_answer_length = max_answer_length
self._stats = None
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
for instance in self.shard_iterable(self.__read(file_path)):
yield instance
def __read(self, file_path: str) -> Iterable[Instance]:
self._stats = defaultdict(int)
logger.info(f"Reading data from {file_path}")
for line in open(file_path):
instance_data = json.loads(line)
if "target" not in instance_data or "input" not in instance_data:
self._stats["Instances without inputs or targets (skipped)"] += 1
continue
if not isinstance(instance_data["target"], str):
self._stats["Instances whose targets are not strings (skipped)"] += 1
continue
target = instance_data["target"].strip()
if "answer_choices" not in instance_data:
self._stats["Instances without answer options (kept)"] += 1
answer_options = [target]
else:
answer_options = [c.strip() for c in instance_data["answer_choices"]]
if target not in answer_options:
answer_options.append(target)
self._stats["Instances with targets not in answer choices (kept)"] += 1
yield self.text_to_instance(
instance_data["input"],
target,
answer_options
)
logger.info("Dataset stats:")
for key, value in self._stats.items():
logger.info(f"\t{key}: {value}")
def text_to_instance(
self, # type: ignore # pylint: disable=arguments-differ
input_text: str,
target: str,
options: List[str],
metadata: Dict[str, Any] = None,
) -> Instance:
fields = {}
tokenized_input = self._tokenizer.tokenize(input_text)
if len(tokenized_input) > self._max_query_length:
self._stats["Truncated inputs"] += 1
tokenized_input = tokenized_input[:self._max_query_length]
input_field = TextField(tokenized_input)
fields["prompt_and_input"] = input_field
if self.return_original_input:
fields['pretokenized_input'] = input_text
answer_option_fields = []
for option in options:
tokenized_option = self._tokenizer.tokenize(option)
if len(tokenized_option) > self._max_answer_length:
self._stats["Truncated options"] += 1
tokenized_option = tokenized_option[:self._max_answer_length]
answer_option_fields.append(TextField(tokenized_option))
options_list_field = ListField(answer_option_fields)
fields["answer_options"] = options_list_field
        # Exact match: __read guarantees `target` is one of `options`, and a
        # substring check could otherwise select the wrong option.
        answer_index = None
        for i, option in enumerate(options):
            if option == target:
                answer_index = i
                break
fields["correct_answer_index"] = IndexField(answer_index, options_list_field)
if metadata is not None:
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
for field in instance.fields["answer_options"].field_list:
field.token_indexers = self._token_indexers
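# A minimal sketch of the JSONL line format __read above expects; the prompt text,
# target, and answer choices here are hypothetical placeholders.
def _example_p3_jsonl_line() -> str:
    return json.dumps({
        "input": "Premise: The sky is clear tonight. Can the stars be seen? Options: yes, no",
        "target": "yes",
        "answer_choices": ["yes", "no"],
    })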
| data-efficient-finetuning-main | attribution/p3_jsonl_reader.py |
import json
import logging
import random
from collections import defaultdict
from typing import List, Iterable, Optional, Tuple, Dict
import torch
from overrides import overrides
import datasets
from allennlp.data.fields import (
MetadataField,
TextField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from transformers import AutoTokenizer
from natural_instructions.ni_collator import DataCollatorForNI
logger = logging.getLogger(__name__)
# full reader for ni instructions, similar to drop reader
@DatasetReader.register("ni_reader")
class NaturalInstructionsReader(DatasetReader):
def __init__(
self,
model_name: str = "google/t5-small-lm-adapt",
max_query_length: int = 1024,
split_name: str = "train",
return_original_instance: bool = False,
max_num_instances_per_task: int = 100,
max_num_instances_per_eval_task: int = 100,
num_pos_examples: int = 0, # set to '2' for the 'tk-instruct' model
add_task_definition: bool = True, # set to true for tk-instruct
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
self.return_original_instance = return_original_instance
self._dataset = datasets.load_dataset(
"natural_instructions/ni_dataset.py",
max_num_instances_per_task=max_num_instances_per_task,
max_num_instances_per_eval_task=max_num_instances_per_eval_task,
split=split_name,
)
self._transformer_model_name = model_name
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._collator = DataCollatorForNI(
tokenizer=AutoTokenizer.from_pretrained(model_name),
num_pos_examples=num_pos_examples,
add_task_definition=add_task_definition,
text_only=True,
max_source_length=max_query_length
)
self._token_indexers = {"tokens": PretrainedTransformerIndexer(model_name)}
self._max_query_length = max_query_length
self._stats = defaultdict(int)
@overrides
def _read(self, file_path) -> Iterable[Instance]:
for sample in self._dataset:
converted_sample = self._collator([sample])
yield self.text_to_instance(
converted_sample['inputs'][0],
sample['id'],
converted_sample['labels'][0])
def text_to_instance(
self,
input_text: str,
query_id: str,
target: str,
) -> Instance:
fields = {}
tokenized_input = self._tokenizer.tokenize(input_text)
if len(tokenized_input) > self._max_query_length:
self._stats["Truncated inputs"] += 1
tokenized_input = tokenized_input[: self._max_query_length]
input_field = TextField(tokenized_input)
fields["prompt_and_input"] = input_field
if self.return_original_instance:
fields["pretokenized_input"] = input_text
tokenized_target = self._tokenizer.tokenize(target)
if len(tokenized_target) > self._max_query_length:
self._stats["Truncated targets"] += 1
tokenized_target = tokenized_target[: self._max_query_length]
target_field = TextField(tokenized_target)
fields["target"] = target_field
if self.return_original_instance:
fields["pretokenized_target"] = target
query_id_field = MetadataField(query_id)
fields["instance_id"] = query_id_field
if self.return_original_instance:
fields["preindexed_id"] = query_id
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
instance.fields["target"].token_indexers = self._token_indexers
| data-efficient-finetuning-main | attribution/ni_reader.py |
import json
import logging
import random
from collections import defaultdict
from typing import List, Iterable, Optional, Tuple, Dict
import torch
from overrides import overrides
import datasets
from allennlp.data.fields import (
MetadataField,
TextField,
IndexField,
ListField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
# A reader for rank classification tasks.
class HuggingfaceReaderRankClassification(DatasetReader):
def __init__(
self,
model_name: str = "google/t5-small-lm-adapt",
max_query_length: int = 512,
split_name: str = "train",
val_size: int = 1000,
use_val_split: bool = True,
split_mapping: Dict[str, str] = {"train": "train", "validation": "validation"},
return_original_instance: bool = False,
seed: int = 42,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
dataset_name, subset_name = self.get_dataset_name()
data_dir = self.get_dataset_dir() # for story cloze
self._dataset_name = dataset_name
self._subset_name = subset_name
self.split_name = split_name
self.return_original_instance = return_original_instance
original_val_set = datasets.load_dataset(
dataset_name,
subset_name,
split=split_mapping["validation"],
data_dir=data_dir
)
        small_val_size = val_size  # validation sets smaller than this are treated as small
        val_split_size = val_size  # size of the validation split carved out when we split
if use_val_split:
if split_name == "train":
if len(original_val_set) >= small_val_size:
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split=split_mapping["train"], data_dir=data_dir
)
else:
# for small val sets, split out val from train and use old val as test
# this is because some casehold splits are specially designed, so I want
# to keep these as-is (rather than just split the val set in half)
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split=split_mapping["train"], data_dir=data_dir
).train_test_split(test_size=val_split_size, seed=seed)["train"]
if split_name == "validation":
# for large val sets, just split out from val
if len(original_val_set) >= small_val_size:
self._dataset = original_val_set.train_test_split(
train_size=val_split_size, seed=seed
)["train"]
else:
# for small val sets, split out val from train and use old val as test
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split=split_mapping["train"], data_dir=data_dir
).train_test_split(test_size=val_split_size, seed=seed)["test"]
elif split_name == "test":
# for large val sets, test is the small split from val
if len(original_val_set) >= small_val_size:
self._dataset = original_val_set.train_test_split(
train_size=val_split_size, seed=seed
)["test"]
else:
# for small val sets, split our new val from train (val becomes test)
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split=split_mapping["validation"], data_dir=data_dir
)
else:
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split=split_mapping[split_name], data_dir=data_dir
)
if split_name == "train":
self._dataset = self._dataset.shuffle(seed)
self._transformer_model_name = model_name
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._token_indexers = {"tokens": PretrainedTransformerIndexer(model_name)}
self._max_query_length = max_query_length
self._stats = defaultdict(int)
def hf_to_instance(self, instance) -> Tuple[str, str]:
raise NotImplementedError("Implement a dataset-specific version")
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
raise NotImplementedError("Specify ds name for hf")
    # We usually don't need this, but Story Cloze requires it.
def get_dataset_dir(self) -> Optional[str]:
return None
@overrides
def _read(self, file_path) -> Iterable[Instance]:
for sample in self._dataset:
converted_sample = self.hf_to_instance(sample)
for inputs, options, idx in converted_sample:
yield self.text_to_instance(inputs, options, idx)
def text_to_instance(
self, input_text: str, answer_options: List[str], correct_answer_idx: int
) -> Instance:
fields = {}
tokenized_input = self._tokenizer.tokenize(input_text)
if len(tokenized_input) > self._max_query_length:
self._stats["Truncated inputs"] += 1
tokenized_input = tokenized_input[: self._max_query_length]
input_field = TextField(tokenized_input)
fields["prompt_and_input"] = input_field
if self.return_original_instance:
fields["pretokenized_input"] = input_text
answer_option_fields = [
TextField(self._tokenizer.tokenize(option)) for option in answer_options
]
options_list_field = ListField(answer_option_fields)
fields["answer_options"] = options_list_field
if self.return_original_instance:
fields["answer_options_pretokenized"] = answer_options
fields["correct_answer_index"] = IndexField(
correct_answer_idx, options_list_field
)
if self.return_original_instance:
fields["correct_answer_index_value"] = correct_answer_idx
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
for field in instance.fields["answer_options"].field_list:
field.token_indexers = self._token_indexers
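# Each subclass below implements hf_to_instance to return a list of
# [input_text, answer_options, correct_answer_index] triples for one HF example.
# A minimal hypothetical sketch of that contract (not tied to any real dataset row):
def _example_hf_to_instance_output():
    input_text = "It is raining.\n Question: Does this imply that \"the ground is wet\"? Yes or no?"
    answer_options = ["yes", "no"]
    correct_answer_index = 0
    return [[input_text, answer_options, correct_answer_index]]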
@DatasetReader.register("casehold_reader")
class CaseHOLDReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "lex_glue", "case_hold"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# following setup similar to unifiedqa
input = f"What is the correct holding statement for the following text?\nText: {instance['context']} \n(A): {instance['endings'][0]}\n(B): {instance['endings'][1]}\n(C): {instance['endings'][2]}\n(D): {instance['endings'][3]}\n(E): {instance['endings'][4]}"
return [[input, instance["endings"], instance["label"]]]
@DatasetReader.register("unfair_tos_reader")
class UnfairTOSReader(HuggingfaceReaderRankClassification):
labels_to_terms = {
0: "Limitation of liability",
1: "Unilateral termination",
2: "Unilateral change",
3: "Content removal",
4: "Contract by using",
5: "Choice of law",
6: "Jurisdiction",
7: "Arbitration",
}
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "lex_glue", "unfair_tos"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# we convert this into 8 instances asking if label applies
samples = []
answer_options = ["yes", "no"]
for label, term in self.labels_to_terms.items():
input = f'Is there an instance of {term} in the following text?. Answer yes or no.\nText: {instance["text"]}'
output = "yes" if label in instance["labels"] else "no"
samples.append((input, answer_options, answer_options.index(output)))
return samples
@DatasetReader.register("eurlex_reader")
class EurlexReader(HuggingfaceReaderRankClassification):
def __init__(
self,
model_name: str = "google/t5-small-lm-adapt",
max_query_length: int = 512,
split_name: str = "train",
**kwargs,
) -> None:
super().__init__(model_name, max_query_length, split_name, **kwargs)
self._labels = self._dataset.features["labels"].feature.names
self._concept_dict = json.load(open("data/eurovoc_descriptors.json", "r"))
self._r = random.Random(42)
self._split = split_name
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "lex_glue", "eurlex"
def hf_to_instance(self, instance) -> Tuple[str, str]:
        # Convert into yes/no instances, one per selected label.
samples = []
answer_options = ["yes", "no"]
        # Train only: pad the gold labels with random ones until there are at least 5.
if self._split == "train":
label_sel = [l for l in instance["labels"]]
options = [l for l in range(len(self._labels)) if l not in label_sel]
while len(label_sel) < 5:
label_sel.append(self._r.choice(options))
self._r.shuffle(label_sel)
label_sel = [
self._dataset.features["labels"].feature.int2str(l) for l in label_sel
]
else:
# validation, only check the labels we know should be on the thing.
label_sel = [
self._dataset.features["labels"].feature.int2str(l)
for l in instance["labels"]
]
for label in label_sel:
concept_name = self._concept_dict[label]["en"]
input = f"Does the following text involve {concept_name}? Answer yes or no.\nText: {instance['text']}."
output = (
"yes"
if self._dataset.features["labels"].feature.str2int(label)
in instance["labels"]
else "no"
)
samples.append((input, answer_options, answer_options.index(output)))
return samples
@DatasetReader.register("ledgar_reader")
class LedgarReader(HuggingfaceReaderRankClassification):
def __init__(
self,
model_name: str = "google/t5-small-lm-adapt",
max_query_length: int = 512,
split_name: str = "train",
**kwargs,
) -> None:
super().__init__(model_name, max_query_length, split_name, **kwargs)
self.labels = self._dataset.features["label"]
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "lex_glue", "ledgar"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# rank classification seems fine here
input = f"What is the main topic of the following contract provision?\nContract: {instance['text']}"
return [[input, self.labels.names, instance["label"]]]
@DatasetReader.register("sciq_reader")
class SciQReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "sciq", None
def hf_to_instance(self, instance) -> Tuple[str, str]:
        # SciQ is part of the training mixture, so we use a 'multiple choice' style prompt.
answers = [
instance["distractor1"],
instance["distractor2"],
instance["distractor3"],
instance["correct_answer"],
]
        # Shuffle the answers so the model can't learn the answer position.
random.Random(42).shuffle(answers)
correct_answer_idx = answers.index(instance["correct_answer"])
input = f"Answer the following question given this paragraph:\n{instance['support']}\nQ: {instance['question']}\nChoices:\n-{answers[0]}\n-{answers[1]}\n-{answers[2]}\n-{answers[3]}"
return [[input, answers, correct_answer_idx]]
@DatasetReader.register("rte_reader")
class RTEReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "super_glue", "rte"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# using highest-avg prompt for rte from my other project.
input = f"{instance['premise']}\n Question: Does this imply that \"{instance['hypothesis']}\"? Yes or no?"
answers = ["yes", "no"]
# 0 = entail, 1 = not entail
correct_answer = 0 if instance["label"] == 0 else 1
return [[input, answers, correct_answer]]
# CB
@DatasetReader.register("cb_reader")
class CBReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "super_glue", "cb"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# using gpt-3 style prompt
input = f"{instance['premise']}\nQuestion: {instance['hypothesis']} True, False, or Neither?"
# 0 = entail, 1 = contradict, 2 = neutral
answers = ["true", "false", "neither"]
correct_answer = int(instance["label"])
return [[input, answers, correct_answer]]
# HellaSwag
@DatasetReader.register("hellaswag_reader")
class HellaSwagReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "hellaswag", "None"
def hf_to_instance(self, instance) -> Tuple[str, str]:
input = f"Complete the description with an appropriate ending:\nFirst, {instance['ctx_a']} Then, {instance['ctx_b']} ...\n(a) {instance['endings'][0]}\n(b) {instance['endings'][1]}\n(c) {instance['endings'][2]}\n(d) {instance['endings'][3]}"
answers = instance['endings']
correct_answer = int(instance['label'])
return [[input, answers, correct_answer]]
# StoryCloze
## NB: must be downloaded separately, since access requires agreeing to the dataset's terms.
# Following IA3 (T-Few), we use the validation split as train and the test split as validation.
@DatasetReader.register("story_cloze_reader")
class StoryClozeReader(HuggingfaceReaderRankClassification):
def __init__(
self,
split_mapping={"train": "validation", "validation": "test"},
**kwargs,
) -> None:
super().__init__(split_mapping=split_mapping, **kwargs)
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "story_cloze", "2016"
    # We usually don't need this, but Story Cloze requires it.
# TODO: replace with the location of downloaded story cloze data.
def get_dataset_dir(self) -> Optional[str]:
return ''
def hf_to_instance(self, instance) -> Tuple[str, str]:
input = f"{instance['input_sentence_1']} {instance['input_sentence_2']} {instance['input_sentence_3']} {instance['input_sentence_4']} What is a possible continuation for the story given the following options ?\n- {instance['sentence_quiz1']}\n- {instance['sentence_quiz2']}"
answers = [instance['sentence_quiz1'], instance['sentence_quiz2']]
# answers given are 1-indexed
correct_answer = instance['answer_right_ending'] - 1
return [[input, answers, correct_answer]]
# WinoGrande
@DatasetReader.register("winogrande_reader")
class WinoGrandeReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "winogrande", "winogrande_xl"
def hf_to_instance(self, instance) -> Tuple[str, str]:
        # Ask what the underscore refers to.
input = f"{instance['sentence']}\nWhat does the _ in the above sentence refer to? {instance['option1']} or {instance['option2']}?"
# 0 = false, 1 = true
answers = ['1', '2']
assert instance['answer'] in answers
correct_answer = answers.index(instance['answer'])
return [[input, answers, correct_answer]]
# WSC
@DatasetReader.register("wsc_reader")
class WSCReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "super_glue", "wsc"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# using gpt3 style
input = f"Passage: {instance['text']} \nQuestion: In the passage above, does the pronoun \"{instance['span2_text']}\" refer to {instance['span1_text']}?\nAnswer:"
# 0 = false, 1 = true
answers = ['False', 'True']
correct_answer = int(instance["label"])
return [[input, answers, correct_answer]]
# COPA
@DatasetReader.register("copa_reader")
class COPAReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "super_glue", "copa"
def hf_to_instance(self, instance) -> Tuple[str, str]:
        # using the 'plausible alternatives' prompt
input = f"{instance['premise']} As a consequence... \nHelp me pick the more plausible option:\n- {instance['choice1']}\n- {instance['choice2']}"
# 0 = choice1, 1 = choice2
answers = [instance['choice1'], instance['choice2']]
correct_answer = int(instance["label"])
return [[input, answers, correct_answer]]
# WiC
@DatasetReader.register("wic_reader")
class WiCReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "super_glue", "wic"
def hf_to_instance(self, instance) -> Tuple[str, str]:
# using gpt-3 style prompt
input = f"{instance['sentence1']}\n{instance['sentence2']}\nQuestion: Is the word '{instance['word']}' used in the same sense in the two sentences above? Yes, No?"
# 0 = false, 1 = true
answers = ["no", "yes"]
correct_answer = int(instance["label"])
return [[input, answers, correct_answer]]
## anli is handled specially as its splits have weird names.
# ANLI R1
@DatasetReader.register("anli_r1_reader")
class ANLIR1Reader(HuggingfaceReaderRankClassification):
def __init__(
self,
split_mapping={"train": "train_r1", "validation": "dev_r1"},
**kwargs,
) -> None:
super().__init__(split_mapping=split_mapping, **kwargs)
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "anli", None
def hf_to_instance(self, instance) -> Tuple[str, str]:
# using gpt-3 style prompt
input = f"{instance['premise']}\nQuestion: {instance['hypothesis']} True, False, or Neither?"
# 0 = entail, 1 = neutral, 2 = contradiction
answers = ["true", "neither", 'false']
correct_answer = instance['label']
return [[input, answers, correct_answer]]
# the other anlis are identical, just diff splits :)
# ANLI R2
@DatasetReader.register("anli_r2_reader")
class ANLIR2Reader(ANLIR1Reader):
def __init__(
self,
**kwargs,
) -> None:
super().__init__(
split_mapping={"train": "train_r2", "validation": "dev_r2"},
**kwargs,
)
# ANLI R3
@DatasetReader.register("anli_r3_reader")
class ANLIR3Reader(ANLIR1Reader):
def __init__(
self,
**kwargs,
) -> None:
super().__init__(
split_mapping={"train": "train_r3", "validation": "dev_r3"},
**kwargs,
)
# to generate files containing training data
# easy to repurpose to generate whatever you want.
if __name__ == '__main__':
import json
data_classes = [StoryClozeReader, RTEReader, CBReader, HellaSwagReader, COPAReader, WinoGrandeReader, WSCReader, WiCReader, ANLIR1Reader, ANLIR2Reader, ANLIR3Reader]
data_names = ['story_cloze', 'rte', 'cb', 'hellaswag', 'copa', 'winogrande', 'wsc', 'wic', 'anli_r1', 'anli_r2', 'anli_r3']
for cls, name in zip(data_classes, data_names):
print(name)
reader = cls(
model_name="google/t5-large-lm-adapt",
max_query_length=512,
split_name='train',
val_size=1e100,
            use_val_split=False,
            return_original_instance=True
)
lines = []
for sample in reader.read('dummy'):
lines.append(json.dumps({
"prompt_and_input": sample['prompt_and_input_pretokenized'],
"answer_options": sample['answer_options_pretokenized'],
'correct_answer_index': sample['correct_answer_index_value']
}))
with open(f'retrieve_data/{name}_val_data.jsonl', 'w') as f:
f.write('\n'.join(lines))
| data-efficient-finetuning-main | attribution/huggingface_readers.py |
| data-efficient-finetuning-main | attribution/__init__.py |
from typing import Any, Dict, List
from overrides import overrides
import logging
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from torch.nn import CrossEntropyLoss
from allennlp.nn import util
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models import Model
from allennlp.training.metrics import Average, FBetaMeasure
logger = logging.getLogger(__name__)
@Model.register("seq2seq")
class BasicSeq2Seq(Model):
def __init__(
self,
vocab: Vocabulary,
model_name: str = 'google/t5-xl-lm-adapt',
relevant_label_index: int=0,
gradient_checkpointing: bool=False,
fake_training: bool = False,
checkpoint_for_initialization: str = None,
weights_file : str = None,
**kwargs
):
super().__init__(vocab, **kwargs)
self.transformer = AutoModelForSeq2SeqLM.from_pretrained(model_name)
if checkpoint_for_initialization:
logger.info(f"Loading weights from checkpoint: {checkpoint_for_initialization}")
self.load_state_dict(torch.load(checkpoint_for_initialization))
if gradient_checkpointing:
self.transformer.gradient_checkpointing_enable()
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self._accuracy = Average()
# used for LexGLUE tasks
self._micro = FBetaMeasure(average="micro")
self._macro = FBetaMeasure(average="macro")
# We use this to compute precision and recall. If not set, precision and recall will be 0.
self._relevant_label_index = relevant_label_index
self._precision = Average()
self._recall = Average()
self._fake_training = fake_training
self.loss_fct = CrossEntropyLoss(ignore_index=-100, reduction="none") # match hf t5
if self._fake_training:
logger.info("Faking training. This will only dump the pretrained transformer into a model archive.")
if weights_file is not None:
with open(weights_file, 'rb') as f:
self.load_state_dict(torch.load(f))
def forward(
self,
prompt_and_input: TextFieldTensors,
answer_options: TextFieldTensors = None,
correct_answer_index: torch.Tensor = None,
metadata: Dict[str, Any] = None,
) -> Dict[str, torch.Tensor]:
input_ids = util.get_token_ids_from_text_field_tensors(prompt_and_input)
attention_mask = util.get_text_field_mask(prompt_and_input)
# (batch_size, num_options, answer_length)
answer_option_ids = util.get_token_ids_from_text_field_tensors(answer_options)
answer_option_ids[answer_option_ids == 0] = -100
# (batch_size, answer_length)
correct_answer_ids = answer_option_ids[
torch.arange(answer_option_ids.shape[0]),
correct_answer_index.squeeze()
]
output = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
labels=correct_answer_ids,
use_cache=False,
return_dict=True,
)
loss = output['loss']
if self._fake_training:
loss = loss * 0.0
output_dict = {'loss': loss, 'response': []}
if not self.training:
batch_size, num_options, _ = answer_option_ids.shape
for i in range(batch_size):
# setup - we pass through all options as a batch for minor speedup
instance_input_ids = input_ids[i:i+1]
instance_input_ids = instance_input_ids.repeat(num_options, 1)
instance_attention_mask = attention_mask[i:i+1]
instance_attention_mask = instance_attention_mask.repeat(num_options, 1)
correct_option_id = correct_answer_index[i].detach().cpu()[0]
option_ids = answer_option_ids[i:i+1].squeeze(0)
# pass through
option_output = self.transformer(
input_ids=instance_input_ids,
attention_mask=instance_attention_mask,
labels=option_ids,
use_cache=False,
return_dict=True,
)
logits = option_output["logits"].detach()
losses = self.loss_fct(logits.permute([0, 2, 1]), option_ids)
losses = losses.sum(dim=-1) #/ (losses != 0).sum(dim=-1)
min_loss = None
best_option_id = 0
for j, option_loss in enumerate(losses):
if min_loss is None or min_loss > option_loss:
min_loss = option_loss
best_option_id = j
self._accuracy(correct_option_id == best_option_id)
# None since we need a batch_size dim.
option_losses = -losses[None, ].detach().cpu()
self._micro(option_losses, torch.tensor([correct_option_id]))
self._macro(option_losses, torch.tensor([correct_option_id]))
if best_option_id == self._relevant_label_index:
self._precision(correct_option_id == best_option_id)
if correct_option_id == self._relevant_label_index:
self._recall(correct_option_id == best_option_id)
output_dict['response'].append(best_option_id)
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics_dict = {
"accuracy": self._accuracy.get_metric(reset),
"precision": self._precision.get_metric(reset),
"recall": self._recall.get_metric(reset),
}
        # Without this check, calling get_metrics before any evaluation raises an error.
if self._macro._true_positive_sum is not None:
metrics_dict.update(
{
"macro_f1": self._macro.get_metric(reset)["fscore"],
"micro_f1": self._micro.get_metric(reset)["fscore"],
}
)
return metrics_dict
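# Self-contained sketch of the rank-classification scoring used in forward() above:
# each answer option is scored by its summed token-level cross-entropy (padding ids
# set to -100 are ignored) and the option with the lowest loss is predicted.
# Shapes here are toy values, not tied to any real batch.
def _rank_classification_sketch() -> int:
    loss_fct = CrossEntropyLoss(ignore_index=-100, reduction="none")
    num_options, answer_length, vocab_size = 2, 3, 10
    logits = torch.randn(num_options, answer_length, vocab_size)
    option_ids = torch.randint(0, vocab_size, (num_options, answer_length))
    losses = loss_fct(logits.permute([0, 2, 1]), option_ids).sum(dim=-1)
    return int(losses.argmin())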
# a regular model, but we load the underlying model from a .th file.
# useful for training on top of other trained models.
@Model.register("load_seq2seq")
class LoadBasicSeq2Seq(BasicSeq2Seq):
def __init__(
self,
load_from_file:str = None,
**kwargs
):
super().__init__(**kwargs)
if load_from_file is not None:
with open(load_from_file, 'rb') as f:
self.load_state_dict(torch.load(f)) | data-efficient-finetuning-main | attribution/model.py |
"""
Adapted from t-few repo:
https://github.com/r-three/t-few/blob/master/src/models/lora.py
"""
import re
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel
from allennlp.models import Model
from attribution.model import BasicSeq2Seq
logger = logging.getLogger(__name__)
class IA3Linear(nn.Module):
def __init__(self, linear_layer: nn.Linear):
super().__init__()
self.in_features = linear_layer.in_features
self.out_features = linear_layer.out_features
self.weight = linear_layer.weight
self.bias = linear_layer.bias
self.ia3_vector = nn.Parameter(
torch.ones(linear_layer.out_features, 1)
)
def forward(self, input):
return F.linear(input, self.weight, self.bias) * self.ia3_vector.flatten()
def extra_repr(self):
return "in_features={}, out_features={}, bias={}".format(
self.in_features, self.out_features, self.bias is not None,
)
def modify_with_ia3(transformer: PreTrainedModel, lora_modules: str, lora_layers: str):
for m_name, module in dict(transformer.named_modules()).items():
if re.fullmatch(lora_modules, m_name):
for c_name, layer in dict(module.named_children()).items():
if re.fullmatch(lora_layers, c_name):
assert isinstance(
layer, nn.Linear
), f"IA3 can only be applied to torch.nn.Linear, but {layer} is {type(layer)}."
setattr(
module,
c_name,
IA3Linear(layer),
)
return transformer
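# Minimal usage sketch; the model name and regexes mirror IA3BasicSeq2Seq below, and
# loading the checkpoint requires network or disk access.
def _ia3_usage_sketch() -> int:
    from transformers import AutoModelForSeq2SeqLM
    model = AutoModelForSeq2SeqLM.from_pretrained("google/t5-small-lm-adapt")
    model = modify_with_ia3(
        model,
        ".*SelfAttention|.*EncDecAttention|.*DenseReluDense",
        "k|v|wi_1.*",
    )
    # Count only the injected (out_features, 1) rescaling vectors.
    return sum(p.numel() for n, p in model.named_parameters() if "ia3" in n)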
@Model.register("ia3_seq2seq")
class IA3BasicSeq2Seq(BasicSeq2Seq):
def __init__(
self,
**kwargs
):
super().__init__(**kwargs)
# regex from https://github.com/r-three/t-few/blob/master/configs/ia3.json
self.transformer = modify_with_ia3(
self.transformer,
".*SelfAttention|.*EncDecAttention|.*DenseReluDense",
"k|v|wi_1.*"
)
# only train ia3 parameters
for name, param in self.transformer.named_parameters():
if "ia3" in name:
param.requires_grad = True
else:
param.requires_grad = False
# An IA3 model, but we load the underlying model from a .th file.
@Model.register("ia3_seq2seq_load")
class IA3LoadBasicSeq2Seq(BasicSeq2Seq):
def __init__(
self,
load_from_file:str = None,
**kwargs
):
super().__init__(**kwargs)
if load_from_file is not None:
with open(load_from_file, 'rb') as f:
self.load_state_dict(torch.load(f))
# regex from https://github.com/r-three/t-few/blob/master/configs/ia3.json
self.transformer = modify_with_ia3(
self.transformer,
".*SelfAttention|.*EncDecAttention|.*DenseReluDense",
"k|v|wi_1.*"
)
# only train ia3 parameters
for name, param in self.transformer.named_parameters():
if "ia3" in name:
param.requires_grad = True
else:
param.requires_grad = False | data-efficient-finetuning-main | attribution/ia3.py |
from typing import Dict, List
from overrides import overrides
import logging
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from allennlp.nn import util
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models import Model
from allennlp.training.metrics import Average
logger = logging.getLogger(__name__)
# Generation-based model used only for Natural Instructions evaluation.
@Model.register("ni_model")
class DropModel(Model):
def __init__(
self,
vocab: Vocabulary,
model_name: str = "google/t5-small-lm-adapt",
max_length: int = 128,
fake_training: bool = False,
**kwargs
):
super().__init__(vocab, **kwargs)
self.transformer = AutoModelForSeq2SeqLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.max_len = max_length
self._accuracy = Average()
self._fake_training = fake_training
self._use_drop_metrics = True
if self._fake_training:
logger.info(
"Faking training. This will only dump the pretrained transformer into a model archive."
)
def forward(
self,
prompt_and_input: TextFieldTensors,
instance_id: List[str] = None,
target: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
input_ids = util.get_token_ids_from_text_field_tensors(prompt_and_input)
attention_mask = util.get_text_field_mask(prompt_and_input)
target_ids = util.get_token_ids_from_text_field_tensors(target)
answer_option_ids = target_ids
answer_option_ids[answer_option_ids == 0] = -100
output = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
labels=answer_option_ids,
use_cache=False,
return_dict=True,
)
loss = output["loss"]
if self._fake_training:
loss = loss * 0.0
output_dict = {"loss": loss}
if not self.training:
assert len(instance_id) == 1, "only bsz 1 is supported rn."
outputs = self.transformer.generate(
input_ids=input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=self.max_len,
)
if not self._use_drop_metrics:
self._accuracy(target_ids == outputs)
else:
output_dict["prediction"] = [
self.tokenizer.decode(o, skip_special_tokens=True) for o in outputs
][0]
output_dict["id"] = instance_id[0]
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
| data-efficient-finetuning-main | attribution/ni_model.py |
'''
Mix and Match Adapter using recommended settings from https://arxiv.org/abs/2110.04366
'''
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.t5.modeling_t5 import (
T5LayerCrossAttention,
T5LayerSelfAttention,
T5Block
)
from transformers import PreTrainedModel
from allennlp.models import Model
from attribution.model import BasicSeq2Seq
logger = logging.getLogger(__name__)
class Adapter(nn.Module):
def __init__(self, adapter_size, hidden_size):
super().__init__()
self.adapter_input_size = hidden_size
self.adapter_latent_size = adapter_size
self.non_linearity = nn.ReLU()
# down projection
self.down_proj = nn.Linear(self.adapter_input_size, self.adapter_latent_size, bias=False)
# up projection
self.up_proj = nn.Linear(self.adapter_latent_size, self.adapter_input_size, bias=False)
# layer norm
self.ln = nn.LayerNorm(self.adapter_input_size)
self.init_weights()
def init_weights(self):
"""Initialize the weights -> so that initially we the whole Adapter layer is a near-identity function"""
self.down_proj.weight.data.normal_(mean=0.0, std=0.02)
self.up_proj.weight.data.normal_(mean=0.0, std=0.02)
def forward(self, x):
return self.ln(self.up_proj(self.non_linearity(self.down_proj(x))))
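# Shape sketch for the bottleneck above (toy sizes): the adapter projects
# (batch, seq_len, hidden) down to adapter_size and back up, preserving the input shape.
def _adapter_shape_sketch():
    adapter = Adapter(adapter_size=16, hidden_size=64)
    x = torch.randn(2, 5, 64)  # (batch, seq_len, hidden)
    return adapter(x).shape    # torch.Size([2, 5, 64])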
# adapter modifies the feedforward layer
class T5LayerFFWithAdapter(nn.Module):
def __init__(self, T5LayerFF, adapter_size, hidden_size):
super().__init__()
self.DenseReluDense = T5LayerFF.DenseReluDense
self.adapter = Adapter(adapter_size, hidden_size)
self.layer_norm = T5LayerFF.layer_norm
self.dropout = T5LayerFF.dropout
def forward(self, hidden_states):
ln_hidden_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(ln_hidden_states)
hidden_states = hidden_states + self.dropout(forwarded_states) + self.adapter(ln_hidden_states)
return hidden_states
# prefixes modify the attention layers.
class T5AttentionPrefixTuning(nn.Module):
def __init__(self, attention_layer, num_prefix_tokens, parameterization, shared=None):
super().__init__()
self.is_decoder = attention_layer.is_decoder
self.has_relative_attention_bias = attention_layer.has_relative_attention_bias
self.relative_attention_num_buckets = attention_layer.relative_attention_num_buckets
self.d_model = attention_layer.d_model
self.key_value_proj_dim = attention_layer.key_value_proj_dim
self.n_heads = attention_layer.n_heads
self.dropout = attention_layer.dropout
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.prune_heads = attention_layer.prune_heads
self._relative_position_bucket = attention_layer._relative_position_bucket
self.compute_bias = attention_layer.compute_bias
self.q = attention_layer.q
self.k = attention_layer.k
self.v = attention_layer.v
self.o = attention_layer.o
if self.has_relative_attention_bias:
self.relative_attention_bias = attention_layer.relative_attention_bias
self.pruned_heads = attention_layer.pruned_heads
self.gradient_checkpointing = attention_layer.gradient_checkpointing
self.parameterization = parameterization
self.num_prefix_tokens = num_prefix_tokens
self.mode = "apply"
self.setup_prefix(shared)
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Modified from T5Attention forward
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
        past_key_value, query_length, and use_cache are disabled
"""
assert past_key_value is None
assert query_length is None
assert not use_cache
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
key_length = seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, prefix_states):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
output_states = proj_layer(hidden_states)
else:
# cross-attn
output_states = proj_layer(key_value_states)
if prefix_states is not None:
output_states = torch.cat([prefix_states, output_states], dim=1)
return output_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
if self.mode == "apply":
prefix = self.get_prefix(batch_size)
else:
prefix = (None, None)
key_states = project(hidden_states, self.k, key_value_states, prefix[0])
value_states = project(hidden_states, self.v, key_value_states, prefix[1])
if self.mode == "store":
self.stored_key_value_states = (key_states, value_states)
key_states, value_states = shape(key_states), shape(value_states)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
)
# rather than use the relative attention bias, we instead append 0 as the bias to
# prevent the model struggling to make use of the prefixes.
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
if self.mode == "apply":
position_bias = self.compute_bias(seq_length, key_length)[
:, :, -seq_length:, :
]
else:
position_bias = self.compute_bias(seq_length, key_length)
if prefix[0] is not None:
position_bias = torch.cat([
torch.zeros((1, self.n_heads, seq_length, prefix[0].size(1)), device=scores.device, dtype=scores.dtype), position_bias
], dim=-1)
if mask is not None:
if self.mode == "apply":
mask = F.pad(mask, value=-0.0, pad=(self.num_prefix_tokens, 0))
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
outputs = (attn_output,) + (None,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
def setup_prefix(self, shared):
self.prefix_emb = shared["prefix_emb"]
self.prefix_mlp = nn.Sequential(
shared["prefix_linear"],
nn.Tanh(),
nn.Linear(shared["prefix_linear"].out_features, self.inner_dim * 2),
)
def get_prefix(self, bs):
prefix = self.prefix_mlp(self.prefix_emb.weight)
batch_prefix = prefix.unsqueeze(dim=0).expand(bs, -1, -1)
key_prefix, value_prefix = batch_prefix.chunk(dim=-1, chunks=2)
return key_prefix, value_prefix
def set_mode(self, mode):
self.mode = mode
if self.mode == "store":
self.stored_key_value_states = None
def modify_with_mam(transformer: PreTrainedModel, adapter_size: int, num_prefix_tokens: int, prefix_reparam_hidden_size: int):
# prefix setup
hidden_size = prefix_reparam_hidden_size
shared = {
"prefix_emb": nn.Embedding(num_prefix_tokens, transformer.config.d_model),
"prefix_linear": nn.Linear(transformer.config.d_model, hidden_size),
}
# attention modules become prefix, ff become adapter
for _, module in dict(transformer.named_modules()).items():
if isinstance(module, T5LayerCrossAttention):
module.EncDecAttention = T5AttentionPrefixTuning(module.EncDecAttention, num_prefix_tokens, hidden_size, shared)
elif isinstance(module, T5LayerSelfAttention):
module.SelfAttention = T5AttentionPrefixTuning(module.SelfAttention, num_prefix_tokens, hidden_size, shared)
if isinstance(module, T5Block):
module.layer[-1] = T5LayerFFWithAdapter(module.layer[-1], adapter_size, transformer.config.d_model)
return transformer
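# Minimal usage sketch; sizes mirror the MaMSeq2Seq defaults below, and loading the
# checkpoint requires network or disk access.
def _mam_usage_sketch():
    from transformers import AutoModelForSeq2SeqLM
    model = AutoModelForSeq2SeqLM.from_pretrained("google/t5-small-lm-adapt")
    model = modify_with_mam(
        model,
        adapter_size=512,
        num_prefix_tokens=30,
        prefix_reparam_hidden_size=512,
    )
    # These are the parameter groups MaMSeq2Seq leaves trainable (plus layer norms).
    return [n for n, _ in model.named_parameters() if "adapter" in n or "prefix_" in n]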
# MaM adapter model
# defaults from "toward unified view of parameter efficient learning"
# https://arxiv.org/abs/2110.04366
@Model.register("mam_seq2seq")
class MaMSeq2Seq(BasicSeq2Seq):
def __init__(
self,
adapter_size: int = 512,
num_prefix_tokens: int = 30,
prefix_reparam_hidden_size: int = 512,
**kwargs
):
super().__init__(**kwargs)
self.transformer = modify_with_mam(
self.transformer,
adapter_size,
num_prefix_tokens,
prefix_reparam_hidden_size
)
        # Only train the adapter, prefix, and layer-norm parameters.
for name, param in self.transformer.named_parameters():
# 'prefix_' to get the prefix reparameterisation params
if "adapter" in name or 'prefix_' in name or 'layer_norm' in name:
param.requires_grad = True
else:
param.requires_grad = False
| data-efficient-finetuning-main | attribution/mam.py |
import json
import logging
import random
from collections import defaultdict
from typing import List, Iterable, Optional, Tuple, Dict
from overrides import overrides
import datasets
from allennlp.data.fields import (
MetadataField,
TextField,
IndexField,
ListField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from attribution.huggingface_readers import *
logger = logging.getLogger(__name__)
READER_MAPPING = {
"rte": RTEReader,
"anli_r1": ANLIR1Reader,
"anli_r2": ANLIR2Reader,
"anli_r3": ANLIR3Reader,
"cb": CBReader,
"hellaswag": HellaSwagReader,
"story_cloze": StoryClozeReader,
"winogrande": WinoGrandeReader,
"wsc": WSCReader,
"copa": COPAReader,
"wic": WiCReader
}
@DatasetReader.register("icl_reader")
class ICLReader(DatasetReader):
def __init__(
self,
reader_class_name: str = 'rte',
model_name='google/t5-base-lm-adapt',
retrieve_file='dummy',
split_name='train',
max_query_length=1024,
use_val_split=False,
val_size=1000,
**kwargs,
):
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
reader_class = READER_MAPPING[reader_class_name]
if 'split_name' in kwargs:
kwargs.pop('split_name')
self.instance_reader = reader_class(model_name=model_name, max_query_length=max_query_length, split_name='validation', use_val_split=False, return_original_instance=True, **kwargs)
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._train_reader = reader_class(model_name=model_name, split_name='train', use_val_split=False, return_original_instance=True, **kwargs)
self.retrieve_file = retrieve_file
self.retrieve_iterator = self._train_reader.read(self.retrieve_file)
self.random = random.Random(42)
self.max_query_length = max_query_length
self._token_indexers = {"tokens": PretrainedTransformerIndexer(model_name)}
self._stats = defaultdict(int)
@overrides
def _read(self, file_path) -> Iterable[Instance]:
for instance in self.instance_reader.read(file_path):
instance = instance.fields
text_input = instance['pretokenized_input'] + '\nAnswer:'
self._stats['counter'] += 1
while True:
try:
sample_instance = next(self.retrieve_iterator)
except StopIteration:
self.retrieve_iterator = self._train_reader.read(self.retrieve_file)
sample_instance = next(self.retrieve_iterator)
sample_instance = sample_instance.fields
icl_sample = sample_instance['pretokenized_input'] + '\n Answer:' + sample_instance['answer_options_pretokenized'][sample_instance['correct_answer_index_value']]
if len(self._tokenizer.tokenize(icl_sample + '\n' + text_input)) < self.max_query_length:
text_input = icl_sample + '\n' + text_input
self._stats['num_examples'] += 1
else:
break
self._stats['avg_examples_per_instance'] = self._stats['num_examples'] / self._stats['counter']
# write our augmented input back
fields = {}
fields['prompt_and_input'] = TextField(self._tokenizer.tokenize(text_input))
fields['answer_options'] = instance['answer_options']
fields['correct_answer_index'] = instance['correct_answer_index']
yield Instance(fields)
logger.info("Dataset stats:")
for key, value in self._stats.items():
logger.info("%s: %d", key, value)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
for field in instance.fields["answer_options"].field_list:
field.token_indexers = self._token_indexers
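# Sketch of the in-context prompt assembled in _read above (all texts hypothetical):
#   "<demo input>\n Answer:<gold option>\n<demo input>\n Answer:<gold option>\n<eval input>\nAnswer:"
# Demonstrations drawn from `retrieve_file` are prepended one at a time until adding
# another would push the tokenized prompt past `max_query_length`.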
| data-efficient-finetuning-main | attribution/icl_readers.py |
from typing import Dict, List
from overrides import overrides
import logging
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from allennlp.nn import util
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models import Model
from allennlp.training.metrics import Average
logger = logging.getLogger(__name__)
# Generation-based model used only for DROP evaluation.
@Model.register("drop_model")
class DropModel(Model):
def __init__(
self,
vocab: Vocabulary,
model_name: str = "google/t5-small-lm-adapt",
max_length: int = 128,
fake_training: bool = False,
**kwargs
):
super().__init__(vocab, **kwargs)
self.transformer = AutoModelForSeq2SeqLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.max_len = max_length
self._accuracy = Average()
self._fake_training = fake_training
self._use_drop_metrics = True
if self._fake_training:
logger.info(
"Faking training. This will only dump the pretrained transformer into a model archive."
)
def forward(
self,
prompt_and_input: TextFieldTensors,
query_id: List[str] = None,
target: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
input_ids = util.get_token_ids_from_text_field_tensors(prompt_and_input)
attention_mask = util.get_text_field_mask(prompt_and_input)
target_ids = util.get_token_ids_from_text_field_tensors(target)
answer_option_ids = target_ids
answer_option_ids[answer_option_ids == 0] = -100
output = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
labels=answer_option_ids,
use_cache=False,
return_dict=True,
)
loss = output["loss"]
if self._fake_training:
loss = loss * 0.0
output_dict = {"loss": loss}
if not self.training:
outputs = self.transformer.generate(
input_ids=input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=self.max_len,
)
if not self._use_drop_metrics:
self._accuracy(target_ids == outputs)
else:
output_dict["answer"] = [
self.tokenizer.decode(o, skip_special_tokens=True) for o in outputs
]
output_dict["query_id"] = query_id
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
| data-efficient-finetuning-main | attribution/drop_model.py |
import logging
from collections import defaultdict
from typing import Iterable, Optional, Tuple
from overrides import overrides
import datasets
from allennlp.data.fields import (
MetadataField,
TextField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from attribution.huggingface_readers import HuggingfaceReaderRankClassification
logger = logging.getLogger(__name__)
# full reader for drop to pass through query id
@DatasetReader.register("drop_reader")
class DROPReader(DatasetReader):
def __init__(
self,
model_name: str = "google/t5-small-lm-adapt",
max_query_length: int = 512,
split_name: str = "train",
val_size: int = 1000,
return_original_instance: bool = False,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
dataset_name, subset_name = self.get_dataset_name()
self._dataset_name = dataset_name
self._subset_name = subset_name
self.return_original_instance = return_original_instance
original_val_set = datasets.load_dataset(
dataset_name, subset_name, split="validation"
)
        small_val_size = val_size  # validation sets smaller than this are treated as small
        val_split_size = val_size  # size of the validation split carved out when we split
seed = 42
if split_name == "train":
if len(original_val_set) >= small_val_size:
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split="train"
)
else:
# for small val sets, split out val from train and use old val as test
# this is because some casehold splits are specially designed, so I want
# to keep these as-is (rather than just split the val set in half)
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split="train"
).train_test_split(test_size=val_split_size, seed=seed)["train"]
if split_name == "validation":
# for large val sets, just split out from val
if len(original_val_set) >= small_val_size:
self._dataset = original_val_set.train_test_split(
train_size=val_split_size, seed=seed
)["train"]
else:
# for small val sets, split out val from train and use old val as test
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split="train"
).train_test_split(test_size=val_split_size, seed=seed)["test"]
elif split_name == "test":
# for large val sets, test is the small split from val
if len(original_val_set) >= small_val_size:
self._dataset = original_val_set.train_test_split(
train_size=val_split_size, seed=seed
)["test"]
else:
# for small val sets, split our new val from train (val becomes test)
self._dataset = datasets.load_dataset(
dataset_name, subset_name, split="validation"
)
self._transformer_model_name = model_name
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._token_indexers = {"tokens": PretrainedTransformerIndexer(model_name)}
self._max_query_length = max_query_length
self._stats = defaultdict(int)
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "drop", None
def hf_to_instance(self, instance) -> Tuple[str, str]:
# using GPT-3 DROP prompt.
input = (
f"Passage: {instance['passage']}\nQuestion: {instance['question']}\nAnswer:"
)
answer = instance["answers_spans"]["spans"][0]
return [[input, instance["query_id"], answer]]
@overrides
def _read(self, file_path) -> Iterable[Instance]:
for sample in self._dataset:
converted_samples = self.hf_to_instance(sample)
for inputs, qid, targets in converted_samples:
yield self.text_to_instance(inputs, qid, targets)
def text_to_instance(
self,
input_text: str,
query_id: str,
target: str,
) -> Instance:
fields = {}
tokenized_input = self._tokenizer.tokenize(input_text)
if len(tokenized_input) > self._max_query_length:
self._stats["Truncated inputs"] += 1
tokenized_input = tokenized_input[: self._max_query_length]
input_field = TextField(tokenized_input)
fields["prompt_and_input"] = input_field
if self.return_original_instance:
fields["pretokenized_input"] = input_text
tokenized_target = self._tokenizer.tokenize(target)
if len(tokenized_target) > self._max_query_length:
self._stats["Truncated targets"] += 1
tokenized_target = tokenized_target[: self._max_query_length]
target_field = TextField(tokenized_target)
fields["target"] = target_field
query_id_field = MetadataField(query_id)
fields["query_id"] = query_id_field
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
instance.fields["target"].token_indexers = self._token_indexers
# drop reader that aligns with the other formats for multitask
# NOTE: don't use this for eval!
@DatasetReader.register("multi_task_drop_reader")
class DropMReader(HuggingfaceReaderRankClassification):
def get_dataset_name(self) -> Tuple[str, Optional[str]]:
return "drop", None
def hf_to_instance(self, instance) -> Tuple[str, str]:
input = (
f"Passage: {instance['passage']}\nQuestion: {instance['question']}\nAnswer:"
)
answer = [instance["answers_spans"]["spans"][0]]
return [[input, answer, 0]]
| data-efficient-finetuning-main | attribution/drop_reader.py |
import json
import pickle
import logging
from collections import defaultdict
from typing import Any, Dict, List, Iterable
import random
from overrides import overrides
import torch
from allennlp.common.util import JsonDict
from allennlp.data.fields import (
MetadataField,
TextField,
IndexField,
ListField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
random.seed(23019)
@DatasetReader.register("qasper_evidence_prompt")
class QasperEvidencePromptReader(DatasetReader):
def __init__(
self,
model_name: str = "bigscience/T0_3B",
max_query_length: int = 512,
answer_options: List[str] = ["Yes", "No"],
negative_sample_ratio: float = 1.0,
return_original_query: bool = False,
max_train_samples: int = -1,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
self._return_original_query = return_original_query
self._transformer_model_name = model_name
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(model_name)
}
self._max_query_length = max_query_length
self._answer_options = answer_options
self._negative_sample_ratio = negative_sample_ratio
self._stats = defaultdict(int)
self._max_train_samples = max_train_samples
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
logger.info("Reading the dataset")
counter = 0
with open(file_path, "r") as datafile:
data = json.load(datafile)
for article_id, article in self.shard_iterable(data.items()):
if not article["full_text"]:
continue
article["article_id"] = article_id
for sample in self._article_to_instances(article):
if self._max_train_samples > 0 and counter >= self._max_train_samples:
break
counter += 1
yield sample
logger.info("Dataset stats:")
for key, value in self._stats.items():
logger.info("%s: %d", key, value)
def _article_to_instances(self, article: Dict[str, Any]) -> Iterable[Instance]:
paragraphs = self._get_paragraphs_from_article(article)
self._stats["number of documents"] += 1
for question_answer in article["qas"]:
question = question_answer['question']
self._stats["number of questions"] += 1
self._stats["number of answers"] += len(question_answer["answers"])
if len(question_answer["answers"]) > 1:
self._stats["questions with multiple answers"] += 1
all_evidence = set()
for answer_annotation in question_answer["answers"]:
evidence = self._extract_evidence(
answer_annotation["answer"]
)
for span in evidence:
all_evidence.add(span)
evidence_mask = self._get_evidence_mask(list(all_evidence), paragraphs)
for paragraph_index, (paragraph, is_evidence) in enumerate(zip(paragraphs, evidence_mask)):
input_ = f"Question: {question} Paragraph: {paragraph} Is the answer to the question in the paragraph? Answer Yes or No."
target = "Yes" if is_evidence else "No"
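                # Keep all positive (evidence) paragraphs, but keep negative paragraphs only with
                # probability `negative_sample_ratio`, so the targets are not overwhelmingly "No".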
if target == "Yes":
self._stats["number of positive targets"] += 1
elif random.random() <= self._negative_sample_ratio:
self._stats["number of negative targets"] += 1
else:
continue
metadata = {
"question_id": question_answer["question_id"],
"paper_id": article.get("article_id"),
"question": question,
"paragraph": paragraph,
"paragraph_index": paragraph_index,
"query": input_,
"target": target,
"answer_options": self._answer_options
}
yield self.text_to_instance(
input_,
target,
self._answer_options,
metadata
)
self._stats["number of instances"] += 1
def _get_paragraphs_from_article(self, article: JsonDict) -> List[str]:
full_text = article["full_text"]
paragraphs = []
for section_info in full_text:
# TODO (pradeep): It is possible there are other discrepancies between plain text, LaTeX and HTML.
# Do a thorough investigation and add tests.
if section_info["section_name"] is not None:
paragraphs.append(section_info["section_name"])
for paragraph in section_info["paragraphs"]:
paragraph_text = paragraph.replace("\n", " ").strip()
if paragraph_text:
paragraphs.append(paragraph_text)
return paragraphs
def _extract_evidence(
        self, answer: JsonDict
) -> List[str]:
evidence_spans = [x.replace("\n", " ").strip() for x in answer["evidence"]]
evidence_spans = [x for x in evidence_spans if x != ""]
if not evidence_spans:
self._stats["answers with no evidence"] += 1
# TODO (pradeep): Deal with figures and tables.
if any(["FLOAT SELECTED" in span for span in evidence_spans]):
            # Note: evidence that is a table or a figure ("FLOAT SELECTED") is only counted here, not filtered out.
self._stats["answers with table or figure as evidence"] += 1
if len(evidence_spans) > 1:
self._stats["multiple_evidence_spans_count"] += 1
return evidence_spans
@staticmethod
def _get_evidence_mask(evidence: List[str], paragraphs: List[str]) -> List[bool]:
evidence_mask = []
for paragraph in paragraphs:
for evidence_str in evidence:
if evidence_str in paragraph:
evidence_mask.append(True)
break
else:
evidence_mask.append(False)
return evidence_mask
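    # The for/else above appends False only when no evidence span matched the paragraph; e.g.
    # (hypothetical values) evidence ["C D"] against paragraphs ["A B", "C D E"] yields [False, True],
    # since a paragraph counts as evidence when any gold span is a substring of it.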
def text_to_instance(
self, # type: ignore # pylint: disable=arguments-differ
input_text: str,
target: str,
options: List[str],
metadata: Dict[str, Any] = None,
) -> Instance:
fields = {}
tokenized_input = self._tokenizer.tokenize(input_text)
if len(tokenized_input) > self._max_query_length:
self._stats["Truncated inputs"] += 1
tokenized_input = tokenized_input[:self._max_query_length]
input_field = TextField(tokenized_input)
fields["prompt_and_input"] = input_field
if self._return_original_query:
fields['pretokenized_input'] = input_text
answer_option_fields = [
TextField(self._tokenizer.tokenize(option)) for option in options
]
options_list_field = ListField(answer_option_fields)
fields["answer_options"] = options_list_field
answer_index = None
for i, option in enumerate(options):
if option == target:
answer_index = i
break
fields["correct_answer_index"] = IndexField(answer_index, options_list_field)
if metadata is not None:
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
for field in instance.fields["answer_options"].field_list:
field.token_indexers = self._token_indexers
| data-efficient-finetuning-main | attribution/qasper_reader.py |
import json
import pickle
import logging
from collections import defaultdict
from typing import Any, Dict, List, Iterable
from overrides import overrides
import torch
from allennlp.data.fields import (
MetadataField,
TextField,
IndexField,
ListField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("p3_cluster")
class P3ClusterReader(DatasetReader):
def __init__(
self,
p3_data_path: str,
split_name: str,
model_name: str = "google/t5-small-lm-adapt",
max_query_length: int = 512,
**kwargs,
) -> None:
super().__init__(
manual_distributed_sharding=True,
manual_multiprocess_sharding=True,
**kwargs,
)
self._p3_data = json.load(open(p3_data_path))
self._split_name = split_name
self._transformer_model_name = model_name
self._tokenizer = PretrainedTransformerTokenizer(model_name)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(model_name)
}
self._max_query_length = max_query_length
self._stats = None
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
self._stats = defaultdict(int)
logger.info("Reading the cluster file")
cluster_data = pickle.load(open(file_path, "rb"))
for dataset_name, cluster_info in cluster_data.items():
for instance_id in cluster_info[self._split_name]:
if dataset_name not in self._p3_data:
self._stats["Instances skipped due to missing dataset partitions"] += 1
continue
if str(instance_id) not in self._p3_data[dataset_name][self._split_name]:
self._stats["Instances skipped due to missing instance ids"] += 1
continue
instance_info = self._p3_data[dataset_name][self._split_name][str(instance_id)]
if len(instance_info["options"]) <= 1:
self._stats["Instances without multiple options"] += 1
elif len(instance_info["options"]) > 10:
self._stats["Instances with too many options"] += 1
elif instance_info["target"] not in instance_info["options"]:
self._stats["Target not in options"] += 1
elif not instance_info["is_correct"]:
self._stats["Instance has incorrect answer"] += 1
else:
yield self.text_to_instance(
instance_info["input"],
instance_info["target"],
instance_info["options"],
{"dataset_name": instance_info["dataset"], "index": instance_info["index"]}
)
print("Dataset stats:")
for key, value in self._stats.items():
print(f"\t{key}: {value}")
def text_to_instance(
self, # type: ignore # pylint: disable=arguments-differ
input_text: str,
target: str,
options: List[str],
metadata: Dict[str, Any] = None,
) -> Instance:
fields = {}
tokenized_input = self._tokenizer.tokenize(input_text)
if len(tokenized_input) > self._max_query_length:
self._stats["Truncated inputs"] += 1
tokenized_input = tokenized_input[:self._max_query_length]
input_field = TextField(tokenized_input)
fields["prompt_and_input"] = input_field
answer_option_fields = [
TextField(self._tokenizer.tokenize(option)) for option in options
]
options_list_field = ListField(answer_option_fields)
fields["answer_options"] = options_list_field
answer_index = None
for i, option in enumerate(options):
if option == target:
answer_index = i
break
fields["correct_answer_index"] = IndexField(answer_index, options_list_field)
if metadata is not None:
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["prompt_and_input"].token_indexers = self._token_indexers
for field in instance.fields["answer_options"].field_list:
field.token_indexers = self._token_indexers
| data-efficient-finetuning-main | attribution/p3_cluster_reader.py |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import json
from tqdm import tqdm
import random
random.seed(23103)
text_data = []
clusters_data = json.load(open("./t0_cluster_data.json"))
for cluster_id, cluster_data in clusters_data.items():
text = set([x["input"] for x in cluster_data])
text_data.extend([(x, cluster_id) for x in text])
vectorizer = TfidfVectorizer(decode_error='replace', strip_accents='unicode', analyzer='word', stop_words='english')
index = vectorizer.fit_transform([x[0] for x in text_data])
closest_in_cluster = []
indices_set = list(range(len(text_data)))
random.shuffle(indices_set)
eval_indices_set = indices_set[:10000]
for i in tqdm(eval_indices_set):
_, input_cluster = text_data[i]
query_vector = index[i]
similarities = cosine_similarity(index, query_vector).flatten()
# -2 because -1 will be the same as the query.
most_similar_index = np.argsort(similarities, axis=0)[-2]
closest_data_point, its_cluster_id = text_data[most_similar_index]
closest_in_cluster.append(input_cluster == its_cluster_id)
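# Report how often a sampled instance's nearest TF-IDF neighbour (excluding itself) comes from the
# same cluster, i.e. how much of the clustering is explained by surface lexical similarity.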
print(f"Closest in cluster: {sum(closest_in_cluster) / len(closest_in_cluster) * 100}%")
| data-efficient-finetuning-main | scripts/analyze_surface_similarity.py |
import json
import sys
from transformers import T5TokenizerFast
import re
from datasets import load_dataset
from sklearn.metrics import f1_score
from sklearn.preprocessing import MultiLabelBinarizer
file = sys.argv[1]
tokenizer = T5TokenizerFast.from_pretrained('t5-base')
mlb = MultiLabelBinarizer()
concept_dict = json.load(open("data/eurovoc_descriptors.json", "r"))
eurlex_val = load_dataset('lex_glue', 'eurlex', split='validation')
labels = eurlex_val.features["labels"].feature.names
label_to_code = {concept_dict[l]['en']: l for l in labels}
total_labs = len(labels)
question_pattern = re.compile(r"Does the following text involve ([\w\s\-\,]+)\? Answer yes or no. Text: (.*).")
with open(file, 'r') as f:
lines = f.readlines()
answers = []
last_text = 'im not in the first line'
cur_ans = []
cur_sample = -1
for i, line in enumerate(lines):
result = json.loads(line)
chosen_answer = tokenizer.decode(result['chosen_answer'][0], skip_special_tokens=True).strip().lower()
chosen_answer_question = tokenizer.decode(result['chosen_answer_question'][0], skip_special_tokens=True)
# extract the label we were asking about
match = re.match(question_pattern, chosen_answer_question)
term = match.group(1)
text = match.group(2)
if 'yes' in chosen_answer:
cur_ans.append(eurlex_val.features["labels"].feature.str2int(label_to_code[term]))
# sometimes truncation can differ
if last_text not in text and text not in last_text:
answers.append(cur_ans)
cur_ans = []
cur_sample += 1
#if text not in eurlex_val[cur_sample]['text'].replace('\n', ' '):
# import pdb; pdb.set_trace()
last_text = text
preds = answers
true = eurlex_val['labels']
unlabelled_label = max([x for t in true for x in t])
# as in the unfair-tos eval, we add an 'unlabelled' label to samples with no gold or predicted labels
for p in preds:
if len(p) == 0:
p.append(unlabelled_label)
for p in true:
if len(p) == 0:
p.append(unlabelled_label)
if len(preds) < len(true):
    print('Warning: fewer prediction groups than gold samples; padding predictions with empty label sets.')
while len(preds) < len(true):
preds.append([])
mlb.fit(true + preds)
true = mlb.transform(true)
preds = mlb.transform(preds)
print('micro', f1_score(true, preds, average='micro'))
print('macro', f1_score(true, preds, average='macro'))
| data-efficient-finetuning-main | scripts/evaluate_eurlex_preds.py |
import tqdm
import sys
import json
import argparse
datasets = [
"rte",
"anli_r1",
"anli_r2",
"anli_r3",
"wic",
"copa",
"wsc",
"winogrande",
"hellaswag",
"cb",
"story_cloze",
"casehold",
"drop",
"qasper"
]
parser = argparse.ArgumentParser()
parser.add_argument("--infile_path", type=str, required=True, help="directory containing index files")
parser.add_argument("--outfile_path", type=str, required=True, help="directory to output files")
parser.add_argument("--suffix", type=str, required=True, help="what to name the files with form \{dataset\}_\{suffix\}")
parser.add_argument("--p3_data", type=str, required=True, help="file containing the p3 data the index files reference")
args = parser.parse_args()
infile_path = args.infile_path
outfile_path = args.outfile_path
suffix = args.suffix
p3_data = args.p3_data
infiles = [f'{infile_path}/{ds}_idxes.txt' for ds in datasets]
diff_indices = [set([int(i) for i in open(file, 'r')]) for file in infiles]
print('indices read')
outfiles = [f'{outfile_path}/{ds}_{suffix}.jsonl' for ds in datasets]
files = [open(o, "w") for o in outfiles]
for i, line in tqdm.tqdm(enumerate(open(p3_data, 'r'))):
for j, indices in enumerate(diff_indices):
if i in indices:
instance = json.loads(line)
instance["index_id"] = i
print(json.dumps(instance), file=files[j])
| data-efficient-finetuning-main | scripts/indices_to_file.py |
import torch
import json
import os
import pickle
from tqdm import tqdm
from collections import defaultdict
import numpy
from sklearn.decomposition import PCA
from sklearn import mixture
from sklearn.metrics.pairwise import cosine_distances
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
model_name = "google/t5-small-lm-adapt"
#model_name = "t5-small"
encoder_block_name = "encoder.block.7"
#encoder_block_name = "encoder.block.5"
max_num_weights = 2048
num_clusters = 15
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model.cuda()
parameters_of_interest = []
for name, parameter in model.named_parameters():
if name.startswith(encoder_block_name) or name.startswith("encoder.final_layer"):
parameters_of_interest.append((name, parameter))
data = json.load(open("p3_data_simplified.json"))
#data = json.load(open("p3_data.json"))
cluster_dir = f"/home/pradeepd/data/p3_dev_{model_name.replace('/', '_')}_final_layer_gradient_clusters/"
if not os.path.exists(cluster_dir):
os.makedirs(cluster_dir)
print(f"Computing gradients on {model_name}, and will write clusters to {cluster_dir}")
print(f"Computing gradients only on the block {encoder_block_name} and the final layer norm weight")
print(f"Will keep track of only the {max_num_weights} max gradients")
instances = []
for dataset_info in data.values():
for value in dataset_info["validation"].values():
value["split"] = "validation"
instances.append(value)
for value in dataset_info["train"].values():
value["split"] = "train"
instances.append(value)
max_indices_file = os.path.join(cluster_dir, "max_indices.pkl")
if os.path.exists(max_indices_file):
print(f"Found max indices at {max_indices_file}")
max_indices = pickle.load(open(max_indices_file, "rb"))
else:
indices_counts = None
print("Computing gradients, first pass")
for instance in tqdm(instances):
inputs = tokenizer.encode(instance["input"], return_tensors="pt").cuda()
targets = tokenizer.encode(instance["target"], return_tensors="pt").cuda()
model_outputs = model(input_ids=inputs, labels=targets, return_dict=True)
loss = model_outputs['loss']
loss.backward(inputs=[p for n, p in parameters_of_interest])
gradients = torch.cat([p.grad.flatten() for _, p in parameters_of_interest]).detach().cpu().numpy()
if indices_counts is None:
indices_counts = numpy.zeros_like(gradients)
indices_counts[numpy.argsort(gradients)[-max_num_weights:]] += 1
model.zero_grad()
max_indices = numpy.argsort(indices_counts)[-max_num_weights:]
coverage = sum(indices_counts[max_indices]) / sum(indices_counts)
print(f"Coverage: {coverage}")
with open(max_indices_file, "wb") as outfile:
pickle.dump(max_indices, outfile)
max_gradients_file = os.path.join(cluster_dir, "all_max_gradients.pkl")
if os.path.exists(max_gradients_file):
print(f"Found max gradients at {max_gradients_file}")
all_max_gradients = pickle.load(open(max_gradients_file, "rb"))
else:
print("Computing gradients, second pass")
all_max_gradients = numpy.zeros((len(instances), max_num_weights))
i = 0
for instance in tqdm(instances):
inputs = tokenizer.encode(instance["input"], return_tensors="pt").cuda()
targets = tokenizer.encode(instance["target"], return_tensors="pt").cuda()
model_outputs = model(input_ids=inputs, labels=targets, return_dict=True)
loss = model_outputs['loss']
loss.backward(inputs=[p for n, p in parameters_of_interest])
gradients = torch.cat([p.grad.flatten() for _, p in parameters_of_interest]).detach().cpu().numpy()
all_max_gradients[i] = gradients[max_indices]
i += 1
model.zero_grad()
with open(max_gradients_file, "wb") as outfile:
pickle.dump(all_max_gradients, outfile)
print("Running PCA")
pca = PCA(n_components=50, random_state=0)
ld_indexed_data = pca.fit_transform(all_max_gradients)
print("Clustering")
gmm = mixture.GaussianMixture(
n_components=num_clusters,
covariance_type='full',
max_iter=150,
random_state=0
)
gmm = gmm.fit(ld_indexed_data)
cluster_labels = gmm.predict(ld_indexed_data)
cluster_distances = cosine_distances(gmm.means_)
cluster_counts = [0] * num_clusters
for label in cluster_labels:
cluster_counts[label] += 1
print("Cluster counts:", cluster_counts)
cluster_index_map = defaultdict(lambda: defaultdict(lambda: {'train': [], 'validation': []}))
for cluster_label, instance in zip(cluster_labels, instances):
cluster_index_map[cluster_label][instance['dataset']][instance['split']].append(instance['index'])
for cluster_label, cluster_data in cluster_index_map.items():
with open(os.path.join(cluster_dir, f"cluster_{cluster_label}_indices.pkl"), "wb") as outfile:
pickle.dump(dict(cluster_data), outfile)
with open(os.path.join(cluster_dir, "cluster_distances.pkl"), "wb") as outfile:
pickle.dump(cluster_distances, outfile)
| data-efficient-finetuning-main | scripts/make_gradient_clusters.py |
import json
import os
import tqdm
import gzip
import argparse
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import faiss
import numpy
import torch
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--output_prefix", type=str, required=True)
parser.add_argument("--data_file", type=str, required=True)
parser.add_argument("--index_type", type=str, default="hnsw")
parser.add_argument("--max_batch_tokens", type=int, default=32000)
parser.add_argument("--add_interval", type=int, default=1000, help="Each index.add() will add add_interval points")
parser.add_argument("--write_interval", type=int, default=2000000, help="Each time after indexing roughly these many points the index will be written to disk")
parser.add_argument("--neighbors_per_node", type=int, default=512, help="HNSW parameter, default from DPR paper")
parser.add_argument("--construction_depth", type=int, default=200, help="HNSW parameter, default from DPR paper")
parser.add_argument("--search_depth", type=int, default=128, help="HNSW parameter, default from DPR paper")
parser.add_argument("--encoding_dim", type=int, default=512, help="Reduced dimensionality for OPQ")
parser.add_argument("--sq_train_size", type=int, default=1000000)
parser.add_argument("--device_ids", type=int, nargs="+")
args = parser.parse_args()
index_factory_string = f"OPQ8_{args.encoding_dim},HNSW{args.neighbors_per_node},PQ8"
index_prefix = f"p3_{args.model.replace('/', '-')}_{index_factory_string.replace(',', '-')}"
with open(os.path.join(args.output_prefix, f"{index_prefix}_hyperparameters.json"), "w") as outfile:
json.dump(
{
"neighbors_per_node": args.neighbors_per_node,
"construction_depth": args.construction_depth,
"search_depth": args.search_depth,
"encoding_dim": args.encoding_dim,
"sq_train_size": args.sq_train_size
}, outfile)
text_instances_file = args.data_file
assert os.path.exists(text_instances_file), "Text instances file does not exist!"
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
model.eval()
if torch.cuda.is_available():
model.cuda(device=args.device_ids[0])
num_gpus_available = len(args.device_ids)
print(f"Using DataParallel for the encoder on {num_gpus_available} GPUs with ids {args.device_ids}")
encoder = torch.nn.DataParallel(model.encoder, device_ids=args.device_ids)
else:
encoder = model.encoder
if text_instances_file.endswith(".gz"):
instances_file_ptr = gzip.open(text_instances_file, "rt")
else:
instances_file_ptr = open(text_instances_file, "r")
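# get_batches() streams the jsonl dump and yields lists of raw input strings whose combined token
# count stays under --max_batch_tokens; `num_instances_to_skip` lets a run resume encoding from
# where a previously written index left off.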
def get_batches(num_instances_to_skip: int=0):
batch = []
num_batch_tokens = 0
max_num_batch_tokens = args.max_batch_tokens
num_batches_yielded = 0
num_instances_yielded = 0
num_truncated_instances = 0
num_instances_read = 0
started_batching = False
while True:
line = instances_file_ptr.readline()
if not line:
break
num_instances_read += 1
if num_instances_read <= num_instances_to_skip:
continue
if not started_batching:
print(f"Starting to batch instances from instance number: {num_instances_read}")
started_batching = True
instance = json.loads(line)
input_ = instance["input"]
tokens = tokenizer.tokenize(input_)
num_tokens = len(tokens)
if num_tokens > tokenizer.max_len_single_sentence:
num_truncated_instances += 1
if num_tokens + num_batch_tokens < max_num_batch_tokens:
batch.append(input_)
num_batch_tokens += num_tokens
else:
yield batch
num_instances_yielded += len(batch)
num_batches_yielded += 1
if num_batches_yielded % 10000 == 0:
print(f"Average batch size: {num_instances_yielded / num_batches_yielded}")
print(f"Truncated instances so far: {num_truncated_instances}")
batch = [input_]
num_batch_tokens = num_tokens
if batch:
yield batch
print(f"Average batch size: {num_instances_yielded / num_batches_yielded}")
print(f"Truncated instances so far: {num_truncated_instances}")
index_file = os.path.join(
args.output_prefix,
f"{index_prefix}.index"
)
index = None
last_written_index_size = 0
if os.path.exists(index_file):
print(f"Index file exists. Reading {index_file}")
index = faiss.read_index(index_file)
last_written_index_size = index.ntotal
print(f"Done reading index of size {last_written_index_size}")
else:
print(f"Will write index to {index_file}")
aggregated_encoded_batches = []
print("Computing representations and indexing them")
with torch.inference_mode():
for batch in tqdm.tqdm(get_batches(last_written_index_size)):
input_data = tokenizer.batch_encode_plus(batch,
return_tensors="pt",
padding=True,
truncation=True)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda(device=args.device_ids[0])
mask = mask.cuda(device=args.device_ids[0])
encoder_outputs = encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
pooled_hidden_states_np = pooled_hidden_states.detach().cpu().numpy()
aggregated_encoded_batches.append(pooled_hidden_states_np)
if index is None:
hidden_size = pooled_hidden_states_np.shape[1]
index = faiss.index_factory(hidden_size, index_factory_string)
# We cannot access the HNSW parameters directly. `index` is of type IndexPreTransform. We need to downcast
# the actual index to do this.
hnswpq_index = faiss.downcast_index(index.index)
hnswpq_index.hnsw.efConstruction = args.construction_depth
hnswpq_index.hnsw.efSearch = args.search_depth
if not index.is_trained and sum([x.shape[0] for x in aggregated_encoded_batches]) >= args.sq_train_size:
print("Training index")
data_to_train = numpy.concatenate(aggregated_encoded_batches)
index.train(data_to_train)
if index.is_trained and sum([x.shape[0] for x in aggregated_encoded_batches]) >= args.add_interval:
data_to_add = numpy.concatenate(aggregated_encoded_batches)
index.add(data_to_add)
print(f"Added {data_to_add.shape[0]} points to index")
aggregated_encoded_batches = []
index_size = index.ntotal
if index_size - last_written_index_size >= args.write_interval:
print(f"Writing index of size {index_size}")
faiss.write_index(index, index_file)
last_written_index_size = index_size
if aggregated_encoded_batches:
if not index.is_trained:
print("Training index")
data_to_train = numpy.concatenate(aggregated_encoded_batches)
index.train(data_to_train)
data_to_add = numpy.concatenate(aggregated_encoded_batches)
index.add(data_to_add)
print(f"Added {data_to_add.shape[0]} points to index")
faiss.write_index(index, index_file)
| data-efficient-finetuning-main | scripts/index_p3_train_reps.py |
import faiss
import argparse
import torch
import numpy
import json
import tqdm
from scipy.stats import entropy
from sklearn.cluster import kmeans_plusplus
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
numpy.random.seed(20389)
parser = argparse.ArgumentParser()
parser.add_argument("--training_data", type=str, required=True)
parser.add_argument("--index", type=str, required=True)
parser.add_argument("--model", type=str)
parser.add_argument("--encoding_batch_size", type=int)
parser.add_argument("--selected_training_data", type=str)
parser.add_argument("--num_shots", type=int, default=32)
parser.add_argument("--apply_opq", action="store_true")
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
assert torch.cuda.is_available()
cuda_devices = list(range(torch.cuda.device_count()))
print(f"Using CUDA devices {cuda_devices} for encoding training data")
model.cuda(device=cuda_devices[0])
model.eval()
encoder = torch.nn.DataParallel(model.encoder, device_ids=cuda_devices)
def encode_batch(batched_inputs):
input_data = tokenizer.batch_encode_plus(batched_inputs,
return_tensors="pt",
padding=True)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda(device=cuda_devices[0])
mask = mask.cuda(device=cuda_devices[0])
encoder_outputs = encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
return pooled_hidden_states.detach().cpu().numpy()
raw_training_data = []
encoded_training_data = []
with torch.inference_mode():
batch = []
for line in tqdm.tqdm(open(args.training_data)):
instance = json.loads(line)
raw_training_data.append(instance)
batch.append(instance["prompt_and_input"])
if len(batch) == args.encoding_batch_size:
encoded_batch = encode_batch(batch)
batch = []
encoded_training_data.append(encoded_batch)
if batch:
encoded_batch = encode_batch(batch)
encoded_training_data.append(encoded_batch)
training_data_matrix = numpy.concatenate(encoded_training_data)
if args.apply_opq:
print("Applying OPQ transform from the index")
print("Loading index..")
index = faiss.read_index(args.index)
print("Done loading index")
opq_matrix = faiss.downcast_VectorTransform(index.chain.at(0))
training_data_matrix = opq_matrix.apply(training_data_matrix)
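# sklearn's kmeans_plusplus returns (initial_centers, center_indices); only the indices are used,
# treating k-means++ seeding as a diversity-driven way to pick `num_shots` representative examples.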
_, coreset_indices = kmeans_plusplus(training_data_matrix, args.num_shots)
selected_shots = [raw_training_data[i] for i in coreset_indices]
with open(args.selected_training_data, "w") as outfile:
for instance in selected_shots:
print(json.dumps(instance), file=outfile) | data-efficient-finetuning-main | scripts/select_few_shots.py |
import json
import sys
stuff = {}
file = open(sys.argv[1], 'r')
for line in file:
sample = json.loads(line)
stuff[sample['query_id'][0]] = sample['answer'][0]
with open('drop_preds.json', 'w') as f:
json.dump(stuff, f)
| data-efficient-finetuning-main | scripts/convert_allennlp_pred_to_drop_eval_format.py |
"""
A script to construct balanced random P3 sets. We fully balance by uniformly sampling a task, then a
prompt for that task, and finally a random instance for that prompt. Note that this does not exactly
match T0 training (which only lightly balances dataset sizes).
"""
import torch
import json
import os
import pickle
from tqdm import tqdm
from collections import defaultdict
import numpy
from scipy.spatial.distance import mahalanobis
from sklearn import mixture
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gzip
import random
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--p3_data_file", type=str, required=True)
parser.add_argument("--p3_attribution_file", type=str, required=True)
parser.add_argument("--p3_dataset_mapping_file", type=str, required=True)
parser.add_argument("--num_samples", default=10000)
parser.add_argument('--output_folder', default='retrieve/rand_balanced')
parser.add_argument("--seed", type=int, default=42)
args = parser.parse_args()
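# Example invocation (hypothetical paths):
#   python scripts/construct_balanced_sample.py \
#       --p3_data_file data/p3_data.jsonl.gz \
#       --p3_attribution_file data/p3_attribution.tsv.gz \
#       --p3_dataset_mapping_file data/p3_dataset_mapping.json \
#       --num_samples 10000 --output_folder retrieve/rand_balanced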
rand = random.Random(args.seed)
p3_data = gzip.open(args.p3_data_file, mode='rt')
p3_attribution = gzip.open(args.p3_attribution_file, 'rt')
mapping = json.load(open(args.p3_dataset_mapping_file, 'r'))
num_samples = [args.num_samples]
dataset_to_idxs = defaultdict(list)
all_datasets = list(mapping.keys())
for idx, line in enumerate(p3_attribution):
dataset_and_prompt, _, _ = line.strip().split('\t')
dataset_to_idxs[dataset_and_prompt].append(idx)
samples_to_idxes = defaultdict(set)
for num_sample in num_samples:
idxes = set()
for i in range(0, num_sample):
# randomly sample dataset
dataset = rand.choice(all_datasets)
# randomly sample prompt
dataset_and_prompt = rand.choice(mapping[dataset])
# randomly sample idx
instance_idx = rand.choice(dataset_to_idxs[dataset_and_prompt])
idxes.add(instance_idx)
samples_to_idxes[num_sample] = idxes
# dump everything out
os.makedirs(args.output_folder, exist_ok=True)
outfiles = [open(f'{args.output_folder}/{n}_rand.jsonl', 'w') for n in num_samples]
for idx, sample in enumerate(p3_data):
for j, n in enumerate(num_samples):
        if idx in samples_to_idxes[n]:
            instance = json.loads(sample)
            instance["index_id"] = idx
print(json.dumps(instance), file=outfiles[j])
| data-efficient-finetuning-main | scripts/construct_balanced_sample.py |
import argparse
import random
import faiss
import numpy
from sklearn.cluster import kmeans_plusplus
import torch
import json
import gzip
import tqdm
from collections import defaultdict
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from attribution.qasper_reader import QasperEvidencePromptReader
parser = argparse.ArgumentParser()
parser.add_argument("--dev_data", type=str)
parser.add_argument("--negative_sample_ratio", type=float, default=1.0)
parser.add_argument("--index", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--search_output", type=str, required=True)
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--num_neighbors_search", type=int, default=500)
parser.add_argument("--p3_data", type=str, help="If provided, will write training data to `training_data`")
parser.add_argument("--training_data", type=str)
parser.add_argument("--num_neighbors_write", type=int, default=500)
parser.add_argument("--write_positive_neighbors_only", action="store_true", help="If set, will write neighbors of positive dev instances alone")
parser.add_argument("--coreset_size", type=int, default=None, help="If set, will use KMeans++ to select these many diverse points")
parser.add_argument("--p3_dataset_indices", type=str, help="If provided, will compute P3 dataset stats")
parser.add_argument("--stats_log", type=str, help="File to write the dataset stats")
parser.add_argument("--cuda_devices", type=int, nargs="+")
parser.add_argument("--retrieval_set_size", type=int, default=1000)
args = parser.parse_args()
indices_frequencies = defaultdict(int)
index = None
if not os.path.exists(args.search_output):
assert args.dev_data is not None
assert args.index is not None
assert args.model is not None
reader = QasperEvidencePromptReader(model_name=args.model, negative_sample_ratio=args.negative_sample_ratio)
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
    cuda_devices = args.cuda_devices if args.cuda_devices else [0]
print(f"Using CUDA devices {cuda_devices}")
if torch.cuda.is_available():
model.cuda(device=cuda_devices[0])
model.eval()
encoder = torch.nn.DataParallel(model.encoder, device_ids=cuda_devices)
    print('loading index... (may take some time)')
index = faiss.read_index(args.index)
print('loaded index!')
def query_index(queries):
input_data = tokenizer.batch_encode_plus(queries,
return_tensors="pt",
padding=True)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda(device=cuda_devices[0])
mask = mask.cuda(device=cuda_devices[0])
encoder_outputs = encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
pooled_hidden_states_np = pooled_hidden_states.detach().cpu().numpy()
return index.search(pooled_hidden_states_np, k=args.num_neighbors_search)
instances = [i for i in reader.read(args.dev_data)]
random.shuffle(instances)
instances = instances[:args.retrieval_set_size]
outputfile = open(args.search_output, "w")
batch = []
with torch.inference_mode():
for instance in tqdm.tqdm(instances): #tqdm.tqdm(reader.read(args.dev_data)):
metadata = instance.fields['metadata'].metadata
batch.append({"question_id": metadata["question_id"], "query": metadata["query"], "paragraph_index": metadata["paragraph_index"], "target": metadata["target"]})
if len(batch) == args.batch_size:
batch_distances, batch_indices = query_index([i["query"] for i in batch])
for instance_, distances, indices in zip(batch, batch_distances, batch_indices):
ids = [int(id_) for id_ in indices]
if not args.write_positive_neighbors_only or "Yes" in instance_["target"]:
for id_ in ids[:args.num_neighbors_write]:
indices_frequencies[id_] += 1
distances = [float(distance) for distance in distances]
datum = {"question_id": instance_["question_id"], "paragraph_index": instance_["paragraph_index"], "target": instance_["target"], "ids": ids, "distances": distances}
print(json.dumps(datum), file=outputfile)
outputfile.flush()
batch = []
print("\nDone searching.")
else:
print("Search output exists. Reading it instead of querying the index.")
retrieved_data = [json.loads(line) for line in open(args.search_output)]
for datum in retrieved_data:
if not args.write_positive_neighbors_only or "Yes" in datum["target"]:
for id_ in datum["ids"][:args.num_neighbors_write]:
indices_frequencies[id_] += 1
if args.coreset_size is not None:
print(f"Filtering down the retrieved training set to {args.coreset_size} points")
if index is None:
print("Loading index..")
index = faiss.read_index(args.index)
print("Done loading index")
retrieved_indices = list(indices_frequencies.keys())
# Inner index
retrieved_vectors = numpy.asarray([index.index.reconstruct(i) for i in retrieved_indices])
_, coreset_indices = kmeans_plusplus(retrieved_vectors, args.coreset_size)
print("Finished running KMeans++")
selected_indices = [retrieved_indices[i] for i in coreset_indices]
indices_frequencies = {i: indices_frequencies[i] for i in selected_indices}
max_freq_indices = sorted(indices_frequencies.items(), key=lambda x: x[1], reverse=True)[:10]
print(f"\nMost frequent indices: {max_freq_indices}")
max_index = max(indices_frequencies.keys())
if args.p3_data:
with open(args.training_data, "w") as outfile:
for i, line in tqdm.tqdm(enumerate(open(args.p3_data, "rt"))):
if i > max_index:
break
if i in indices_frequencies:
instance = json.loads(line)
instance["index_id"] = i
instance["attribution_frequency"] = indices_frequencies[i]
print(json.dumps(instance), file=outfile)
print("\nDone writing training data")
if args.p3_dataset_indices:
dataset_stats = defaultdict(lambda: {"seen": 0, "attributed": 0})
for i, line in enumerate(gzip.open(args.p3_dataset_indices, "rt")):
if i > max_index:
break
dataset_name, _, _ = line.strip().split("\t")
dataset_stats[dataset_name]["seen"] += 1
if i in indices_frequencies:
dataset_stats[dataset_name]["attributed"] += 1
num_all_seen = sum(x["seen"] for x in dataset_stats.values())
num_all_attributed = sum(x["attributed"] for x in dataset_stats.values())
stats = {}
for d in dataset_stats:
stats[d] = {"seen": dataset_stats[d]["seen"] / num_all_seen, "attributed": dataset_stats[d]["attributed"] / num_all_attributed}
json.dump(stats, open(args.stats_log, "w"), indent=2)
| data-efficient-finetuning-main | scripts/retrieve_training_data.py |
#!/usr/bin/python
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union, Optional
import json
import argparse
import string
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
# From here through _normalize_answer was originally copied from:
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
# Then cleaned up and modified a bit.
def _remove_articles(text: str) -> str:
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def _white_space_fix(text: str) -> str:
return " ".join(text.split())
EXCLUDE = set(string.punctuation)
def _remove_punc(text: str) -> str:
if not _is_number(text):
return "".join(ch for ch in text if ch not in EXCLUDE)
else:
return text
def _lower(text: str) -> str:
return text.lower()
def _tokenize(text: str) -> List[str]:
return re.split(" |-", text)
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(
_remove_articles(_normalize_number(_remove_punc(_lower(token))))
)
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = " ".join(parts).strip()
return normalized
def _is_number(text: str) -> bool:
try:
float(text)
return True
except ValueError:
return False
def _normalize_number(text: str) -> str:
if _is_number(text):
return str(float(text))
else:
return text
def _answer_to_bags(
answer: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[List[str], List[Set[str]]]:
if isinstance(answer, (list, tuple)):
raw_spans = answer
else:
raw_spans = [answer]
normalized_spans: List[str] = []
token_bags = []
for raw_span in raw_spans:
normalized_span = _normalize_answer(raw_span)
normalized_spans.append(normalized_span)
token_bags.append(set(normalized_span.split()))
return normalized_spans, token_bags
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
"""
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
"""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float:
intersection = len(gold_bag.intersection(predicted_bag))
if not predicted_bag:
precision = 1.0
else:
precision = intersection / float(len(predicted_bag))
if not gold_bag:
recall = 1.0
else:
recall = intersection / float(len(gold_bag))
f1 = (
(2 * precision * recall) / (precision + recall)
if not (precision == 0.0 and recall == 0.0)
else 0.0
)
return f1
def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bool:
gold_numbers = set()
predicted_numbers = set()
for word in gold_bag:
if _is_number(word):
gold_numbers.add(word)
for word in predicted_bag:
if _is_number(word):
predicted_numbers.add(word)
if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
return True
return False
def get_metrics(
predicted: Union[str, List[str], Tuple[str, ...]],
gold: Union[str, List[str], Tuple[str, ...]],
) -> Tuple[float, float]:
"""
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(
gold_bags[0]
):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
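# Illustrative values: get_metrics("12 passes", ("12 passes",)) should give (1.0, 1.0), while
# get_metrics("12", ("12 passes",)) should give (0.0, 0.67): exact match compares whole normalized
# answers, whereas F1 is computed over bags of normalized tokens.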
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
"""
if "number" in answer and answer["number"]:
return tuple([str(answer["number"])]), "number"
elif "spans" in answer and answer["spans"]:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
elif "date" in answer:
return (
tuple(
[
"{0} {1} {2}".format(
answer["date"]["day"],
answer["date"]["month"],
answer["date"]["year"],
)
]
),
"date",
)
else:
raise ValueError(
f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}"
)
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any]
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations (note that these are somewhat deep in the JSON for the
gold annotations, but must be top-level keys in the predicted answers).
The ``annotations`` are assumed to have the format of the dev set in the DROP data release.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string
(or list of strings) that is the answer.
"""
instance_exact_match = []
instance_f1 = []
# for each type as well
type_to_em: Dict[str, List[float]] = defaultdict(list)
type_to_f1: Dict[str, List[float]] = defaultdict(list)
for _, annotation in annotations.items():
for qa_pair in annotation["qa_pairs"]:
query_id = qa_pair["query_id"]
max_em_score = 0.0
max_f1_score = 0.0
max_type = None
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
candidate_answers = [qa_pair["answer"]]
if "validated_answers" in qa_pair and qa_pair["validated_answers"]:
candidate_answers += qa_pair["validated_answers"]
for answer in candidate_answers:
gold_answer, gold_type = answer_json_to_strings(answer)
em_score, f1_score = get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
if max_em_score == em_score and max_f1_score == f1_score:
max_type = gold_type
else:
print("Missing prediction for question: {}".format(query_id))
if qa_pair and qa_pair["answer"]:
max_type = answer_json_to_strings(qa_pair["answer"])[1]
else:
max_type = "number"
max_em_score = 0.0
max_f1_score = 0.0
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
type_to_em[max_type].append(max_em_score)
type_to_f1[max_type].append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
print("----")
total = np.sum([len(v) for v in type_to_em.values()])
for typ in sorted(type_to_em.keys()):
print(
"{0}: {1} ({2:.2f}%)".format(
typ, len(type_to_em[typ]), 100.0 * len(type_to_em[typ]) / total
)
)
print(" Exact-match accuracy {0:.3f}".format(100.0 * np.mean(type_to_em[typ])))
print(" F1 score {0:.3f}".format(100.0 * np.mean(type_to_f1[typ])))
return global_em, global_f1
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the
gold file. Both files must be json formatted and must have query_id keys, which are used to
match predictions to gold annotations. The gold file is assumed to have the format of the dev
set in the DROP data release. The prediction file must be a JSON dictionary keyed by query id,
where the value is either a JSON dictionary with an "answer" key, or just a string (or list of
strings) that is the answer. Writes a json with global_em and global_f1 metrics to file at
the specified output path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="evaluate on drop dataset")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="drop_dataset_test.gold.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
args = parser.parse_args()
evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path)
| data-efficient-finetuning-main | scripts/drop_eval_script.py |
import argparse
import faiss
import numpy
from sklearn.cluster import kmeans_plusplus
import torch
import json
import gzip
import tqdm
from collections import defaultdict
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from attribution.p3_jsonl_reader import P3ClusterReader
parser = argparse.ArgumentParser()
parser.add_argument("--dev_data", type=str)
parser.add_argument("--negative_sample_ratio", type=float, default=1.0)
parser.add_argument("--index", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--search_output", type=str, required=True)
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--num_neighbors_search", type=int, default=1000)
parser.add_argument("--p3_data", type=str, help="If provided, will write training data to `training_data`")
parser.add_argument("--training_data", type=str)
parser.add_argument("--num_neighbors_write", type=int, default=1000)
parser.add_argument("--write_positive_neighbors_only", action="store_true", help="If set, will write neighbors of positive dev instances alone")
parser.add_argument("--coreset_size", type=int, default=None, help="If set, will use KMeans++ to select these many diverse points")
parser.add_argument("--p3_dataset_indices", type=str, help="If provided, will compute P3 dataset stats")
parser.add_argument("--stats_log", type=str, help="File to write the dataset stats")
parser.add_argument("--cuda_devices", type=int, nargs="+")
parser.add_argument("--retrieval_set_size", type=int, default=1000)
args = parser.parse_args()
indices_frequencies = defaultdict(int)
index = None
if not os.path.exists(args.search_output):
assert args.dev_data is not None
assert args.index is not None
assert args.model is not None
reader = P3ClusterReader(max_query_length=1024, return_original_input=True)
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
    cuda_devices = args.cuda_devices if args.cuda_devices else [0]
print(f"Using CUDA devices {cuda_devices}")
if torch.cuda.is_available():
model.cuda(device=cuda_devices[0])
model.eval()
encoder = torch.nn.DataParallel(model.encoder, device_ids=cuda_devices)
    print('loading index... (may take some time)')
index = faiss.read_index(args.index)
print('loaded index!')
def query_index(queries):
input_data = tokenizer.batch_encode_plus(queries,
return_tensors="pt",
padding=True)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda(device=cuda_devices[0])
mask = mask.cuda(device=cuda_devices[0])
encoder_outputs = encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
pooled_hidden_states_np = pooled_hidden_states.detach().cpu().numpy()
return index.search(pooled_hidden_states_np, k=args.num_neighbors_search)
instances = [i for i in reader.read(args.dev_data)]
import random
random.Random(42).shuffle(instances)
instances = instances[:args.retrieval_set_size]
outputfile = open(args.search_output, "w")
batch = []
with torch.inference_mode():
for instance in tqdm.tqdm(instances): #tqdm.tqdm(reader.read(args.dev_data)):
batch.append({"query": instance["pretokenized_input"]})
if len(batch) == args.batch_size:
batch_distances, batch_indices = query_index([i["query"] for i in batch])
for instance_, distances, indices in zip(batch, batch_distances, batch_indices):
ids = [int(id_) for id_ in indices]
if not args.write_positive_neighbors_only or "Yes" in instance_["target"]:
for id_ in ids[:args.num_neighbors_write]:
indices_frequencies[id_] += 1
distances = [float(distance) for distance in distances]
datum = {"input": instance["pretokenized_input"], "ids": ids, "distances": distances}
print(json.dumps(datum), file=outputfile)
outputfile.flush()
batch = []
print("\nDone searching.")
else:
print("Search output exists. Reading it instead of querying the index.")
retrieved_data = [json.loads(line) for line in open(args.search_output)]
for datum in retrieved_data:
if not args.write_positive_neighbors_only or "Yes" in datum["target"]:
for id_ in datum["ids"][:args.num_neighbors_write]:
indices_frequencies[id_] += 1
if args.coreset_size is not None:
print(f"Filtering down the retrieved training set to {args.coreset_size} points")
if index is None:
print("Loading index..")
index = faiss.read_index(args.index)
print("Done loading index")
retrieved_indices = list(indices_frequencies.keys())
# Inner index
retrieved_vectors = numpy.asarray([index.index.reconstruct(i) for i in retrieved_indices])
_, coreset_indices = kmeans_plusplus(retrieved_vectors, args.coreset_size)
print("Finished running KMeans++")
selected_indices = [retrieved_indices[i] for i in coreset_indices]
indices_frequencies = {i: indices_frequencies[i] for i in selected_indices}
max_freq_indices = sorted(indices_frequencies.items(), key=lambda x: x[1], reverse=True)[:10]
print(f"\nMost frequent indices: {max_freq_indices}")
max_index = max(indices_frequencies.keys())
if args.p3_data:
with open(args.training_data, "w") as outfile:
for i, line in tqdm.tqdm(enumerate(open(args.p3_data, "rt"))):
if i > max_index:
break
if i in indices_frequencies:
instance = json.loads(line)
instance["index_id"] = i
instance["attribution_frequency"] = indices_frequencies[i]
print(json.dumps(instance), file=outfile)
print("\nDone writing training data")
if args.p3_dataset_indices:
dataset_stats = defaultdict(lambda: {"seen": 0, "attributed": 0})
for i, line in enumerate(gzip.open(args.p3_dataset_indices, "rt")):
if i > max_index:
break
dataset_name, _, _ = line.strip().split("\t")
dataset_stats[dataset_name]["seen"] += 1
if i in indices_frequencies:
dataset_stats[dataset_name]["attributed"] += 1
num_all_seen = sum(x["seen"] for x in dataset_stats.values())
num_all_attributed = sum(x["attributed"] for x in dataset_stats.values())
stats = {}
for d in dataset_stats:
stats[d] = {"seen": dataset_stats[d]["seen"] / num_all_seen, "attributed": dataset_stats[d]["attributed"] / num_all_attributed}
json.dump(stats, open(args.stats_log, "w"), indent=2)
| data-efficient-finetuning-main | scripts/retrieve_training_data_ni.py |
import gzip
import json
import random
from tqdm import tqdm
import sys
file = sys.argv[1]
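# assumes the input path ends in ".jsonl"; the [:-6] below strips that suffix for the output name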
outfile = open(sys.argv[1][:-6] + '_index.txt', 'w')
# start by loading the file into memory
with open(file) as f:
for sample in f:
outfile.write(f'{json.loads(sample)["index_id"]}\n')
| data-efficient-finetuning-main | scripts/grab_instance_idxes.py |
import json
from collections import defaultdict
data = json.load(open("./t0_cluster_data.json"))
cluster_stats = {}
dataset_to_clusters = defaultdict(list)
for cluster_id, cluster_data in data.items():
input_set = set([x["input"] for x in cluster_data])
dataset_set = set([x["dataset"] for x in cluster_data])
cluster_stats[cluster_id] = {
"size": len(input_set),
"datasets": sorted(list(dataset_set))
}
for dataset in dataset_set:
dataset_to_clusters[dataset].append(cluster_id)
with open("t0_cluster_stats.json", "w") as outfile:
json.dump({"clusters": cluster_stats, "dataset_to_cluster": dataset_to_clusters}, outfile, indent=2)
| data-efficient-finetuning-main | scripts/compute_stats.py |
import json
import os
import tqdm
import gzip
import argparse
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import faiss
import numpy
import torch
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--output_prefix", type=str, required=True)
parser.add_argument("--index_type", type=str, default="hnsw")
parser.add_argument("--max_batch_tokens", type=int, default=4000)
parser.add_argument("--add_interval", type=int, default=1000, help="Each index.add() will add add_interval points")
parser.add_argument("--write_interval", type=int, default=2000000, help="Each time after indexing roughly these many points the index will be written to disk")
parser.add_argument("--neighbors_per_node", type=int, default=512, help="HNSW parameter, default from DPR paper")
parser.add_argument("--construction_depth", type=int, default=200, help="HNSW parameter, default from DPR paper")
parser.add_argument("--search_depth", type=int, default=128, help="HNSW parameter, default from DPR paper")
parser.add_argument("--encoding_dim", type=int, default=512, help="Reduced dimensionality for OPQ")
parser.add_argument("--sq_train_size", type=int, default=1000000)
parser.add_argument("--device_ids", type=int, nargs="+")
args = parser.parse_args()
index_factory_string = f"OPQ8_{args.encoding_dim},HNSW{args.neighbors_per_node},PQ8"
index_prefix = f"ni_raw_{args.model.replace('/', '-')}_{index_factory_string.replace(',', '-')}"
with open(os.path.join(args.output_prefix, f"{index_prefix}_hyperparameters.json"), "w") as outfile:
json.dump(
{
"neighbors_per_node": args.neighbors_per_node,
"construction_depth": args.construction_depth,
"search_depth": args.search_depth,
"encoding_dim": args.encoding_dim,
"sq_train_size": args.sq_train_size
}, outfile)
text_instances_file = os.path.join(args.output_prefix, "ni_dump_raw.jsonl")
assert os.path.exists(text_instances_file), "Text instances file does not exist!"
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model)
model.eval()
if torch.cuda.is_available():
model.cuda(device=args.device_ids[0])
num_gpus_available = len(args.device_ids)
print(f"Using DataParallel for the encoder on {num_gpus_available} GPUs with ids {args.device_ids}")
encoder = torch.nn.DataParallel(model.encoder, device_ids=args.device_ids)
else:
encoder = model.encoder
if text_instances_file.endswith(".gz"):
instances_file_ptr = gzip.open(text_instances_file, "rt")
else:
instances_file_ptr = open(text_instances_file, "r")
def get_batches(num_instances_to_skip: int=0):
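    """Stream instances from the dump file and group them into batches whose total
    token count stays below --max_batch_tokens, skipping the first
    `num_instances_to_skip` instances (those already present in a partially built index)."""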
batch = []
num_batch_tokens = 0
max_num_batch_tokens = args.max_batch_tokens
num_batches_yielded = 0
num_instances_yielded = 0
num_truncated_instances = 0
num_instances_read = 0
started_batching = False
while True:
line = instances_file_ptr.readline()
if not line:
break
num_instances_read += 1
if num_instances_read <= num_instances_to_skip:
continue
if not started_batching:
print(f"Starting to batch instances from instance number: {num_instances_read}")
started_batching = True
instance = json.loads(line)
input_ = instance["input"]
tokens = tokenizer.tokenize(input_)
num_tokens = len(tokens)
        if num_tokens > 1024:  # matches the max_length passed to batch_encode_plus below
num_truncated_instances += 1
if num_tokens + num_batch_tokens < max_num_batch_tokens:
batch.append(input_)
num_batch_tokens += num_tokens
else:
yield batch
num_instances_yielded += len(batch)
num_batches_yielded += 1
if num_batches_yielded % 10000 == 0:
print(f"Average batch size: {num_instances_yielded / num_batches_yielded}")
print(f"Truncated instances so far: {num_truncated_instances}")
batch = [input_]
num_batch_tokens = num_tokens
if batch:
yield batch
print(f"Average batch size: {num_instances_yielded / num_batches_yielded}")
print(f"Truncated instances so far: {num_truncated_instances}")
index_file = os.path.join(
args.output_prefix,
f"{index_prefix}.index"
)
index = None
last_written_index_size = 0
if os.path.exists(index_file):
print(f"Index file exists. Reading {index_file}")
index = faiss.read_index(index_file)
last_written_index_size = index.ntotal
print(f"Done reading index of size {last_written_index_size}")
else:
print(f"Will write index to {index_file}")
aggregated_encoded_batches = []
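# Encoded batches are buffered here until there are enough vectors to train the index
# (--sq_train_size) and, once trained, enough to add in bulk (--add_interval).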
print("Computing representations and indexing them")
with torch.inference_mode():
for batch in tqdm.tqdm(get_batches(last_written_index_size)):
input_data = tokenizer.batch_encode_plus(batch,
return_tensors="pt",
padding=True,
truncation=True,
max_length=1024)
input_ids = input_data['input_ids']
# (batch_size, num_tokens)
mask = input_data['attention_mask']
if torch.cuda.is_available():
input_ids = input_ids.cuda(device=args.device_ids[0])
mask = mask.cuda(device=args.device_ids[0])
encoder_outputs = encoder(input_ids=input_ids,
attention_mask=mask,
return_dict=True)
# (batch_size, num_tokens, hidden_size)
hidden_states = encoder_outputs["last_hidden_state"]
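        # Mean-pool over non-padding tokens: mask-weighted sum divided by the number of real tokens.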
# (batch_size, hidden_size)
pooled_hidden_states = (hidden_states * mask.unsqueeze(-1)).sum(1) / mask.sum(1).unsqueeze(-1)
pooled_hidden_states_np = pooled_hidden_states.detach().cpu().numpy()
aggregated_encoded_batches.append(pooled_hidden_states_np)
if index is None:
hidden_size = pooled_hidden_states_np.shape[1]
index = faiss.index_factory(hidden_size, index_factory_string)
# We cannot access the HNSW parameters directly. `index` is of type IndexPreTransform. We need to downcast
# the actual index to do this.
hnswpq_index = faiss.downcast_index(index.index)
hnswpq_index.hnsw.efConstruction = args.construction_depth
hnswpq_index.hnsw.efSearch = args.search_depth
if not index.is_trained and sum([x.shape[0] for x in aggregated_encoded_batches]) >= args.sq_train_size:
print("Training index")
data_to_train = numpy.concatenate(aggregated_encoded_batches)
index.train(data_to_train)
if index.is_trained and sum([x.shape[0] for x in aggregated_encoded_batches]) >= args.add_interval:
data_to_add = numpy.concatenate(aggregated_encoded_batches)
index.add(data_to_add)
print(f"Added {data_to_add.shape[0]} points to index")
aggregated_encoded_batches = []
index_size = index.ntotal
if index_size - last_written_index_size >= args.write_interval:
print(f"Writing index of size {index_size}")
faiss.write_index(index, index_file)
last_written_index_size = index_size
if aggregated_encoded_batches:
    # Flush any remaining vectors. Training here only happens for small dumps that
    # never accumulated --sq_train_size vectors during the main loop.
    if not index.is_trained:
print("Training index")
data_to_train = numpy.concatenate(aggregated_encoded_batches)
index.train(data_to_train)
data_to_add = numpy.concatenate(aggregated_encoded_batches)
index.add(data_to_add)
print(f"Added {data_to_add.shape[0]} points to index")
faiss.write_index(index, index_file)
| data-efficient-finetuning-main | scripts/index_ni_train_reps.py |
import json
import random
from tqdm import tqdm
import sys
outfile_path = sys.argv[1]
p3_data = sys.argv[2]
# fill in these values with yours.
outfiles = ['qasper']#'anli_r1', 'anli_r2', 'anli_r3', 'casehold', 'cb', 'copa', 'drop', 'hellaswag', 'rte', 'story_cloze', 'wic', 'winogrande', 'wsc']
indices = [58019]#721247, 755358, 1215546, 70622, 214031, 175802, 193014, 296924, 1049514, 205667, 197293, 189911, 132661]
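# Assumed to be the total number of instances (lines) in the P3 dump; specific to the authors' data.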
max_index = 102864371
files = [open(f'{outfile_path}/{o}_random.jsonl', "w") for o in outfiles]
# generate random indices
random_indices = []
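# For each retrieved-set size, sample that many distinct line indices uniformly at
# random, rejecting duplicates so every sampled index is unique.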
for idx in tqdm(indices):
cur_idxes = []
used = set()
while len(cur_idxes) < idx:
rand_int = random.randrange(max_index)
if rand_int not in used:
cur_idxes.append(rand_int)
used.add(rand_int)
random_indices.append(set(cur_idxes))
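# Single pass over the P3 dump: each line whose index was sampled for a task is
# written to that task's random-subset file.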
for i, line in tqdm(enumerate(open(p3_data))):
for j, idx_set in enumerate(random_indices):
if i in idx_set:
instance = json.loads(line)
print(json.dumps(instance), file=files[j])
print("\nDone writing random data")
| data-efficient-finetuning-main | scripts/get_random_indices.py |