python_code (stringlengths 0-187k) | repo_name (stringlengths 8-46) | file_path (stringlengths 6-135) |
---|---|---|
import re
import io
from typing import Tuple
from pathlib import Path
import boto3
import boto3.s3.transfer
import botocore
s3_client = boto3.client("s3")
class RetryableDownloadFailure(Exception):
def __init__(self, err: Exception):
self.err = err
def try_get_content(url: str) -> bytes:
bucket,key = _parse_url(url)
try:
buffer = io.BytesIO()
s3_client.download_fileobj(Bucket=bucket,
Key=key,
Fileobj=buffer,
Config=boto3.s3.transfer.TransferConfig(use_threads=False))
except botocore.exceptions.ClientError as e:
message = e.args[0] if e.args and isinstance(e.args[0], str) else ""
if "SlowDown" not in message:
raise
raise RetryableDownloadFailure(e)
return buffer.getvalue()
def exists(url: str) -> bool:
bucket,key = _parse_url(url)
try:
s3_client.head_object(Bucket=bucket, Key=key)
return True
except botocore.exceptions.ClientError as ex:
if ex.response['Error']['Code'] in ('404', 'NoSuchKey'):
return False
raise
def upload(path: Path, url: str):
if path.is_dir():
for f in path.iterdir():
upload(f, f"{url}/{f.name}")
return
bucket, key = _parse_url(url)
with open(path, "rb") as data:
s3_client.upload_fileobj(data, bucket, key)
def download(url: str, path: Path):
bucket,key = _parse_url(url)
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "wb") as file:
s3_client.download_fileobj(bucket, key, file)
def _parse_url(url: str) -> Tuple[str,str]:
path_pattern = re.search("s3://([^/]*)/(.*)", url)
bucket = path_pattern.group(1)
prefix = path_pattern.group(2)
return bucket, prefix
| cc_net-main | cc_net/s3util.py |
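# Illustrative usage sketch (not part of cc_net): retry try_get_content when S3
# throttles with "SlowDown". Assumes the module above is importable as
# cc_net.s3util and that AWS credentials and the target object exist.
import time

from cc_net import s3util


def get_content_with_retries(url: str, max_retries: int = 5) -> bytes:
    for attempt in range(max_retries):
        try:
            return s3util.try_get_content(url)
        except s3util.RetryableDownloadFailure:
            # Exponential backoff before retrying the throttled download.
            time.sleep(2 ** attempt)
    raise RuntimeError(f"Gave up on {url} after {max_retries} attempts")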
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
func_argparse.parse_and_call(cc_net.mine.get_main_parser())
if __name__ == "__main__":
main()
| cc_net-main | cc_net/__main__.py |
from datetime import datetime, timezone
from typing import Optional
from cc_net import jsonql
class Ai2Formatter(jsonql.Transformer):
def __init__(self):
super().__init__()
def do(self, doc: dict) -> Optional[dict]:
d = {}
d["source"] = "common-crawl"
d["id"] = doc["url"]
d["text"] = doc["raw_content"]
d["added"] = datetime.now(timezone.utc).isoformat()
d["created"] = doc["date_download"]
m = {}
m.update(doc)
del m["raw_content"]
d["metadata"] = m
return d
| cc_net-main | cc_net/ai2_format.py |
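# Illustrative sketch (not from the repo): the shape Ai2Formatter.do produces
# for a minimal cc_net document. Field names mirror the ones read above; every
# key other than "raw_content" is carried over into "metadata".
from cc_net.ai2_format import Ai2Formatter

formatter = Ai2Formatter()
doc = {
    "url": "http://example.com/page",
    "raw_content": "Hello world",
    "date_download": "2019-03-18T00:00:00Z",
    "language": "en",
}
out = formatter.do(doc)
assert out["source"] == "common-crawl"
assert out["id"] == "http://example.com/page"
assert out["text"] == "Hello world"
assert out["metadata"]["language"] == "en"
assert "raw_content" not in out["metadata"]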
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
parser = argparse.ArgumentParser(
description="Read a list of json files and split them ",
parents=[jsonql.io_parser()],
)
parser.add_argument("--pattern", type=str)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--threshold", type=float, default=0)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--out_field", type=str, default="language")
parser.add_argument("--top", type=int, default=1)
return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
labels, scores = model.predict(text, k=k)
labels = [l.replace("__label__", "") for l in labels]
return labels, scores
def avg_predict(model, text):
# Overall this gives the same results as predict(model, text.replace("\n", ""))
text = text.split("\n")
text_len = sum(len(line) for line in text)
if text_len == 0:
return None, 0
scores = [predict(model, line) for line in text]
scores_by_label: Dict[str, float] = collections.defaultdict(float)
for (labels, label_scores), line in zip(scores, text):
if not labels:
continue
scores_by_label[labels[0]] += label_scores[0] * len(line)
label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
return label, score / text_len
class Classifier(jsonql.Transformer):
def __init__(
self,
model: Path,
field: str,
out_field: str,
threshold: float = 0,
top: int = 1,
language: Optional[str] = None,
rounding: int = 2,
):
super().__init__()
self.model = model
assert model.exists(), f"Model {model} doesn't exist."
self.field = field
self.out_field = out_field
self.threshold = threshold
self.top = top
self.language = language
self.rounding = rounding
# Fasttext model is a C object and can't be pickled
self.fasttext_model: fasttext._FastText = None
self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
self.cnt: Dict[str, int] = {}
def _prepare(self):
self.log(f"Loading {self.model}")
self.fasttext_model = fasttext.load_model(str(self.model))
def predict(self, text):
return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
def do(self, doc: dict) -> Optional[dict]:
text = doc.get(self.field, None)
if not text:
return None
if self.language and doc.get("language") != self.language:
self.n_ignored += 1
return doc
self.n_doc += 1
labels, scores = self.predict(text)
scores.round(self.rounding, out=scores)
for l in labels:
self.cnt[l] = self.cnt.get(l, 0) + 1
if self.top == 1:
existing_label = doc.get(self.out_field, None)
if existing_label and labels[0] != existing_label:
self.n_disagreement += 1
if all(s < self.threshold for s in scores):
return None
self.n_accepted += 1
if self.top == 1:
doc[self.out_field] = labels[0]
doc[self.out_field + "_score"] = scores[0]
else:
doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
return doc
def summary(self):
n_doc, n_accepted, n_disagreement, cnt, out_field = (
self.n_doc,
self.n_accepted,
self.n_disagreement,
self.cnt,
self.out_field,
)
summ = super().summary()
if self.threshold > 0:
ratio = n_accepted / n_doc if n_doc else 0
summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
disagreement = n_disagreement / n_doc if n_doc else 0
if disagreement:
summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
return summ
def __repr__(self):
return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
classifier = Classifier(**kwargs)
splitter = jsonql.split(pattern)
jsonql.run_pipes(classifier, splitter, file=file, output=output)
if __name__ == "__main__":
args = get_args()
pattern = args.get("pattern")
if pattern:
classify_and_split(**args)
else:
args.pop("pattern")
jsonql.run_pipe(Classifier, args)
| cc_net-main | cc_net/split_by_lang.py |
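# Standalone sketch (illustrative) of the length-weighted aggregation used by
# avg_predict above: each line's score is weighted by its character length,
# summed per label, and the winning label is normalised by the total length.
import collections
from typing import List, Tuple


def weighted_label(lines: List[Tuple[str, str, float]]) -> Tuple[str, float]:
    # lines: (text, predicted_label, score) triples, one per line of a document.
    total_len = sum(len(text) for text, _, _ in lines)
    scores: dict = collections.defaultdict(float)
    for text, label, score in lines:
        scores[label] += score * len(text)
    label, score = max(scores.items(), key=lambda kv: kv[1])
    return label, score / total_len


label, score = weighted_label(
    [("Bonjour tout le monde", "fr", 0.9), ("Hello", "en", 0.8)]
)
assert label == "fr"  # the longer French line dominates the weighted average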
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
lang: str
text: str
lm_score: float
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
"""
Download metadata from a shard.
Sample metadata:
{
"cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
"digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
"url": "http://personals.gearplay.com/ads/DRJONES.htm",
"line_ids": [10],
"languages": ["en_XX"],
"lm_scores": [-2.658],
}
"""
snapshot = snapshot.replace("-", "_")
name = f"snap_{snapshot}_batch_{shard}.json.gz"
url = "/".join([S3_BUCKET, VERSION, name])
shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
try:
cache_file: Optional[Path] = None
if WET_CACHE is not None:
cache_file = WET_CACHE / name
metadata_file = jsonql.open_remote_file(url, cache_file)
except Exception:
logging.warning(f"Couldn't open {url}")
return
for meta in jsonql.read_jsons(metadata_file):
shard_metadata[meta["cc_segment"]][meta["digest"]] = meta
found_pars, missed_pars = 0, 0
for seg, segment_metadata in shard_metadata.items():
for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
if doc["digest"] not in segment_metadata:
continue
meta = segment_metadata[doc["digest"]]
full_pars = [doc["title"]] + doc["raw_content"].split("\n")
assert len(meta["line_ids"]) == len(meta["languages"])
assert len(meta["line_ids"]) == len(meta["lm_scores"])
for i, lang, score in zip(
meta["line_ids"], meta["languages"], meta["lm_scores"]
):
if snapshot != "2018-51" and lang in BIG_LANGUAGES:
# Big languages only come from "2018-51" snapshot
continue
if i >= len(full_pars):
# This is because CC100 was created by saving only urls.
# Some urls appear in different snapshots with slightly different
# versions, but we don't know which one is correct.
# Here we read both versions, but some index may end up
# being incorrect.
# This impacts ~3% of documents.
missed_pars += 1
continue
yield Paragraph(lang, full_pars[i], score)
found_pars += 1
if missed_pars > 0:
logging.warning(
f"Missed {missed_pars} ({missed_pars / max(found_pars, 1):%}) paragraphs."
)
def _split_by_par(
paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
outdir.mkdir(exist_ok=True)
outfiles = {}
num_pars = 0
try:
for par in paragraphes:
# MODIFY ME: filter paragraph if needed (languages, score, ...)
if par.lang not in outfiles:
(outdir / par.lang).mkdir(exist_ok=True)
outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
outfiles[par.lang] = gzip.open(outfile, "wt")
print(par.text, file=outfiles[par.lang])
num_pars += 1
finally:
for o in outfiles.values():
o.close()
logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
if processes == 0:
yield map
return
with multiprocessing.Pool(processes) as pool:
yield pool.imap_unordered
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
_dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)
with unordered_map(processes) as umap:
num_pars = sum(umap(_dl_shard, range(500)))
logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
"""
Download CC100 corpus.
Will create one text file per language and CC snapshot.
- snapshot: restrict to one snapshot. Useful for parallelization.
- outdir: output directory
- processes: number of processes to use
"""
if snapshot is None:
snapshots = CC_100_SNAPSHOTS
else:
snapshots = snapshot.split(",")
invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
assert not invalids, f"Invalid snapshots {invalids}, choose from {CC_100_SNAPSHOTS}"
for snapshot in snapshots:
dl_snapshot(snapshot, outdir, processes)
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
| cc_net-main | cc_net/tools/dl_cc_100.py |
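# Illustrative call (not from the repo): download a single CC100 snapshot with
# four worker processes. Network access and plenty of disk space are assumed.
from pathlib import Path

from cc_net.tools.dl_cc_100 import dl

dl(snapshot="2018-51", outdir=Path("data_cc100"), processes=4)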
| cc_net-main | cc_net/tools/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label document with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
if url in url2tags:
url2tags[url] &= tags
else:
url2tags[url] = tags
def load_tags(filename: Path = None) -> TaggedUrls:
if filename is None:
with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
tree = etree.parse(dmoz)
else:
tree = etree.parse(str(filename))
root = tree.getroot()
url2tags: Dict[str, Set[str]] = {}
for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
url = external_page.get("about")
domain = urlparse(url).netloc
for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
# print(url, topic.text)
# Tags look like Top/Arts/Animation/Anime/Collectibles
tags = set(topic.text.split("/")[1:])
add_tags(url, tags, url2tags)
add_tags(domain, tags, url2tags)
return url2tags
def dl(output: Path) -> None:
urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
"""
Loads a tags file and create a training dataset using the given webpages.
Arguments:
- file: CC shard file
- tags_file: dmoz tagging file, (like the one produced by `dl`)
- output: ""
"""
url2tags = load_tags(tags_file)
with jsonql.open_write(output) as o:
for document in jsonql.read_jsons(file):
if not document:
continue
url = document["url"]
domain = document["source_domain"]
if url in url2tags:
tags = url2tags[url]
elif domain in url2tags:
tags = url2tags[domain]
else:
continue
if len(tags) == 0:
continue
fasttext_tags = ["__label__" + tag for tag in tags]
content = document["tokenized"].replace("\n", " ").lower()
if len(content) > 200:
print(" ".join(fasttext_tags), content, file=o) # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
| cc_net-main | cc_net/tools/make_dmoz_corpus.py |
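# Illustrative sketch: add_tags keeps only the tags shared by every topic a URL
# appears under (set intersection), so repeated URLs narrow their tag set.
# Assumes the module above is importable as cc_net.tools.make_dmoz_corpus.
from cc_net.tools.make_dmoz_corpus import add_tags

url2tags: dict = {}
add_tags("http://example.com", {"Arts", "Animation"}, url2tags)
add_tags("http://example.com", {"Arts", "Movies"}, url2tags)
assert url2tags["http://example.com"] == {"Arts"}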
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
normalized = output_dir / (corpus.stem + ".normalized")
if normalized.exists():
return normalized
print("Will normalize", corpus, "to", normalized)
jsonql.run_pipes(
jsonql.Mapper(text_normalizer.normalize),
file=corpus,
output=normalized,
processes=PROCESSES,
)
return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model")
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
def __init__(self, model: Path):
super().__init__()
self.model = model
self.sp: SentencePieceProcessor = None # type: ignore
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.model))
def do(self, line: str) -> str:
return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
def __init__(
self,
sp_model: Path,
lm_model: Path,
field: str = "raw_content",
threshold: float = float("+inf"),
):
super().__init__()
self.sp_model = sp_model
self.lm_model = lm_model
self.field = field
self.threshold = threshold
self.sp: SentencePieceProcessor = None
self.lm: KenlmModel = None
self.splitter: SentenceSplitter = None
self.hashes: Set[int] = set()
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.sp_model))
self.splitter = SentenceSplitter("en")
self.lm = KenlmModel(str(self.lm_model))
def do(self, document: dict) -> Optional[str]:
content: Optional[str] = document.get(self.field)
if not content:
return None
all_sentences = [
s for l in content.split("\n") if l for s in self.splitter.split(text=l)
]
unique_sentences = []
for s in all_sentences:
if not s:
continue
h = dedup.str_hash(s)
if h in self.hashes:
continue
self.hashes.add(h)
unique_sentences.append(s)
scores = []
for sentence in unique_sentences:
normalized = text_normalizer.normalize(sentence)
pieces = self.sp.encode_as_pieces(normalized)
log_score = self.lm.score(" ".join(pieces))
pp = -1
if len(pieces):
pp = perplexity.pp(log_score, len(pieces))
scores.append(pp)
res = filter(
lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
)
return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
tokenized = output_dir / (corpus.stem + ".tokenized")
if tokenized.exists():
return tokenized
print("Will SentencePiece", corpus, "to", tokenized)
jsonql.run_pipes(
SentencePiece(sp_model(lang)),
file=normalize(corpus, output_dir),
output=tokenized,
processes=PROCESSES,
)
return tokenized
def train_lm(
corpus: Path,
output_dir: Path,
lang: str = "en",
vocab_size: int = VOCAB_SIZE,
ngrams: int = 5,
):
lm_text_file = output_dir / (corpus.stem + ".arpa")
lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
if lm_bin_file.exists():
return lm_bin_file
assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."
normalized = normalize(corpus, output_dir)
tokenized = tokenize(normalized, output_dir, lang)
print("Will train LM", lm_text_file, "on", tokenized)
kenlm_cmd = [
str(KENLM),
f"--order={ngrams}",
"--memory=8G",
f"--temp_prefix={jsonql._tmp_dir()}",
f"--text={tokenized}",
f"--arpa={lm_text_file}",
f"--vocab_estimate={vocab_size}",
"--discount_fallback",
]
subprocess.run(kenlm_cmd, check=True)
print("Will create binary model", lm_bin_file, "from", lm_text_file)
subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
return lm_bin_file
def uniform_sampling_wrt_perplexity(
paragraphes: Iterable[str],
rounding: float = 100.0,
cut: float = 1000.0,
samples: int = 20,
) -> Iterable[str]:
max_samples = math.floor(cut / rounding * samples)
n = 0
buckets = Counter([0.0])
logging.info(f"Will sample {max_samples} sentences.")
for lines in paragraphes:
for line in lines.split("\n"):
if not line:
continue
pp = float(line[: line.find("\t")])
pp = math.floor(pp / rounding) * rounding
if pp > cut:
continue
if buckets[pp] > samples:
continue
yield line
buckets[pp] += 1
if buckets[pp] > samples:
logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
n += 1
if n > max_samples:
return
def sample(
corpus: Path,
output_dir: Path,
dataset: Path = None,
n: int = 10_000,
lang: str = "en",
) -> Path:
sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
if sample_file.exists():
return sample_file
dataset = _dataset(dataset, lang)
extractor = ExtractSentences(
sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
)
sampling = functools.partial(
uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
)
print(f"Will sample data from {dataset} to {sample_file}")
try:
jsonql.run_pipes(
extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
)
except Exception:
sample_file.unlink()
raise
subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
subprocess.run(["head", sample_file], check=True)
return sample_file
def mine(
corpus: Path,
output_dir: Path,
threshold: float,
dataset: Path = None,
lang: str = "en",
) -> List[Path]:
"""Search sentences in CC similar to the one in the given corpus.
Args:
- corpus: corpus to train the LM one. Assumes one sentence per line.
- output_dir: where to store the results
- threshold: maximum perplexity to have
- dataset: glob pattern matching CC shards.
- lang: search in the files of this language
"""
dataset = _dataset(dataset, lang)
files = list(dataset.parent.glob(dataset.name))
outputs = [output_dir / (f.stem + ".tsv") for f in files]
if all(o.exists() for o in outputs):
return outputs
n = len(outputs)
sp = [sp_model(lang)] * n
lm = [train_lm(corpus, output_dir)] * n
thresholds = [threshold] * n
ex = submitit.AutoExecutor(output_dir / "mining_logs")
ex.update_parameters(
name="mine",
cpus_per_task=PROCESSES,
timeout_min=60 * 24 // PROCESSES,
mem_gb=10,
)
jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
print("Submited job array:", jobs[0])
for j in submitit.helpers.as_completed(jobs):
(i, o) = j.result()
print("Mined sentences from", i, "to", o)
return outputs
def _mine(
file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
| cc_net-main | cc_net/tools/expand_corpus.py |
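# Standalone sketch (illustrative) of the bucketing rule used by
# uniform_sampling_wrt_perplexity above: perplexities are floored to the
# nearest `rounding` multiple and at most `samples` sentences are kept per bucket.
import math


def bucket(pp: float, rounding: float = 100.0) -> float:
    return math.floor(pp / rounding) * rounding


assert bucket(151.5) == 100.0
assert bucket(99.9) == 0.0
assert bucket(1000.0) == 1000.0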
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
return "\n".join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
file.parent.mkdir(exist_ok=True)
with open(file, "w") as f:
for sentences in docs:
doc = dict(text=text(*sentences))
print(json.dumps(doc), file=f)
def as_dict(hash_set):
if not isinstance(hash_set, dict):
hash_set = {k: v for (k, v) in hash_set.items()}
return hash_set
def load_hashes(file):
results = dedup.FlatHashSet()
results.load(file)
return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in actual]
assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
write_docs(
tmp_path / "docs.json",
[
["_Hello", "_World", "I'm so original"],
["_world", "I'm originaler", "_Hello"],
],
)
results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
expected = [
# First document is untouched
dict(
text=text("_Hello", "_World", "I'm so original"),
original_nlines=3,
nlines=3,
line_ids=[0, 1, 2],
),
# Second documents loses several lines
dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
collector = dedup.HashesCollector(field="text", output=hashes)
list(collector.map(documents))
results = load_hashes(hashes)
expected = {
str_hash(l): l.startswith("_")
for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
}
assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
with dedup.HashesCollector(field="text", output=hashes) as d:
list(d.map(documents))
results = FlatHashSet()
results.load_np(hashes)
expected = set(
str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
)
assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
documents = [
dict(text=text("_Hello", "World", "I'm so original")),
dict(text=text("Good morning", "World", "I'm originaler")),
]
seen = ["_hello", "i'm originaler", "world"]
hashes = [str_hash(h) for h in seen]
h = dedup.FlatHashSet()
h.add(hashes)
# Note: 'world' appears only once and won't be treated as a duplicate.
h.add(hashes[:-1])
h.dump(tmp_path / "hashes.bin")
results = list(
dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
)
expected = [
dict(
text=text("World", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[1, 2],
),
dict(
text=text("Good morning", "World"),
original_nlines=3,
nlines=2,
line_ids=[0, 1],
),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
parts = [data / "part_0.json", data / "part_1.json"]
res = tmp_path / "res"
res.mkdir()
h = tmp_path / "hashes.bin"
field = "text"
jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
for part in parts:
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(
text=text("Good morning", "I'm originaler"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
words = [w for part in [part_0, part_1] for doc in part for w in doc]
expected = {str_hash(s.lower()): s.startswith("_") for s in words}
assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["_Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
h = tmp_path / "hashes"
h.mkdir()
h0 = FlatHashSet()
h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
h0.add([str_hash("_world")])
h0.dump(h / "part_0.bin")
assert {
str_hash("hello"): False,
str_hash("_world"): True,
str_hash("i'm so original"): False,
} == as_dict(h0)
h1 = FlatHashSet()
h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
h1.add([str_hash("_good morning")])
h1.dump(h / "part_1.bin")
assert {
str_hash("_good morning"): True,
str_hash("_world"): False,
str_hash("i'm originaler"): False,
} == as_dict(h1)
res = tmp_path / "res"
res.mkdir()
# dedup.DISABLE_MULTI_PROCESSING = True # Simplifies debugging
dedup.remove_duplicates_sharded(
files=[data / "part_0.json", data / "part_1.json"],
outputs=[res / "part_0.json", res / "part_1.json"],
field="text",
hashes_dir=h,
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
# First pass removes "_world", second "_good morning".
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
| cc_net-main | tests/test_dedup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
assert txt.replace_unicode_punct(weird) == replaced
assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
weird = "023456789 | 0123456789"
normalized = "000000000 | 0000000000"
assert txt.normalize(weird, numbers=True) == normalized
assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
weird = "023´∶:\x10 | ;012 hèllo"
normalized = "000 | ;000 hèllo"
assert normalized == txt.slow_normalize_for_dedup(weird)
assert normalized == txt.normalize_for_dedup(weird)
| cc_net-main | tests/test_normalizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
sample = Path(__file__).parent / "data" / "sample.warc.txt"
with open(sample) as f:
documents = list(process_wet_file.parse_warc_file(f))
expected_urls = [
"http://sample_english.com",
"http://sample_chinese.zh",
"http://sample_russian.ru",
]
assert expected_urls == [d["url"] for d in documents]
expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
assert expected_domains == [d["source_domain"] for d in documents]
expected_date = [
"2019-03-18T00:00:00Z",
"2019-03-18T00:00:01Z",
"2019-03-18T00:00:02Z",
]
assert expected_date == [d["date_download"] for d in documents]
expected_title = [
"Famous Mark Twain Quotes",
"馬克·吐溫名言",
"Цитаты знаменитого Марка Твена",
]
assert expected_title == [d["title"] for d in documents]
expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
assert expected_quotes == documents[0]["raw_content"]
| cc_net-main | tests/test_parse_wet_file.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
return {k: v for (k, v) in flat_hash_set.items()}
need_getpy = pytest.mark.skipif(
FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
def run_case():
flat = as_dict(test_case(FlatHashSet))
naive = as_dict(test_case(NaiveHashSet))
assert flat == naive
return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
h = hash_set_cls()
h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
return h
@same_behavior
def test_add_dup(hash_set_cls):
h = hash_set_cls()
h.add(np.arange(10, dtype=h.dtype))
h.add(np.arange(5, dtype=h.dtype))
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
return h
@need_getpy
def test_gp_dict():
import getpy as gp # type: ignore
h = gp.Dict(HASH_TYPE, np.uint8)
h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
dump_path = tmp_path / dump.__name__
dump(h, dump_path)
h2 = type(h)()
load(h2, dump_path)
assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
h = hash_set_cls()
x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
h.add(x)
check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
if hasattr(hash_set_cls, "dump_gp"):
check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
| cc_net-main | tests/test_flat_hash_set.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""Remove requests.sessions.Session.request for all tests."""
monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
| cc_net-main | tests/conftest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
| cc_net-main | tests/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
n_shards = 4
n_docs = 20
shards = [
[dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
for s in range(n_shards)
]
shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
for shard, shard_file in zip(shards, shards_files):
jsonql.run_pipes(inputs=shard, output=shard_file)
regroup_file = tmp_path / "regroup.json.gz"
start = time.time()
regroup_fn(shards_files, regroup_file)
duration = time.time() - start
print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")
regrouped = list(jsonql.read_jsons(regroup_file))
assert [doc for shard in shards for doc in shard] == regrouped
readers = jsonql.get_block_readers(regroup_file, n_shards)
if not check_blocks_boundaries:
assert [doc for shard in shards for doc in shard] == [
doc for reader in readers for doc in jsonql.read_jsons(reader)
]
return
for shard, reader in zip(shards, readers):
block = [doc for doc in jsonql.read_jsons(reader)]
assert shard == block
def test_regroup(tmp_path):
# With regroup boundaries will be every 256Mb.
check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
# With fast regroup boundaries should match the shards.
check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
| cc_net-main | tests/test_regroup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
return small_bar.replace(" ", " " * 10).replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
with io.StringIO() as output:
# Convert data to a generator so that it's not interpreted as a file list.
jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
return output.getvalue()
def test_split(tmp_path: Path):
data = [
dict(text="Hello world", lang="en"),
dict(text="Boujour les amis", lang="fr"),
dict(text="Rock your boat", lang="en"),
]
with jsonql.split(tmp_path / "{lang}.json") as split:
list(split.map(data))
summary = split.summary()
assert "Found 2 splits." in summary
en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
assert [data[0], data[2]] == en_docs
fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
data = [dict(text="Hello world", lang="en")]
with pytest.raises(KeyError):
with jsonql.split(tmp_path / "{language}.json") as split:
list(split.map(data))
def test_histogram():
data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
hist, bins = jsonql.histogram(data, bins=8, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])
data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
hist, bins = jsonql.histogram(data, bins=10, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
stats = {
jsonql.ALL_DOCUMENTS: 100,
"title": 80,
"title.length": 80 * 50,
"text": 100,
"text.length": 100 * 1000,
"popularity": 8,
"popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
}
(title,) = jsonql.display_stats(stats, "title")
assert "title" in title
assert "saw 80 times" in title
assert "average length is" in title
assert "\n" not in title
(text,) = jsonql.display_stats(stats, "text")
assert "text" in text
assert "saw 100 times" in text
assert "average length is" in text
assert "\n" not in text
histogram = jsonql.display_stats(
stats, "popularity", bins=[x / 10 for x in range(1, 10)]
)
assert "popularity" in histogram[0]
assert "saw 8 times" in histogram[0]
assert "histogram is" in histogram[0]
assert "0.100 " + bar("████████") in histogram[1]
assert "0.400 " + bar("████ ") in histogram[2]
assert "0.800 " + bar("████ ") in histogram[3]
cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
assert "popularity" in cum_histogram[0]
assert "saw 8 times" in cum_histogram[0]
assert "histogram is" in cum_histogram[0]
assert "0.100 " + bar("████ ") in cum_histogram[1]
assert "0.400 " + bar("██████ ") in cum_histogram[2]
assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
def sample(pop):
return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)
data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
desc = get_output(
jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
)
assert "Field title saw 8 times (100.0%), average length is 5" in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity saw 8 times (100.0%), histogram is" in desc
assert "0.100 " + bar("████████") in desc
assert "0.400 " + bar("████ ") in desc
assert "0.800 " + bar("████ ") in desc
desc = get_output(jsonql.describe, data, columns=["text"])
assert "Field title saw 8 times (100.0%), average length is 5" not in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
def transformer(source, sep=" "):
for i, line in enumerate(source):
res = f"{i}{sep}{line}"
yield res
data = ["hello", "world"]
assert get_output(transformer, data) == "0 hello\n1 world\n"
assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
def _lines(filename: Path) -> Sequence[str]:
# jsonql.lines calls open_read
return list(jsonql.lines(filename))
tmp = tmp_path
with jsonql.open_write(tmp / "a.txt") as o:
print("a", file=o)
assert _lines(tmp / "a.txt") == ["a"]
jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
assert _lines(tmp / "a.txt") == ['{"a": 1}']
with jsonql.open_write(tmp / "a.gz") as o:
print("a", file=o)
assert _lines(tmp / "a.gz") == ["a"]
with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
print("a", file=o)
assert _lines(tmp / "a0.txt") == ["a"]
assert not (tmp / "a1.txt").is_file()
with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b0.txt") == ["0" * 2000]
assert _lines(tmp / "b1.txt") == ["1" * 2000]
with jsonql.open_write(tmp / "a_????.json") as o:
print("a", file=o)
assert _lines(tmp / "a_0000.json") == ["a"]
assert not (tmp / "a_0001.json").is_file()
assert _lines(tmp / "a_*.json") == ["a"]
with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b_00.json") == ["0" * 2000]
assert _lines(tmp / "b_01.json") == ["1" * 2000]
assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\nWorld\n"
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello _|_\nWorld\n"
# split is here ^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello _|_\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\U0001F40D\nWorld\n"
# split is here ^^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello🐍\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
file = tmp_path / "test.gz"
f = str(file)
# Each object is 10/11 bytes long. We have 2 of them by block.
content = ['{"xx": %d}' % i for i in range(80)]
with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
for line in content:
print(line, file=o)
jr = jsonql.JsonReader(strict=True)
expected = list(jr.map(content))
# read as one file
assert expected == list(jsonql.read_jsons(file))
# read first block
assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
# read last block
assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))
readers = jsonql.get_block_readers(file, 9)
read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
# 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
class MyTransformer(jsonql.Transformer):
def __enter__(self):
print("trans: started")
self.ready = True
return self
def __exit__(self, *args):
print("trans: done")
def do(self, x):
return (x, x)
def acc(values):
print("acc: started")
res = 0
for (x, _) in values:
res += int(x)
print("acc: done")
yield f"acc: result={res}"
t = MyTransformer()
data = (str(x) for x in range(10))
print("pipeline: started")
# Print to stdout.
jsonql.run_pipes(t, acc, file=data)
print("pipeline: done")
out = capsys.readouterr().out
assert (
"\n".join(
[
"pipeline: started",
"trans: started",
"acc: started",
"acc: done",
f"acc: result=45",
# Transformers are closed at the very end.
"trans: done",
"pipeline: done\n",
]
)
== out
)
def test_write_to_stdout(capsys):
lines = [str(x) for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
lines = [str(x) + "\n" for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "".join(lines)
def test_multiprocess(capsys):
mult = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
jsonql.run_pipes(mult, processes=2, file=(str(x) for x in range(10)))
out = set(capsys.readouterr().out.strip("\n").split("\n"))
assert set(f"2x = {2 * x}" for x in range(10)) == out
| cc_net-main | tests/test_jsonql.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
| cc_net-main | tests/test_minify.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
return [
v
for v in vars(module).values()
if type(v) is type
and issubclass(v, jsonql.Transformer)
and v != jsonql.Transformer
]
ALL_TRANSFORMERS = (
get_transformers(jsonql)
+ get_transformers(dedup)
+ get_transformers(perplexity)
+ get_transformers(tokenizer)
+ get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
assert issubclass(cls, jsonql.Transformer)
# accessing __init__ is generally an error, but here we do want to inspect
# the __init__ method.
code = inspect.getsource(cls.__init__) # type: ignore
code = code.replace(" ", "")
# Check that super().__init__ is called.
assert "super().__init__()" in code
def test_bad_transformers_are_caught():
class BadTransformer(jsonql.Transformer):
def __init__(self, arg):
# We aren't calling super /!\
self.arg = arg
with pytest.raises(AssertionError):
check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
model = Path("bin/lid.bin")
if not model.exists():
return
classifier = split_by_lang.Classifier(model, "text", "lang")
classifier.__enter__()
doc = dict(text="Hello world ! This is English btw.")
original_results = classifier(doc)
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
# Do it again with the unpickled object.
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
| cc_net-main | tests/test_transformer.py |
"""setup.py file for packaging amti."""
from setuptools import setup
with open('readme.md', 'r') as readme_file:
readme = readme_file.read()
setup(
name='amti',
version='0.0.2',
description="A Mechanical Turk Interface",
long_description=readme,
url='http://github.com/allenai/amti',
author='Nicholas Lourie',
author_email='[email protected]',
keywords='amti mechanical turk mturk crowdsourcing',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
license='Apache',
packages=['amti'],
install_requires=[
'Jinja2 >= 2.11.2',
'boto3 >= 1.12.39',
'click >= 7.1.1'
],
python_requires='>=3.6',
scripts=[
'scripts/amti'
],
zip_safe=False
)
| amti-master | setup.py |
"""A Mechanical Turk Interface"""
from amti import (
actions,
clis,
settings,
utils)
| amti-master | amti/__init__.py |
"""Constants and default settings that ship with ``amti``"""
# AWS client configuration
MAX_ATTEMPTS = 25
"""The number of retries to perform for requests."""
# Mechanical Turk environment values
ENVS = {
'live': {
'region_name': 'us-east-1',
'endpoint_url': 'https://mturk-requester.us-east-1.amazonaws.com',
'worker_url': 'https://www.mturk.com/',
'requester_url': 'https://requester.mturk.com/'
},
'sandbox': {
'region_name': 'us-east-1',
'endpoint_url': 'https://mturk-requester-sandbox.us-east-1.amazonaws.com',
'worker_url': 'https://workersandbox.mturk.com/',
'requester_url': 'https://requestersandbox.mturk.com/'
}
}
# MTURK overhead multiplier
TURK_OVERHEAD_FACTOR = 1.2
# batch directory structure and values
BATCH_README = """
This directory is a batch made by A Mechanical Turk Interface.
The files in this directory define a batch of HITs on Amazon Mechanical Turk
and may also contain that batch's results.
See the A Mechanical Turk Interface documentation for details.
"""
# a batch directory should have the following structure:
#
# batch-$BATCHID : root directory for the batch
# |- README : a text file for developers about the batch
# |- COMMIT : the commit SHA for the code that generated the batch
# |- BATCHID : a random UUID for the batch
# |- definition : files defining the HIT / HIT Type
# | |- NOTES : any notes for developers that go along with the task
# | |- question.xml.j2 : a jinja2 template for the HITs' question
# | |- hittypeproperties.json : properties for the HIT Type
# | |- hitproperties.json : properties for the HIT
# |- data.jsonl : data used to generate each HIT in the batch
# |- results : results from the HITs on the MTurk site
# | |- hit-$ID : results for a single HIT from the batch
# | | |- hit.jsonl : data about the HIT from the MTurk site
# | | |- assignments.jsonl : results from the assignments
# | |- ...
#
# The following data structure maps a logical name for a structure (such
# as 'readme' for the 'README' file) to a pair giving the path name for
# that structure and the substructure of that structure (an empty
# dictionary if it's a file, or a similar structure if it's a
# directory).
#
# All directory structure should be captured here, so that it has a
# single source of truth.
BATCH_DIR_STRUCTURE = ('batch-{batch_id}', {
'readme': ('README', {}),
'commit': ('COMMIT', {}),
'batchid': ('BATCHID', {}),
'definition': ('definition', {
'notes': ('NOTES', {}),
'question_template': ('question.xml.j2', {}),
'hittype_properties': ('hittypeproperties.json', {}),
'hit_properties': ('hitproperties.json', {})
}),
'data': ('data.jsonl', {}),
'results': ('results', {
'hit_dir': ('hit-{hit_id}', {
'hit': ('hit.jsonl', {}),
'assignments': ('assignments.jsonl', {})
})
})
})
# the name of the file used to denote a batch which has been uploaded to
# MTurk but is not yet complete. This file also stores information
# relevant to completing the batch (i.e., the open HIT IDs and HIT Type
# ID).
INCOMPLETE_FILE_NAME = '_INCOMPLETE'
# template for the directories that contain the XML answers for an
# assignment
XML_DIR_NAME_TEMPLATE = 'batch-{batch_id}-xml'
# template for the files that contain the XML answers for an assignment
XML_FILE_NAME_TEMPLATE = 'assignment-{assignment_id}.xml'
HITTYPE_PROPERTIES = {
'AutoApprovalDelayInSeconds': int,
'AssignmentDurationInSeconds': int,
'Reward': str,
'Title': str,
'Keywords': str,
'Description': str
}
HIT_PROPERTIES = {
'MaxAssignments': int,
'LifetimeInSeconds': int
}
QUALIFICATIONTYPE_DIR_STRUCTURE = \
('qualification-type-{qualificationtype_id}', {
'definition': ('definition', {
'properties': ('qualificationtypeproperties.json', {}),
'test': ('test.xml', {}),
'answerkey': ('answerkey.xml', {})
}),
'qualificationtype': (
'qualificationtype-{qualificationtype_id}.jsonl',
{}
)
})
QUALIFICATIONTYPE_PROPERTIES = {
'Name': str,
'Keywords': str,
'Description': str,
'QualificationTypeStatus': str,
'RetryDelayInSeconds': int,
'TestDurationInSeconds': int
}
| amti-master | amti/settings.py |
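# Illustrative sketch (not from amti): resolving the nested (name, children)
# pairs of BATCH_DIR_STRUCTURE into concrete names for one batch. The `resolve`
# helper is hypothetical; amti's own code may walk the structure differently.
from amti import settings


def resolve(structure, **fmt):
    name, children = structure
    return name.format(**fmt), {
        key: resolve(child, **fmt) for key, child in children.items()
    }


batch_root, contents = resolve(
    settings.BATCH_DIR_STRUCTURE, batch_id="1234", hit_id="abcd"
)
assert batch_root == "batch-1234"
assert contents["data"][0] == "data.jsonl"
assert contents["results"][1]["hit_dir"][0] == "hit-abcd"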
"""
amti.utils.logging
==================
Utilities for logging.
"""
import logging
import subprocess
import sys
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
def config_logging(log_level, file_path=None):
"""Configure python logging.
Parameters
----------
log_level : int
the level to log at. Rather than passing an integer directly,
you should import one of the log levels from the logging
module, with ``logging.INFO`` and ``logging.DEBUG` being the
most common.
file_path : Optional[str]
if a string is present, a file path at which the logging
output should be written, otherwise ``None`` in which case
logs will be written to STDOUT.
Returns
-------
None.
"""
# set logging on noisy dependencies
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
if file_path:
logging.basicConfig(
filename=file_path,
filemode='a',
format=LOG_FORMAT,
level=log_level)
else:
logging.basicConfig(
stream=sys.stdout,
format=LOG_FORMAT,
level=log_level)
def check_git_installed():
"""Return ``True`` if git is installed, otherwise return ``False``.
Returns
-------
bool
``True`` if git is installed, ``False`` otherwise.
"""
process = subprocess.run(
['git', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return process.returncode == 0
def get_current_commit():
"""Return the current commit of the current directory.
Return the current commit of the current directory. If the current
directory is not a git repo or if git is not installed then return
``None``.
Returns
-------
Optional[str]
the full SHA for the commit of the current directory, or
``None`` if the current directory is not a git repo or git is
not installed.
"""
    # check that git is installed before shelling out to it; otherwise
    # subprocess.run would raise FileNotFoundError instead of returning None
    if not check_git_installed():
        return None
    process = subprocess.run(
        ['git', 'rev-parse', '--verify', 'HEAD'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    if b'fatal: not a git repository' in process.stderr.lower():
        current_commit = None
    else:
        process.check_returncode()
        current_commit = process.stdout.decode('utf-8').rstrip()
    return current_commit
def is_repo_clean():
"""Return ``True`` if the repo has a clean working directory.
Return ``True`` if the repo has a clean working directory, ``False``
if it doesn't, and ``None`` if the current directory is not a git
repo or git is not installed.
Returns
-------
Optional[bool]
``True`` if the current working directory is a clean git repo,
``False`` if the current working directory is a dirty git repo,
and ``None`` if the current working directory is not a git repo
or if git is not installed.
"""
    # check that git is installed before shelling out to it; otherwise
    # subprocess.run would raise FileNotFoundError instead of returning None
    if not check_git_installed():
        return None
    process = subprocess.run(
        ['git', 'status', '--porcelain'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    if b'fatal: not a git repository' in process.stderr.lower():
        clean_repo = None
    else:
        process.check_returncode()
        clean_repo = process.stdout.decode('utf-8') == ''
    return clean_repo
def log_current_commit(logger):
"""Log the current commit and whether the repo is clean.
Log the current commit and whether the working directory for the
repo is clean.
Parameters
----------
logger : logging.Logger
the logger with which to log messages.
Returns
-------
None.
"""
current_commit = get_current_commit()
clean_repo = is_repo_clean()
if current_commit is None or clean_repo is None:
logger.info(
'Current directory does not appear to be a git repo.')
else:
status = 'clean' if clean_repo else 'dirty'
logger.info(f'Current commit: {current_commit}')
logger.info(f'Working directory status: {status}')
| amti-master | amti/utils/log.py |
"""Utilities for processing XML."""
from xml.dom import minidom
def get_node_text(node):
"""Return the text from a node that has only text as content.
Calling this function on a node with multiple children or a non-text
node child raises a ``ValueError``.
Parameters
----------
node : xml.dom.minidom.Node
The node to extract text from.
Returns
-------
str
The text from node.
"""
# handle the empty node case
if len(node.childNodes) == 0:
return ''
# check error conditions
if len(node.childNodes) > 1:
raise ValueError(
f'node ({node}) has multiple child nodes.')
if not isinstance(node.childNodes[0], minidom.Text):
raise ValueError(
f"node's ({node}) child node is not a Text node.")
# return the child node's text
return node.childNodes[0].wholeText
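# Example usage (illustrative only): extract the text of an element that
# contains a single Text node:
#
#     doc = minidom.parseString('<FreeText>hello</FreeText>')
#     get_node_text(doc.documentElement)  # returns 'hello'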
| amti-master | amti/utils/xml.py |
"""Utilities for ``amti``"""
from amti.utils import (
log,
mturk,
serialization,
validation,
workers,
xml)
| amti-master | amti/utils/__init__.py |
""" Module for worker management functions """
import boto3
import click
import csv
from typing import List
def chunk_list(items: List, n: int = 100) -> List:
"""Create generatator that yields n sized chunks of input list."""
for i in range(0, len(items), n):
yield items[i:i + n]
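# Example usage (illustrative only): callers such as amti.clis.notify break
# WorkerIds into chunks of 100 before calling MTurk:
#
#     for chunk in chunk_list(worker_ids, n=100):
#         ...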
def read_workerids_from_file(file: click.Path) -> List:
"""Read WorkerIds from file.
Read WorkerIds from CSV file. Return list of extracted WorkerIds.
Parameters
----------
file : click.Path
Path to CSV file of WorkerIds.
Returns
-------
list
List of extracted WorkerId strings.
"""
worker_ids = []
with open(file, 'r') as f:
reader = csv.reader(f)
# check if first row is header
first_row = next(reader)
if 'WorkerId' not in first_row:
worker_ids += first_row
for row in reader:
worker_ids += row
return worker_ids | amti-master | amti/utils/workers.py |
"""Utilities for interacting with MTurk."""
import logging
import os
import boto3
from botocore.config import Config
from typing import Optional
from amti import settings
logger = logging.getLogger(__name__)
def get_mturk_client(env):
"""Return a client for Mechanical Turk.
Return a client for Mechanical Turk that is configured for ``env``,
the environment in which we'd like to run.
Parameters
----------
env : str
The environment to get a client for. The value of ``env`` should
be one of the supported environments.
Returns
-------
MTurk.Client
A boto3 client for Mechanical Turk configured for ``env``.
"""
region_name = settings.ENVS[env]['region_name']
endpoint_url = settings.ENVS[env]['endpoint_url']
profile = os.getenv('AWS_PROFILE')
if profile is None:
logger.debug('Creating mturk session with default environment/profile values.')
session = boto3.session.Session()
else:
logger.debug(f'Creating mturk session with profile_name {profile}')
session = boto3.session.Session(profile_name=profile)
logger.debug(
f'Creating mturk client in region {region_name} with endpoint'
f' {endpoint_url}.')
config = Config(retries={'max_attempts': settings.MAX_ATTEMPTS})
client = session.client(
service_name='mturk',
region_name=region_name,
endpoint_url=endpoint_url,
config=config
)
return client
def get_qual_by_name(client: boto3.client, qual_name: str) -> Optional[str]:
    """Find a qualification type by name.
    Search MTurk qualification types for one named ``qual_name`` and return
    its QualificationTypeId if it appears in the first 100 results.
NOTE: Only searches quals created/owned by the user's MTurk account.
Parameters
----------
client : boto3.client
Boto3 MTurk client.
qual_name : str
Name of qualification to search for.
Returns
-------
    str or None
        If a qualification type named ``qual_name`` is found, its
        QualificationTypeId. Else, None.
"""
response = client.list_qualification_types(
Query=qual_name,
MustBeRequestable=True,
MustBeOwnedByCaller=True,
MaxResults=100
)
for qual in response['QualificationTypes']:
name = qual.pop('Name')
if name == qual_name:
return qual.pop('QualificationTypeId')
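# Example usage (illustrative only; 'my-qual' is a hypothetical name):
#
#     client = get_mturk_client('sandbox')
#     qual_id = get_qual_by_name(client, 'my-qual')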
| amti-master | amti/utils/mturk.py |
"""Utilities for data serialization"""
import datetime
def json_helper(obj):
"""Help ``json.dump`` serialize objects to JSON.
This function is written to be passed into ``json.dump`` as the
argument to the ``default`` parameter, so that we can serialize a
broader range of data types. Currently, this helper function can
serialize ``datetime.date`` and ``datetime.datetime`` objects. If
the object cannot be serialized, a ``TypeError`` is raised.
Parameters
----------
obj : object
an object to be serialized.
Returns
-------
str
a string representing ``obj`` serialized as JSON.
"""
if isinstance(obj, datetime.date):
serialized_obj = obj.isoformat()
elif isinstance(obj, datetime.datetime):
serialized_obj = obj.isoformat()
else:
raise TypeError(f'Failed to serialize {obj}.')
return serialized_obj
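# Example usage (illustrative only): pass json_helper as the ``default``
# argument to serialize dates as ISO 8601 strings:
#
#     json.dumps({'when': datetime.date(2020, 1, 1)}, default=json_helper)
#     # => '{"when": "2020-01-01"}'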
| amti-master | amti/utils/serialization.py |
"""Utilities for validating data"""
def validate_dict(dict_like, schema):
"""Return a list of validation error strings.
Compare ``dict_like`` to ``schema``, returning a list of strings
describing all the validation errors. In particular, validate that
``dict_like`` contains every key specified in ``schema`` as well as
that each key's value is an instance of the type specified by
``schema``. It is *not* considered a validation error if
``dict_like`` has keys which are not in ``schema``.
Parameters
----------
dict_like : Dict
a dictionary-like object to be validated against ``schema``.
schema : Dict[String, type]
a dictionary mapping strings to instances of ``type`` (i.e.,
classes and built-in types like ``str``).
Returns
-------
List[str]
a list of strings describing all validation errors found when
comparing ``dict_like`` to ``schema``.
Example
-------
The following example shows a dictionary validated against a
schema where the dictionary passes validation::
>>> schema = {'foo': int}
>>> data = {'foo': 3}
>>> validate_dict(dict_like=data, schema=schema)
[]
"""
validation_errors = []
for key, value_type in schema.items():
if key not in dict_like:
validation_errors.append(f'Key ({key}) was not found.')
elif not isinstance(dict_like[key], value_type):
validation_errors.append(
f'Key ({key}) is not of type {value_type}')
return validation_errors
| amti-master | amti/utils/validation.py |
"""Functions for saving HITs to storage"""
import json
import logging
import os
import shutil
import tempfile
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
def save_batch(
client,
batch_dir):
"""Save results from turkers working a batch to disk.
In order to save the results from a batch to disk, every HIT in the
batch must be in a reviewable state.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
batch_dir : str
the path to the batch's directory.
Returns
-------
None.
"""
# construct important paths
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
results_dir_name, results_dir_subpaths = batch_dir_subpaths['results']
hit_dir_name, hit_dir_subpaths = results_dir_subpaths['hit_dir']
hit_file_name, _ = hit_dir_subpaths['hit']
assignments_file_name, _ = hit_dir_subpaths['assignments']
incomplete_file_name = settings.INCOMPLETE_FILE_NAME
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
incomplete_file_path = os.path.join(
batch_dir, settings.INCOMPLETE_FILE_NAME)
results_dir = os.path.join(batch_dir, results_dir_name)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
if not os.path.isfile(incomplete_file_path):
raise ValueError(
f'No {incomplete_file_name} file was found in {batch_dir}.'
f' Please make sure that the directory is a batch that has'
f' HITs waiting for review.')
with open(incomplete_file_path) as incomplete_file:
hit_ids = json.load(incomplete_file)['hit_ids']
logger.info(f'Retrieving HIT data for batch {batch_id}.')
# construct the results in a temporary directory. Using a temporary
# directory allows us to eagerly construct the results directory
# without worrying about clean up in the event of an error
# condition.
with tempfile.TemporaryDirectory() as working_dir:
for hit_id in hit_ids:
hit_dir = os.path.join(
working_dir,
hit_dir_name.format(hit_id=hit_id))
os.mkdir(hit_dir)
hit_file_path = os.path.join(hit_dir, hit_file_name)
assignments_file_path = os.path.join(
hit_dir, assignments_file_name)
logger.debug(f'Fetching HIT (ID: {hit_id}).')
hit = client.get_hit(HITId=hit_id)
logger.debug(f'Writing HIT (ID: {hit_id}) to {hit_file_path}.')
with open(hit_file_path, 'w') as hit_file:
json.dump(
hit,
hit_file, default=utils.serialization.json_helper)
hit_status = hit['HIT']['HITStatus']
if hit_status != 'Reviewable':
raise ValueError(
f'HIT (ID: {hit_id}) has status "{hit_status}".'
f' In order to save a batch all HITs must have'
f' "Reviewable" status.')
logger.debug(f'Fetching assignments for HIT (ID: {hit_id}).')
assignments_paginator = client.get_paginator(
'list_assignments_for_hit')
assignments_pages = assignments_paginator.paginate(HITId=hit_id)
with open(assignments_file_path, 'w') as assignments_file:
for i, assignments_page in enumerate(assignments_pages):
logger.debug(f'Saving assignments. Page {i}.')
for assignment in assignments_page['Assignments']:
assignment_id = assignment['AssignmentId']
assignment_status = assignment['AssignmentStatus']
logger.debug(
f'Assignment (ID: {assignment_id}) Status:'
f' {assignment_status}.')
if assignment_status not in ['Approved', 'Rejected']:
raise ValueError(
f'Assignment (ID: {assignment_id}) has status'
f' "{assignment_status}". In order to save a'
f' batch all assignments must have "Approved"'
f' or "Rejected" status.')
assignments_file.write(
json.dumps(
assignment,
default=utils.serialization.json_helper
) + '\n')
logger.info(f'Finished saving HIT (ID: {hit_id}).')
shutil.copytree(working_dir, results_dir)
# remove the incomplete file since the batch is now complete
os.remove(incomplete_file_path)
logger.info(f'Saving batch {batch_id} is complete.')
| amti-master | amti/actions/save.py |
"""Functions for deleting HITs from MTurk"""
import json
import logging
import os
from amti import settings
logger = logging.getLogger(__name__)
def delete_hit(
client,
hit_id):
"""Delete the HIT corresponding to ``hit_id`` from MTurk.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
hit_id : str
the ID for the HIT to delete.
Returns
-------
None.
"""
logger.debug(f'Deleting HIT (ID: {hit_id}).')
client.delete_hit(HITId=hit_id)
logger.debug(f'HIT (ID: {hit_id}) deleted.')
def delete_batch(
client,
batch_dir):
"""Delete the batch of HITs represented by ``batch_dir`` from MTurk.
Only batches that have their results collected can be deleted.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
batch_dir : str
the path to the batch directory.
Returns
-------
None.
"""
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
results_dir_name, results_dir_subpaths = batch_dir_subpaths['results']
hit_dir_name, hit_dir_subpaths = results_dir_subpaths['hit_dir']
hit_file_name, _ = hit_dir_subpaths['hit']
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
results_dir = os.path.join(batch_dir, results_dir_name)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
logger.info(f'Deleting batch {batch_id}.')
for dir_path, dir_names, file_names in os.walk(results_dir):
if hit_file_name in file_names:
# read the HIT ID from the HIT's file
hit_file_path = os.path.join(dir_path, hit_file_name)
with open(hit_file_path, 'r') as hit_file:
hit_id = json.load(hit_file)['HIT']['HITId']
logger.debug(f'Deleting HIT (ID: {hit_id}).')
client.delete_hit(HITId=hit_id)
| amti-master | amti/actions/delete.py |
"""Functions for creating HITs"""
import json
import logging
import os
import shutil
import tempfile
import uuid
import jinja2
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
def initialize_batch_directory(
definition_dir,
data_path,
save_dir):
"""Create a directory on disk that represents a batch.
Create a directory on disk which brings together the basic elements
of a batch, i.e. files defining a HIT template and a JSON lines file
providing data with which to populate the template. This batch
directory can then be used to upload a batch of HITs in MTurk.
To simultaneously create the batch directory and upload the HITs to
MTurk, use the ``create_batch`` function.
Parameters
----------
definition_dir : str
the path to the definition directory.
data_path : str
the path to a JSONL file holding the data that should be used to
generate the HITs in the batch.
save_dir : str
the path to the directory in which to write the batch
directory.
Returns
-------
batch_dir : str
the path to the batch directory.
"""
# construct important paths
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
readme_file_name, _ = batch_dir_subpaths['readme']
commit_file_name, _ = batch_dir_subpaths['commit']
batchid_file_name, _ = batch_dir_subpaths['batchid']
data_file_name, _ = batch_dir_subpaths['data']
definition_dir_name, definition_dir_subpaths = \
batch_dir_subpaths['definition']
hittype_properties_path = os.path.join(
definition_dir,
definition_dir_subpaths['hittype_properties'][0])
hit_properties_path = os.path.join(
definition_dir,
definition_dir_subpaths['hit_properties'][0])
# create a UUID for the batch and the path to the batch dir
batch_id = str(uuid.uuid4())
batch_dir = os.path.join(
save_dir, batch_dir_name.format(batch_id=batch_id))
# use a temporary working directory to build up the batch directory
with tempfile.TemporaryDirectory() as working_dir:
# write the README file
readme_path = os.path.join(working_dir, readme_file_name)
with open(readme_path, 'w') as readme_file:
readme_file.write(settings.BATCH_README)
# write the COMMIT file
current_commit = utils.log.get_current_commit() or '<none>'
commit_path = os.path.join(working_dir, commit_file_name)
with open(commit_path, 'w') as commit_file:
commit_file.write(current_commit)
# write the BATCHID file
batchid_path = os.path.join(working_dir, batchid_file_name)
with open(batchid_path, 'w') as batchid_file:
batchid_file.write(batch_id)
# validate the definition data
with open(hittype_properties_path, 'r') as hittype_properties_file:
hittype_properties = json.load(hittype_properties_file)
hittype_validation_errors = utils.validation.validate_dict(
hittype_properties, settings.HITTYPE_PROPERTIES)
if hittype_validation_errors:
raise ValueError(
'HIT Type properties file ({hittype_properties_path})'
' had the following validation errors:'
'\n{validation_errors}'.format(
hittype_properties_path=hittype_properties_path,
validation_errors='\n'.join(hittype_validation_errors)))
with open(hit_properties_path, 'r') as hit_properties_file:
hit_properties = json.load(hit_properties_file)
hit_validation_errors = utils.validation.validate_dict(
hit_properties, settings.HIT_PROPERTIES)
if hit_validation_errors:
raise ValueError(
'HIT properties file ({hit_properties_path})'
' had the following validation errors:'
'\n{validation_errors}'.format(
hit_properties_path=hit_properties_path,
validation_errors='\n'.join(hit_validation_errors)))
# copy the definition data to the working directory
working_definition_dir = os.path.join(
working_dir, definition_dir_name)
os.mkdir(working_definition_dir)
for _, (file_name, _) in definition_dir_subpaths.items():
shutil.copyfile(
os.path.join(definition_dir, file_name),
os.path.join(working_definition_dir, file_name))
# validate the batch data (data.jsonl)
with open(data_path, 'r') as data_file:
for i, ln in enumerate(data_file):
try:
json.loads(ln.rstrip())
except ValueError:
raise ValueError(
f'Line {i+1} of {data_path} did not validate as'
f' JSON. Please make sure file is in JSON Lines'
f' format.')
# copy the batch data (data.jsonl) to the working directory
working_data_path = os.path.join(working_dir, data_file_name)
shutil.copyfile(data_path, working_data_path)
# copy the working_dir to the correct location
shutil.copytree(working_dir, batch_dir)
return batch_dir
def estimate_batch_cost(definition_dir, data_path):
"""
Estimate the cost of a batch.
This function takes ``definition_dir`` and ``data_path`` strings and then
estimates the cost of a batch created with them. Since this function takes
the defintion directory and data file, it can estimate the cost of a batch
either before or after it's created.
Parameters
----------
definition_dir : str
the path to the batch's definition directory.
data_path : str
the path to the batch's data file.
Returns
-------
float
        the estimated cost of uploading this batch to MTurk (in USD),
        including MTurk overhead.
"""
# construct all necessary paths
_, definition_dir_subpaths = settings.BATCH_DIR_STRUCTURE[1]['definition']
hittype_properties_file_name, _ = \
definition_dir_subpaths['hittype_properties']
hit_properties_file_name, _ = definition_dir_subpaths['hit_properties']
hittype_properties_path = os.path.join(
definition_dir,
hittype_properties_file_name)
hit_properties_path = os.path.join(
definition_dir,
hit_properties_file_name)
# Load relevant files
with open(hittype_properties_path, 'r') as hittype_properties_file:
hittype_properties = json.load(hittype_properties_file)
with open(hit_properties_path, 'r') as hit_properties_file:
hit_properties = json.load(hit_properties_file)
with open(data_path, "r") as data_file:
n_hits = sum(1 for ln in data_file if ln.strip() != '')
# Estimate cost
estimated_cost = float(hittype_properties["Reward"]) \
* int(hit_properties["MaxAssignments"]) \
* n_hits \
* settings.TURK_OVERHEAD_FACTOR
return estimated_cost
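# Worked example (illustrative only, assuming TURK_OVERHEAD_FACTOR is 1.2):
# a batch of 100 HITs with Reward "0.50" and MaxAssignments 3 would be
# estimated at 0.50 * 3 * 100 * 1.2 = 180.00 USD.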
def upload_batch(
client,
batch_dir):
"""Upload a batch to MTurk.
Upload a batch to MTurk by creating HITs for it. To create a batch,
use the ``initialize_batch_directory`` function.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
batch_dir : str
the path to the batch directory.
Returns
-------
hittype_id : str
the HIT Type ID for the HIT Type created for the batch.
hit_ids : List[str]
the HIT IDs for the newly created HITs.
"""
# construct all necessary paths
_, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
definition_dir_name, definition_dir_subpaths = \
batch_dir_subpaths['definition']
batchid_file_name, _ = batch_dir_subpaths['batchid']
question_template_file_name, _ = \
definition_dir_subpaths['question_template']
hittype_properties_file_name, _ = \
definition_dir_subpaths['hittype_properties']
hit_properties_file_name, _ = definition_dir_subpaths['hit_properties']
data_file_name, _ = batch_dir_subpaths['data']
batchid_path = os.path.join(
batch_dir, batchid_file_name)
question_template_path = os.path.join(
batch_dir,
definition_dir_name,
question_template_file_name)
hittype_properties_path = os.path.join(
batch_dir,
definition_dir_name,
hittype_properties_file_name)
hit_properties_path = os.path.join(
batch_dir,
definition_dir_name,
hit_properties_file_name)
data_path = os.path.join(
batch_dir, data_file_name)
# load relevant data
with open(batchid_path, 'r') as batchid_file:
batch_id = batchid_file.read().rstrip()
with open(question_template_path, 'r') as question_template_file:
question_template = jinja2.Template(question_template_file.read())
with open(hittype_properties_path, 'r') as hittype_properties_file:
hittype_properties = json.load(hittype_properties_file)
with open(hit_properties_path, 'r') as hit_properties_file:
hit_properties = json.load(hit_properties_file)
logger.debug(f'Creating HIT Type with properties: {hittype_properties}')
hittype_response = client.create_hit_type(**hittype_properties)
hittype_id = hittype_response['HITTypeId']
logger.debug(f'New HIT Type (ID: {hittype_id}) created.')
hit_ids = []
with open(data_path, 'r') as data_file:
for i, ln in enumerate(data_file):
if ln.strip() == '':
logger.warning(f'Line {i+1} in {data_path} is empty. Skipping.')
continue
else:
logger.debug(f'Creating HIT {i+1} using data: {ln}')
ln_data = json.loads(ln.rstrip())
question = question_template.render(**ln_data)
requester_annotation = f'batch={batch_id}'
hit_response = client.create_hit_with_hit_type(
HITTypeId=hittype_id,
Question=question,
RequesterAnnotation=requester_annotation,
**hit_properties)
hit_id = hit_response['HIT']['HITId']
logger.debug(f'Created New HIT (ID: {hit_id}).')
hit_ids.append(hit_id)
ids = {
'hittype_id': hittype_id,
'hit_ids': hit_ids
}
incomplete_file_path = os.path.join(
batch_dir, settings.INCOMPLETE_FILE_NAME)
with open(incomplete_file_path, 'w') as incomplete_file:
json.dump(ids, incomplete_file)
    logger.info(f'Created {len(hit_ids)} HITs.')
return ids
def create_batch(
client,
definition_dir,
data_path,
save_dir):
"""Create a batch, writing it to disk and uploading it to MTurk.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
definition_dir : str
the path to the definition directory.
data_path : str
the path to a JSONL file holding the data that should be used to
generate the HITs in the batch.
save_dir : str
the path to the directory in which to write the batch directory.
Returns
-------
str
the path to the batch directory.
"""
logger.info('Writing batch.')
batch_dir = initialize_batch_directory(
definition_dir=definition_dir,
data_path=data_path,
save_dir=save_dir)
logger.info('Uploading batch to MTurk.')
ids = upload_batch(client=client, batch_dir=batch_dir)
logger.info('HIT Creation Complete.')
return batch_dir
def create_qualificationtype(
client,
definition_dir,
save_dir):
"""Create a qualification type in MTurk.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
definition_dir : str
the path to the directory defining the qualification type.
save_dir : str
the path to the directory in which to write the qualification
type directory.
Returns
-------
None
"""
logger.info('Creating qualification type directory.')
qualificationtype_dir_name, qualificationtype_dir_subpaths = \
settings.QUALIFICATIONTYPE_DIR_STRUCTURE
definition_dir_name, definition_dir_subpaths = \
qualificationtype_dir_subpaths['definition']
properties_file_name, _ = definition_dir_subpaths['properties']
test_file_name, _ = definition_dir_subpaths['test']
answerkey_file_name, _ = definition_dir_subpaths['answerkey']
qualificationtype_file_name, _ = \
qualificationtype_dir_subpaths['qualificationtype']
# construct useful paths
properties_path = os.path.join(
definition_dir, properties_file_name)
test_path = os.path.join(
definition_dir, test_file_name)
answerkey_path = os.path.join(
definition_dir, answerkey_file_name)
# read in and validate the qualification type properties
with open(properties_path, 'r') as properties_file:
properties = json.load(properties_file)
properties_validation_errors = utils.validation.validate_dict(
properties, settings.QUALIFICATIONTYPE_PROPERTIES)
if properties_validation_errors:
raise ValueError(
'Qualification Type properties file ({properties_path}) had'
' the following validation errors:'
'\n{validation_errors}'.format(
properties_path=properties_path,
validation_errors='\n'.join(properties_validation_errors)))
# read in the qualification test
if os.path.exists(test_path):
with open(test_path, 'r') as test_file:
properties['Test'] = test_file.read()
# read in the answerkey
if os.path.exists(answerkey_path):
with open(answerkey_path, 'r') as answerkey_file:
properties['AnswerKey'] = answerkey_file.read()
with tempfile.TemporaryDirectory() as working_dir:
# construct output paths
working_definition_dir = os.path.join(
working_dir, definition_dir_name)
os.mkdir(working_definition_dir)
# copy the definition files over
for _, (file_name, _) in definition_dir_subpaths.items():
original_path = os.path.join(definition_dir, file_name)
new_path = os.path.join(working_definition_dir, file_name)
if (file_name not in [test_file_name, answerkey_file_name]
or os.path.exists(original_path)):
shutil.copyfile(original_path, new_path)
# create the qualification type
qualificationtype_response = client.create_qualification_type(**properties)
qualificationtype_id = \
qualificationtype_response['QualificationType']['QualificationTypeId']
qualificationtype_path = os.path.join(
working_dir, qualificationtype_file_name.format(
qualificationtype_id=qualificationtype_id))
with open(qualificationtype_path, 'w') as qualificationtype_file:
json.dump(
qualificationtype_response,
qualificationtype_file,
default=utils.serialization.json_helper)
shutil.copytree(
working_dir,
os.path.join(
save_dir,
qualificationtype_dir_name.format(
qualificationtype_id=qualificationtype_id)))
logger.info(
f'Created Qualification Type (ID: {qualificationtype_id}).')
| amti-master | amti/actions/create.py |
"""Actions for managing HITs and their results"""
from amti.actions import (
create,
delete,
expire,
extraction,
review,
save,
status)
| amti-master | amti/actions/__init__.py |
"""Functions for reviewing HITs"""
import json
import logging
import os
from xml.dom import minidom
import click
from amti import settings
logger = logging.getLogger(__name__)
def review_hit(
client,
hit_id,
approve_all):
"""Manually review the results from a HIT.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
hit_id : str
the ID for the HIT to approve or reject.
approve_all : bool
        a flag indicating whether to approve all submissions.
Returns
-------
List[Dict[str, str]]
A list of dictionaries representing the marked assignments. Each dictionary
has ``"assignment_id"``, ``"action"``, and ``"reason"`` keys.
"""
logger.debug(f'Fetching HIT (ID: {hit_id}).')
marked_assignments = []
hit = client.get_hit(HITId=hit_id)
hit_status = hit['HIT']['HITStatus']
if hit_status != 'Reviewable':
logger.info(
f'HIT (ID: {hit_id}) has status "{hit_status}" and is not'
f' "Reviewable". Skipping.')
return marked_assignments
logger.info(f'HIT {hit_id} Status: {hit_status}')
assignments_paginator = client.get_paginator(
'list_assignments_for_hit')
assignments_pages = assignments_paginator.paginate(HITId=hit_id)
for i, assignments_page in enumerate(assignments_pages):
logger.debug(f'Reviewing assignments. Page {i}.')
for assignment in assignments_page['Assignments']:
assignment_id = assignment['AssignmentId']
assignment_status = assignment['AssignmentStatus']
answers_xml = minidom.parseString(assignment['Answer'])
logger.info(
f'Assignment (ID: {assignment_id}) Status: {assignment_status}.')
if assignment_status != 'Submitted':
logger.debug(
f'Assignment (ID: {assignment_id}) does not have status'
f' "Submitted". Skipping.')
continue
            else:
if approve_all:
logger.info(f'Approving assignment (ID: {assignment_id}).')
client.approve_assignment(
AssignmentId=assignment_id,
OverrideRejection=False)
else:
logger.info(f'Reviewing assignment (ID: {assignment_id}).')
click.echo(
'HIT ID: {hit_id}'
'\nAssignment ID: {assignment_id}'
'\n'
'\nAnswers'
'\n======='
'\n{answers}'.format(
hit_id=hit_id,
assignment_id=assignment_id,
answers=answers_xml.toprettyxml()))
assignment_action = click.prompt(
'Would you like to (a)ccept, (r)eject, (s)kip or'
' (m)ark the assignment?',
type=click.Choice(
['a', 'r', 's', 'm'],
case_sensitive=False
)
)
if assignment_action == 'm':
logger.info('Marking assignment.')
assignment_action = click.prompt(
'Marking assignment. After, would you like to'
' (a)ccept, (r)eject, or (s)kip this assignment?',
type=click.Choice(
['a', 'r', 's'],
case_sensitive=False
)
)
mark_reason = click.prompt(
'(optional) Reason for marking the assignment?',
type=str
)
marked_assignments.append({
'assignment_id': assignment_id,
'action': assignment_action,
'reason': mark_reason
})
# Accept the assignment.
if assignment_action == 'a':
logger.info(
f'Approving assignment (ID: {assignment_id}).')
client.approve_assignment(
AssignmentId=assignment_id,
OverrideRejection=False)
# Reject the assignment.
elif assignment_action == 'r':
# Ask for confirmation before rejecting.
rejection_confirmed = click.confirm(
'Are you sure you want to reject this'
' assignment?'
)
# Reject the assignment.
if rejection_confirmed:
rejection_feedback = click.prompt(
'Feedback for the rejection',
type=str
)
client.reject_assignment(
AssignmentId=assignment_id,
RequesterFeedback=rejection_feedback)
logger.info(
f'Rejecting assignment'
f' (ID: {assignment_id}).')
# Abort rejecting the assignment.
else:
logger.info(
f'Did not reject assignment'
f' (ID: {assignment_id}). Skipping.')
# Skip the assignment.
else:
logger.info(
f'Skipping assignment (ID: {assignment_id}).')
return marked_assignments
def review_batch(
client,
batch_dir,
approve_all,
mark_file_path):
"""Manually review the HITs in a batch.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
batch_dir : str
the path to the directory for the batch.
approve_all : bool
        a flag indicating whether to approve all submissions.
mark_file_path : str
the path at which to save the assignment marks.
Returns
-------
None.
"""
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
incomplete_file_name = settings.INCOMPLETE_FILE_NAME
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
incomplete_file_path = os.path.join(
batch_dir, settings.INCOMPLETE_FILE_NAME)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
if not os.path.isfile(incomplete_file_path):
raise ValueError(
f'No {incomplete_file_name} file was found in {batch_dir}.'
f' Please make sure that the directory is a batch that has'
f' HITs waiting for review.')
with open(incomplete_file_path) as incomplete_file:
hit_ids = json.load(incomplete_file)['hit_ids']
logger.info(f'Reviewing batch {batch_id}.')
marked_assignments = []
for hit_id in hit_ids:
marked_assignments.extend(review_hit(
client=client,
hit_id=hit_id,
approve_all=approve_all))
logger.info(
'Finished reviewing assignments. Writing out marked'
' assignments.'
)
with click.open_file(mark_file_path, 'w') as mark_file:
mark_file.write(''.join(
json.dumps(marked_assignment) + '\n'
for marked_assignment in marked_assignments
))
logger.info(f'Review of batch {batch_id} is complete.')
| amti-master | amti/actions/review.py |
"""Functions for expiring all (unanswered) HITs"""
import json
import logging
import os
import datetime
from amti import settings
logger = logging.getLogger(__name__)
def expire_batch(
client,
batch_dir):
"""Expire all the (unanswered) HITs in the batch.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
batch_dir : str
the path to the directory for the batch.
Returns
-------
    Dict[str, str]
        A dictionary of the following form::
{
'batch_id': batch_id,
}
where ``batch_id`` is the UUID for the batch.
"""
# construct important paths
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
incomplete_file_name = settings.INCOMPLETE_FILE_NAME
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
incomplete_file_path = os.path.join(
batch_dir, settings.INCOMPLETE_FILE_NAME)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
if not os.path.isfile(incomplete_file_path):
raise ValueError(
f'No {incomplete_file_name} file was found in {batch_dir}.'
f' Please make sure that the directory is a batch that has'
f' open HITs to be expired.')
with open(incomplete_file_path) as incomplete_file:
hit_ids = json.load(incomplete_file)['hit_ids']
logger.info(f'Expiring HITs in batch {batch_id}.')
for hit_id in hit_ids:
hit = client.update_expiration_for_hit(
HITId=hit_id,
ExpireAt=datetime.datetime.now())
logger.info(f'All HITs in batch {batch_id} are now expired.')
return {
'batch_id': batch_id
}
| amti-master | amti/actions/expire.py |
"""Functions for retrieving status information about HITs"""
import collections
import json
import logging
import os
from amti import settings
logger = logging.getLogger(__name__)
def status_batch(
client,
batch_dir):
"""Retrieve the status for a batch of HITs.
Parameters
----------
client : MTurk.Client
a boto3 client for MTurk.
batch_dir : str
the path to the directory for the batch.
Returns
-------
    Dict[str, Any]
        A dictionary of the following form::
{
'batch_id': batch_id,
'hit_count': hit_count,
'hit_status_counts': hit_status_counts
}
where ``batch_id`` is the UUID for the batch, ``hit_count`` is a
count of all the HITs in the batch and ``hit_status_counts`` is
a dictionary counting the number of HITs with each of the
different statuses.
"""
# construct important paths
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
incomplete_file_name = settings.INCOMPLETE_FILE_NAME
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
incomplete_file_path = os.path.join(
batch_dir, settings.INCOMPLETE_FILE_NAME)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
if not os.path.isfile(incomplete_file_path):
raise ValueError(
f'No {incomplete_file_name} file was found in {batch_dir}.'
f' Please make sure that the directory is a batch that has'
f' HITs waiting for review.')
with open(incomplete_file_path) as incomplete_file:
hit_ids = json.load(incomplete_file)['hit_ids']
logger.info(f'Retrieving status for batch {batch_id}.')
hit_count = 0
hit_status_counts = collections.defaultdict(int)
for hit_id in hit_ids:
hit = client.get_hit(HITId=hit_id)
hit_count += 1
hit_status_counts[hit['HIT']['HITStatus']] += 1
logger.info(f'Retrieving status of batch {batch_id} is complete.')
return {
'batch_id': batch_id,
'hit_count': hit_count,
'hit_status_counts': hit_status_counts
}
| amti-master | amti/actions/status.py |
"""A function for extracting batch data into a tabular format."""
import csv
import html
import json
import logging
import os
from xml.dom import minidom
import click
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
TABULAR_SUPPORTED_FILE_FORMATS = [
'csv',
'json',
'jsonl'
]
"""File formats supported by the ``tabular`` function."""
# Make sure to update the doc strings for
# ``amti.actions.extraction.tabular.tabular`` and
# ``amti.clis.extraction.tabular.tabular`` if you edit this constant.
def tabular(
batch_dir,
output_path,
file_format):
"""Extract data in ``batch_dir`` to ``output_path`` as a table.
Extract batch data into a tabular format; however, some metadata may
    not be copied over. Each assignment will become its own row in the
table with a separate column for each form field, as well as much of
the assignment's metadata. The table will be written to
``output_path`` in the format specified by ``file_format``.
Parameters
----------
batch_dir : str
the path to the batch's directory.
output_path : str
the path where the output file should be saved.
file_format : str
the file format to use when writing the data. Must be one of the
supported file formats: csv (CSV), json (JSON), jsonl (JSON
Lines).
Returns
-------
None.
"""
if file_format not in TABULAR_SUPPORTED_FILE_FORMATS:
raise ValueError(
'file_format must be one of {formats}.'.format(
formats=', '.join(TABULAR_SUPPORTED_FILE_FORMATS)))
# construct important paths
_, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
results_dir_name, results_dir_subpaths = batch_dir_subpaths['results']
_, hit_dir_subpaths = results_dir_subpaths['hit_dir']
hit_file_name, _ = hit_dir_subpaths['hit']
assignments_file_name, _ = hit_dir_subpaths['assignments']
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
results_dir = os.path.join(batch_dir, results_dir_name)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
logger.info(
f'Beginning to extract batch {batch_id} to tabular format.')
rows = []
for dir_path, dir_names, file_names in os.walk(results_dir):
hit = None
assignments = None
for file_name in file_names:
if file_name == hit_file_name:
hit_path = os.path.join(dir_path, file_name)
with open(hit_path, 'r') as hit_file:
hit = json.load(hit_file)
elif file_name == assignments_file_name:
assignments_path = os.path.join(
dir_path, assignments_file_name)
with open(assignments_path, 'r') as assignments_file:
assignments = [
json.loads(ln.strip())
for ln in assignments_file
]
else:
logger.warning(
                    f'Unexpected file ({file_name}) located in'
f' {dir_path}')
if hit is None or assignments is None:
# if both ``hit`` and ``assignments`` are ``None``, then
# this directory is simply not one we're interested in;
# however, if exactly one is ``None`` then there's likely
# been an error.
if hit is None and assignments is not None:
logger.warning(
f'Found assignments but no HIT in {dir_path}.')
elif hit is not None and assignments is None:
logger.warning(
f'Found HIT but no assignments in {dir_path}.')
continue
for assignment in assignments:
row = {}
# add relevant metadata from the HIT
row['HITId'] = hit['HIT']['HITId']
row['AssignmentDurationInSeconds'] =\
hit['HIT']['AssignmentDurationInSeconds']
row['AutoApprovalDelayInSeconds'] =\
hit['HIT']['AutoApprovalDelayInSeconds']
row['Expiration'] = hit['HIT']['Expiration']
row['CreationTime'] = hit['HIT']['CreationTime']
# add relevant metadata from the assignment
row['AssignmentId'] = assignment['AssignmentId']
row['WorkerId'] = assignment['WorkerId']
row['AssignmentStatus'] = assignment['AssignmentStatus']
row['AutoApprovalTime'] = assignment['AutoApprovalTime']
row['AcceptTime'] = assignment['AcceptTime']
row['SubmitTime'] = assignment['SubmitTime']
row['ApprovalTime'] = assignment['ApprovalTime']
# parse the response and add it to the row
xml = minidom.parseString(assignment['Answer'])
for answer_tag in xml.getElementsByTagName('Answer'):
[question_identifier_tag] =\
answer_tag.getElementsByTagName(
'QuestionIdentifier')
question_identifier = utils.xml.get_node_text(
question_identifier_tag)
if question_identifier == 'doNotRedirect':
# some workers on Mechanical Turk modify their
# browser requests to send a 'doNotRedirect'
# field when posting results.
logger.warning(
f'Found a "doNotRedirect" field in'
f' {dir_path}. Dropping the field.')
continue
[free_text_tag] = answer_tag.getElementsByTagName(
'FreeText')
free_text = html.unescape(
utils.xml.get_node_text(free_text_tag))
row[question_identifier] = free_text
rows.append(row)
with click.open_file(output_path, 'w') as output_file:
if file_format == 'csv':
csv_writer = csv.DictWriter(
output_file,
fieldnames=rows[0].keys())
csv_writer.writeheader()
csv_writer.writerows(rows)
elif file_format == 'json':
json.dump(rows, output_file)
elif file_format == 'jsonl':
output_file.write('\n'.join([
json.dumps(row)
for row in rows
]))
else:
raise NotImplementedError(
f'Support for {file_format} has not been implemented.')
logger.info(
f'Finished extracting batch {batch_id} to tabular format.')
| amti-master | amti/actions/extraction/tabular.py |
"""A function for extracting data from a batch as XML"""
import json
import logging
import os
import shutil
import tempfile
from xml.dom import minidom
from amti import settings
logger = logging.getLogger(__name__)
def xml(
batch_dir,
output_dir):
"""Extract the XML from assignments in a batch.
Extract the XML from assignments in the batch represented by
``batch_dir`` and save the results to ``output_dir``. ``batch_dir``
should be a batch that has all its HITs and assignments reviewed and
downloaded. By default, the HITs and assignments are stored in a
JSON lines format, so this function extracts the answer XML from the
assignments into separate pretty-printed XML files for better
consumption by humans and other systems.
Parameters
----------
batch_dir : str
the path to the batch's directory.
output_dir : str
the path to the directory in which to save the output.
Returns
-------
None.
"""
# construct important paths
batch_dir_name, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
batchid_file_name, _ = batch_dir_subpaths['batchid']
results_dir_name, results_dir_subpaths = batch_dir_subpaths['results']
_, hit_dir_subpaths = results_dir_subpaths['hit_dir']
hit_file_name, _ = hit_dir_subpaths['hit']
assignments_file_name, _ = hit_dir_subpaths['assignments']
batchid_file_path = os.path.join(
batch_dir, batchid_file_name)
results_dir = os.path.join(batch_dir, results_dir_name)
with open(batchid_file_path) as batchid_file:
batch_id = batchid_file.read().strip()
logger.info(
f'Beginning to extract batch {batch_id} to XML.')
xml_dir_name = settings.XML_DIR_NAME_TEMPLATE.format(
batch_id=batch_id)
xml_dir_path = os.path.join(output_dir, xml_dir_name)
with tempfile.TemporaryDirectory() as working_dir:
for dir_path, dir_names, file_names in os.walk(results_dir):
if hit_file_name in file_names:
hit_dir_name = os.path.split(dir_path)[-1]
hit_dir = os.path.join(working_dir, hit_dir_name)
os.mkdir(hit_dir)
assignments_path = os.path.join(
dir_path, assignments_file_name)
with open(assignments_path, 'r') as assignments_file:
for ln in assignments_file:
assignment = json.loads(ln.rstrip())
assignment_id = assignment['AssignmentId']
xml = minidom.parseString(assignment['Answer'])
xml_file_name = settings.XML_FILE_NAME_TEMPLATE.format(
assignment_id=assignment_id)
xml_output_path = os.path.join(
hit_dir, xml_file_name)
with open(xml_output_path, 'w') as xml_output_file:
xml_output_file.write(
xml.toprettyxml(indent=' '))
shutil.copytree(working_dir, xml_dir_path)
logger.info(
f'Finished extracting batch {batch_id} to XML.')
| amti-master | amti/actions/extraction/xml.py |
"""Actions for extracting data from batches"""
from amti.actions.extraction import (
tabular,
xml)
| amti-master | amti/actions/extraction/__init__.py |
"""Command line interfaces for saving HITs"""
import logging
import click
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--live', '-l',
is_flag=True,
help='Save HITs from the live MTurk site.')
def save_batch(batch_dir, live):
"""Save results from the batch of HITs defined in BATCH_DIR.
Given a directory (BATCH_DIR) that represents a batch of HITs with
HITs out in MTurk, all of which have been reviewed and either
approved or rejected, collect the results and save them into
BATCH_DIR.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
actions.save.save_batch(
client=client,
batch_dir=batch_dir)
logger.info('Finished saving batch.')
| amti-master | amti/clis/save.py |
"""Command line interfaces for deleting HITs"""
import logging
import click
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--live', '-l',
is_flag=True,
help='Delete HITs from the live MTurk site.')
def delete_batch(batch_dir, live):
"""Delete the batch of HITs defined in BATCH_DIR.
Given a directory (BATCH_DIR) that represents a batch of HITs with
HITs, delete all the HITs from MTurk.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
actions.delete.delete_batch(client=client, batch_dir=batch_dir)
logger.info('Finished deleting batch.')
| amti-master | amti/clis/delete.py |
"""CLI for running a web server to preview HITs"""
import html
from http import server
import json
import logging
import os
import re
from xml.etree import ElementTree
import click
import jinja2
from amti import settings
logger = logging.getLogger(__name__)
class Server(server.HTTPServer):
"""A server for previewing HTMLQuestion HITs."""
def __init__(
self,
server_address,
request_handler_class,
template_path,
data_path,
):
super().__init__(server_address, request_handler_class)
self.template_path = template_path
self.data_path = data_path
with open(self.template_path, 'r') as template_file:
template_xml = ElementTree.fromstring(template_file.read())
html_content = template_xml.find(
'mturk:HTMLContent',
{'mturk': 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd'}
)
if html_content is None:
raise ValueError(
'The preview server can only preview HTMLQuestion HITs.')
html_template_string = html_content.text
self.template = jinja2.Template(html_template_string)
with open(self.data_path, 'r') as data_file:
self.data = [json.loads(ln) for ln in data_file]
if len(self.data) == 0:
raise ValueError('The data file cannot be empty.')
class Handler(server.BaseHTTPRequestHandler):
"""A request handler for rendering HTMLQuestion HITs."""
URL_PATTERN = re.compile(r'^/hits/(?P<hit_idx>\d+)/$')
def _render_error_page(self, status, error, message):
status = str(int(status))
error = html.escape(error)
message = html.escape(message)
return (
f'<!DOCTYPE html>\n'
f'<html>\n'
f' <head>\n'
f' <meta charset="utf-8">\n'
f' <title>HIT Preview Server Error: {status}</title>\n'
f' </head>\n'
f' <body>\n'
f' <h1>({status}) {error}</h1>\n'
f' <p>{message}</p>\n'
f' </body>\n'
f'</html>\n'
)
def _create_response(self, path):
        match = self.URL_PATTERN.match(path)
if match is None:
return (
self._render_error_page(
status=404,
error='Page not found: bad URL',
message='The URL should look like: /hits/${HIT_IDX}/.'),
404
)
hit_idx = int(match.groupdict()['hit_idx'])
template = self.server.template
data = self.server.data
# Check that the HIT index is in range.
if hit_idx < 0 or hit_idx >= len(data):
return (
self._render_error_page(
status=404,
error='Page not found: HIT index out of range',
message='The HIT index from the URL was out of range. The'
' index must be an integer between 0 and'
f' {len(data) - 1}, inclusive.'),
404
)
return template.render(**data[hit_idx]), 200
def do_GET(self):
body, status = self._create_response(path=self.path)
# Set the headers.
self.send_response(status)
self.send_header('Content-type', 'text/html')
self.end_headers()
# Write out the message body.
self.wfile.write(body.encode())
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'definition_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
'data_path',
type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.option(
'--port', type=int, default=8000,
help='The port on which to run the server. Defaults to 8000.')
def preview_batch(definition_dir, data_path, port):
"""Preview HTMLQuestion HITs based on DEFINITION_DIR and DATA_PATH.
Run a web server that previews the HITs defined by DEFINITION_DIR
and DATA_PATH. The HIT corresponding to each row of the data file
can be previewed by navigating to
http://127.0.0.1:$PORT/hits/${HIT_IDX}/ where $HIT_IDX is the row's
index (starting from zero).
"""
# Construct the template path.
_, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
_, definition_dir_subpaths = \
batch_dir_subpaths['definition']
template_file_name, _ = definition_dir_subpaths['question_template']
template_path = os.path.join(definition_dir, template_file_name)
# Instantiate the HIT preview server.
httpd = Server(
server_address=('127.0.0.1', port),
request_handler_class=Handler,
template_path=template_path,
data_path=data_path)
# Run the server.
logger.info(
f'\n'
f'\n Running the HIT preview server at http://127.0.0.1:{port}/.'
f'\n'
f'\n Navigate to http://127.0.0.1:{port}/hits/0/ to view the first HIT.'
f'\n')
httpd.serve_forever()
| amti-master | amti/clis/preview.py |
"""Command line interfaces for creating HITs"""
import logging
import click
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'definition_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
'data_path',
type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
'save_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--check-cost/--no-check-cost', '-c/-n',
default=True,
help="Whether to prompt for cost approval before uploading the batch.")
@click.option(
'--live', '-l',
is_flag=True,
help='Create HITs on the live MTurk site.')
def create_batch(definition_dir, data_path, save_dir, check_cost, live):
"""Create a batch of HITs using DEFINITION_DIR and DATA_PATH.
Create a batch of HITs using DEFINITION_DIR and DATA_PATH, and then
save that batch's defining data to SAVE_DIR.
DEFINITION_DIR should be a directory containing files with the
following names:
\b
- NOTES: a text file with any notes about the definition, for
example if a server must be run in order for the HIT to work
then you could document that here.
- question.xml.j2: a jinja2 template for an MTurk question xml
file
- hitproperties.json: a json file defining properties for the HITs
- hittypeproperties.json: a json file defining properties for the
HIT type that the newly created HITs will have.
For an example, see
    `here <https://github.com/allenai/amti/tree/master/examples/external-question/definition>`.
DATA_PATH should be a `JSON Lines <http://jsonlines.org/>`_
file. For an example, see
`here <https://github.com/allenai/amti/tree/master/examples/external-question/data.jsonl>`.
SAVE_DIR should be a path to a directory in which the batch's data
will be saved.
"""
env = 'live' if live else 'sandbox'
worker_url = settings.ENVS[env]['worker_url']
client = utils.mturk.get_mturk_client(env)
estimated_cost = actions.create.estimate_batch_cost(
definition_dir, data_path)
logger.info(
f'The estimated cost for this batch is ~{estimated_cost:.2f} USD.')
if check_cost:
cost_approved = click.confirm(
f'Approve cost (~{estimated_cost:.2f} USD) and upload?')
if not cost_approved:
logger.info(
'The batch cost was not approved. Aborting batch creation.')
return
batch_dir = actions.create.create_batch(
client=client,
definition_dir=definition_dir,
data_path=data_path,
save_dir=save_dir)
logger.info(
f'Finished creating batch directory: {batch_dir}.'
f'\n'
f'\n Preview HITs: {worker_url}'
f'\n')
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'definition_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
'save_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--live', '-l',
is_flag=True,
help='Create the Qualification Type on the live MTurk site.')
def create_qualificationtype(definition_dir, save_dir, live):
"""Create a Qualification Type using DEFINITION_DIR.
Create a Qualification Type using DEFINITION_DIR, and then save
that Qualification Type's defining data to SAVE_DIR.
DEFINITION_DIR should be a directory containing files with the
following names:
\b
- qualificationtypeproperties.json: the defining properties for
the qualication type.
- test.xml: the XML defining the qualification test for the
qualification type.
- answerkey.xml: the answer key for the qualification test.
For an example, see
`here <https://github.com/allenai/amti/tree/master/examples/qualification-type/definition>`.
SAVE_DIR should be a path to a directory in which the qualification
type's data will be saved.
"""
env = 'live' if live else 'sandbox'
requester_url = settings.ENVS[env]['requester_url']
client = utils.mturk.get_mturk_client(env)
actions.create.create_qualificationtype(
client=client,
definition_dir=definition_dir,
save_dir=save_dir)
logger.info(
f'Finished.'
f'\n'
f'\n View the Qualification Type: {requester_url}'
f'\n')
| amti-master | amti/clis/create.py |
"""Command line interfaces for unblocking Workers"""
import logging
import click
import csv
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'ids',
type=str,
nargs=-1)
@click.option(
'--file', '-f',
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="Path to file of WorkerIds to block.")
@click.option(
'--reason', '-r',
default="Worker was blocked by mistake.",
    help='Reason for unblocking worker(s) (workers do not see).')
@click.option(
'--live', '-l',
is_flag=True,
    help='Unblock workers on the live MTurk site.')
def unblock_workers(file, ids, reason, live):
"""Unblock workers by WorkerId.
    Given a space-separated list of WorkerIds (IDS) and/or a path to
a CSV of WorkerIds, remove a block for each worker listed.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
worker_ids = list(ids)
# read ids from file (adds to provided ids)
if file is not None:
worker_ids += utils.workers.read_workerids_from_file(file)
# remove blocks
for worker_id in worker_ids:
logger.info(f'Removing block for worker {worker_id}.')
response = client.delete_worker_block(
WorkerId=worker_id,
Reason=reason
)
logger.info('Finished removing blocks.') | amti-master | amti/clis/unblock.py |
"""CLIs for managing HITs and their results"""
from amti.clis import (
associate,
block,
create,
delete,
disassociate,
expire,
extract,
extraction,
notify,
review,
save,
status,
unblock,
preview
)
| amti-master | amti/clis/__init__.py |
"""Command line interfaces for blocking Workers"""
import logging
import click
import csv
import json
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'ids',
type=str,
nargs=-1)
@click.option(
'--file', '-f',
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="Path to file of WorkerIds to block.")
@click.option(
'--subject', '-s',
help='Subject line for message.')
@click.option(
'--message', '-m',
help='Text content of message.')
@click.option(
'--message_file',
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="Path to file of WorkerIds to block.")
@click.option(
'--live', '-l',
is_flag=True,
    help='Send the notifications on the live MTurk site.')
def notify_workers(file, ids, subject, message, message_file, live):
"""Send notification message to workers.
    Given a space-separated list of WorkerIds (IDS), or a path to
a CSV of WorkerIds, send a notification to each worker.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
worker_ids = list(ids)
# read ids from file (adds to provided ids)
if file is not None:
worker_ids += utils.workers.read_workerids_from_file(file)
    # create message (file values override the Subject and Message args)
    message = {'Subject': subject, 'MessageText': message}
    if message_file is not None:
        with open(message_file, 'r') as f:
            message = json.loads(f.read())
if any(val is None for val in message.values()):
raise ValueError('Missing Message or Subject value.')
# break ids into chunks of 100, notify workers in each chunk
for chunk_ids in utils.workers.chunk_list(worker_ids):
logger.info(f"Sending notification to workers: {chunk_ids}")
response = client.notify_workers(
Subject=message['Subject'],
MessageText=message['MessageText'],
WorkerIds=chunk_ids
)
for failure in response['NotifyWorkersFailureStatuses']:
logger.warn(f"Notification failed for {failure.pop('WorkerId')}: {failure}")
logger.info('Finished sending notifications.') | amti-master | amti/clis/notify.py |
"""Command line interfaces for reviewing HITs"""
import logging
import boto3
import click
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--live', '-l',
is_flag=True,
help='Review HITs on the live MTurk site.')
@click.option(
'--approve-all', '-a',
is_flag=True,
help="Approve all submissions.")
@click.option(
'--mark-file-path', '-m',
type=click.Path(
exists=False,
file_okay=True,
dir_okay=False,
writable=True,
allow_dash=True),
default='-',
help='The path to the file in which to save the marked assignments.'
' Defaults to STDOUT.')
def review_batch(batch_dir, live, approve_all, mark_file_path):
"""Review the batch of HITs defined in BATCH_DIR.
Given a directory (BATCH_DIR) that represents a batch of HITs with
HITs out in MTurk and waiting for review, manually review each of
the ready HITs at the command line.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
actions.review.review_batch(
client=client,
batch_dir=batch_dir,
approve_all=approve_all,
mark_file_path=mark_file_path)
logger.info('Finished reviewing batch.')
| amti-master | amti/clis/review.py |
"""Command line interfaces for expiring the HITs"""
import logging
import click
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--live', '-l',
is_flag=True,
help='Expire the HITs from the live MTurk site.')
def expire_batch(batch_dir, live):
"""Expire all the HITs defined in BATCH_DIR.
Given a directory (BATCH_DIR) that represents a batch of HITs in MTurk,
expire all the unanswered HITs.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
batch_expire = actions.expire.expire_batch(
client=client,
batch_dir=batch_dir)
batch_id = batch_expire['batch_id']
logger.info('Finished expiring batch.')
| amti-master | amti/clis/expire.py |
"""Command line interfaces for blocking Workers"""
import logging
import click
import csv
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'ids',
type=str,
nargs=-1)
@click.option(
'--file', '-f',
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="Path to file of WorkerIds to block.")
@click.option(
'--reason', '-r',
default="Worker has produced low quality work, or is suspected of producing spam.",
help='Reason for blocking worker(s) (workers do not see).')
@click.option(
'--live', '-l',
is_flag=True,
help='Block workers on the live MTurk site.')
def block_workers(file, ids, reason, live):
"""Block workers by WorkerId.
Given a space-separated list of WorkerIds (IDS) and/or a path to
a CSV of WorkerIds, create a block for each worker in the list.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
worker_ids = list(ids)
# read ids from file (adds to provided ids)
if file is not None:
worker_ids += utils.workers.read_workerids_from_file(file)
# create blocks
for worker_id in worker_ids:
logger.info(f'Creating block for worker {worker_id}.')
response = client.create_worker_block(
WorkerId=worker_id,
Reason=reason
)
logger.info('Finished creating blocks.') | amti-master | amti/clis/block.py |
"""Command line interface for disassociating quals with Workers"""
import logging
import click
import csv
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'ids',
type=str,
nargs=-1)
@click.option(
'--file', '-f',
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="Path to file of WorkerIds.")
@click.option(
'--qual', '-q',
help='QualificationId (or name, if --name flag passed).')
@click.option(
'--name', '-n',
is_flag=True,
help='Search for qual by name instead of id.')
@click.option(
'--reason', '-r',
help='Reason for disassociation (worker sees this).')
@click.option(
'--live', '-l',
is_flag=True,
help='Disassociate the qualification on the live MTurk site.')
def disassociate_qual(file, ids, qual, name, reason, live):
"""Disassociate workers with a qualification.
Given a space-separated list of WorkerIds (IDS) and/or a path to
a CSV of WorkerIds, disassociate each worker with a qualification
(QUAL).
NOTE: Only works with quals that both exist and are owned by the user.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
worker_ids = list(ids)
# read ids from file (adds to provided ids)
if file is not None:
worker_ids += utils.workers.read_workerids_from_file(file)
# set qual_id
qual_id = qual
if name:
qual_id = utils.mturk.get_qual_by_name(client, qual)
if qual_id is None:
raise ValueError(f"No qual with name {qual} found.")
args = {"QualificationTypeId": qual_id}
if reason is not None:
args['Reason'] = reason
# associate qual with workers
for worker_id in worker_ids:
logger.info(f'Disassociating qualification {qual_id} from worker {worker_id}.')
response = client.disassociate_qualification_from_worker(
WorkerId=worker_id,
**args
)
logger.info('Finished disassociating quals.') | amti-master | amti/clis/disassociate.py |
"""Command line interfaces for extracting data from a batch"""
import logging
import click
# import extraction directly to avoid a circular import
from amti.clis import extraction
logger = logging.getLogger(__name__)
@click.group(
context_settings={
'help_option_names': ['--help', '-h']
})
def extract():
"""Extract data from a batch to various formats.
See the subcommands for extracting batch data into a specific
format.
"""
pass
subcommands = [
# tabular
extraction.tabular.tabular,
# xml
extraction.xml.xml
]
for subcommand in subcommands:
extract.add_command(subcommand)
if __name__ == '__main__':
extract()
| amti-master | amti/clis/extract.py |
"""Command line interface for associating quals with Workers"""
import logging
import click
import csv
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'ids',
type=str,
nargs=-1)
@click.option(
'--file', '-f',
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="Path to file of WorkerIds.")
@click.option(
'--qual', '-q',
help='QualificationId (or name, if --name flag passed).')
@click.option(
'--integer-value', '-i',
type=int,
help='Integer value for qual.')
@click.option(
'--name', '-n',
is_flag=True,
help='Search for qual by name instead of id.')
@click.option(
'--notify',
is_flag=True,
help='Send notification message about qual.')
@click.option(
'--live', '-l',
is_flag=True,
help='Associate the qualification on the live MTurk site.')
def associate_qual(file, ids, qual, name, integer_value, notify, live):
"""Associate workers with a qualification.
Given a space-separated list of WorkerIds (IDS) and/or a path to
a CSV of WorkerIds, associate each worker with a qualification (QUAL).
NOTE: Only works with quals that both exist and are owned by the user.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
worker_ids = list(ids)
# read ids from file (adds to provided ids)
if file is not None:
worker_ids += utils.workers.read_workerids_from_file(file)
# set qual_id
qual_id = qual
if name:
qual_id = utils.mturk.get_qual_by_name(client, qual)
if qual_id is None:
raise ValueError(f"No qual with name {qual} found.")
args = {
"QualificationTypeId": qual_id,
"SendNotification": notify
}
if integer_value is not None:
args['IntegerValue'] = integer_value
# associate qual with workers
for worker_id in worker_ids:
logger.info(f'Associating qualification {qual_id} with worker {worker_id}.')
response = client.associate_qualification_with_worker(
WorkerId=worker_id,
**args
)
logger.info('Finished associating quals.') | amti-master | amti/clis/associate.py |
"""Command line interfaces for viewing the statuses of HITs"""
import logging
import click
from amti import actions
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.option(
'--live', '-l',
is_flag=True,
help='View the status of HITs from the live MTurk site.')
def status_batch(batch_dir, live):
"""View the status of the batch of HITs defined in BATCH_DIR.
Given a directory (BATCH_DIR) that represents a batch of HITs with
HITs out in MTurk and waiting for review or that have been reviewed,
see the status of HITs in that batch.
"""
env = 'live' if live else 'sandbox'
client = utils.mturk.get_mturk_client(env)
batch_status = actions.status.status_batch(
client=client,
batch_dir=batch_dir)
batch_id = batch_status['batch_id']
hit_count = str(batch_status['hit_count'])
hit_status_counts = '\n '.join(
f'{status}: {count}'
for status, count in batch_status['hit_status_counts'].items())
click.echo(
f'\n'
f' Batch Status:'
f'\n ============='
f'\n Batch ID: {batch_id}'
f'\n HIT Count: {hit_count}'
f'\n HIT Status Counts:'
f'\n {hit_status_counts}'
f'\n')
logger.info('Finished retrieving batch status.')
| amti-master | amti/clis/status.py |
"""Command line interface for extracting tabular data from a batch"""
import logging
import click
from amti import actions
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
'output_path',
type=click.Path(exists=False, file_okay=True, dir_okay=False))
@click.option(
'--format', '-f', 'file_format',
type=click.Choice(
actions.extraction.tabular.TABULAR_SUPPORTED_FILE_FORMATS),
default='jsonl',
help='The desired output file format.')
def tabular(batch_dir, output_path, file_format):
"""Extract data from BATCH_DIR to OUTPUT_PATH in a tabular format.
Given a directory (BATCH_DIR) that represents a batch of HITs that
have been reviewed and saved, extract the data to OUTPUT_PATH in a
tabular format. Every row of the table is an assignment: each form
field gets its own column, and additional columns hold the
assignment metadata. By default, the table will be saved as JSON
Lines, but other formats may be specified with the --format option.
"""
actions.extraction.tabular.tabular(
batch_dir=batch_dir,
output_path=output_path,
file_format=file_format)
| amti-master | amti/clis/extraction/tabular.py |
"""Command line interface for extracting XML from a batch"""
import logging
import click
from amti import actions
logger = logging.getLogger(__name__)
@click.command(
context_settings={
'help_option_names': ['--help', '-h']
})
@click.argument(
'batch_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
'output_dir',
type=click.Path(exists=True, file_okay=False, dir_okay=True))
def xml(batch_dir, output_dir):
"""Extract XML data from assignments in BATCH_DIR to OUTPUT_DIR.
Given a directory (BATCH_DIR) that represents a batch of HITs that
have been reviewed and saved, extract the XML data from the
assignments to OUTPUT_DIR.
"""
actions.extraction.xml.xml(
batch_dir=batch_dir,
output_dir=output_dir)
| amti-master | amti/clis/extraction/xml.py |
"""Commands for extracting batch data into various formats"""
from amti.clis.extraction import (
tabular,
xml)
| amti-master | amti/clis/extraction/__init__.py |
from setuptools import setup
version = {}
with open('beakerstore/version.py') as v:
exec(v.read(), version)
# TODO: license
setup(
name='beakerstore',
version=version['__version__'],
description='Local store for Beaker datasets and files.',
packages=['beakerstore'],
url='https://github.com/allenai/beakerstore',
author='Chloe Anastasiades',
author_email='[email protected]',
python_requires='>=3',
install_requires=[
'requests >= 2.22.0'
]
)
| beakerstore-master | setup.py |
import atexit
import logging
import os
import platform
import requests
import tempfile
import time
from enum import Enum
from pathlib import Path
from random import shuffle
from typing import Optional, Set, Union
from . import __version__
# Logging stuff
logging.basicConfig(format='%(levelname)s %(name)s %(asctime)s %(message)s', level=logging.INFO)
_logger = logging.getLogger('beakerstore')
# Cleanup stuff
# mostly taken from https://github.com/allenai/datastore
_cleanup_files: Set[Path] = set()
def _cleanup_cleanup_files() -> None:
global _cleanup_files
for p in _cleanup_files:
assert p.is_absolute() # safety
p.unlink()
_cleanup_files = set()
atexit.register(_cleanup_cleanup_files)
def remember_cleanup(p: Union[Path, str]) -> None:
global _cleanup_files
if type(p) is str:
p = Path(p)
_cleanup_files.add(p.absolute())
def forget_cleanup(p: Union[Path, str]) -> None:
global _cleanup_files
if type(p) is str:
p = Path(p)
_cleanup_files.remove(p)
class BeakerOptions(Enum):
INTERNAL = 'internal'
PUBLIC = 'public'
class Cache:
def __init__(self, custom_path: Optional[Path] = None):
self.base_path = Cache._get_default_cache_base() if custom_path is None else custom_path
if custom_path is not None:
_logger.info(f'Cache at custom path: {custom_path}')
@staticmethod
def _get_default_cache_base() -> Path:
# close to https://github.com/allenai/datastore
cache_loc_base = os.environ.get('AI2_BEAKERSTORE_DIR')
if cache_loc_base is not None:
cache_loc_base = Path(cache_loc_base)
else:
home = Path.home()
if platform.system() == 'Darwin':
cache_loc_base = home / 'Library' / 'Caches' / 'beakerstore'
elif platform.system() == 'Linux':
cache_loc_base = home / '.ai2' / 'beakerstore'
else:
raise ValueError(f'Unsupported platform: {platform.system()}')
if not cache_loc_base.is_dir():
cache_loc_base.mkdir(parents=True)
return cache_loc_base
def tmp_loc(self) -> Path:
return self.base_path / 'tmp'
def cache_base(self) -> Path:
return self.base_path
class BeakerItem:
"""Corresponds to a dataset or a file within a dataset on Beaker."""
def __init__(self,
is_dir: bool,
beaker_info: dict,
file_name: Optional[str],
which_beaker: BeakerOptions = BeakerOptions.PUBLIC):
# Note: this corresponds to whether the user wants a whole dataset, or just a file
# within a dataset. This is different from the Beaker single-file dataset idea.
self.is_dir = is_dir
self.beaker_info = beaker_info
self.file_name = None if file_name == '' else file_name
assert self.is_dir == (self.file_name is None)
self.which_beaker = which_beaker
def dataset_id(self) -> str:
return self.beaker_info['id']
def make_directory_manifest_request(self, sess: requests.Session, cursor: Optional[str]) -> requests.Response:
url = f'{self._get_file_heap_base_url()}/manifest'
params = {'cursor': cursor} if cursor is not None else None
return self._make_fileheap_request(url, sess, params=params)
def make_one_file_download_request(self, name: str, sess: requests.Session) -> requests.Response:
# name == self.file_name corresponds to the case where the user specified a file
# within a dataset. is_dir is False, and this BeakerItem corresponds to one instance
# of FileCacheEntry.
# is_dir corresponds to the case where the user specified only a dataset.
# In addition to being attached to a DirCacheEntry instance, the corresponding BeakerItem
# will also be attached all the FileCacheEntry instances that correspond to the files
# within the dataset.
assert (name == self.file_name) or self.is_dir, \
'Was expecting a directory BeakerItem or the same filename.'
url = f'{self._get_file_heap_base_url()}/files/{name}'
return self._make_fileheap_request(url, sess, stream=True)
def _get_file_heap_base_url(self) -> str:
return f'{self._get_storage_address()}/datasets/{self._get_storage_id()}'
def _make_fileheap_request(self,
url: str,
sess: requests.Session,
params: Optional[dict] = None,
stream: bool = False) -> requests.Response:
headers = {
'Authorization': f'Bearer {self._get_storage_token()}'
}
return sess.get(url, headers=headers, params=params, stream=stream)
def _get_storage_address(self) -> str:
return self.beaker_info['storage']['address']
def _get_storage_id(self) -> str:
return self.beaker_info['storage']['id']
def _get_storage_token(self) -> str:
return self.beaker_info['storage']['token']
class CacheEntry:
"""Corresponds to an entry in the cache, already in existence or to be created."""
def __init__(self, beaker_item: BeakerItem):
self.beaker_item = beaker_item
self.cache = None
def which_beaker(self) -> BeakerOptions:
return self.beaker_item.which_beaker
def dataset_id(self) -> str:
return self.beaker_item.dataset_id()
def get_cache(self) -> Cache:
if self.cache is None:
self.cache = Cache()
return self.cache
def set_cache(self, cache: Cache):
self.cache = cache
def is_dir(self) -> bool:
"""Does this entry correspond to a dataset?
The alternative is that this entry corresponds to a file within a dataset.
Note: this does not correspond to whether this represents a single-file Beaker dataset
or not.
"""
raise NotImplementedError()
def cache_path(self) -> Path:
"""The path to this entry in the cache."""
return self.cache_base() / self.cache_key()
def cache_base(self) -> Path:
"""The path to the root of the cache."""
return self.get_cache().cache_base()
def cache_key(self) -> str:
"""The key of this entry in the cache.
This is basically the relative path to this entry from the root of the cache.
"""
return f'{self.which_beaker().value}/{self.item_name()}'
def item_name(self) -> str:
"""The name of the item corresponding to this entry.
This is the dataset id, and the filename (if any). It corresponds to the cache key of this
entry without the 'public' or 'internal' part.
"""
raise NotImplementedError()
def download(self, sess: requests.Session) -> bool:
"""Download the Beaker dataset or file to the corresponding cache location."""
raise NotImplementedError()
def _prepare_parent_dir(self):
parent_dir = self.cache_path().parent
if not parent_dir.is_dir():
parent_dir.mkdir(parents=True)
@staticmethod
def from_beaker_item(beaker_item: BeakerItem):
if beaker_item.is_dir:
return DirCacheEntry(beaker_item)
else:
return FileCacheEntry(beaker_item, beaker_item.file_name)
class DirCacheEntry(CacheEntry):
def __init__(self, beaker_item: BeakerItem):
super().__init__(beaker_item)
def is_dir(self):
return True
def item_name(self) -> str:
return self.dataset_id()
def download(self, sess: requests.Session) -> None:
done = False
cursor: Optional[str] = None
while not done:
dir_res = self.beaker_item.make_directory_manifest_request(sess, cursor)
if not dir_res.status_code == 200:
raise BeakerstoreError(
(f'Unable to get the requested directory manifest. '
f'Response code: {dir_res.status_code}.'))
json_dir_res = dir_res.json()
file_names = list(map(lambda f: f['path'], json_dir_res['files']))
items_with_details = list(map(lambda file_name: self.dir_to_file(file_name), file_names))
# not strictly necessary, but it means that if two of these are running at the same
# time on the same dataset, they may work on downloading different files (instead of
# going through the files in the same order, one downloading the current file, the
# other waiting on the lock)
shuffle(items_with_details)
for item in items_with_details:
item.download(sess)
cursor_key = 'cursor'
if cursor_key in json_dir_res:
cursor = json_dir_res[cursor_key]
else:
cursor = None
done = cursor is None
def dir_to_file(self, file_name: str):
"""Makes an instance of FileCacheEntry from this instance of DirCacheEntry.
The resulting entry corresponds to the file with filename 'file_name' within the dataset
that corresponds to this current entry.
"""
entry = FileCacheEntry(self.beaker_item, file_name)
entry.set_cache(self.cache)
return entry
class FileCacheEntry(CacheEntry):
def __init__(self, beaker_item: BeakerItem, file_name: str):
super().__init__(beaker_item)
self.file_name = file_name
def is_dir(self):
return False
def item_name(self) -> str:
return f'{self.dataset_id()}/{self.file_name}'
def already_exists(self) -> bool:
"""Does this entry already exist in the cache?"""
return self.cache_path().is_file()
def download(self, sess: requests.Session) -> None:
if self.already_exists():
return
_logger.info(f'Getting {self.file_name} of dataset {self.dataset_id()}.')
res = self.beaker_item.make_one_file_download_request(self.file_name, sess)
if not res.status_code == 200:
raise BeakerstoreError((f'Unable to get the requested file. '
f'Response code: {res.status_code}.'))
self._prepare_parent_dir()
lock = CacheLock(self)
lock.get_lock()
# If something else downloaded this in the meantime, no need to do it once more.
if not self.already_exists():
self._write_file_from_response(res)
lock.release_lock()
def _write_file_from_response(self, res: requests.Response) -> None:
def write_chunks(write_to, chunk_size=1024 * 256):
for chunk in res.iter_content(chunk_size=chunk_size):
if chunk:
write_to.write(chunk)
if self.already_exists():
return
# prepare the tmp location if necessary
tmp_dir = self.get_cache().tmp_loc()
if not tmp_dir.is_dir():
tmp_dir.mkdir(parents=True)
# make the file
tmp_file = tempfile.NamedTemporaryFile(
dir=tmp_dir,
prefix=self._tmp_file_prefix(),
suffix='.tmp',
delete=False)
remember_cleanup(tmp_file.name)
# write to the file
write_chunks(tmp_file)
tmp_file.close()
# put the file in the right place
Path(tmp_file.name).rename(self.cache_path())
forget_cleanup(tmp_file.name)
def _tmp_file_prefix(self) -> str:
no_subdirs = self.cache_key().replace('/', '%')
return f'ai2-beakerstore-{no_subdirs}'
class CacheLock:
def __init__(self, cache_entry: CacheEntry):
self.lock_loc = Path(f'{cache_entry.cache_path()}.lock')
self.item_name = cache_entry.item_name()
def _wait_for_lock(self) -> None:
if not self.lock_loc.is_file():
return
start = time.time()
_logger.info(f'Waiting for the lock for {self.item_name}.')
last_message_time = start
while self.lock_loc.is_file():
if time.time() - last_message_time > 60:
now = time.time()
_logger.info(f'Still waiting for the lock. It\'s been {now - start} seconds.')
last_message_time = now
time.sleep(1)
def get_lock(self) -> None:
self._wait_for_lock()
try:
self.lock_loc.touch(mode=0o644, exist_ok=False)
remember_cleanup(self.lock_loc)
except FileExistsError:
self.get_lock()
def release_lock(self) -> None:
self.lock_loc.unlink()
forget_cleanup(self.lock_loc)
class ItemRequest:
def __init__(self,
given_path: str,
which_beaker: BeakerOptions = BeakerOptions.PUBLIC):
self.given_path = given_path
self.which_beaker = which_beaker
def to_beaker_item(self, sess: requests.Session) -> BeakerItem:
try:
# this expects a format like: ds_abc
return self._get_dataset_details_helper(self._path_to_dataset_id(), sess)
except DatasetNotFoundError as e_id:
if len(self.given_path.split('/')) > 1:
try:
# we could have been given a dataset in this format: chloea/my-dataset.
# Try that.
return self._get_dataset_details_helper(self._path_to_author_and_name(), sess)
except DatasetNotFoundError as e_author_and_name:
raise DatasetNotFoundError(f'{e_id}\n{e_author_and_name}')
else:
raise e_id
def _path_to_dataset_id(self) -> str:
return self.given_path.split('/')[0]
def _path_to_author_and_name(self) -> str:
split = self.given_path.split('/')
assert len(split) > 1
return '/'.join(split[:2])
def _get_dataset_details_helper(self,
possible_identifier: str,
sess: requests.Session) -> BeakerItem:
try:
res = sess.get(self._get_beaker_dataset_url(possible_identifier), timeout=10)
except requests.exceptions.ConnectTimeout as e:
if self.which_beaker == BeakerOptions.INTERNAL:
raise BeakerstoreError(('Unable to connect to internal Beaker. '
'Are you set up to talk to it?'))
else:
raise e
if res.status_code == 200:
beaker_info = res.json()
# add 1 to get past the '/'
file_path = self.given_path[len(possible_identifier) + 1:]
is_dir = file_path == ''
return BeakerItem(is_dir, beaker_info, file_path, which_beaker=self.which_beaker)
elif res.status_code == 404:
raise DatasetNotFoundError(f'Could not find dataset \'{possible_identifier}\'.')
else:
raise BeakerstoreError(
(f'Encountered a problem when trying to find dataset \'{possible_identifier}\'. '
f'Response status code: {res.status_code}.'))
def _get_beaker_dataset_url(self, identifier: str) -> str:
beaker_prefix = 'allenai.' if self.which_beaker == BeakerOptions.INTERNAL else ''
beaker_base = f'https://{beaker_prefix}beaker.org'
return f'{beaker_base}/api/v3/datasets/{identifier}'
# some exceptions
class DatasetNotFoundError(Exception):
pass
class BeakerstoreError(Exception):
pass
# the central function
def path(given_path: str,
which_beaker: BeakerOptions = BeakerOptions.PUBLIC,
cache: Optional[Cache] = None) -> Path:
sess = requests.Session()
sess.headers.update({'User-Agent': f'beakerstore/{__version__}'})
item_request = ItemRequest(given_path, which_beaker)
beaker_item = item_request.to_beaker_item(sess)
cache_entry = CacheEntry.from_beaker_item(beaker_item)
if cache is not None:
cache_entry.set_cache(cache)
cache_entry.download(sess)
return cache_entry.cache_path()
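# A rough usage sketch (requires network access to Beaker; 'examples/moby' is a real
# public dataset exercised in the tests below, while 'ds_abc123/words.txt' is only a
# placeholder id/filename):
#
# >>> import beakerstore
# >>> beakerstore.path('examples/moby') # whole dataset, by author/name
# >>> beakerstore.path('ds_abc123/words.txt') # one file, by dataset id
#
# Both calls download into the local cache (see Cache above) and return the
# pathlib.Path of the cached directory or file.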
| beakerstore-master | beakerstore/beakerstore.py |
__version__ = '1.1.0'
| beakerstore-master | beakerstore/version.py |
from .version import __version__
from .beakerstore import BeakerOptions, path
| beakerstore-master | beakerstore/__init__.py |
import pytest
# Most of this is a variation of the example for skipping slow tests in the pytest documentation
# here: https://docs.pytest.org/en/latest/example/simple.html
def pytest_addoption(parser):
parser.addoption(
'--run-internal', action='store_true', default=False, help='run internal Beaker tests'
)
def pytest_configure(config):
config.addinivalue_line('markers', 'internal: mark test as a test using internal Beaker')
def pytest_collection_modifyitems(config, items):
if config.getoption('--run-internal'):
return
skip_internal = pytest.mark.skip(reason='need --run-internal option to run')
for item in items:
if 'internal' in item.keywords:
item.add_marker(skip_internal)
| beakerstore-master | beakerstore/tests/conftest.py |
beakerstore-master | beakerstore/tests/__init__.py |
|
import pytest
import os
import unittest
from pathlib import Path
from .. import path, BeakerOptions
from ..beakerstore import Cache, DatasetNotFoundError
@pytest.fixture(scope='class')
def cache_test_dir(request, tmpdir_factory):
request.cls.tmpdir = tmpdir_factory.mktemp('cache_test_dir')
class TestBeakerstore(unittest.TestCase):
def single_directory_helper(self, directory, which_beaker, test_cache, exp_num_files):
dirpath = path(directory, which_beaker=which_beaker, cache=test_cache)
self.assertTrue(dirpath.exists())
self.assertEqual(len(os.listdir(str(dirpath))), exp_num_files)
def single_file_helper(self, filename, which_beaker, test_cache):
self.assertTrue(path(filename, which_beaker=which_beaker, cache=test_cache).is_file())
def nonexistent_helper(self, name, which_beaker, test_cache):
with self.assertRaises(DatasetNotFoundError):
path(name, which_beaker=which_beaker, cache=test_cache)
@pytest.mark.usefixtures('cache_test_dir')
class TestBeakerStorePublic(TestBeakerstore):
def test_directories(self):
test_cache = Cache(Path(str(self.tmpdir)))
# moby (around 1.2MiB)
# by id
self.single_directory_helper('ds_1hz9k6sgxi0a', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache, exp_num_files=1)
# by author and name
self.single_directory_helper('examples/moby', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache, exp_num_files=1)
# a larger dataset (around 182.5MiB)
self.single_directory_helper('ds_jq5fmdtd46zf', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache, exp_num_files=1)
# a dataset with enough files to have multiple pages of paths
self.single_directory_helper('ds_w9w18mnetswx', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache, exp_num_files=2222)
def test_file(self):
test_cache = Cache(Path(str(self.tmpdir)))
# word count (around 18B)
# by id
self.single_file_helper('ds_7ap4sx03m63n/words.txt', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache)
# by author and name
self.single_file_helper('lane/wordcount/words.txt', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache)
def test_nonexistent(self):
test_cache = Cache(Path(str(self.tmpdir)))
self.nonexistent_helper('nonexistent', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache)
self.nonexistent_helper('chloea/nonexistent', which_beaker=BeakerOptions.PUBLIC,
test_cache=test_cache)
@pytest.mark.usefixtures('cache_test_dir')
class TestBeakerStoreInternal(TestBeakerstore):
@pytest.mark.internal
def test_directories(self):
test_cache = Cache(Path(str(self.tmpdir)))
# by id
self.single_directory_helper('ds_bv0874n13di9',
which_beaker=BeakerOptions.INTERNAL, test_cache=test_cache,
exp_num_files=1)
# by author and name
self.single_directory_helper('chloea/chloea-dedupe-test-ds-2',
which_beaker=BeakerOptions.INTERNAL, test_cache=test_cache,
exp_num_files=1)
# larger file
self.single_directory_helper('ds_0tssnayto2v2',
which_beaker=BeakerOptions.INTERNAL, test_cache=test_cache,
exp_num_files=1)
@pytest.mark.internal
def test_file(self):
test_cache = Cache(Path(str(self.tmpdir)))
# by id
self.single_file_helper('ds_bv0874n13di9/somewords.txt',
which_beaker=BeakerOptions.INTERNAL, test_cache=test_cache)
# by author and name
self.single_file_helper('chloea/chloea-dedupe-test-ds-2/somewords.txt',
which_beaker=BeakerOptions.INTERNAL, test_cache=test_cache)
@pytest.mark.internal
def test_nonexistent(self):
test_cache = Cache(Path(str(self.tmpdir)))
self.nonexistent_helper('nonexistent', which_beaker=BeakerOptions.INTERNAL,
test_cache=test_cache)
self.nonexistent_helper('chloea/nonexistent', which_beaker=BeakerOptions.INTERNAL,
test_cache=test_cache)
| beakerstore-master | beakerstore/tests/beakerstore_test.py |
"""
a script to train a word2vec model
"""
import argparse
from aristomini.common.wordtwovec import tokenizer
from gensim.models import Word2Vec
parser = argparse.ArgumentParser(description="train a word2vec model")
parser.add_argument("sentences_filename",
metavar="sentences-filename",
help="file with the sentences to train on, one per line")
parser.add_argument("output_model",
metavar="output-model",
help="where to save the model file")
parser.add_argument("--size", type=int, default=50, help="dimension of the embedding")
parser.add_argument("--window", type=int, default=5, help="size of the word window")
parser.add_argument("--min-count", type=int, default=5,
help="only include words appearing at least this many times")
class TokenizingIterator:
"""a wrapper class for reading one line at a time from a huge file and tokenizing it"""
def __init__(self, filename: str) -> None:
self.filename = filename
def __iter__(self):
with open(self.filename, 'r') as f:
for line in f:
yield tokenizer(line)
if __name__ == "__main__":
args = parser.parse_args()
sentences = TokenizingIterator(args.sentences_filename)
model = Word2Vec(sentences, min_count=args.min_count, window=args.window, size=args.size)
model.save(args.output_model)
| aristo-mini-master | scripts/train_word2vec_model.py |
#!/usr/bin/env python
from __future__ import print_function
import json
import re
import sys
try:
# for Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# fallback to Python 2
from urllib2 import urlopen
# Reads text input on STDIN, splits it into sentences, gathers groups of
# sentences and issues bulk insert commands to an Elasticsearch server running
# on localhost.
ELASTIC_SEARCH_URL = 'http://localhost:9200/knowledge/sentence/_bulk'
DOCUMENTS_PER_POST = 100000
def sentences_to_elasticsearch_payload(sentences):
payload_lines = []
for sentence in sentences:
payload_lines += [ json.dumps({"index":{}}) ]
payload_lines += [ json.dumps({"body":sentence}) ]
return "\n".join(payload_lines)
def bulk_load_elasticsearch(sentences, url):
payload = sentences_to_elasticsearch_payload(sentences)
response_file = urlopen(url, payload.encode('utf8'))
response = json.loads(response_file.read().decode('utf8'))
print("Posted {0} documents ({1} bytes) to {2}. Elasticsearch errors = {3}".format(
len(sentences),
len(payload),
url,
str(response.get("errors", "?"))
))
def lines_to_sentences(line_stream):
for line in line_stream:
line_cleaned = re.sub(r'([^a-zA-Z0-9\.])', " ", line).strip()
for sentence in line_cleaned.split("."):
if len(sentence) == 0:
continue
yield sentence
def groups(stream, size):
batch = []
for item in stream:
batch += [item]
if len(batch) % size == 0:
yield batch
batch = []
if len(batch) > 0:
yield batch
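# For example, groups(range(5), 2) yields [0, 1], then [2, 3], then the leftover [4].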
def main():
sentence_count = 0
for sentences in groups(lines_to_sentences(sys.stdin), DOCUMENTS_PER_POST):
bulk_load_elasticsearch(sentences, ELASTIC_SEARCH_URL)
sentence_count += len(sentences)
print("Documents posted:", sentence_count)
if __name__ == "__main__":
main()
| aristo-mini-master | scripts/insert_text_to_elasticsearch.py |
aristo-mini-master | aristomini/__init__.py |
|
"""
This is a skeleton for building your own solver.
You just need to find and fix the two TODOs in this file.
"""
from typing import List
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
# TODO: replace with your solver name.
MY_SOLVER_NAME = "replace me with the name of your solver"
class MySolver(SolverBase):
def solver_info(self) -> str:
return MY_SOLVER_NAME
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
# pylint: disable=unused-variable
stem = question.stem
choices = question.choices
confidences: List[float] = []
for i, choice in enumerate(question.choices):
label = choice.label
text = choice.text
# TODO: compute confidence
confidence = 0
confidences.append(confidence)
return MultipleChoiceAnswer(
[ChoiceConfidence(choice, confidence)
for choice, confidence in zip(choices, confidences)]
)
if __name__ == "__main__":
solver = MySolver() # pylint: disable=invalid-name
solver.run()
| aristo-mini-master | aristomini/solvers/mysolver.py |
"""text search solver"""
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Q, Search
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
class TextSearchSolver(SolverBase):
"""
runs a query against elasticsearch and sums up the top `topn` scores. by default,
`topn` is 1, which means it just returns the top score, which is the same behavior as the
scala solver
"""
def __init__(self, # pylint: disable=too-many-arguments
host: str="localhost",
port: int=9200,
index_name: str="knowledge",
field_name: str="body",
topn: int=1) -> None:
self.client = Elasticsearch([host], port=port)
print(self.client)
self.fields = [field_name]
self.index_name = index_name
self.topn = topn
def score(self, question_stem: str, choice_text: str) -> float:
"""get the score from elasticsearch"""
query_text = "{0} {1}".format(question_stem, choice_text)
query = Q('multi_match', query=query_text, fields=self.fields)
search = Search(using=self.client, index=self.index_name).query(query)[:self.topn]
response = search.execute()
return sum(hit.meta.score for hit in response)
def solver_info(self) -> str:
return "text_search"
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
return MultipleChoiceAnswer(
[ChoiceConfidence(choice, self.score(question.stem, choice.text))
for choice in question.choices]
)
if __name__ == "__main__":
solver = TextSearchSolver() # pylint: disable=invalid-name
solver.run()
| aristo-mini-master | aristomini/solvers/textsearch.py |
"""random guesser solver"""
import random
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
class RandomGuesserSolver(SolverBase):
"""guesses at random"""
def solver_info(self) -> str:
return "random_guesser"
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
return MultipleChoiceAnswer(
[ChoiceConfidence(choice, random.random()) for choice in question.choices]
)
if __name__ == "__main__":
solver = RandomGuesserSolver() # pylint: disable=invalid-name
solver.run()
| aristo-mini-master | aristomini/solvers/randomguesser.py |
aristo-mini-master | aristomini/solvers/__init__.py |
|
"""pmi solver"""
from collections import defaultdict
import math
from typing import NamedTuple, Iterable, List, Dict, Set, Sequence
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
from aristomini.common.nlp import all_grams, distinct_grams, get_sentences, stemmer
SOME_SMALL_DEFAULT_VALUE = 0.01
def pmi(prob_x: float, prob_y: float, prob_xy: float) -> float:
"""calculate pmi using probabilities, return 0.0 if p_xy is 0"""
if prob_xy > 0.0:
return math.log(prob_xy / prob_x / prob_y)
else:
return 0.0
SentenceConcepts = NamedTuple("SentenceConcepts",
[("grams", List[str]), ("concepts", Set[int])])
class AcmeScorer:
"""keeps all the indexes in memory"""
def __init__(self,
concept_sentences: Iterable[SentenceConcepts],
concepts: Sequence[str],
min_sentences: int=100,
stem: bool=True) -> None:
"""init"""
self.concepts = concepts
self.num_sentences = 0
self.gram_counts = defaultdict(int) # type: Dict[str, int]
self.concept_counts = defaultdict(int) # type: Dict[int, int]
# gram -> concept -> count
self.gram_concept_counts = defaultdict(lambda: defaultdict(int)) # type: Dict[str, Dict[int, int]]
for i, sc in enumerate(concept_sentences):
self.num_sentences += 1
for concept in sc.concepts:
self.concept_counts[concept] += 1
#grams = distinct_grams(all_grams(sc.sentence, stem))
grams = sc.grams
for gram in grams:
self.gram_counts[gram] += 1
for concept in sc.concepts:
self.gram_concept_counts[gram][concept] += 1
if i % 1000 == 0:
print("indexing {}".format(i))
self.concept_counts = {
concept: count
for concept, count in self.concept_counts.items()
if count >= min_sentences
}
def pmi(self, gram: str, concept: int) -> float:
"""pmi"""
n_gram = self.gram_counts[gram]
n_concept = self.concept_counts[concept]
n_gram_concept = self.gram_concept_counts[gram][concept]
p_gram = n_gram / self.num_sentences
p_gram_given_concept = n_gram_concept / n_concept
if p_gram_given_concept > 0:
return math.log(p_gram_given_concept / p_gram)
else:
return SOME_SMALL_DEFAULT_VALUE
def average_pmi(self, grams: Sequence[str], concept: int) -> float:
"""average pmi"""
pmis = [self.pmi(gram, concept) for gram in grams]
if not pmis:
return 0.0
else:
return sum(pmis) / len(pmis)
def score(self,
question: MultipleChoiceQuestion,
stem: bool=True,
topn: int=1) -> MultipleChoiceAnswer:
"""calculate the scores"""
question_text = question.stem
verbose = "Max is doing" in question_text
results = []
for choice in question.choices:
grams = distinct_grams(all_grams(question_text + " " + choice.text))
# get the relevant concepts
concepts = {
concept
for gram in grams
for concept in self.gram_concept_counts[gram]
if concept in self.concept_counts
}
concept_scores = sorted([
(concept, self.average_pmi(grams, concept))
for concept in concepts
], key=lambda pair: pair[-1], reverse=True)
results.append(sum(s for _, s in concept_scores[:topn]))
if verbose:
print(choice.text)
for concept, score in concept_scores:
print(self.concepts[concept], score)
print()
return MultipleChoiceAnswer(
[ChoiceConfidence(choice, pmi)
for choice, pmi in zip(question.choices, results)]
)
class AcmeSolver(SolverBase):
"""uses pmi"""
def __init__(self,
sentences: Iterable[str],
concepts: Iterable[str]) -> None:
print("creating scorer")
cs = []
concepts = list({
stemmer(concept) for concept in concepts
})
concept_index = { concept: i for i, concept in enumerate(concepts)}
for sentence in sentences:
cis = set() # type: Set[int]
grams = distinct_grams(all_grams(sentence))
for gram in grams:
ci = concept_index.get(gram)
if ci is not None:
cis.add(ci)
cs.append(SentenceConcepts(grams, cis))
if len(cs) % 1000 == 0:
print(len(cs))
self.scorer = AcmeScorer(cs, concepts)
print("loaded scorer")
def solver_info(self) -> str:
return "acme"
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
return self.scorer.score(question)
if __name__ == "__main__":
print("loading concepts")
concepts = get_sentences("concepts.txt")
print("loaded {} concepts".format(len(concepts)))
print("loading sentences")
sentences = get_sentences('aristo-mini-corpus-v1.txt')
print("loaded {} sentences".format(len(sentences)))
solver = AcmeSolver(sentences, concepts)
solver.run()
| aristo-mini-master | aristomini/solvers/acme.py |
"""pmi solver"""
from collections import defaultdict
import math
from typing import NamedTuple, Iterable, List, Dict
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
from aristomini.common.nlp import all_grams, distinct_grams, get_sentences
def pmi(prob_x: float, prob_y: float, prob_xy: float) -> float:
"""calculate pmi using probabilities, return 0.0 if p_xy is 0"""
if prob_xy > 0.0:
return math.log(prob_xy / prob_x / prob_y)
else:
return 0.0
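# Illustrative numbers (not from the corpus): if two grams each appear in 10% of
# sentences and co-occur in 5% of them, pmi(0.1, 0.1, 0.05) == log(0.05 / (0.1 * 0.1))
# == log(5) ~= 1.609, while independent grams (prob_xy == prob_x * prob_y) score 0.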
class PmiScorer:
"""keeps all the indexes in memory"""
def __init__(self,
sentences: Iterable[str],
stem: bool=True,
within: int=10) -> None:
"""init"""
self.num_sentences = 0
self.index = defaultdict(lambda: defaultdict(list)) # type: Dict[str, Dict[int, List[int]]]
for i, sentence in enumerate(sentences):
self.num_sentences += 1
for gram in all_grams(sentence, stem):
self.index[gram.gram][i].append(gram.position)
if i % 1000 == 0:
print("indexing {}".format(i))
print("grams {}".format(len(self.index)))
self.within = within
def num_occurrences(self, gram: str) -> int:
"""get the number of sentences in which a gram appears"""
return len(self.index.get(gram, {}))
def num_co_occurrences(self, gram1: str, gram2: str) -> int:
"""get the number of sentences in which two grams occur closely"""
index1 = self.index.get(gram1, {})
index2 = self.index.get(gram2, {})
return len([
sentence
for sentence, locs in index1.items()
if any(abs(loc1-loc2) <= self.within
for loc1 in locs
for loc2 in index2.get(sentence, []))
])
def count_pmi(self, num_x: int, num_y: int, num_xy: int) -> float:
"""calculate pmi using counts"""
return pmi(num_x / self.num_sentences,
num_y / self.num_sentences,
num_xy / self.num_sentences)
def score(self, question: MultipleChoiceQuestion, stem: bool=True) -> MultipleChoiceAnswer:
"""calculate the scores"""
question_text = question.stem
verbose = "Max is doing" in question_text
# compute these outside the for loop so they only get computed once
q_grams = distinct_grams(all_grams(question_text, stem))
q_counts = [self.num_occurrences(gram) for gram in q_grams]
results = []
for choice in question.choices:
answer = choice.text
if verbose: print(answer)
total_pmi = 0.0
count = 0
for a_gram in distinct_grams(all_grams(answer)):
a_count = self.num_occurrences(a_gram)
for q_gram, q_count in zip(q_grams, q_counts):
co_count = self.num_co_occurrences(q_gram, a_gram)
cpmi = self.count_pmi(q_count, a_count, co_count)
total_pmi += cpmi
count += 1
if verbose and cpmi > 0:
print(q_gram, "/", a_gram, "/", q_count, a_count, co_count, cpmi)
if verbose:
print(total_pmi, count, total_pmi / count)
if count > 0:
results.append(total_pmi / count)
else:
results.append(0)
return MultipleChoiceAnswer(
[ChoiceConfidence(choice, pmi)
for choice, pmi in zip(question.choices, results)]
)
class PmiSolver(SolverBase):
"""uses pmi"""
def __init__(self, sentences: Iterable[str]) -> None:
print("creating scorer")
self.scorer = PmiScorer(sentences)
print("loaded scorer")
def solver_info(self) -> str:
return "pmi"
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
return self.scorer.score(question)
if __name__ == "__main__":
print("loading sentences")
sentences = get_sentences('aristo-mini-corpus-v1.txt')
print("loaded {} sentences".format(len(sentences)))
solver = PmiSolver(sentences)
solver.run()
| aristo-mini-master | aristomini/solvers/pmi.py |
"""word vector similarity solver"""
import argparse
from aristomini.common.solver import SolverBase
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, ChoiceConfidence
from aristomini.common.wordtwovec import WordTwoVec
parser = argparse.ArgumentParser()
parser.add_argument("model_file")
class WordVectorSimilaritySolver(SolverBase):
"""uses word2vec to score questions"""
def __init__(self, model_file: str) -> None:
self.word_two_vec = WordTwoVec(model_file)
def solver_info(self) -> str:
return "word_vector_similarity"
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
mca = MultipleChoiceAnswer(
[ChoiceConfidence(choice, self.word_two_vec.goodness(question.stem, choice.text))
for choice in question.choices]
)
return mca
if __name__ == "__main__":
args = parser.parse_args()
solver = WordVectorSimilaritySolver(args.model_file)
solver.run()
| aristo-mini-master | aristomini/solvers/wordvectorsimilarity.py |
"""
a wrapper class for the gensim Word2Vec model that has extra features we need, as well as some
helper functions for tokenizing and stemming and things like that.
"""
from functools import lru_cache
import math
from typing import Iterable, List
from gensim.parsing.preprocessing import STOPWORDS
from gensim.parsing.porter import PorterStemmer
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
import numpy as np
stemmer = PorterStemmer()
@lru_cache(maxsize=1024)
def stem(word: str) -> str:
"""stemming words is not cheap, so use a cache decorator"""
return stemmer.stem(word)
def tokenizer(sentence: str) -> List[str]:
"""use gensim's `simple_preprocess` and `STOPWORDS` list"""
return [stem(token) for token in simple_preprocess(sentence) if token not in STOPWORDS]
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
"""https://en.wikipedia.org/wiki/Cosine_similarity"""
num = np.dot(v1, v2)
d1 = np.dot(v1, v1)
d2 = np.dot(v2, v2)
if d1 > 0.0 and d2 > 0.0:
return num / math.sqrt(d1 * d2)
else:
return 0.0
class WordTwoVec:
"""
a wrapper for gensim.Word2Vec with added functionality to embed phrases and compute the
"goodness" of a question-answer pair based on embedding-vector similarity
"""
def __init__(self, model_file: str) -> None:
if model_file.endswith(".bin"):
self.model = Word2Vec.load_word2vec_format(model_file, binary=True)
else:
self.model = Word2Vec.load(model_file)
def embed(self, words: Iterable[str]) -> np.ndarray:
"""given a list of words, find their vector embeddings and return the vector mean"""
# first find the vector embedding for each word
vectors = [self.model[word] for word in words if word in self.model]
if vectors:
# if there are vector embeddings, take the vector average
return np.average(vectors, axis=0)
else:
# otherwise just return a zero vector
return np.zeros(self.model.vector_size)
def goodness(self, question_stem: str, choice_text: str) -> float:
"""how good is the choice for this question?"""
question_words = {word for word in tokenizer(question_stem)}
choice_words = {word for word in tokenizer(choice_text) if word not in question_words}
score = cosine_similarity(self.embed(question_words), self.embed(choice_words))
if "Max is doing" in question_stem:
print(choice_text, score)
return score
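# A rough usage sketch (assumes a gensim model trained and saved by
# scripts/train_word2vec_model.py exists at the hypothetical path 'model.w2v'):
#
# >>> w2v = WordTwoVec('model.w2v')
# >>> w2v.goodness("What do plants need to make food?", "sunlight and water")
#
# goodness() embeds the question and the answer choice (minus words shared with the
# question) and returns their cosine similarity, so higher scores mean a closer match.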
| aristo-mini-master | aristomini/common/wordtwovec.py |
"""base class that solvers should inherit from"""
from typing import Any
from aristomini.common.models import MultipleChoiceQuestion, MultipleChoiceAnswer, \
SolverAnswer, parse_question
# built in `json` module doesn't serialize namedtuples correctly; `simplejson` does.
import simplejson as json
from flask import Flask, request
from flask_cors import CORS
class SolverBase:
"""
interface for solvers. to implement one just inherit from this class and override
`answer_question` and `solver_info`
"""
def run(self, host='localhost', port=8000) -> None:
"""run the solver"""
app = Flask(__name__)
CORS(app)
@app.route('/answer', methods=['GET', 'POST'])
def solve() -> Any: # pylint: disable=unused-variable
"""
get a json-serialized MultipleChoiceQuestion out of the request body, feed it to
answer_question, and return the json-serialized result
"""
body = request.get_json(force=True)
question = parse_question(body)
multiple_choice_answer = self.answer_question(question)
solver_answer = SolverAnswer(solverInfo=self.solver_info(),
multipleChoiceAnswer=multiple_choice_answer)
return json.dumps(solver_answer)
@app.route('/solver-info')
def info(): # pylint: disable=unused-variable
"""return the solver name"""
return self.solver_info()
app.run(host=host, port=port)
def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswer:
"""answer the question"""
raise NotImplementedError()
def solver_info(self) -> str:
"""info about the solver"""
raise NotImplementedError()
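# A quick smoke test against a running solver (default host/port shown; the question
# body is made up, but follows the shape expected by parse_question):
#
# curl -X POST localhost:8000/answer \
# -d '{"stem": "Which is a liquid?", "choices": [{"label": "A", "text": "water"}, {"label": "B", "text": "rock"}]}'
#
# The response is a json-serialized SolverAnswer; GET /solver-info returns the solver name.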
| aristo-mini-master | aristomini/common/solver.py |
"""
typed models for our data. these are the exact analogues of the case classes used by the scala
code (which is why the fields have unfortunate, non-pythonic names)
"""
from typing import Any, Dict, List, NamedTuple
import simplejson as json
# pylint: disable=invalid-name
class Choice(NamedTuple):
label: str
text: str
class ChoiceConfidence(NamedTuple):
choice: Choice
confidence: float
class MultipleChoiceAnswer(NamedTuple):
choiceConfidences: List[ChoiceConfidence]
class SolverAnswer(NamedTuple):
solverInfo: str
multipleChoiceAnswer: MultipleChoiceAnswer
class MultipleChoiceQuestion(NamedTuple):
stem: str
choices: List[Choice]
id_: str = None
answerKey: str = None
@staticmethod
def from_jsonl(line: str) -> 'MultipleChoiceQuestion':
blob = json.loads(line)
question = blob['question']
return MultipleChoiceQuestion(
id_=blob['id'],
stem=question['stem'],
choices=[Choice(c["label"], c["text"]) for c in question['choices']],
answerKey=blob['answerKey'],
)
class Exam(NamedTuple):
name: str
questions: List[MultipleChoiceQuestion]
def parse_question(blob: Dict[str, Any]) -> MultipleChoiceQuestion:
"""parses a question from a json blob. is possibly too lenient to malformed json"""
return MultipleChoiceQuestion(
stem=blob.get("stem", ""),
choices=[Choice(c["label"], c["text"]) for c in blob.get("choices", [])]
)
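# A minimal, illustrative round trip through parse_question (the field names follow
# the question JSON consumed by from_jsonl above; the values are made up):
#
# >>> q = parse_question({
# ... "stem": "Which of these is a liquid at room temperature?",
# ... "choices": [{"label": "A", "text": "water"}, {"label": "B", "text": "granite"}]})
# >>> q.choices[0]
# Choice(label='A', text='water')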
| aristo-mini-master | aristomini/common/models.py |
aristo-mini-master | aristomini/common/__init__.py |
|
"""
nlp utils
"""
from functools import lru_cache
import re
from typing import List, NamedTuple, Iterable
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
STOPWORDS = set(stopwords.words('english'))
_stemmer = SnowballStemmer('english')
def get_sentences(filename: str) -> List[str]:
"""get sentences"""
with open(filename) as f:
return [line.strip() for line in f]
@lru_cache(maxsize=4096)
def stemmer(word: str) -> str:
"""memoized wrapper around PorterStemmer"""
return _stemmer.stem(word)
NGram = NamedTuple("NGram", [("gram", str), ("position", int)])
Token = NamedTuple("Token",
[("word", str),
("position", int),
("is_stopword", bool)])
def tokenize(sentence: str, stem: bool=True) -> List[Token]:
"""
lowercase a sentence, split it into tokens, label the stopwords, and throw out words that
don't contain alphabetic characters
"""
pre_tokens = [Token(stemmer(w) if stem else w, i, w in STOPWORDS)
for i, w in enumerate(word_tokenize(sentence.lower()))]
return [token for token in pre_tokens if re.match(r"^[a-z]+$", token.word)]
def ngrams(n: int, tokens: List[Token], skip: bool=False) -> List[NGram]:
"""generate all the ngrams of size n. do not allow ngrams that contain stopwords, except that a
3-gram may contain a stopword as its middle word"""
def stopwords_filter(subtokens: List[Token]) -> bool:
"""a filter"""
if n == 3:
return not subtokens[0].is_stopword and not subtokens[2].is_stopword
else:
return all(not token.is_stopword for token in subtokens)
def make_gram(subtokens: List[Token]) -> NGram:
"""make a gram using the position of the leftmost work and skipping the middle maybe"""
words = [token.word if not skip or i == 0 or i == len(subtokens) - 1 else "_"
for i, token in enumerate(subtokens)]
return NGram(" ".join(words), subtokens[0].position)
# if n is 1, we want len(tokens), etc..
slices = [tokens[i:(i+n)] for i in range(len(tokens) - n + 1)]
return [make_gram(slic) for slic in slices if stopwords_filter(slic)]
def distinct_grams(grams: List[NGram]) -> List[str]:
"""return the distinct grams from a bunch of ngrams"""
return list({gram.gram for gram in grams})
def all_grams_from_tokens(tokens: List[Token]) -> List[NGram]:
"""make all the 1, 2, 3, and skip-3 grams from some tokens"""
return (ngrams(1, tokens) +
ngrams(2, tokens) +
ngrams(3, tokens) +
ngrams(3, tokens, skip=True))
def all_grams(sentence: str, stem: bool=True) -> List[NGram]:
"""tokenize the sentence and make all the grams"""
return all_grams_from_tokens(tokenize(sentence, stem))
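# Illustrative usage (needs the same NLTK 'punkt' and 'stopwords' data this module
# already loads at import time; the exact grams depend on the stemmer and stopword list):
#
# >>> distinct_grams(all_grams("Plants absorb water through their roots."))
# stems everything and keeps only grams whose edge words are non-stopwords, e.g.
# 'plant', 'absorb water', 'plant absorb water', 'plant _ water', ...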
| aristo-mini-master | aristomini/common/nlp.py |
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import string
import sys
import re
path = 'aristo-mini-corpus-v1.txt'
good_chars = set(string.printable)
text = "".join(
c for c in open(path).read().lower()
if c in good_chars
)
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
print(sorted(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y,
batch_size=128,
epochs=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
| aristo-mini-master | aristomini/common/rnn.py |
"""server to run the evaluation ui"""
from typing import Any
import argparse
import pathlib
import os
import jinja2
# built in `json` module doesn't serialize namedtuples correctly; `simplejson` does.
import simplejson as json
from flask import Flask, request, send_from_directory
import requests
from aristomini.common.models import Exam, MultipleChoiceQuestion
HOST = 'localhost'
SOLVER_PORT = 8000
EVALUI_PORT = 9000
SOLVER_URL = f"http://{HOST}:{SOLVER_PORT}"
EVALUI_DIR = pathlib.Path(__file__).resolve().parent
EXAM_DIR = EVALUI_DIR.parent.parent / 'questions'
EXAM_PATHS = [exam for exam in EXAM_DIR.glob('*')]
EXAM_NAMES = [path.name for path in EXAM_PATHS]
def read_exam(path: pathlib.Path) -> Exam:
with open(path, 'r') as f:
questions = [MultipleChoiceQuestion.from_jsonl(line) for line in f]
name = path.name
return Exam(name=name, questions=questions)
EXAMS = [read_exam(path) for path in EXAM_PATHS]
def get_solver_name(solver_url: str = SOLVER_URL) -> str:
try:
        resp = requests.get(f"{solver_url}/solver-info")
if resp.status_code == 200:
solver_name = resp.content.decode('utf-8')
return solver_name
else:
print(f"received status {resp.status_code} from solver at {solver_url}")
except requests.exceptions.ConnectionError:
print(f"ConnectionError: unable to connect to solver at {solver_url}")
return None
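# Hedged sketch (not part of this module): the eval UI only requires a solver
# listening on SOLVER_PORT that answers GET /solver-info with its name as
# plain text. A minimal stand-in for local testing could look like this; real
# aristo-mini solvers also expose question-answering endpoints, not shown here.
def _toy_solver_stub():
    from flask import Flask
    stub = Flask("toy-solver")
    @stub.route("/solver-info")
    def solver_info():
        return "toy solver"
    stub.run(host=HOST, port=SOLVER_PORT)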
app = Flask(__name__)
@app.route('/')
def index(): # pylint: disable=unused-variable
with open(EVALUI_DIR / 'index.html', 'r') as f:
raw_html = f.read()
template = jinja2.Template(raw_html)
return template.render(solver_url=SOLVER_URL,
exam_names=EXAM_NAMES)
@app.route('/exam/<index>')
def exam(index: str): # pylint: disable=unused-variable
solver_name = get_solver_name() or ''
with open(EVALUI_DIR / 'exam.html', 'r') as f:
raw_html = f.read()
template = jinja2.Template(raw_html)
exam = json.loads(json.dumps(EXAMS[int(index)]))
return template.render(solver_name=solver_name,
solver_url=SOLVER_URL,
exam=exam)
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path)
app.run(host=HOST, port=EVALUI_PORT)
| aristo-mini-master | aristomini/evalui/evalui.py |
import argparse
from glob import glob
import shutil, sys, os, random, re
def randomize(keys, to_dir, source_txt_files, source_ann_files):
random.seed()
prefixes = [i for i in range(len(keys))]
random.shuffle(prefixes)
for key, prefix in zip(keys, prefixes):
shutil.copy2(source_txt_files[key],
os.path.join(to_dir,
'{}_{}'.format(prefix, os.path.basename(source_txt_files[key]))))
shutil.copy2(source_ann_files[key],
os.path.join(to_dir,
'{}_{}'.format(prefix, os.path.basename(source_ann_files[key]))))
sys.stderr.write('copying file pair: {} {}\n'
.format(os.path.basename(source_txt_files[key]),
os.path.basename(source_ann_files[key])))
return len(keys)
def derandomize(keys, to_dir, source_txt_files, source_ann_files):
remove_random_prefix = re.compile(r"^[0-9]+_([0-9]+)\.(ann|txt)$")
for key in keys:
basename = os.path.basename(source_txt_files[key])
m = remove_random_prefix.match(basename)
name = m.group(1) if m is not None else basename[:-4]
shutil.copy2(source_txt_files[key], os.path.join(to_dir, name + ".txt"))
shutil.copy2(source_ann_files[key], os.path.join(to_dir, name + ".ann"))
sys.stderr.write('copying file pair: {} {}\n'
.format(os.path.basename(source_txt_files[key]),
os.path.basename(source_ann_files[key])))
return len(keys)
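# Illustrative check (not part of the original script): the regex used by
# derandomize() maps a randomized name such as "7_123.txt" back to "123".
def _prefix_regex_demo():
    pattern = re.compile(r"^[0-9]+_([0-9]+)\.(ann|txt)$")
    assert pattern.match("7_123.txt").group(1) == "123"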
parser = argparse.ArgumentParser(description='Copy .txt and .ann files from a source directory to a target directory, and add randomly generated prefixes to change the order in which files appear for the annotators. Also supports removing the randomly generated prefixes from files.')
parser.add_argument('--from-dir', help='source directory', required=True)
parser.add_argument('--to-dir', help='target directory', required=True)
parser.add_argument('--derandomize', help='remove random prefixes', action='store_true')
parser.add_argument('--abstract-only', help='only copy the abstract text',
action='store_true')
parser.add_argument('--override', action='store_true',
help='override existing files in the target directory')
args = parser.parse_args()
# read the .txt and .ann files
source_txt_files = { filename[:-3] : filename for filename in glob(os.path.join(args.from_dir, '*.txt')) }
source_ann_files = { filename[:-3] : filename for filename in glob(os.path.join(args.from_dir, '*.ann')) }
# only allow the to_dir to pre-exist if --override is specified.
if os.path.exists(args.to_dir) and not args.override:
    sys.stderr.write('ERROR: Target directory already exists. To override it, add --override to the command line.\n')
    sys.exit(1)
# create target directory if needed.
if not os.path.exists(args.to_dir):
os.makedirs(args.to_dir)
# only copy files which have both (.txt, .ann) extensions.
valid_keys = list(set(source_txt_files.keys()).intersection(set(source_ann_files.keys())))
if args.derandomize:
count = derandomize(valid_keys, args.to_dir, source_txt_files, source_ann_files)
else:
count = randomize(valid_keys, args.to_dir, source_txt_files, source_ann_files)
sys.stderr.write('{} file pairs have been copied.\n'.format(count))
| brat-master | copy_randomize_files.py |
#!/usr/bin/env python
# Minimal standalone brat server based on SimpleHTTPRequestHandler.
# Run as apache, e.g. as
#
# APACHE_USER=`./apache-user.sh`
# sudo -u $APACHE_USER python standalone.py
import sys
import os
from posixpath import normpath
from urllib import unquote
from cgi import FieldStorage
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import ForkingMixIn
import socket
# brat imports
sys.path.append(os.path.join(os.path.dirname(__file__), 'server/src'))
from server import serve
# pre-import everything possible (TODO: prune unnecessary)
import annlog
import annotation
import annotator
import auth
import common
import delete
import dispatch
import docimport
import document
import download
import filelock
import gtbtokenize
import jsonwrap
import message
import normdb
import norm
import predict
import projectconfig
import realmessage
import sdistance
import search
import server
import session
import simstringdb
import sosmessage
import ssplit
import sspostproc
import stats
import svg
import tag
import tokenise
import undo
import verify_annotations
_VERBOSE_HANDLER = False
_DEFAULT_SERVER_ADDR = ''
_DEFAULT_SERVER_PORT = 8000
_PERMISSIONS = """
Allow: /ajax.cgi
Disallow: *.py
Disallow: *.cgi
Disallow: /.htaccess
Disallow: *.py~ # no emacs backups
Disallow: *.cgi~
Disallow: /.htaccess~
Allow: /
"""
class PermissionParseError(Exception):
def __init__(self, linenum, line, message=None):
self.linenum = linenum
self.line = line
self.message = ' (%s)' % message if message is not None else ''
def __str__(self):
return 'line %d%s: %s' % (self.linenum, self.message, self.line)
class PathPattern(object):
def __init__(self, path):
self.path = path
self.plen = len(path)
def match(self, s):
# Require prefix match and separator/end.
return s[:self.plen] == self.path and (self.path[-1] == '/' or
s[self.plen:] == '' or
s[self.plen] == '/')
class ExtensionPattern(object):
def __init__(self, ext):
self.ext = ext
def match(self, s):
return os.path.splitext(s)[1] == self.ext
class PathPermissions(object):
"""Implements path permission checking with a robots.txt-like syntax."""
def __init__(self, default_allow=False):
self._entries = []
self.default_allow = default_allow
def allow(self, path):
# First match wins
for pattern, allow in self._entries:
if pattern.match(path):
return allow
return self.default_allow
def parse(self, lines):
# Syntax: "DIRECTIVE : PATTERN" where
# DIRECTIVE is either "Disallow:" or "Allow:" and
# PATTERN either has the form "*.EXT" or "/PATH".
# Strings starting with "#" and empty lines are ignored.
for ln, l in enumerate(lines):
i = l.find('#')
if i != -1:
l = l[:i]
l = l.strip()
if not l:
continue
i = l.find(':')
if i == -1:
raise PermissionParseError(ln, lines[ln], 'missing colon')
directive = l[:i].strip().lower()
pattern = l[i+1:].strip()
if directive == 'allow':
allow = True
elif directive == 'disallow':
allow = False
else:
raise PermissionParseError(ln, lines[ln], 'unrecognized directive')
if pattern.startswith('/'):
patt = PathPattern(pattern)
elif pattern.startswith('*.'):
patt = ExtensionPattern(pattern[1:])
else:
raise PermissionParseError(ln, lines[ln], 'unrecognized pattern')
self._entries.append((patt, allow))
return self
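# Illustrative check (not part of the original server): how the robots.txt-like
# rules in _PERMISSIONS resolve. The first matching entry wins, so the CGI
# entry point is allowed while raw .py files are refused.
def _permissions_demo():
    perms = PathPermissions().parse(_PERMISSIONS.split('\n'))
    assert perms.allow('/ajax.cgi')
    assert not perms.allow('/standalone.py')
    assert perms.allow('/index.xhtml')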
class BratHTTPRequestHandler(SimpleHTTPRequestHandler):
"""Minimal handler for brat server."""
permissions = PathPermissions().parse(_PERMISSIONS.split('\n'))
def log_request(self, code='-', size='-'):
if _VERBOSE_HANDLER:
SimpleHTTPRequestHandler.log_request(self, code, size)
else:
# just ignore logging
pass
def is_brat(self):
# minimal cleanup
path = self.path
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
if path == '/ajax.cgi':
return True
else:
return False
def run_brat_direct(self):
"""Execute brat server directly."""
remote_addr = self.client_address[0]
remote_host = self.address_string()
cookie_data = ', '.join(filter(None, self.headers.getheaders('cookie')))
query_string = ''
i = self.path.find('?')
if i != -1:
query_string = self.path[i+1:]
saved = sys.stdin, sys.stdout, sys.stderr
sys.stdin, sys.stdout = self.rfile, self.wfile
# set env to get FieldStorage to read params
env = {}
env['REQUEST_METHOD'] = self.command
content_length = self.headers.getheader('content-length')
if content_length:
env['CONTENT_LENGTH'] = content_length
if query_string:
env['QUERY_STRING'] = query_string
os.environ.update(env)
params = FieldStorage()
# Call main server
cookie_hdrs, response_data = serve(params, remote_addr, remote_host,
cookie_data)
sys.stdin, sys.stdout, sys.stderr = saved
# Package and send response
if cookie_hdrs is not None:
response_hdrs = [hdr for hdr in cookie_hdrs]
else:
response_hdrs = []
response_hdrs.extend(response_data[0])
self.send_response(200)
self.wfile.write('\n'.join('%s: %s' % (k, v) for k, v in response_hdrs))
self.wfile.write('\n')
self.wfile.write('\n')
# Hack to support binary data and general Unicode for SVGs and JSON
if isinstance(response_data[1], unicode):
self.wfile.write(response_data[1].encode('utf-8'))
else:
self.wfile.write(response_data[1])
return 0
def allow_path(self):
"""Test whether to allow a request for self.path."""
# Cleanup in part following SimpleHTTPServer.translate_path()
path = self.path
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
path = unquote(path)
path = normpath(path)
parts = path.split('/')
parts = filter(None, parts)
if '..' in parts:
return False
path = '/'+'/'.join(parts)
return self.permissions.allow(path)
def list_directory(self, path):
"""Override SimpleHTTPRequestHandler.list_directory()"""
# TODO: permissions for directory listings
self.send_error(403)
def do_POST(self):
"""Serve a POST request. Only implemented for brat server."""
if self.is_brat():
self.run_brat_direct()
else:
self.send_error(501, "Can only POST to brat")
def do_GET(self):
"""Serve a GET request."""
if not self.allow_path():
self.send_error(403)
elif self.is_brat():
self.run_brat_direct()
else:
SimpleHTTPRequestHandler.do_GET(self)
def do_HEAD(self):
"""Serve a HEAD request."""
if not self.allow_path():
self.send_error(403)
else:
SimpleHTTPRequestHandler.do_HEAD(self)
class BratServer(ForkingMixIn, HTTPServer):
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, BratHTTPRequestHandler)
def main(argv):
# warn if root/admin
try:
if os.getuid() == 0:
print >> sys.stderr, """
! WARNING: running as root. The brat standalone server is experimental !
! and may be a security risk. It is recommend to run the standalone !
! server as a non-root user with write permissions to the brat work/ and !
! data/ directories (e.g. apache if brat is set up using standard !
! installation). !
"""
except AttributeError:
# not on UNIX
print >> sys.stderr, """
Warning: could not determine user. Note that the brat standalone
server is experimental and should not be run as administrator.
"""
if len(argv) > 1:
try:
port = int(argv[1])
except ValueError:
print >> sys.stderr, "Failed to parse", argv[1], "as port number."
return 1
else:
port = _DEFAULT_SERVER_PORT
try:
server = BratServer((_DEFAULT_SERVER_ADDR, port))
print >> sys.stderr, "Serving brat at http://%s:%d" % server.server_address
server.serve_forever()
except KeyboardInterrupt:
# normal exit
pass
except socket.error, why:
print >> sys.stderr, "Error binding to port", port, ":", why[1]
except Exception, e:
print >> sys.stderr, "Server error", e
raise
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| brat-master | standalone.py |
# -*- Mode: Python; tab-width: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# This configuration file specifies the global setup of the brat
# server. It is recommended that you use the installation script
# instead of editing this file directly. To do this, run the following
# command in the brat directory:
#
# ./install.sh
#
# if you wish to configure the server manually, you will first need to
# make sure that this file appears as config.py in the brat server
# root directory. If this file is currently named config_template.py,
# you can do this as follows:
#
# cp config_template.py config.py
#
# you will then need to edit config.py, minimally replacing all
# instances of the string CHANGE_ME with their appropriate values.
# Please note that these values MUST appear in quotes, e.g. as in
#
# ADMIN_CONTACT_EMAIL = '[email protected]'
# Contact email for users to use if the software encounters errors
ADMIN_CONTACT_EMAIL = CHANGE_ME
# Directories required by the brat server:
#
# BASE_DIR: directory in which the server is installed
# DATA_DIR: directory containing texts and annotations
# WORK_DIR: directory that the server uses for temporary files
#
BASE_DIR = CHANGE_ME
DATA_DIR = CHANGE_ME
WORK_DIR = CHANGE_ME
# If you have installed brat as suggested in the installation
# instructions, you can set up BASE_DIR, DATA_DIR and WORK_DIR by
# removing the three lines above and deleting the initial '#'
# character from the following four lines:
#from os.path import dirname, join
#BASE_DIR = dirname(__file__)
#DATA_DIR = join(BASE_DIR, 'data')
#WORK_DIR = join(BASE_DIR, 'work')
# To allow editing, include at least one USERNAME:PASSWORD pair below.
# The format is the following:
#
# 'USERNAME': 'PASSWORD',
#
# For example, user `editor` and password `annotate`:
#
# 'editor': 'annotate',
USER_PASSWORD = {
# (add USERNAME:PASSWORD pairs below this line.)
}
########## ADVANCED CONFIGURATION OPTIONS ##########
# The following options control advanced aspects of the brat server
# setup. It is not necessary to edit these in a basic brat server
# installation.
### MAX_SEARCH_RESULT_NUMBER
# It may be a good idea to limit the max number of results to a search
# as very high numbers can be demanding of both server and clients.
# (unlimited if not defined or <= 0)
MAX_SEARCH_RESULT_NUMBER = 1000
### DEBUG
# Set to True to enable additional debug output
DEBUG = False
### TUTORIALS
# Unauthorised users can create tutorials (but not edit without a login)
TUTORIALS = False
### LOG_LEVEL
# If you are a developer you may want to turn on extensive server
# logging by enabling LOG_LEVEL = LL_DEBUG
LL_DEBUG, LL_INFO, LL_WARNING, LL_ERROR, LL_CRITICAL = range(5)
LOG_LEVEL = LL_WARNING
#LOG_LEVEL = LL_DEBUG
### BACKUP_DIR
# Define to enable backups
# from os.path import join
#BACKUP_DIR = join(WORK_DIR, 'backup')
try:
assert DATA_DIR != BACKUP_DIR, 'DATA_DIR cannot equal BACKUP_DIR'
except NameError:
pass # BACKUP_DIR most likely not defined
### SVG_CONVERSION_COMMANDS
# If export to formats other than SVG is needed, the server must have
# a software capable of conversion like inkscape set up, and the
# following must be defined.
# (SETUP NOTE: at least Inkscape 0.46 requires the directory
# ".gnome2/" in the apache home directory and will crash if it doesn't
# exist.)
#SVG_CONVERSION_COMMANDS = [
# ('png', 'inkscape --export-area-drawing --without-gui --file=%s --export-png=%s'),
# ('pdf', 'inkscape --export-area-drawing --without-gui --file=%s --export-pdf=%s'),
# ('eps', 'inkscape --export-area-drawing --without-gui --file=%s --export-eps=%s'),
#]
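# Hedged example (not part of the template): a minimal filled-in config.py for
# a standard installation, following the instructions above, might look like
# the following; the values shown are placeholders, not shipped defaults.
#
# ADMIN_CONTACT_EMAIL = '[email protected]'
# from os.path import dirname, join
# BASE_DIR = dirname(__file__)
# DATA_DIR = join(BASE_DIR, 'data')
# WORK_DIR = join(BASE_DIR, 'work')
# USER_PASSWORD = {'editor': 'annotate'}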
| brat-master | config_template.py |
from setuptools import setup
setup(name='ai2-brat',
version='0.1',
description='brat related utilities',
url='http://github.com/allenai/brat',
packages=['ai2_brat'],
install_requires=[],
zip_safe=False)
| brat-master | setup.py |
#!/usr/bin/env python
'''
Run brat using the built-in Python CGI server for testing purposes.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2012-07-01
'''
from BaseHTTPServer import HTTPServer, test as simple_http_server_test
from CGIHTTPServer import CGIHTTPRequestHandler
# Note: It is a terrible idea to import the function below, but we don't have
# a choice if we want to emulate the super-class is_cgi method.
from CGIHTTPServer import _url_collapse_path_split
from sys import stderr
from urlparse import urlparse
# Note: The only reason that we sub-class in order to pull is the stupid
# is_cgi method that assumes the usage of specific CGI directories, I simply
# refuse to play along with this kind of non-sense.
class BRATCGIHTTPRequestHandler(CGIHTTPRequestHandler):
def is_cgi(self):
# Having a CGI suffix is really a big hint of being a CGI script.
if urlparse(self.path).path.endswith('.cgi'):
self.cgi_info = _url_collapse_path_split(self.path)
return True
else:
return CGIHTTPRequestHandler.is_cgi(self)
def main(args):
# BaseHTTPServer will look for the port in argv[1] or default to 8000
try:
try:
port = int(args[1])
except ValueError:
raise TypeError
except TypeError:
print >> stderr, '%s is not a valid port number' % args[1]
return -1
except IndexError:
port = 8000
print >> stderr, 'WARNING: This server is for testing purposes only!'
print >> stderr, (' You can also use it for trying out brat before '
'deploying on a "real" web server such as Apache.')
print >> stderr, (' Using this web server to run brat on an open '
'network is a security risk!')
print >> stderr
print >> stderr, 'You can access the test server on:'
print >> stderr
print >> stderr, ' http://localhost:%s/' % port
print >> stderr
simple_http_server_test(BRATCGIHTTPRequestHandler, HTTPServer)
if __name__ == '__main__':
from sys import argv
exit(main(argv))
| brat-master | testserver.py |
#!/usr/bin/env python
# Script to convert a CoNLL-flavored BIO-formatted entity-tagged file
# into BioNLP ST-flavored standoff with reference to the original
# text.
import sys
import re
import os
import codecs
try:
import psyco
psyco.full()
except:
pass
# what to do if an error in the tag sequence (e.g. "O I-T1" or "B-T1
# I-T2") is encountered: recover/discard the erroneously tagged
# sequence, or abord the entire process
# TODO: add a command-line option for this
SEQUENCE_ERROR_RECOVER, SEQUENCE_ERROR_DISCARD, SEQUENCE_ERROR_FAIL = range(3)
SEQUENCE_ERROR_PROCESSING = SEQUENCE_ERROR_RECOVER
# TODO: get rid of globals
# output goes to stdout by default
out = sys.stdout
reference_directory = None
output_directory = None
def reference_text_filename(fn):
# Tries to determine the name of the reference text file
# for the given CoNLL output file.
fnbase = os.path.basename(fn)
reffn = os.path.join(reference_directory, fnbase)
# if the file doesn't exist, try replacing the last dot-separated
# suffix in the filename with .txt
if not os.path.exists(reffn):
reffn = re.sub(r'(.*)\..*', r'\1.txt', reffn)
return reffn
def output_filename(fn):
if output_directory is None:
return None
reffn = reference_text_filename(fn)
return os.path.join(output_directory, os.path.basename(reffn).replace(".txt",".a1"))
def process(fn):
global out
reffn = reference_text_filename(fn)
try:
#reffile = open(reffn)
reffile = codecs.open(reffn, "rt", "UTF-8")
except:
print >> sys.stderr, "ERROR: failed to open reference file %s" % reffn
raise
reftext = reffile.read()
reffile.close()
# ... and the tagged file
try:
#tagfile = open(fn)
tagfile = codecs.open(fn, "rt", "UTF-8")
except:
print >> sys.stderr, "ERROR: failed to open file %s" % fn
raise
tagtext = tagfile.read()
tagfile.close()
# if an output directory is specified, write a file with an
# appropriate name there
if output_directory is not None:
outfn = output_filename(fn)
#out = codecs.open(outfn, "wt", "UTF-8")
out = open(outfn, "wt")
# parse CoNLL-X-flavored tab-separated BIO, storing boundaries and
# tagged tokens. The format is one token per line, with the
# following tab-separated fields:
#
# START END TOKEN LEMMA POS CHUNK TAG
#
# where we're only interested in the start and end offsets
# (START,END), the token text (TOKEN) for verification, and the
# NER tags (TAG). Additionally, sentence boundaries are marked by
# blank lines in the input.
taggedTokens = []
for ln, l in enumerate(tagtext.split('\n')):
if l.strip() == '':
# skip blank lines (sentence boundary markers)
continue
fields = l.split('\t')
assert len(fields) == 7, "Error: expected 7 tab-separated fields on line %d in %s, found %d: %s" % (ln+1, fn, len(fields), l.encode("UTF-8"))
start, end, ttext = fields[0:3]
tag = fields[6]
start, end = int(start), int(end)
# parse tag
m = re.match(r'^([BIO])((?:-[A-Za-z_]+)?)$', tag)
assert m, "ERROR: failed to parse tag '%s' in %s" % (tag, fn)
ttag, ttype = m.groups()
# strip off starting "-" from tagged type
if len(ttype) > 0 and ttype[0] == "-":
ttype = ttype[1:]
# sanity check
assert ((ttype == "" and ttag == "O") or
(ttype != "" and ttag in ("B","I"))), "Error: tag format '%s' in %s" % (tag, fn)
# verify that the text matches the original
assert reftext[start:end] == ttext, "ERROR: text mismatch for %s on line %d: reference '%s' tagged '%s': %s" % (fn, ln+1, reftext[start:end].encode("UTF-8"), ttext.encode("UTF-8"), l.encode("UTF-8"))
# store tagged token as (begin, end, tag, tagtype) tuple.
taggedTokens.append((start, end, ttag, ttype))
# transform input text from CoNLL-X flavored tabbed BIO format to
# inline-tagged BIO format for processing (this is a bit
# convoluted, sorry; this script written as a modification of an
# inline-format BIO conversion script).
### Output for entities ###
# returns a string containing annotation in the output format
# for an Entity with the given properties.
def entityStr(startOff, endOff, eType, idNum, fullText):
# sanity checks: the string should not contain newlines and
# should be minimal wrt surrounding whitespace
eText = fullText[startOff:endOff]
assert "\n" not in eText, "ERROR: newline in entity in %s: '%s'" % (fn, eText)
assert eText == eText.strip(), "ERROR: entity contains extra whitespace in %s: '%s'" % (fn, eText)
return "T%d\t%s %d %d\t%s" % (idNum, eType, startOff, endOff, eText)
idIdx = 1
prevTag, prevEnd = "O", 0
currType, currStart = None, None
for startoff, endoff, ttag, ttype in taggedTokens:
# special case for surviving format errors in input: if the
# type sequence changes without a "B" tag, change the tag
# to allow some output (assumed to be preferable to complete
# failure.)
if prevTag != "O" and ttag == "I" and currType != ttype:
if SEQUENCE_ERROR_PROCESSING == SEQUENCE_ERROR_RECOVER:
# reinterpret as the missing "B" tag.
ttag = "B"
elif SEQUENCE_ERROR_PROCESSING == SEQUENCE_ERROR_DISCARD:
ttag = "O"
else:
assert SEQUENCE_ERROR_PROCESSING == SEQUENCE_ERROR_FAIL
pass # will fail on later check
# similarly if an "I" tag occurs after an "O" tag
if prevTag == "O" and ttag == "I":
if SEQUENCE_ERROR_PROCESSING == SEQUENCE_ERROR_RECOVER:
ttag = "B"
elif SEQUENCE_ERROR_PROCESSING == SEQUENCE_ERROR_DISCARD:
ttag = "O"
else:
assert SEQUENCE_ERROR_PROCESSING == SEQUENCE_ERROR_FAIL
pass # will fail on later check
if prevTag != "O" and ttag != "I":
# previous entity does not continue into this tag; output
assert currType is not None and currStart is not None, "ERROR at %s (%d-%d) in %s" % (reftext[startoff:endoff], startoff, endoff, fn)
print >> out, entityStr(currStart, prevEnd, currType, idIdx, reftext).encode("UTF-8")
idIdx += 1
# reset current entity
currType, currStart = None, None
elif prevTag != "O":
# previous entity continues ; just check sanity
assert ttag == "I", "ERROR in %s" % fn
assert currType == ttype, "ERROR: entity of type '%s' continues as type '%s' in %s" % (currType, ttype, fn)
if ttag == "B":
# new entity starts
currType, currStart = ttype, startoff
prevTag, prevEnd = ttag, endoff
# if there's an open entity after all tokens have been processed,
# we need to output it separately
if prevTag != "O":
print >> out, entityStr(currStart, prevEnd, currType, idIdx, reftext).encode("UTF-8")
if output_directory is not None:
# we've opened a specific output for this
out.close()
def main(argv):
global reference_directory, output_directory
# (clumsy arg parsing, sorry)
# Take a mandatory "-d" arg that tells us where to find the original,
# unsegmented and untagged reference files.
if len(argv) < 3 or argv[1] != "-d":
print >> sys.stderr, "USAGE:", argv[0], "-d REF-DIR [-o OUT-DIR] (FILES|DIR)"
return 1
reference_directory = argv[2]
# Take an optional "-o" arg specifying an output directory for the results
output_directory = None
filenames = argv[3:]
if len(argv) > 4 and argv[3] == "-o":
output_directory = argv[4]
print >> sys.stderr, "Writing output to %s" % output_directory
filenames = argv[5:]
# special case: if we only have a single file in input and it specifies
# a directory, process all files in that directory
input_directory = None
if len(filenames) == 1 and os.path.isdir(filenames[0]):
input_directory = filenames[0]
filenames = [os.path.join(input_directory, fn) for fn in os.listdir(input_directory)]
print >> sys.stderr, "Processing %d files in %s ..." % (len(filenames), input_directory)
fail_count = 0
for fn in filenames:
try:
process(fn)
except Exception, e:
print >> sys.stderr, "Error processing %s: %s" % (fn, e)
fail_count += 1
# if we're storing output on disk, remove the output file
# to avoid having partially-written data
ofn = output_filename(fn)
try:
os.remove(ofn)
except:
# never mind if that fails
pass
if fail_count > 0:
print >> sys.stderr, """
##############################################################################
#
# WARNING: error in processing %d/%d files, output is incomplete!
#
##############################################################################
""" % (fail_count, len(filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
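# Hedged example (not part of the tool; the tag type is hypothetical): an input
# line "0\t6\tTestis\ttestis\tNN\tB-NP\tB-Protein", checked against a reference
# text starting with "Testis", is emitted as the standoff line
# "T1\tProtein 0 6\tTestis" in the corresponding .a1 file.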
| brat-master | tools/conll2standoff.py |
#!/usr/bin/env python
'''
Make a data back-up into the work directory.
This script is a quick hack until we come up with something better.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2011-05-11
'''
from datetime import datetime
from os import mkdir, remove
from os.path import dirname, exists, basename
from os.path import join as path_join
from shlex import split as shlex_split
from subprocess import Popen
from sys import path as sys_path
from sys import stderr as sys_stderr
sys_path.append(path_join(dirname(__file__), '..'))
from config import WORK_DIR, DATA_DIR
### Constants
TOOL_BACKUP_DIR = path_join(WORK_DIR, 'bckup_tool')
###
def _safe_dirname(path):
# Handles the case of a trailing slash for the dir path
return basename(path) or dirname(dirname(path))
def main(args):
if not exists(TOOL_BACKUP_DIR):
mkdir(TOOL_BACKUP_DIR)
timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M%SZ')
backup_path = path_join(TOOL_BACKUP_DIR, '%s-%s.tar.gz' % (
_safe_dirname(DATA_DIR), timestamp))
data_dir_parent = path_join(DATA_DIR, '..')
tar_cmd = 'tar -c -z -f %s -C %s %s' % (backup_path, data_dir_parent,
_safe_dirname(DATA_DIR))
tar_p = Popen(shlex_split(tar_cmd))
tar_p.wait()
if tar_p.returncode != 0:
# We failed, remove the back-up and exit
remove(backup_path)
return -1
else:
return 0
if __name__ == '__main__':
from sys import argv
exit(main(argv))
| brat-master | tools/backup.py |
#!/usr/bin/env python
# -*- Mode: Python; tab-width: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# vim:set ft=python ts=4 sw=4 sts=4 autoindent:
from __future__ import with_statement
'''
Merge BioNLP Shared Task annotation format into a single annotation file.
find data -name '*.a1' -o -name '*.a2' -o -name '*.rel' -o -name '*.co' \
| ./merge.py
Author: Pontus Stenetorp
Version: 2011-01-17
'''
from collections import defaultdict
from os.path import join as join_path
from os.path import split as split_path
from shlex import split as shlex_split
from sys import stderr, stdin
from subprocess import Popen, PIPE
try:
from argparse import ArgumentParser
except ImportError:
from os.path import basename
from sys import path as sys_path
# We are most likely on an old Python and need to use our internal version
sys_path.append(join_path(basename(__file__), '../server/lib'))
from argparse import ArgumentParser
### Constants
#TODO: Add to options?
UNMERGED_SUFFIXES=['a1', 'a2', 'co', 'rel']
#TODO: Add to options?
MERGED_SUFFIX='ann'
ARGPARSER = ArgumentParser(description=("Merge BioNLP'11 ST annotations "
'into a single file, reads paths from stdin'))
ARGPARSER.add_argument('-w', '--no-warn', action='store_true',
help='suppress warnings')
#ARGPARSER.add_argument('-d', '--debug', action='store_true',
# help='activate additional debug output')
###
def keynat(string):
'''
http://code.activestate.com/recipes/285264-natural-string-sorting/
'''
it = type(1)
r = []
for c in string:
if c.isdigit():
d = int(c)
if r and type( r[-1] ) == it:
r[-1] = r[-1] * 10 + d
else:
r.append(d)
else:
r.append(c.lower())
return r
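# Illustrative check (not part of the original tool): keynat yields a natural
# sort order in which embedded numbers compare numerically rather than
# lexicographically.
def _keynat_demo():
    names = ['file10', 'file2', 'file1']
    assert sorted(names, key=keynat) == ['file1', 'file2', 'file10']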
def main(args):
argp = ARGPARSER.parse_args(args[1:])
# ID is the stem of a file
id_to_ann_files = defaultdict(list)
# Index all ID;s before we merge so that we can do a little magic
for file_path in (l.strip() for l in stdin):
if not any((file_path.endswith(suff) for suff in UNMERGED_SUFFIXES)):
if not argp.no_warn:
import sys
print >> sys.stderr, (
'WARNING: invalid file suffix for %s, ignoring'
) % (file_path, )
continue
dirname, basename = split_path(file_path)
id = join_path(dirname, basename.split('.')[0])
id_to_ann_files[id].append(file_path)
for id, ann_files in id_to_ann_files.iteritems():
#XXX: Check if output file exists
lines = []
for ann_file_path in ann_files:
with open(ann_file_path, 'r') as ann_file:
for line in ann_file:
lines.append(line)
with open(id + '.' + MERGED_SUFFIX, 'w') as merged_ann_file:
for line in lines:
merged_ann_file.write(line)
if __name__ == '__main__':
from sys import argv
exit(main(argv))
| brat-master | tools/merge.py |
#!/usr/bin/env python
# Script to revise the whitespace content of a PMC NXML file for text
# content extraction.
from __future__ import with_statement
import sys
import os
import re
import codecs
# TODO: switch to lxml
try:
import xml.etree.ElementTree as ET
except ImportError:
import cElementTree as ET
# TODO: the model of "space wrap" is unnecessarily crude in many
# cases. For example, for <issue> we would ideally want to have
# "<issue>1</issue><title>Foo</title>" spaced as "1 Foo" but
# "<issue>1</issue>: <page>100</page>" spaced as "1: 100". This could
# be addressed by differentiating between things that should be
# wrapped by space in all cases and ones where it's only needed
# at non-word-boundaries (\b).
# tag to use for inserted elements
INSERTED_ELEMENT_TAG = "n2t-spc"
INPUT_ENCODING="UTF-8"
OUTPUT_ENCODING="UTF-8"
# command-line options
options = None
newline_wrap_element = set([
"CURRENT_TITLE",
"CURRENT_AUTHORLIST",
"ABSTRACT",
"P",
"TABLE",
"FIGURE",
"HEADER",
"REFERENCE",
"article-title",
"abstract",
"title",
"sec",
"p",
"contrib", # contributor (author list)
"aff", # affiliation
"pub-date", # publication date
"copyright-statement",
"table",
"table-wrap",
"figure",
"fig", # figure (alternate)
"tr", # table row
"kwd-group", # keyword group
])
space_wrap_element = set([
"AUTHOR",
"SURNAME",
"CURRENT_AUTHOR",
"CURRENT_SURNAME",
"TITLE",
"JOURNAL",
"YEAR",
# author lists
"surname",
"given-names",
"email",
# citation details
"volume",
"issue",
"year",
"month",
"day",
"fpage",
"lpage",
"pub-id",
"copyright-year",
# journal meta
"journal-id",
"journal-title",
"issn",
"publisher-name",
# article meta
"article-id",
"kwd", # keyword
# miscellaneous
"label",
"th",
"td",
])
# strip anything that we're wrapping; this is a bit unnecessarily
# aggressive in cases but guarantees normalization
strip_element = newline_wrap_element | space_wrap_element
class Standoff:
def __init__(self, element, start, end):
self.element = element
self.start = start
self.end = end
def txt(s):
return s if s is not None else ""
def text_and_standoffs(e):
strings, standoffs = [], []
_text_and_standoffs(e, 0, strings, standoffs)
text = "".join(strings)
return text, standoffs
def _text_and_standoffs(e, curroff, strings, standoffs):
startoff = curroff
# to keep standoffs in element occurrence order, append
# a placeholder before recursing
so = Standoff(e, 0, 0)
standoffs.append(so)
if e.text is not None and e.text != "":
strings.append(e.text)
curroff += len(e.text)
curroff = _subelem_text_and_standoffs(e, curroff, strings, standoffs)
so.start = startoff
so.end = curroff
return curroff
def _subelem_text_and_standoffs(e, curroff, strings, standoffs):
startoff = curroff
for s in e:
curroff = _text_and_standoffs(s, curroff, strings, standoffs)
if s.tail is not None and s.tail != "":
strings.append(s.tail)
curroff += len(s.tail)
return curroff
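# Illustrative check (not part of the original tool): text_and_standoffs
# flattens an element tree into its text plus one standoff per element, in
# element occurrence order.
def _standoff_demo():
    e = ET.fromstring('<p>Hello <b>world</b>!</p>')
    text, standoffs = text_and_standoffs(e)
    assert text == 'Hello world!'
    assert (standoffs[1].start, standoffs[1].end) == (6, 11)  # the <b> element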
def preceding_space(pos, text, rewritten={}):
while pos > 0:
pos -= 1
if pos not in rewritten:
# no rewrite, check normally
return text[pos].isspace()
elif rewritten[pos] is not None:
# refer to rewritten instead of original
return rewritten[pos].isspace()
else:
# character deleted, ignore position
pass
# accept start of text
return True
def following_space(pos, text, rewritten={}):
while pos < len(text):
if pos not in rewritten:
# no rewrite, check normally
return text[pos].isspace()
elif rewritten[pos] is not None:
# refer to rewritten instead of original
return rewritten[pos].isspace()
else:
# character deleted, ignore position
pass
pos += 1
# accept end of text
return True
def preceding_linebreak(pos, text, rewritten={}):
if pos >= len(text):
return True
while pos > 0:
pos -= 1
c = rewritten.get(pos, text[pos])
if c == "\n":
return True
elif c is not None and not c.isspace():
return False
else:
# space or deleted, check further
pass
return True
def following_linebreak(pos, text, rewritten={}):
while pos < len(text):
c = rewritten.get(pos, text[pos])
if c == "\n":
return True
elif c is not None and not c.isspace():
return False
else:
# space or deleted, check further
pass
pos += 1
return True
def index_in_parent(e, p):
    """
    Returns the index of the given element e in its parent element p.
    """
    index = None
    for i in range(len(p)):
        if p[i] == e:
            index = i
            break
    assert index is not None, "index_in_parent: error: not parent and child"
    return index
def space_normalize(root, text=None, standoffs=None):
"""
Eliminates multiple consequtive spaces and normalizes newlines
(and other space) into regular space.
"""
if text is None or standoffs is None:
text, standoffs = text_and_standoffs(root)
# TODO: this is crude and destructive; improve!
for so in standoffs:
e = so.element
if e.text is not None and e.text != "":
e.text = re.sub(r'\s+', ' ', e.text)
if e.tail is not None and e.tail != "":
e.tail = re.sub(r'\s+', ' ', e.tail)
def strip_elements(root, elements_to_strip=set(), text=None, standoffs=None):
"""
Removes initial and terminal space from elements that either have
surrounding space or belong to given set of elements to strip.
"""
if text is None or standoffs is None:
text, standoffs = text_and_standoffs(root)
# during processing, keep note at which offsets spaces have
# been eliminated.
rewritten = {}
for so in standoffs:
e = so.element
# don't remove expressly inserted space
if e.tag == INSERTED_ELEMENT_TAG:
continue
# if the element contains initial space and is either marked
# for space stripping or preceded by space, remove the initial
# space.
if ((e.text is not None and e.text != "" and e.text[0].isspace()) and
(element_in_set(e, elements_to_strip) or
preceding_space(so.start, text, rewritten))):
l = 0
while l < len(e.text) and e.text[l].isspace():
l += 1
space, end = e.text[:l], e.text[l:]
for i in range(l):
assert so.start+i not in rewritten, "ERROR: dup remove at %d" % (so.start+i)
rewritten[so.start+i] = None
e.text = end
# element-final space is in e.text only if the element has no
# children; if it does, the element-final space is found in
# the tail of the last child.
if len(e) == 0:
if ((e.text is not None and e.text != "" and e.text[-1].isspace()) and
(element_in_set(e, elements_to_strip) or
following_space(so.end, text, rewritten))):
l = 0
while l < len(e.text) and e.text[-l-1].isspace():
l += 1
start, space = e.text[:-l], e.text[-l:]
for i in range(l):
o = so.end-i-1
assert o not in rewritten, "ERROR: dup remove"
rewritten[o] = None
e.text = start
else:
c = e[-1]
if ((c.tail is not None and c.tail != "" and c.tail[-1].isspace()) and
(element_in_set(e, elements_to_strip) or
following_space(so.end, text, rewritten))):
l = 0
while l < len(c.tail) and c.tail[-l-1].isspace():
l += 1
start, space = c.tail[:-l], c.tail[-l:]
for i in range(l):
o = so.end-i-1
assert o not in rewritten, "ERROR: dup remove"
rewritten[o] = None
c.tail = start
def trim_tails(root):
"""
Trims the beginning of the tail of elements where it is preceded
by space.
"""
# This function is primarily necessary to cover the special case
# of empty elements preceded and followed by space, as the
# consecutive spaces created by such elements are not accessible
# to the normal text content-stripping functionality.
# work with standoffs for reference
text, standoffs = text_and_standoffs(root)
for so in standoffs:
e = so.element
if (e.tail is not None and e.tail != "" and e.tail[0].isspace() and
preceding_space(so.end, text)):
l = 0
while l < len(e.tail) and e.tail[l].isspace():
l += 1
space, end = e.tail[:l], e.tail[l:]
e.tail = end
def reduce_space(root, elements_to_strip=set()):
"""
Performs space-removing normalizations.
"""
# convert tree into text and standoffs for reference
text, standoffs = text_and_standoffs(root)
strip_elements(root, elements_to_strip, text, standoffs)
trim_tails(root)
space_normalize(root, text, standoffs)
def element_in_set(e, s):
# strip namespaces for lookup
if e.tag[0] == "{":
tag = re.sub(r'\{.*?\}', '', e.tag)
else:
tag = e.tag
return tag in s
def process(fn):
global strip_element
global options
# ugly hack for testing: allow "-" for "/dev/stdin"
if fn == "-":
fn = "/dev/stdin"
try:
tree = ET.parse(fn)
except:
print >> sys.stderr, "Error parsing %s" % fn
raise
root = tree.getroot()
########## space normalization and stripping ##########
reduce_space(root, strip_element)
########## additional space ##########
# convert tree into text and standoffs
text, standoffs = text_and_standoffs(root)
# traverse standoffs and mark each position before which a space
# or a newline should be assured. Values are (pos, early), where
# pos is the offset where the break should be placed, and early
# determines whether to select the first or the last among
# multiple alternative tags before/after which to place the break.
respace = {}
for so in standoffs:
e = so.element
if element_in_set(e, newline_wrap_element):
# "late" newline gets priority
if not (so.start in respace and (respace[so.start][0] == "\n" and
respace[so.start][1] == False)):
respace[so.start] = ("\n", True)
respace[so.end] = ("\n", False)
elif element_in_set(e, space_wrap_element):
# newlines and "late" get priority
if not (so.start in respace and (respace[so.start][0] == "\n" or
respace[so.start][1] == False)):
respace[so.start] = (" ", True)
if not (so.end in respace and respace[so.end][0] == "\n"):
respace[so.end] = (" ", False)
# next, filter respace to remove markers where the necessary space
# is already present in the text.
# to allow the filter to take into account linebreaks that will be
# introduced as part of the processing, maintain rewritten
# positions separately. (live updating of the text would be too
# expensive computationally.) As the processing is left-to-right,
# it's enough to use this for preceding positions and to mark
# inserts as appearing "before" the place where space is required.
rewritten = {}
filtered = {}
for pos in sorted(respace.keys()):
if respace[pos][0] == " ":
# unnecessary if initial, terminal, or preceded/followed
# by a space
if not (preceding_space(pos, text, rewritten) or
following_space(pos, text, rewritten)):
filtered[pos] = respace[pos]
rewritten[pos-1] = " "
else:
assert respace[pos][0] == "\n", "INTERNAL ERROR"
# unnecessary if there's either a preceding or following
# newline connected by space
if not (preceding_linebreak(pos, text, rewritten) or
following_linebreak(pos, text, rewritten)):
filtered[pos] = respace[pos]
rewritten[pos-1] = "\n"
respace = filtered
# for reference, create a map from elements to their parents in the tree.
parent_map = {}
for parent in root.getiterator():
for child in parent:
parent_map[child] = parent
# for reference, create a map from positions to standoffs ending
# at each.
# TODO: avoid indexing everything; this is only required for
# standoffs ending at respace positions
end_map = {}
for so in standoffs:
if so.end not in end_map:
end_map[so.end] = []
end_map[so.end].append(so)
# traverse standoffs again, adding the new elements as needed.
for so in standoffs:
if so.start in respace and respace[so.start][1] == True:
# Early space needed here. The current node can be assumed
# to be the first to "discover" this, so it's appropriate
# to add space before the current node. We can further
# assume the current node has a parent (adding space
# before the root is meaningless), so we can add the space
# node as the preceding child in the parent.
e = so.element
assert e in parent_map, "INTERNAL ERROR: add space before root?"
p = parent_map[e]
i = index_in_parent(e, p)
rse = ET.Element(INSERTED_ELEMENT_TAG)
rse.text = respace[so.start][0]
p.insert(i, rse)
# done, clear
del respace[so.start]
if so.end in respace and respace[so.end][1] == False:
# Late space needed here. Add after the current node iff
# it's the first of the nodes with the longest span ending
# here (i.e. the outermost).
maxlen = max([s.end-s.start for s in end_map[so.end]])
if so.end-so.start != maxlen:
continue
longest = [s for s in end_map[so.end] if s.end-s.start == maxlen]
if so != longest[0]:
continue
# OK to add.
e = so.element
assert e in parent_map, "INTERNAL ERROR: add space after root?"
p = parent_map[e]
i = index_in_parent(e, p)
rse = ET.Element(INSERTED_ELEMENT_TAG)
rse.text = respace[so.end][0]
p.insert(i+1, rse)
# need to relocate tail
rse.tail = e.tail
e.tail = ""
# done, clear
del respace[so.end]
assert len(respace) == 0, "INTERNAL ERROR: failed to insert %s" % str(respace)
# re-process to clear out consequtive space potentially introduced
# in previous processing.
strip_elements(root)
trim_tails(root)
# all done, output
if options.stdout:
tree.write(sys.stdout, encoding=OUTPUT_ENCODING)
return True
if options is not None and options.directory is not None:
output_dir = options.directory
else:
output_dir = ""
output_fn = os.path.join(output_dir, os.path.basename(fn))
# TODO: better checking of path identify to protect against
# clobbering.
if output_fn == fn and not options.overwrite:
print >> sys.stderr, 'respace: skipping output for %s: file would overwrite input (consider -d and -o options)' % fn
else:
# OK to write output_fn
try:
with open(output_fn, 'w') as of:
tree.write(of, encoding=OUTPUT_ENCODING)
except IOError, ex:
print >> sys.stderr, 'respace: failed write: %s' % ex
return True
def argparser():
import argparse
ap=argparse.ArgumentParser(description='Revise whitespace content of a PMC NXML file for text extraction.')
ap.add_argument('-d', '--directory', default=None, metavar='DIR', help='output directory')
ap.add_argument('-o', '--overwrite', default=False, action='store_true', help='allow output to overwrite input files')
ap.add_argument('-s', '--stdout', default=False, action='store_true', help='output to stdout')
ap.add_argument('file', nargs='+', help='input PubMed Central NXML file')
return ap
def main(argv):
global options
options = argparser().parse_args(argv[1:])
for fn in options.file:
process(fn)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| brat-master | tools/respace.py |
#!/usr/bin/env python
import sys
import re
try:
import cElementTree as ET
except:
import xml.etree.cElementTree as ET
# tags of elements to exclude from standoff output
EXCLUDED_TAGS = [
"PAPER",
"s",
]
EXCLUDED_TAG = { t:True for t in EXCLUDED_TAGS }
# string to use to indicate elided text in output
ELIDED_TEXT_STRING = "[[[...]]]"
# maximum length of text strings printed without elision
MAXIMUM_TEXT_DISPLAY_LENGTH = 1000
# c-style string escaping for just newline, tab and backslash.
# (s.encode('string_escape') does too much for utf-8)
def c_escape(s):
return s.replace('\\', '\\\\').replace('\t','\\t').replace('\n','\\n')
def strip_ns(tag):
# remove namespace spec from tag, if any
return tag if tag[0] != '{' else re.sub(r'\{.*?\}', '', tag)
class Standoff:
def __init__(self, sid, element, start, end, text):
self.sid = sid
self.element = element
self.start = start
self.end = end
self.text = text
def strip(self):
while self.start < self.end and self.text[0].isspace():
self.start += 1
self.text = self.text[1:]
while self.start < self.end and self.text[-1].isspace():
self.end -= 1
self.text = self.text[:-1]
def compress_text(self, l):
if len(self.text) >= l:
el = len(ELIDED_TEXT_STRING)
sl = (l-el)/2
self.text = (self.text[:sl]+ELIDED_TEXT_STRING+self.text[-(l-sl-el):])
def tag(self):
return strip_ns(self.element.tag)
def attrib(self):
# remove namespace specs from attribute names, if any
attrib = {}
for a in self.element.attrib:
if a[0] == "{":
an = re.sub(r'\{.*?\}', '', a)
else:
an = a
attrib[an] = self.element.attrib[a]
return attrib
def __str__(self):
return "X%d\t%s %d %d\t%s\t%s" % \
(self.sid, self.tag(), self.start, self.end,
c_escape(self.text.encode("utf-8")),
" ".join(['%s="%s"' % (k.encode("utf-8"), v.encode("utf-8"))
for k,v in self.attrib().items()]))
def txt(s):
return s if s is not None else ""
next_free_so_id = 1
def text_and_standoffs(e, curroff=0, standoffs=None):
global next_free_so_id
if standoffs == None:
standoffs = []
startoff = curroff
# to keep standoffs in element occurrence order, append
# a placeholder before recursing
so = Standoff(next_free_so_id, e, 0, 0, "")
next_free_so_id += 1
standoffs.append(so)
setext, _ = subelem_text_and_standoffs(e, curroff+len(txt(e.text)),
standoffs)
text = txt(e.text) + setext
curroff += len(text)
so.start = startoff
so.end = curroff
so.text = text
return (text, standoffs)
def subelem_text_and_standoffs(e, curroff, standoffs):
startoff = curroff
text = ""
for s in e:
stext, dummy = text_and_standoffs(s, curroff, standoffs)
text += stext
text += txt(s.tail)
curroff = startoff + len(text)
return (text, standoffs)
NORM_SPACE_REGEX = re.compile(r'\s+')
def normalize_space(e, tags=None):
# eliminate document-initial space
if strip_ns(e.tag) == 'PAPER':
assert e.text == '' or e.text.isspace()
e.text = ''
if tags is None or strip_ns(e.tag) in tags:
if e.text is not None:
n = NORM_SPACE_REGEX.sub(' ', e.text)
e.text = n
if e.tail is not None:
n = NORM_SPACE_REGEX.sub(' ', e.tail)
e.tail = n
for c in e:
normalize_space(c)
def add_newlines(e):
if (strip_ns(e.tag) == 'segment' and
e.attrib.get('segtype').strip() == 'Header'):
assert e.tail == '' or e.tail.isspace(), 'unexpected content in tail'
e.text = '\n' + (e.text if e.text is not None else '')
e.tail = '\n'
for c in e:
add_newlines(c)
def generate_id(prefix):
if prefix not in generate_id._next:
generate_id._next[prefix] = 1
id_ = prefix+str(generate_id._next[prefix])
generate_id._next[prefix] += 1
return id_
generate_id._next = {}
def convert_segment(s):
sostrings = []
# ignore empties
if s.start == s.end:
return []
# first attempt:
# # segment maps to "segment" textbound, with "section" and
# # "segtype" attributes as attributes of this textbound.
# tid = generate_id("T")
# sostrings.append('%s\t%s %d %d\t%s' % \
# (tid, s.tag(), s.start, s.end, s.text.encode('utf-8')))
# aid = generate_id("A")
# sostrings.append('%s\tsection %s %s' % \
# (aid, tid, s.attrib()['section'].strip()))
# aid = generate_id("A")
# sostrings.append('%s\tsegtype %s %s' % \
# (aid, tid, s.attrib()['segtype'].strip()))
# second attempt:
# create a textbound of the type specified by the "type"
# attribute.
tid = generate_id('T')
sostrings.append('%s\t%s %d %d\t%s' % \
(tid, s.attrib()['segtype'].strip(), s.start, s.end,
s.text.encode('utf-8')))
return sostrings
convert_function = {
"segment" : convert_segment,
}
def main(argv=[]):
if len(argv) != 4:
print >> sys.stderr, "Usage:", argv[0], "IN-XML OUT-TEXT OUT-SO"
return -1
in_fn, out_txt_fn, out_so_fn = argv[1:]
# "-" for STDIN / STDOUT
if in_fn == "-":
in_fn = "/dev/stdin"
if out_txt_fn == "-":
out_txt_fn = "/dev/stdout"
if out_so_fn == "-":
out_so_fn = "/dev/stdout"
tree = ET.parse(in_fn)
root = tree.getroot()
# normalize space in target elements
normalize_space(root, ['segment'])
add_newlines(root)
text, standoffs = text_and_standoffs(root)
# eliminate extra space
for s in standoffs:
s.strip()
# filter
standoffs = [s for s in standoffs if not s.tag() in EXCLUDED_TAG]
# convert selected elements
converted = []
for s in standoffs:
if s.tag() in convert_function:
converted.extend(convert_function[s.tag()](s))
else:
converted.append(s)
standoffs = converted
for so in standoffs:
try:
so.compress_text(MAXIMUM_TEXT_DISPLAY_LENGTH)
except AttributeError:
pass
# open output files
out_txt = open(out_txt_fn, "wt")
out_so = open(out_so_fn, "wt")
out_txt.write(text.encode("utf-8"))
for so in standoffs:
print >> out_so, so
out_txt.close()
out_so.close()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| brat-master | tools/discsegtostandoff.py |
#!/usr/bin/env python
# Script to convert a CoNLL 2002-flavored BIO-formatted entity-tagged
# file into BioNLP ST-flavored standoff and a reconstruction of the
# original text.
from __future__ import with_statement
import sys
import re
import os
import codecs
INPUT_ENCODING = "Latin-1"
OUTPUT_ENCODING = "UTF-8"
output_directory = None
def quote(s):
return s in ('"', )
def space(t1, t2, quote_count = None):
# Helper for reconstructing sentence text. Given the text of two
# consecutive tokens, returns a heuristic estimate of whether a
# space character should be placed between them.
if re.match(r'^[\(]$', t1):
return False
if re.match(r'^[.,\)\?\!]$', t2):
return False
if quote(t1) and quote_count is not None and quote_count % 2 == 1:
return False
if quote(t2) and quote_count is not None and quote_count % 2 == 1:
return False
return True
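# Illustrative check (not part of the original tool): the heuristic above
# drops the space before closing punctuation and after an opening parenthesis
# when reconstructing sentence text.
def _space_heuristic_demo():
    assert space('word', 'another')  # ordinary token pair: insert a space
    assert not space('word', '.')    # no space before a sentence-final period
    assert not space('(', 'word')    # no space after an opening parenthesis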
def tagstr(start, end, ttype, idnum, text):
# sanity checks
assert '\n' not in text, "ERROR: newline in entity '%s'" % (text)
assert text == text.strip(), "ERROR: tagged span contains extra whitespace: '%s'" % (text)
return "T%d\t%s %d %d\t%s" % (idnum, ttype, start, end, text)
def output(infn, docnum, sentences):
global output_directory
if output_directory is None:
txtout = sys.stdout
soout = sys.stdout
else:
outfn = os.path.join(output_directory, os.path.basename(infn)+'-doc-'+str(docnum))
txtout = codecs.open(outfn+'.txt', 'wt', encoding=OUTPUT_ENCODING)
soout = codecs.open(outfn+'.ann', 'wt', encoding=OUTPUT_ENCODING)
offset, idnum = 0, 1
doctext = ""
for si, sentence in enumerate(sentences):
prev_token = None
prev_tag = "O"
curr_start, curr_type = None, None
quote_count = 0
for token, ttag, ttype in sentence:
if curr_type is not None and (ttag != "I" or ttype != curr_type):
# a previously started tagged sequence does not
# continue into this position.
print >> soout, tagstr(curr_start, offset, curr_type, idnum, doctext[curr_start:offset])
idnum += 1
curr_start, curr_type = None, None
if prev_token is not None and space(prev_token, token, quote_count):
doctext = doctext + ' '
offset += 1
if curr_type is None and ttag != "O":
# a new tagged sequence begins here
curr_start, curr_type = offset, ttype
doctext = doctext + token
offset += len(token)
if quote(token):
quote_count += 1
prev_token = token
prev_tag = ttag
# leftovers?
if curr_type is not None:
print >> soout, tagstr(curr_start, offset, curr_type, idnum, doctext[curr_start:offset])
idnum += 1
if si+1 != len(sentences):
doctext = doctext + '\n'
offset += 1
print >> txtout, doctext
def process(fn):
docnum = 1
sentences = []
with codecs.open(fn, encoding=INPUT_ENCODING) as f:
# store (token, BIO-tag, type) triples for sentence
current = []
lines = f.readlines()
for ln, l in enumerate(lines):
l = l.strip()
if re.match(r'^\s*$', l):
# blank lines separate sentences
if len(current) > 0:
sentences.append(current)
current = []
continue
elif (re.match(r'^===*\s+O\s*$', l) or
re.match(r'^-DOCSTART-', l)):
# special character sequence separating documents
if len(sentences) > 0:
output(fn, docnum, sentences)
sentences = []
docnum += 1
continue
if (ln + 2 < len(lines) and
re.match(r'^\s*$', lines[ln+1]) and
re.match(r'^-+\s+O\s*$', lines[ln+2])):
# heuristic match for likely doc before current line
if len(sentences) > 0:
output(fn, docnum, sentences)
sentences = []
docnum += 1
# go on to process current normally
# Assume it's a normal line. The format for spanish is
# is word and BIO tag separated by space, and for dutch
# word, POS and BIO tag separated by space. Try both.
m = re.match(r'^(\S+)\s(\S+)$', l)
if not m:
m = re.match(r'^(\S+)\s\S+\s(\S+)$', l)
assert m, "Error parsing line %d: %s" % (ln+1, l)
token, tag = m.groups()
# parse tag
m = re.match(r'^([BIO])((?:-[A-Za-z_]+)?)$', tag)
assert m, "ERROR: failed to parse tag '%s' in %s" % (tag, fn)
ttag, ttype = m.groups()
if len(ttype) > 0 and ttype[0] == "-":
ttype = ttype[1:]
current.append((token, ttag, ttype))
# process leftovers, if any
if len(current) > 0:
sentences.append(current)
if len(sentences) > 0:
output(fn, docnum, sentences)
def main(argv):
global output_directory
# Take an optional "-o" arg specifying an output directory for the results
output_directory = None
filenames = argv[1:]
if len(argv) > 2 and argv[1] == "-o":
output_directory = argv[2]
print >> sys.stderr, "Writing output to %s" % output_directory
filenames = argv[3:]
fail_count = 0
for fn in filenames:
try:
process(fn)
except Exception, e:
print >> sys.stderr, "Error processing %s: %s" % (fn, e)
fail_count += 1
if fail_count > 0:
print >> sys.stderr, """
##############################################################################
#
# WARNING: error in processing %d/%d files, output is incomplete!
#
##############################################################################
""" % (fail_count, len(filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| brat-master | tools/conll02tostandoff.py |
#!/usr/bin/env python
# Script to convert a column-based BIO-formatted entity-tagged file
# into standoff with reference to the original text.
from __future__ import with_statement
import sys
import re
import os
import codecs
class taggedEntity:
def __init__(self, startOff, endOff, eType, idNum, fullText):
self.startOff = startOff
self.endOff = endOff
self.eType = eType
self.idNum = idNum
self.fullText = fullText
self.eText = fullText[startOff:endOff]
def __str__(self):
return "T%d\t%s %d %d\t%s" % (self.idNum, self.eType, self.startOff,
self.endOff, self.eText)
def check(self):
# sanity checks: the string should not contain newlines and
# should be minimal wrt surrounding whitespace
assert "\n" not in self.eText, \
"ERROR: newline in entity: '%s'" % self.eText
assert self.eText == self.eText.strip(), \
"ERROR: entity contains extra whitespace: '%s'" % self.eText
def BIO_to_standoff(BIOtext, reftext, tokenidx=2, tagidx=-1):
BIOlines = BIOtext.split('\n')
return BIO_lines_to_standoff(BIOlines, reftext, tokenidx, tagidx)
next_free_id_idx = 1
def BIO_lines_to_standoff(BIOlines, reftext, tokenidx=2, tagidx=-1):
global next_free_id_idx
taggedTokens = []
ri, bi = 0, 0
while(ri < len(reftext)):
if bi >= len(BIOlines):
print >> sys.stderr, "Warning: received BIO didn't cover given text"
break
BIOline = BIOlines[bi]
if re.match(r'^\s*$', BIOline):
# the BIO has an empty line (sentence split); skip
bi += 1
else:
# assume tagged token in BIO. Parse and verify
fields = BIOline.split('\t')
try:
tokentext = fields[tokenidx]
except:
print >> sys.stderr, "Error: failed to get token text " \
"(field %d) on line: %s" % (tokenidx, BIOline)
raise
try:
tag = fields[tagidx]
except:
print >> sys.stderr, "Error: failed to get token text " \
"(field %d) on line: %s" % (tagidx, BIOline)
raise
m = re.match(r'^([BIO])((?:-[A-Za-z0-9_-]+)?)$', tag)
assert m, "ERROR: failed to parse tag '%s'" % tag
ttag, ttype = m.groups()
# strip off starting "-" from tagged type
if len(ttype) > 0 and ttype[0] == "-":
ttype = ttype[1:]
# sanity check
assert ((ttype == "" and ttag == "O") or
(ttype != "" and ttag in ("B","I"))), \
"Error: tag/type mismatch %s" % tag
# go to the next token on reference; skip whitespace
while ri < len(reftext) and reftext[ri].isspace():
ri += 1
# verify that the text matches the original
assert reftext[ri:ri+len(tokentext)] == tokentext, \
"ERROR: text mismatch: reference '%s' tagged '%s'" % \
(reftext[ri:ri+len(tokentext)].encode("UTF-8"),
tokentext.encode("UTF-8"))
# store tagged token as (begin, end, tag, tagtype) tuple.
taggedTokens.append((ri, ri+len(tokentext), ttag, ttype))
# skip the processed token
ri += len(tokentext)
bi += 1
# ... and skip whitespace on reference
while ri < len(reftext) and reftext[ri].isspace():
ri += 1
    # if the remaining part of either the reference or the tagged
# contains nonspace characters, something's wrong
if (len([c for c in reftext[ri:] if not c.isspace()]) != 0 or
len([c for c in BIOlines[bi:] if not re.match(r'^\s*$', c)]) != 0):
assert False, "ERROR: failed alignment: '%s' remains in reference, " \
"'%s' in tagged" % (reftext[ri:], BIOlines[bi:])
standoff_entities = []
# cleanup for tagger errors where an entity begins with a
# "I" tag instead of a "B" tag
revisedTagged = []
prevTag = None
for startoff, endoff, ttag, ttype in taggedTokens:
if prevTag == "O" and ttag == "I":
print >> sys.stderr, "Note: rewriting \"I\" -> \"B\" after \"O\""
ttag = "B"
revisedTagged.append((startoff, endoff, ttag, ttype))
prevTag = ttag
taggedTokens = revisedTagged
# cleanup for tagger errors where an entity switches type
# without a "B" tag at the boundary
revisedTagged = []
prevTag, prevType = None, None
for startoff, endoff, ttag, ttype in taggedTokens:
if prevTag in ("B", "I") and ttag == "I" and prevType != ttype:
print >> sys.stderr, "Note: rewriting \"I\" -> \"B\" at type switch"
ttag = "B"
revisedTagged.append((startoff, endoff, ttag, ttype))
prevTag, prevType = ttag, ttype
taggedTokens = revisedTagged
prevTag, prevEnd = "O", 0
currType, currStart = None, None
for startoff, endoff, ttag, ttype in taggedTokens:
if prevTag != "O" and ttag != "I":
# previous entity does not continue into this tag; output
            assert currType is not None and currStart is not None, \
                "ERROR: no open entity to output"
standoff_entities.append(taggedEntity(currStart, prevEnd, currType,
next_free_id_idx, reftext))
next_free_id_idx += 1
# reset current entity
currType, currStart = None, None
elif prevTag != "O":
# previous entity continues ; just check sanity
assert ttag == "I", "ERROR in %s" % fn
assert currType == ttype, "ERROR: entity of type '%s' continues " \
"as type '%s'" % (currType, ttype)
if ttag == "B":
# new entity starts
currType, currStart = ttype, startoff
prevTag, prevEnd = ttag, endoff
# if there's an open entity after all tokens have been processed,
# we need to output it separately
if prevTag != "O":
standoff_entities.append(taggedEntity(currStart, prevEnd, currType,
next_free_id_idx, reftext))
next_free_id_idx += 1
for e in standoff_entities:
e.check()
return standoff_entities
RANGE_RE = re.compile(r'^(-?\d+)-(-?\d+)$')
def parse_indices(idxstr):
# parse strings of forms like "4,5" and "6,8-11", return list of
# indices.
indices = []
for i in idxstr.split(','):
if not RANGE_RE.match(i):
indices.append(int(i))
else:
start, end = RANGE_RE.match(i).groups()
for j in range(int(start), int(end)):
indices.append(j)
return indices
def main(argv):
if len(argv) < 3 or len(argv) > 5:
print >> sys.stderr, "Usage:", argv[0], "TEXTFILE BIOFILE [TOKENIDX [BIOIDX]]"
return 1
textfn, biofn = argv[1], argv[2]
tokenIdx = None
if len(argv) >= 4:
tokenIdx = int(argv[3])
bioIdx = None
if len(argv) >= 5:
bioIdx = argv[4]
with open(textfn, 'rU') as textf:
text = textf.read()
with open(biofn, 'rU') as biof:
bio = biof.read()
if tokenIdx is None:
so = BIO_to_standoff(bio, text)
elif bioIdx is None:
so = BIO_to_standoff(bio, text, tokenIdx)
else:
try:
indices = parse_indices(bioIdx)
except:
print >> sys.stderr, 'Error: failed to parse indices "%s"' % bioIdx
return 1
so = []
for i in indices:
so.extend(BIO_to_standoff(bio, text, tokenIdx, i))
for s in so:
print s
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
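# Illustrative, non-invoked demo of the conversion above: a tiny fabricated
# NERsuite-style BIO block (token text in column 2, BIO tag in the last
# column, matching the defaults of BIO_to_standoff). The tokens, tags and the
# reference sentence are all made up for the example.
def _demo():
    text = 'IL-2 activates NF-kappaB.'
    bio = '\n'.join([
        '0\t4\tIL-2\tIL-2\tNN\tB-NP\tB-Protein',
        '5\t14\tactivates\tactivate\tVBZ\tB-VP\tO',
        '15\t24\tNF-kappaB\tNF-kappaB\tNN\tB-NP\tB-Protein',
        '24\t25\t.\t.\t.\tO\tO',
    ])
    # expected: two Protein textbounds, "IL-2" (0-4) and "NF-kappaB" (15-24)
    for entity in BIO_to_standoff(bio, text):
        print entity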
| brat-master | tools/BIOtoStandoff.py |
#!/usr/bin/env python
import sys
import re
import os
options = None
DEFAULT_INPUT = 'entities-anatomy.csv'
# Document ID format in BioContext data
BIOCONTEXT_ID_RE = re.compile(r'^([0-9]+|PMC[0-9]+\.[0-9]+\.[0-9]+)$')
def argparser():
import argparse
ap=argparse.ArgumentParser(description='Convert BioContext data ' +
'into brat-flavored standoff.')
ap.add_argument('-d', '--directory', default=None,
help='Output directory (default output to STDOUT)')
ap.add_argument('-e', '--entitytype', default='Anatomical_entity',
help='Type to assign to annotations.')
ap.add_argument('-f', '--file', default=DEFAULT_INPUT,
help='BioContext data (default "'+DEFAULT_INPUT+'")')
ap.add_argument('-n', '--no-norm', default=False, action='store_true',
help='Do not output normalization annotations')
ap.add_argument('-o', '--outsuffix', default='ann',
help='Suffix to add to output files (default "ann")')
ap.add_argument('-v', '--verbose', default=False, action='store_true',
help='Verbose output')
ap.add_argument('id', metavar='ID/FILE', nargs='+',
help='IDs of documents for which to extract annotations.')
return ap
def read_ids(fn):
ids = set()
with open(fn, 'rU') as f:
for l in f:
l = l.rstrip('\n')
if not BIOCONTEXT_ID_RE.match(l):
print >> sys.stderr, 'Warning: ID %s not in expected format' % l
ids.add(l)
return ids
def get_ids(items):
"""Given a list of either document IDs in BioContext format or
names of files containing one ID per line, return the combined set
of IDs."""
combined = set()
for item in items:
if BIOCONTEXT_ID_RE.match(item):
combined.add(item)
else:
# assume name of file containing IDs
combined |= read_ids(item)
return combined
def convert_line(l, converted):
try:
doc_id, id_, eid, start, end, text, group = l.split('\t')
if id_ == 'NULL':
return 0
start, end = int(start), int(end)
except:
print >> sys.stderr, 'Format error: %s' % l
raise
# textbound annotation
converted.append('T%s\t%s %d %d\t%s' % (id_, options.entitytype,
start, end, text))
# normalization (grounding) annotation
if not options.no_norm:
converted.append('N%s\tReference T%s %s' % (id_, id_, eid))
def output_(out, ann):
for a in ann:
print >> out, a
def output(id_, ann, append):
if not options.directory:
        output_(sys.stdout, ann)
else:
fn = os.path.join(options.directory, id_+'.'+options.outsuffix)
with open(fn, 'a' if append else 'w') as f:
output_(f, ann)
def process_(f, ids):
ann, current, processed = [], None, set()
for l in f:
l = l.strip()
id_ = l.split('\t')[0]
if id_ == current:
if id_ in ids:
convert_line(l, ann)
else:
# new document
if current in ids:
output(current, ann, current in processed)
ann = []
processed.add(current)
if id_ in ids:
if id_ in processed and options.verbose:
print >> sys.stderr, 'Warning: %s split' % id_
convert_line(l, ann)
current = id_
# short-circuit after processing last
if ids == processed:
break
if ann:
output(current, ann, current in processed)
for id_ in ids - processed:
print >> sys.stderr, 'Warning: id %s not found' % id_
def process(fn, ids):
try:
with open(fn, 'rU') as f:
# first line should be header; skip and confirm
header = f.readline()
if not header.startswith('doc_id\tid'):
print >> sys.stderr, 'Warning: %s missing header' % fn
process_(f, ids)
except IOError, e:
print >> sys.stderr, e, '(try -f argument?)'
def main(argv=None):
global options
if argv is None:
argv = sys.argv
options = argparser().parse_args(argv[1:])
ids = get_ids(options.id)
process(options.file, ids)
if __name__ == '__main__':
sys.exit(main(sys.argv))
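# Illustrative, non-invoked demo of convert_line: one fabricated
# BioContext-style TSV line (doc_id, id, entity id, start, end, text, group).
# The concrete IDs and offsets are made up, and a minimal options object is
# faked because convert_line reads the module-level options.
def _demo():
    import argparse
    global options
    options = argparse.Namespace(entitytype='Anatomical_entity', no_norm=False)
    converted = []
    convert_line('12345678\t1\tUBERON:0000948\t10\t15\theart\tG1', converted)
    for a in converted:
        print a     # a T1 textbound followed by an N1 normalization line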
| brat-master | tools/biocontext2standoff.py |
#!/usr/bin/env python
'''
An example of a tagging service using NER suite.
'''
from argparse import ArgumentParser
from os.path import join as path_join
from os.path import dirname
try:
from json import dumps
except ImportError:
# likely old Python; try to fall back on ujson in brat distrib
from sys import path as sys_path
sys_path.append(path_join(dirname(__file__), '../server/lib/ujson'))
from ujson import dumps
from subprocess import PIPE, Popen
from random import choice, randint
from sys import stderr
from urlparse import urlparse
try:
from urlparse import parse_qs
except ImportError:
# old Python again?
from cgi import parse_qs
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import re
# use the brat sentence splitter
from sentencesplit import sentencebreaks_to_newlines
# and use this hack for converting BIO to standoff
from BIOtoStandoff import BIO_lines_to_standoff
### Constants
DOCUMENT_BOUNDARY = 'END-DOCUMENT'
NERSUITE_SCRIPT = path_join(dirname(__file__), './nersuite_tag.sh')
NERSUITE_COMMAND = [NERSUITE_SCRIPT, '-multidoc', DOCUMENT_BOUNDARY]
ARGPARSER = ArgumentParser(description='An example HTTP tagging service using NERsuite')
ARGPARSER.add_argument('-p', '--port', type=int, default=47111,
help='port to run the HTTP service on (default: 47111)')
###
### Globals
tagger_process = None
def run_tagger(cmd):
# runs the tagger identified by the given command.
global tagger_process
try:
tagger_process = Popen(cmd, stdin=PIPE, stdout=PIPE, bufsize=1)
except Exception, e:
print >> stderr, "Error running '%s':" % cmd, e
raise
def _apply_tagger(text):
    global tagger_process
# the tagger expects a sentence per line, so do basic splitting
try:
splittext = sentencebreaks_to_newlines(text)
except:
# if anything goes wrong, just go with the
# original text instead
print >> stderr, "Warning: sentence splitting failed for input:\n'%s'" % text
splittext = text
print >> tagger_process.stdin, splittext
print >> tagger_process.stdin, DOCUMENT_BOUNDARY
tagger_process.stdin.flush()
response_lines = []
while True:
l = tagger_process.stdout.readline()
l = l.rstrip('\n')
if l == DOCUMENT_BOUNDARY:
break
response_lines.append(l)
try:
tagged_entities = BIO_lines_to_standoff(response_lines, text)
except:
# if anything goes wrong, bail out
print >> stderr, "Warning: BIO-to-standoff conversion failed for BIO:\n'%s'" % '\n'.join(response_lines)
return {}
anns = {}
for t in tagged_entities:
anns["T%d" % t.idNum] = {
'type': t.eType,
'offsets': ((t.startOff, t.endOff), ),
'texts': (t.eText, ),
}
return anns
class NERsuiteTaggerHandler(BaseHTTPRequestHandler):
def do_GET(self):
# Get our query
query = parse_qs(urlparse(self.path).query)
try:
json_dic = _apply_tagger(query['text'][0])
except KeyError:
# We weren't given any text to tag, such is life, return nothing
json_dic = {}
# Write the response
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.end_headers()
self.wfile.write(dumps(json_dic))
print >> stderr, ('Generated %d annotations' % len(json_dic))
def log_message(self, format, *args):
return # Too much noise from the default implementation
def main(args):
argp = ARGPARSER.parse_args(args[1:])
print >> stderr, 'Starting NERsuite ...'
run_tagger(NERSUITE_COMMAND)
server_class = HTTPServer
httpd = server_class(('localhost', argp.port), NERsuiteTaggerHandler)
print >> stderr, 'NERsuite tagger service started'
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print >> stderr, 'NERsuite tagger service stopped'
if __name__ == '__main__':
from sys import argv
exit(main(argv))
| brat-master | tools/nersuitetaggerservice.py |
#!/usr/bin/env python
'''
An example of a tagging service using metamap.
'''
from argparse import ArgumentParser
from os.path import join as path_join
from os.path import dirname
try:
from json import dumps
except ImportError:
# likely old Python; try to fall back on ujson in brat distrib
from sys import path as sys_path
sys_path.append(path_join(dirname(__file__), '../server/lib/ujson'))
from ujson import dumps
from subprocess import PIPE, Popen
from random import choice, randint
from sys import stderr
from urlparse import urlparse
try:
from urlparse import parse_qs
except ImportError:
# old Python again?
from cgi import parse_qs
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import re
# use the brat sentence splitter
from sentencesplit import sentencebreaks_to_newlines
# use this MetaMap output converter
from MetaMaptoStandoff import MetaMap_lines_to_standoff
### Constants
METAMAP_SCRIPT = path_join(dirname(__file__), './metamap_tag.sh')
METAMAP_COMMAND = [METAMAP_SCRIPT]
ARGPARSER = ArgumentParser(description='An example HTTP tagging service using MetaMap')
ARGPARSER.add_argument('-p', '--port', type=int, default=47111,
help='port to run the HTTP service on (default: 47111)')
###
def run_tagger(cmd):
# runs the tagger identified by the given command.
try:
tagger_process = Popen(cmd, stdin=PIPE, stdout=PIPE, bufsize=1)
return tagger_process
except Exception, e:
print >> stderr, "Error running '%s':" % cmd, e
raise
def _apply_tagger_to_sentence(text):
# can afford to restart this on each invocation
tagger_process = run_tagger(METAMAP_COMMAND)
print >> tagger_process.stdin, text
tagger_process.stdin.close()
tagger_process.wait()
response_lines = []
for l in tagger_process.stdout:
l = l.rstrip('\n')
response_lines.append(l)
try:
tagged_entities = MetaMap_lines_to_standoff(response_lines, text)
except:
# if anything goes wrong, bail out
print >> stderr, "Warning: MetaMap-to-standoff conversion failed for output:\n'%s'" % '\n'.join(response_lines)
raise
#return {}
# MetaMap won't echo matched text, so get this separately
for t in tagged_entities:
t.eText = text[t.startOff:t.endOff]
return tagged_entities
def _apply_tagger(text):
# MetaMap isn't too happy with large outputs, so process a
# sentence per invocation
try:
splittext = sentencebreaks_to_newlines(text)
except:
# if anything goes wrong, just go with the
# original text instead
print >> stderr, "Warning: sentence splitting failed for input:\n'%s'" % text
splittext = text
sentences = splittext.split('\n')
all_tagged = []
baseoffset = 0
for s in sentences:
tagged = _apply_tagger_to_sentence(s)
# adjust offsets
for t in tagged:
t.startOff += baseoffset
t.endOff += baseoffset
all_tagged.extend(tagged)
baseoffset += len(s)+1
anns = {}
idseq = 1
for t in all_tagged:
anns["T%d" % idseq] = {
'type': t.eType,
'offsets': ((t.startOff, t.endOff), ),
'texts': (t.eText, ),
}
idseq += 1
return anns
class MetaMapTaggerHandler(BaseHTTPRequestHandler):
def do_POST(self):
# Get our query
query = parse_qs(urlparse(self.path).query)
try:
json_dic = _apply_tagger(query['text'][0])
except KeyError:
# We weren't given any text to tag, such is life, return nothing
json_dic = {}
# Write the response
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.end_headers()
self.wfile.write(dumps(json_dic))
print >> stderr, ('Generated %d annotations' % len(json_dic))
def log_message(self, format, *args):
return # Too much noise from the default implementation
def main(args):
argp = ARGPARSER.parse_args(args[1:])
print >> stderr, 'Starting MetaMap ...'
server_class = HTTPServer
httpd = server_class(('localhost', argp.port), MetaMapTaggerHandler)
print >> stderr, 'MetaMap tagger service started'
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print >> stderr, 'MetaMap tagger service stopped'
if __name__ == '__main__':
from sys import argv
exit(main(argv))
| brat-master | tools/metamaptaggerservice.py |
#!/usr/bin/env python
# Check that text-bound annotations in .ann file align with the
# corresponding .txt file.
import sys
import re
import codecs
from collections import namedtuple
from os.path import basename
Textbound = namedtuple('Textbound', 'id type start end text')
TEXTBOUND_RE = re.compile(r'^([A-Z]\d+)\t(\S+) (\d+) (\d+)\t(.*)$')
class FormatError(Exception):
pass
def txt_for_ann(fn):
tfn = re.sub(r'\.ann$', '.txt', fn)
if tfn == fn:
raise FormatError
return tfn
def parse_textbound(s):
m = TEXTBOUND_RE.match(s)
if not m:
raise FormatError
id_, type_, start, end, text = m.groups()
start, end = int(start), int(end)
return Textbound(id_, type_, start, end, text)
def process(fn):
textbounds = []
with codecs.open(fn, 'rU', encoding='utf8', errors='strict') as f:
for l in f:
l = l.rstrip('\n')
if not l or l.isspace():
continue
if l[0] != 'T':
continue # assume not a textbound annotation
else:
textbounds.append(parse_textbound(l))
# debugging
# print >> sys.stderr, '%s: %d textbounds' % (basename(fn), len(textbounds))
with codecs.open(txt_for_ann(fn), 'rU', encoding='utf8',
errors='strict') as f:
text = f.read()
for id_, type_, start, end, ttext in textbounds:
try:
assert text[start:end] == ttext
except:
print 'Mismatch in %s: %s %d %d' % (basename(fn), id_, start, end)
print ' reference: %s' % \
ttext.encode('utf-8').replace('\n', '\\n')
print ' document : %s' % \
text[start:end].encode('utf-8').replace('\n', '\\n')
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) < 2:
print >> sys.stderr, 'Usage:', argv[0], 'FILE [FILE [...]]'
return 1
for fn in argv[1:]:
process(fn)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
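# Illustrative, non-invoked demo: write a tiny .txt/.ann pair into a temporary
# directory and run the checker on it. The document text and the single
# annotation are fabricated; mismatches are reported, agreement prints nothing.
def _demo():
    import os
    import tempfile
    d = tempfile.mkdtemp()
    with codecs.open(os.path.join(d, 'doc.txt'), 'w', encoding='utf8') as f:
        f.write(u'IL-2 activates NF-kappaB.')
    with codecs.open(os.path.join(d, 'doc.ann'), 'w', encoding='utf8') as f:
        f.write(u'T1\tProtein 0 4\tIL-2\n')
    process(os.path.join(d, 'doc.ann'))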
| brat-master | tools/checkann.py |
#!/usr/bin/env python
# Script to convert MetaMap "fielded" ("-N" argument) output into
# standoff with reference to the original text.
import sys
import re
import os
import codecs
# Regex for the "signature" of a metamap "fielded" output line
FIELDED_OUTPUT_RE = re.compile(r'^\d+\|')
class taggedEntity:
def __init__(self, startOff, endOff, eType, idNum):
self.startOff = startOff
self.endOff = endOff
self.eType = eType
self.idNum = idNum
def __str__(self):
return "T%d\t%s %d %d" % (self.idNum, self.eType, self.startOff, self.endOff)
def MetaMap_lines_to_standoff(metamap_lines, reftext=None):
tagged = []
idseq = 1
for l in metamap_lines:
l = l.rstrip('\n')
# silently skip lines that don't match the expected format
if not FIELDED_OUTPUT_RE.match(l):
continue
# format is pipe-separated ("|") fields, the ones of interest
# are in the following indices:
# 3: preferred text form
# 4: CUI
# 5: semantic type (MetaMap code)
# 8: start offset and length of match
fields = l.split('|')
if len(fields) < 9:
print >> sys.stderr, "Note: skipping unparseable MetaMap output line: %s" % l
continue
ctext, CUI, semtype, offset = fields[3], fields[4], fields[5], fields[8]
# strip surrounding brackets from semantic type
semtype = semtype.replace('[','').replace(']','')
        # parse offset and length; note that this will only pick the last of
        # multiple discontinuous spans if they occur (simple heuristic for
        # the head)
m = re.match(r'^(?:\d+:\d+,)*(\d+):(\d+)$', offset)
start, length = m.groups()
start, length = int(start), int(length)
tagged.append(taggedEntity(start, start+length, semtype, idseq))
idseq += 1
print >> sys.stderr, "MetaMaptoStandoff: returning %s tagged spans" % len(tagged)
return tagged
if __name__ == "__main__":
lines = [l for l in sys.stdin]
standoff = MetaMap_lines_to_standoff(lines)
for s in standoff:
print s
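# Illustrative, non-invoked demo: one fabricated "fielded" output line run
# through the converter. Only the field positions the parser relies on
# (preferred form, CUI, semantic type and "start:length" offset at indices 3,
# 4, 5 and 8) are meaningful here; real MetaMap output carries more content
# and may vary between versions.
def _demo():
    line = '24601|MMI|6.51|Heart|C0018787|[bpoc]|["heart"-tx-1-"heart"]|TX|10:5|AB'
    for t in MetaMap_lines_to_standoff([line]):
        print t     # expected: "T1<TAB>bpoc 10 15"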
| brat-master | tools/MetaMaptoStandoff.py |
#!/usr/bin/env python
'''
An example of a tagging service.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2012-03-05
'''
from argparse import ArgumentParser
from cgi import FieldStorage
try:
from json import dumps
except ImportError:
# likely old Python; try to fall back on ujson in brat distrib
from sys import path as sys_path
from os.path import join as path_join
from os.path import dirname
sys_path.append(path_join(dirname(__file__), '../server/lib/ujson'))
from ujson import dumps
from random import choice, randint
from sys import stderr
from urlparse import urlparse
try:
from urlparse import parse_qs
except ImportError:
# old Python again?
from cgi import parse_qs
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
### Constants
ARGPARSER = ArgumentParser(description='An example HTTP tagging service, '
'tagging Confuse-a-Cat **AND** Dead-parrot mentions!')
ARGPARSER.add_argument('-p', '--port', type=int, default=47111,
help='port to run the HTTP service on (default: 47111)')
###
def _random_span(text):
# A random span not starting or ending with spaces or including a new-line
attempt = 1
while True:
start = randint(0, len(text))
end = randint(start + 3, start + 25)
# Did we violate any constraints?
if (
# We landed outside the text!
end > len(text) or
# We contain a newline!
'\n' in text[start:end] or
# We have a leading or trailing space!
(text[start:end][-1] == ' ' or text[start:end][0] == ' ')
):
# Well, try again then...?
if attempt >= 100:
# Bail, we failed too many times
return None, None, None
attempt += 1
continue
else:
# Well done, we got one!
return start, end, text[start:end]
def _random_tagger(text):
# Generate some annotations
anns = {}
if not text:
# We got no text, bail
return anns
    num_anns = randint(1, max(1, len(text) / 100))
for ann_num in xrange(num_anns):
ann_id = 'T%d' % ann_num
# Annotation type
_type = choice(('Confuse-a-Cat', 'Dead-parrot', ))
start, end, span_text = _random_span(text)
if start is None:
# Random failed, continue to the next annotation
continue
anns[ann_id] = {
'type': _type,
'offsets': ((start, end), ),
'texts': (span_text, ),
}
return anns
class RandomTaggerHandler(BaseHTTPRequestHandler):
def do_POST(self):
field_storage = FieldStorage(
headers=self.headers,
environ={
'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-type'],
},
fp=self.rfile)
# Do your random tagging magic
try:
json_dic = _random_tagger(field_storage.value.decode('utf-8'))
except KeyError:
# We weren't given any text to tag, such is life, return nothing
json_dic = {}
# Write the response
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.end_headers()
self.wfile.write(dumps(json_dic))
print >> stderr, ('Generated %d random annotations' % len(json_dic))
def log_message(self, format, *args):
return # Too much noise from the default implementation
def main(args):
argp = ARGPARSER.parse_args(args[1:])
server_class = HTTPServer
httpd = server_class(('localhost', argp.port), RandomTaggerHandler)
print >> stderr, 'Random tagger service started on port %s' % (argp.port)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print >> stderr, 'Random tagger service stopped'
if __name__ == '__main__':
from sys import argv
exit(main(argv))
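# Illustrative, non-invoked client sketch: POST some text to a locally running
# instance of this service and return the JSON response. The port, URL path
# and plain-text body are assumptions for the example; the way brat itself
# invokes taggers may differ.
def _example_client(text='Dead parrots tell no tales.', port=47111):
    import urllib2
    req = urllib2.Request('http://localhost:%d/' % port, data=text,
                          headers={'Content-Type': 'text/plain; charset=utf-8'})
    return urllib2.urlopen(req).read()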
| brat-master | tools/randomtaggerservice.py |
#!/usr/bin/env python
# Convert CoNLL 2009 format file into brat-flavored standoff and a
# reconstruction of the original text.
from __future__ import with_statement
import sys
import re
import os
import codecs
# maximum number of sentences to include in single output document
# (if None, doesn't split into documents)
MAX_DOC_SENTENCES = 10
# whether to output an explicit root note
OUTPUT_ROOT = True
# the string to use to represent the root node
ROOT_STR = 'ROOT'
ROOT_POS = 'ROOT'
ROOT_FEAT = ''
INPUT_ENCODING = "UTF-8"
OUTPUT_ENCODING = "UTF-8"
# fields of interest in input data; full list: ID FORM LEMMA PLEMMA
# POS PPOS FEAT PFEAT HEAD PHEAD DEPREL PDEPREL FILLPRED PRED APREDs
# (http://ufal.mff.cuni.cz/conll2009-st/task-description.html)
F_ID, F_FORM, F_LEMMA, F_POS, F_FEAT, F_HEAD, F_DEPREL, F_FILLPRED, F_PRED, F_APRED1 = range(10)
output_directory = None
# rewrites for characters appearing in CoNLL-X types that cannot be
# directly used in identifiers in brat-flavored standoff
charmap = {
'<' : '_lt_',
'>' : '_gt_',
'+' : '_plus_',
'?' : '_question_',
'&' : '_amp_',
':' : '_colon_',
'.' : '_period_',
'!' : '_exclamation_',
}
def maptype(s):
return "".join([charmap.get(c,c) for c in s])
def tokstr(start, end, ttype, idnum, text):
# sanity checks
assert '\n' not in text, "ERROR: newline in entity '%s'" % (text)
assert text == text.strip(), "ERROR: tagged span contains extra whitespace: '%s'" % (text)
return "T%d\t%s %d %d\t%s" % (idnum, maptype(ttype), start, end, text)
def featstr(lemma, feats, idnum):
return "#%d\tData T%d\tLemma: %s, Feats: %s" % (idnum, idnum, lemma, feats)
def depstr(depid, headid, rel, idnum):
return "R%d\t%s Arg1:T%d Arg2:T%d" % (idnum, maptype(rel), headid, depid)
def output(infn, docnum, sentences):
global output_directory
if output_directory is None:
txtout = codecs.getwriter(OUTPUT_ENCODING)(sys.stdout)
soout = codecs.getwriter(OUTPUT_ENCODING)(sys.stdout)
else:
# add doc numbering if there is a sentence count limit,
# implying multiple outputs per input
if MAX_DOC_SENTENCES:
outfnbase = os.path.basename(infn)+'-doc-'+str(docnum)
else:
outfnbase = os.path.basename(infn)
outfn = os.path.join(output_directory, outfnbase)
txtout = codecs.open(outfn+'.txt', 'wt', encoding=OUTPUT_ENCODING)
soout = codecs.open(outfn+'.ann', 'wt', encoding=OUTPUT_ENCODING)
offset, idnum, ridnum = 0, 1, 1
doctext = ""
for si, sentence in enumerate(sentences):
tokens, deps = sentence
# store mapping from per-sentence token sequence IDs to
# document-unique token IDs
idmap = {}
# output tokens
prev_form = None
if OUTPUT_ROOT:
# add an explicit root node with seq ID 0 (zero)
tokens[0] = (ROOT_STR, ROOT_STR, ROOT_POS, ROOT_FEAT)
for id_ in tokens:
form, lemma, pos, feat = tokens[id_]
if prev_form is not None:
doctext = doctext + ' '
offset += 1
# output a token annotation
print >> soout, tokstr(offset, offset+len(form), pos, idnum, form)
print >> soout, featstr(lemma, feat, idnum)
assert id_ not in idmap, "Error in data: dup ID"
idmap[id_] = idnum
idnum += 1
doctext = doctext + form
offset += len(form)
prev_form = form
# output dependencies
for head in deps:
for dep in deps[head]:
for rel in deps[head][dep]:
# if root is not added, skip deps to the root (idx 0)
if not OUTPUT_ROOT and head == 0:
continue
print >> soout, depstr(idmap[dep], idmap[head], rel, ridnum)
ridnum += 1
if si+1 != len(sentences):
doctext = doctext + '\n'
offset += 1
print >> txtout, doctext
def read_sentences(fn):
"""Read sentences in CoNLL format.
Return list of sentences, each represented as list of fields.
"""
# original author: @fginter
sentences=[[]]
with codecs.open(fn, 'rU', INPUT_ENCODING) as f:
for line in f:
line=line.rstrip()
if not line:
continue
            # ignore lines starting with "#" as comments
if line and line[0] == "#":
continue
cols=line.split(u'\t')
# break sentences on token index instead of blank line;
# the latter isn't reliably written by all generators
if cols[0] == u'1' and sentences[-1]:
sentences.append([])
sentences[-1].append(cols)
return sentences
def resolve_format(sentences, options):
fields = {}
# TODO: identify CoNLL format variant by heuristics on the sentences
# CoNLL'09 field structure, using gold instead of predicted (e.g.
# POS instead of PPOS).
fields[F_ID] = 0
fields[F_FORM] = 1
fields[F_LEMMA] = 2
# PLEMMA = 3
fields[F_POS] = 4
# PPOS = 5
fields[F_FEAT] = 6
# PFEAT = 7
fields[F_HEAD] = 8
# PHEAD = 9
fields[F_DEPREL] = 10
# PDEPREL = 11
fields[F_FILLPRED] = 12
fields[F_PRED] = 13
fields[F_APRED1] = 14
return fields
def mark_dependencies(dependency, head, dependent, deprel):
if head not in dependency:
dependency[head] = {}
if dependent not in dependency[head]:
dependency[head][dependent] = []
dependency[head][dependent].append(deprel)
return dependency
def process_sentence(sentence, fieldmap):
# dependencies represented as dict of dicts of lists of dep types
# dependency[head][dependent] = [type1, type2, ...]
dependency = {}
# tokens represented as dict indexed by ID, values (form, lemma,
# POS, feat)
token = {}
for fields in sentence:
id_ = int(fields[fieldmap[F_ID]])
form = fields[fieldmap[F_FORM]]
lemma = fields[fieldmap[F_LEMMA]]
pos = fields[fieldmap[F_POS]]
feat = fields[fieldmap[F_FEAT]]
try:
head = int(fields[fieldmap[F_HEAD]])
except ValueError:
assert fields[fieldmap[F_HEAD]] == 'ROOT', \
'error: unexpected head: %s' % fields[fieldmap[F_HEAD]]
head = 0
deprel = fields[fieldmap[F_DEPREL]]
#fillpred = fields[fieldmap[F_FILLPRED]]
#pred = fields[fieldmap[F_PRED]]
#apreds = fields[fieldmap[F_APRED1]:]
mark_dependencies(dependency, head, id_, deprel)
assert id_ not in token
token[id_] = (form, lemma, pos, feat)
return token, dependency
def process(fn, options=None):
docnum = 1
sentences = read_sentences(fn)
fieldmap = resolve_format(sentences, options)
processed = []
for i, sentence in enumerate(sentences):
token, dependency = process_sentence(sentence, fieldmap)
processed.append((token, dependency))
# limit sentences per output "document"
if MAX_DOC_SENTENCES and len(processed) >= MAX_DOC_SENTENCES:
output(fn, docnum, processed)
processed = []
docnum += 1
def main(argv):
global output_directory
# Take an optional "-o" arg specifying an output directory for the results
output_directory = None
filenames = argv[1:]
if len(argv) > 2 and argv[1] == "-o":
output_directory = argv[2]
print >> sys.stderr, "Writing output to %s" % output_directory
filenames = argv[3:]
fail_count = 0
for fn in filenames:
try:
process(fn)
except Exception, e:
m = unicode(e).encode(OUTPUT_ENCODING)
raise
#print >> sys.stderr, "Error processing %s: %s" % (fn, m)
#fail_count += 1
if fail_count > 0:
print >> sys.stderr, """
##############################################################################
#
# WARNING: error in processing %d/%d files, output is incomplete!
#
##############################################################################
""" % (fail_count, len(filenames))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
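# Illustrative usage sketch: with "-o", one .txt/.ann document pair is written
# per MAX_DOC_SENTENCES sentences into the given directory; without it,
# everything goes to standard output. The input file name is a made-up
# placeholder.
#
#     python conll09tostandoff.py -o output_dir/ example.conll09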
| brat-master | tools/conll09tostandoff.py |
#!/usr/bin/env python
'''
Dictionary-based NER tagging server using PubDictionaries.
This code is based on that of randomtagger.py
Author: Han-Cheol Cho
(Author of the original script: Pontus Stenetorp)
Version: 2014-04-05
'''
from argparse import ArgumentParser
from cgi import FieldStorage
try:
from json import dumps
except ImportError:
# likely old Python; try to fall back on ujson in brat distrib
from sys import path as sys_path
from os.path import join as path_join
from os.path import dirname
sys_path.append(path_join(dirname(__file__), '../server/lib/ujson'))
from ujson import dumps
from random import choice, randint
from sys import stderr
from urlparse import urlparse
try:
from urlparse import parse_qs
except ImportError:
# old Python again?
from cgi import parse_qs
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import json
import urllib
import urllib2
import base64
### Constants
ARGPARSER = ArgumentParser(description='An example HTTP tagging service, '
'tagging Confuse-a-Cat **AND** Dead-parrot mentions!')
ARGPARSER.add_argument('-p', '--port', type=int, default=56789,
help='port to run the HTTP service on (default: 56789)')
###
#
# 1. Use PubDictionaries's ID (email) and password to use both uploaded dictionary and
# modified information (disabled and added entries).
# 2. Use "" for both variables to use only originally uploaded dictionary.
# 3. PubDictionaries does not provide any encryption yet!!
#
def build_headers(email="", password=""):
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': b'Basic ' + base64.b64encode(email + b':' + password),
}
return headers
def build_data(text):
return json.dumps({'text': text}).encode('utf-8')
def convert_for_brat(pubdic_result, text):
anns = {}
for idx, entity in enumerate(pubdic_result):
ann_id = 'T%d' % idx
anns[ann_id] = {
'type': entity['obj'], # ID of an entry
'offsets': ((entity['begin'], entity['end']), ),
'texts': (text[entity['begin']:entity['end']], ),
# Use entity['dictionary_name'] to distinguish the dictionary of this entry
# when you use an annotator url of multiple dictionaries.
}
return anns
class RandomTaggerHandler(BaseHTTPRequestHandler):
def do_POST(self):
field_storage = FieldStorage(
headers=self.headers,
environ={
'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-type'],
},
fp=self.rfile)
# Do your random tagging magic
try:
# Prepare the request header and data
headers = build_headers("", "") # email and password of PubDictionaries
text = field_storage.value.decode('utf-8') # For "ann['texts']" in format conversion
data = build_data(text)
# Make a request and retrieve the result
annotator_url = "http://pubdictionaries.dbcls.jp:80/dictionaries/EntrezGene%20-%20Homo%20Sapiens/text_annotation?matching_method=approximate&max_tokens=6&min_tokens=1&threshold=0.8&top_n=0"
request = urllib2.Request(annotator_url, data=data, headers=headers)
f = urllib2.urlopen(request)
res = f.read()
f.close()
# Format the result for BRAT
json_dic = convert_for_brat(json.loads(res), text)
except KeyError:
# We weren't given any text to tag, such is life, return nothing
json_dic = {}
# Write the response
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.end_headers()
self.wfile.write(dumps(json_dic))
print >> stderr, ('Generated %d annotations' % len(json_dic))
def log_message(self, format, *args):
return # Too much noise from the default implementation
def main(args):
argp = ARGPARSER.parse_args(args[1:])
server_class = HTTPServer
httpd = server_class(('localhost', argp.port), RandomTaggerHandler)
print >> stderr, 'PubDictionary NER tagger service started on port %s' % (argp.port)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print >> stderr, 'PubDictionary tagger service stopped'
if __name__ == '__main__':
from sys import argv
exit(main(argv))
| brat-master | tools/pubdic_tagger.py |
#!/usr/bin/env python
'''
Parse an annotation log and extract annotation statistics.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2011-11-25
'''
from argparse import ArgumentParser
### Constants
ARGPARSER = ArgumentParser()#XXX:
ARGPARSER.add_argument('ann_log', nargs='+')
###
from collections import namedtuple
from datetime import datetime
from sys import stderr
# TODO: Some arguments left out
LogLine = namedtuple('LogLine', ('time', 'user', 'collection', 'document',
'state', 'action', 'line_no'))
def _parse_log_iter(log):
for line_no, line in enumerate((l.rstrip('\n') for l in log)):
date_stamp, time_stamp, user, collection, document, state, action = line.split()[:7]
dtime = datetime.strptime('%s %s' % (date_stamp, time_stamp, ),
'%Y-%m-%d %H:%M:%S,%f')
yield LogLine(
time=dtime,
user=user,
collection=collection,
document=document,
state=state,
action=action,
line_no=line_no,
)
Action = namedtuple('Action', ('start', 'end', 'action'))
# TODO: Give actions and sub actions
def _action_iter(log_lines):
start_by_action = {}
for log_line in log_lines:
#print >> stderr, log_line
if log_line.state == 'START':
start_by_action[log_line.action] = log_line
elif log_line.state == 'FINISH':
start_line = start_by_action[log_line.action]
del start_by_action[log_line.action]
yield Action(start=start_line, end=log_line,
action=log_line.action)
# TODO: Log summary object
def main(args):
argp = ARGPARSER.parse_args(args[1:])
for ann_log_path in argp.ann_log:
with open(ann_log_path, 'r') as ann_log:
log_lines = []
for log_line in _parse_log_iter(ann_log):
assert log_line.state in set(('START', 'FINISH',) ), 'unknown logged state'
log_lines.append(log_line)
clock_time = log_lines[-1].time - log_lines[0].time
print >> stderr, 'Clock time:', clock_time
from datetime import timedelta
ann_time = timedelta()
last_span_selected = None
for action in _action_iter(log_lines):
if (action.action == 'spanSelected'
or action.action == 'spanEditSelected'
or action.action == 'suggestSpanTypes'):
last_span_selected = action
if action.action == 'createSpan':
ann_time = ann_time + (action.end.time - last_span_selected.start.time)
last_span_selected = None
#print action
ann_port_of_clock = float(ann_time.seconds) / clock_time.seconds
print >> stderr, 'Annotation time: %s (portion of clock time: %.1f%%)' % (
ann_time, ann_port_of_clock * 100, )
'''
Ordinary sequence:
* spanSelected
* createSpan
'''
if __name__ == '__main__':
from sys import argv
exit(main(argv))
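# Illustrative, non-invoked demo of the log reader: two fabricated lines in
# the whitespace-separated shape _parse_log_iter expects (date, time, user,
# collection, document, state, action, ...). All values are made up.
def _demo():
    lines = [
        '2011-11-25 10:00:00,000 alice /coll doc-1 START spanSelected',
        '2011-11-25 10:00:05,500 alice /coll doc-1 FINISH spanSelected',
    ]
    for entry in _parse_log_iter(lines):
        print >> stderr, entry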
| brat-master | tools/anneval.py |