python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.util import NoopContextManager
from torchvision import datasets, transforms
def run_mpc_autograd_cnn(
context_manager=None,
num_epochs=3,
learning_rate=0.001,
batch_size=5,
print_freq=5,
num_samples=100,
):
"""
Args:
context_manager: used for setting proxy settings during download.
"""
crypten.init()
data_alice, data_bob, train_labels = preprocess_mnist(context_manager)
rank = comm.get().get_rank()
# assumes at least two parties exist
# broadcast dummy data with same shape to remaining parties
if rank == 0:
x_alice = data_alice
else:
x_alice = torch.empty(data_alice.size())
if rank == 1:
x_bob = data_bob
else:
x_bob = torch.empty(data_bob.size())
# encrypt
x_alice_enc = crypten.cryptensor(x_alice, src=0)
x_bob_enc = crypten.cryptensor(x_bob, src=1)
# combine feature sets
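# Alice holds the first 20 pixel columns of each 28x28 image and Bob holds the
# remaining 8 (see preprocess_mnist below), so concatenating along dim=2
# restores full-width images before the channel dimension is added.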
x_combined_enc = crypten.cat([x_alice_enc, x_bob_enc], dim=2)
x_combined_enc = x_combined_enc.unsqueeze(1)
# reduce training set to num_samples
x_reduced = x_combined_enc[:num_samples]
y_reduced = train_labels[:num_samples]
# encrypt plaintext model
model_plaintext = CNN()
dummy_input = torch.empty((1, 1, 28, 28))
model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
model.train()
model.encrypt()
# encrypted training
train_encrypted(
x_reduced, y_reduced, model, num_epochs, learning_rate, batch_size, print_freq
)
def train_encrypted(
x_encrypted,
y_encrypted,
encrypted_model,
num_epochs,
learning_rate,
batch_size,
print_freq,
):
rank = comm.get().get_rank()
loss = crypten.nn.MSELoss()
num_samples = x_encrypted.size(0)
label_eye = torch.eye(2)
for epoch in range(num_epochs):
last_progress_logged = 0
# only print from rank 0 to avoid duplicates for readability
if rank == 0:
print(f"Epoch {epoch} in progress:")
for j in range(0, num_samples, batch_size):
# define the start and end of the training mini-batch
start, end = j, min(j + batch_size, num_samples)
# switch on autograd for training examples
x_train = x_encrypted[start:end]
x_train.requires_grad = True
y_one_hot = label_eye[y_encrypted[start:end]]
y_train = crypten.cryptensor(y_one_hot, requires_grad=True)
# perform forward pass:
output = encrypted_model(x_train)
loss_value = loss(output, y_train)
# backprop
encrypted_model.zero_grad()
loss_value.backward()
encrypted_model.update_parameters(learning_rate)
# log progress
if j + batch_size - last_progress_logged >= print_freq:
last_progress_logged += print_freq
print(f"Loss {loss_value.get_plain_text().item():.4f}")
# compute accuracy every epoch
pred = output.get_plain_text().argmax(1)
correct = pred.eq(y_encrypted[start:end])
correct_count = correct.sum(0, keepdim=True).float()
accuracy = correct_count.mul_(100.0 / output.size(0))
loss_plaintext = loss_value.get_plain_text().item()
print(
f"Epoch {epoch} completed: "
f"Loss {loss_plaintext:.4f} Accuracy {accuracy.item():.2f}"
)
def preprocess_mnist(context_manager):
if context_manager is None:
context_manager = NoopContextManager()
with context_manager:
# each party gets a unique temp directory
with tempfile.TemporaryDirectory() as data_dir:
mnist_train = datasets.MNIST(data_dir, download=True, train=True)
mnist_test = datasets.MNIST(data_dir, download=True, train=False)
# modify labels so all non-zero digits have class label 1
mnist_train.targets[mnist_train.targets != 0] = 1
mnist_test.targets[mnist_test.targets != 0] = 1
mnist_train.targets[mnist_train.targets == 0] = 0
mnist_test.targets[mnist_test.targets == 0] = 0
# compute normalization factors
data_all = torch.cat([mnist_train.data, mnist_test.data]).float()
data_mean, data_std = data_all.mean(), data_all.std()
tensor_mean, tensor_std = data_mean.unsqueeze(0), data_std.unsqueeze(0)
# normalize data
data_train_norm = transforms.functional.normalize(
mnist_train.data.float(), tensor_mean, tensor_std
)
# partition features between Alice and Bob
data_alice = data_train_norm[:, :, :20]
data_bob = data_train_norm[:, :, 20:]
train_labels = mnist_train.targets
return data_alice, data_bob, train_labels
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.fc1 = nn.Linear(16 * 12 * 12, 100)
self.fc2 = nn.Linear(100, 2)
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2)
out = out.view(-1, 16 * 12 * 12)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
return out
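# Shape check (illustrative): a 1x28x28 input passes through conv1 (5x5 kernel,
# no padding) to give 16x24x24 feature maps; 2x2 max-pooling halves this to
# 16x12x12, matching the 16 * 12 * 12 flatten size expected by fc1.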
| CrypTen-main | examples/mpc_autograd_cnn/mpc_autograd_cnn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_autograd_cnn example:
$ python examples/mpc_autograd_cnn/launcher.py
To run mpc_linear_svm example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_autograd_cnn/mpc_autograd_cnn.py \
examples/mpc_autograd_cnn/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Autograd CNN Training")
def validate_world_size(world_size):
world_size = int(world_size)
if world_size < 2:
raise argparse.ArgumentTypeError(f"world_size {world_size} must be > 1")
return world_size
parser.add_argument(
"--world_size",
type=validate_world_size,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=3, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.01,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"-b",
"--batch-size",
default=5,
type=int,
metavar="N",
help="mini-batch size (default: 5)",
)
parser.add_argument(
"--print-freq",
"-p",
default=5,
type=int,
metavar="PF",
help="print frequency (default: 5)",
)
parser.add_argument(
"--num-samples",
"-n",
default=100,
type=int,
metavar="N",
help="num of samples used for training (default: 100)",
)
def _run_experiment(args):
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format="%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s",
)
from mpc_autograd_cnn import run_mpc_autograd_cnn
run_mpc_autograd_cnn(
num_epochs=args.epochs,
learning_rate=args.lr,
batch_size=args.batch_size,
print_freq=args.print_freq,
num_samples=args.num_samples,
)
def main(run_experiment):
args = parser.parse_args()
# run multiprocess by default
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/mpc_autograd_cnn/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generate function and model benchmarks
To Run:
$ python benchmark.py
# Only function benchmarks
$ python benchmark.py --only-functions
$ python benchmark.py --only-functions --world-size 2
# Benchmark functions and all models
$ python benchmark.py --advanced-models
# Run benchmarks on GPU
$ python benchmark.py --device cuda
$ python benchmark.py --device cuda --world-size 2
# Run benchmarks on different GPUs for each party
$ python benchmark.py --world-size=2 --multi-gpu
# Save benchmarks to csv
$ python benchmark.py -p ~/Downloads/
"""
import argparse
import functools
import os
import timeit
from collections import namedtuple
import crypten
import crypten.communicator as comm
import numpy as np
import pandas as pd
import torch
from examples import multiprocess_launcher
try:
from . import data, models
except ImportError:
# direct import if relative fails
import data
import models
Runtime = namedtuple("Runtime", "mid q1 q3")
def time_me(func=None, n_loops=10):
"""Decorator returning average runtime in seconds over n_loops
Args:
func (function): invoked with given args / kwargs
n_loops (int): number of times to invoke function for timing
Returns: tuple of (Runtime namedtuple of median, Q1, and Q3 runtimes in seconds, function return value).
"""
if func is None:
return functools.partial(time_me, n_loops=n_loops)
@functools.wraps(func)
def timing_wrapper(*args, **kwargs):
return_val = func(*args, **kwargs)
times = []
for _ in range(n_loops):
start = timeit.default_timer()
func(*args, **kwargs)
times.append(timeit.default_timer() - start)
mid_runtime = np.quantile(times, 0.5)
q1_runtime = np.quantile(times, 0.25)
q3_runtime = np.quantile(times, 0.75)
runtime = Runtime(mid_runtime, q1_runtime, q3_runtime)
return runtime, return_val
return timing_wrapper
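# Usage sketch (illustrative, not part of the benchmark suite): the decorator
# returns a (Runtime, return_value) pair, e.g.
#
#   @time_me(n_loops=5)
#   def matmul(a, b):
#       return a.matmul(b)
#
#   runtime, product = matmul(torch.rand(10, 10), torch.rand(10, 10))
#   print(runtime.mid, runtime.q1, runtime.q3)  # median and quartile runtimes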
class FuncBenchmarks:
"""Benchmarks runtime and error of crypten functions against PyTorch
Args:
tensor_size (int or tuple): size of tensor for benchmarking runtimes
"""
BINARY = ["add", "sub", "mul", "matmul", "gt", "lt", "eq"]
UNARY = [
"sigmoid",
"relu",
"tanh",
"exp",
"log",
"reciprocal",
"cos",
"sin",
"sum",
"mean",
"neg",
]
LAYERS = ["conv2d"]
DOMAIN = torch.arange(start=0.01, end=100, step=0.01)
# for exponential, sin, and cos
TRUNCATED_DOMAIN = torch.arange(start=0.001, end=10, step=0.001)
def __init__(self, tensor_size=(100, 100), device="cpu"):
self.device = torch.device(device)
self.tensor_size = tensor_size
# dataframe for benchmarks
self.df = None
def __repr__(self):
if self.df is not None:
return self.df.to_string(index=False, justify="left")
return "No Function Benchmarks"
@staticmethod
@time_me
def time_func(x, func, y=None):
"""Invokes func as a method of x"""
if y is None:
return getattr(x, func)()
if func in {"conv1d", "conv2d"}:
if torch.is_tensor(x):
return getattr(torch.nn.functional, func)(x, y)
return getattr(x, func)(y)
return getattr(x, func)(y)
def get_runtimes(self):
"""Returns plain text and crypten runtimes"""
x, y = (
torch.rand(self.tensor_size, device=self.device),
torch.rand(self.tensor_size, device=self.device),
)
x_enc, y_enc = crypten.cryptensor(x), crypten.cryptensor(y)
runtimes, runtimes_enc = [], []
for func in FuncBenchmarks.UNARY + FuncBenchmarks.BINARY:
second_operand, second_operand_enc = None, None
if func in FuncBenchmarks.BINARY:
second_operand, second_operand_enc = y, y_enc
runtime, _ = FuncBenchmarks.time_func(x, func, y=second_operand)
runtimes.append(runtime)
runtime_enc, _ = FuncBenchmarks.time_func(x_enc, func, y=second_operand_enc)
runtimes_enc.append(runtime_enc)
# add layer runtimes
runtime_layers, runtime_layers_enc = self.get_layer_runtimes()
runtimes.extend(runtime_layers)
runtimes_enc.extend(runtime_layers_enc)
return runtimes, runtimes_enc
def get_layer_runtimes(self):
"""Returns runtimes for layers"""
runtime_layers, runtime_layers_enc = [], []
for layer in FuncBenchmarks.LAYERS:
if layer == "conv1d":
x, x_enc, y, y_enc = self.random_conv1d_inputs()
elif layer == "conv2d":
x, x_enc, y, y_enc = self.random_conv2d_inputs()
else:
raise ValueError(f"{layer} not supported")
runtime, _ = FuncBenchmarks.time_func(x, layer, y=y)
runtime_enc, _ = FuncBenchmarks.time_func(x_enc, layer, y=y_enc)
runtime_layers.append(runtime)
runtime_layers_enc.append(runtime_enc)
return runtime_layers, runtime_layers_enc
def random_conv2d_inputs(self):
"""Returns random input and weight tensors for 2d convolutions"""
filter_size = [size // 10 for size in self.tensor_size]
x_conv2d = torch.rand(1, 1, *self.tensor_size, device=self.device)
weight2d = torch.rand(1, 1, *filter_size, device=self.device)
x_conv2d_enc = crypten.cryptensor(x_conv2d)
weight2d_enc = crypten.cryptensor(weight2d)
return x_conv2d, x_conv2d_enc, weight2d, weight2d_enc
def random_conv1d_inputs(self):
"""Returns random input and weight tensors for 1d convolutions"""
size = self.tensor_size[0]
filter_size = size // 10
x_conv1d = torch.rand(1, 1, size, device=self.device)
weight1d = torch.rand(1, 1, filter_size, device=self.device)
x_conv1d_enc = crypten.cryptensor(x_conv1d)
weight1d_enc = crypten.cryptensor(weight1d)
return x_conv1d, x_conv1d_enc, weight1d, weight1d_enc
@staticmethod
def calc_abs_error(ref, out):
"""Computes total absolute error"""
ref, out = ref.cpu(), out.cpu()
if ref.dtype == torch.bool:
errors = (out != ref).numpy().sum()
return errors
errors = torch.abs(out - ref).numpy()
return errors.sum()
@staticmethod
def calc_relative_error(ref, out):
"""Computes average relative error"""
ref, out = ref.cpu(), out.cpu()
if ref.dtype == torch.bool:
errors = (out != ref).numpy().sum() // ref.nelement()
return errors
errors = torch.abs((out - ref) / ref)
# remove inf due to division by tiny numbers
errors = errors[errors != float("inf")].numpy()
return errors.mean()
def call_function_on_domain(self, func):
"""Call plain text and CrypTen function on given function
Uses DOMAIN, TRUNCATED_DOMAIN, or appropriate layer inputs
Returns: tuple of (plain text result, encrypted result)
"""
DOMAIN, TRUNCATED_DOMAIN = (
FuncBenchmarks.DOMAIN,
FuncBenchmarks.TRUNCATED_DOMAIN,
)
if hasattr(DOMAIN, "to") and hasattr(TRUNCATED_DOMAIN, "to"):
DOMAIN, TRUNCATED_DOMAIN = (
DOMAIN.to(device=self.device),
TRUNCATED_DOMAIN.to(device=self.device),
)
y = torch.rand(DOMAIN.shape, device=self.device)
DOMAIN_enc, y_enc = crypten.cryptensor(DOMAIN), crypten.cryptensor(y)
TRUNCATED_DOMAIN_enc = crypten.cryptensor(TRUNCATED_DOMAIN)
if func in ["exp", "cos", "sin"]:
ref, out_enc = (
getattr(TRUNCATED_DOMAIN, func)(),
getattr(TRUNCATED_DOMAIN_enc, func)(),
)
elif func in FuncBenchmarks.UNARY:
ref, out_enc = getattr(DOMAIN, func)(), getattr(DOMAIN_enc, func)()
elif func in FuncBenchmarks.LAYERS:
ref, out_enc = self._call_layer(func)
elif func in FuncBenchmarks.BINARY:
ref, out_enc = (getattr(DOMAIN, func)(y), getattr(DOMAIN_enc, func)(y_enc))
else:
raise ValueError(f"{func} not supported")
return ref, out_enc
def get_errors(self):
"""Computes the total error of approximations"""
abs_errors, relative_errors = [], []
functions = FuncBenchmarks.UNARY + FuncBenchmarks.BINARY
functions += FuncBenchmarks.LAYERS
for func in functions:
ref, out_enc = self.call_function_on_domain(func)
out = out_enc.get_plain_text()
abs_error = FuncBenchmarks.calc_abs_error(ref, out)
abs_errors.append(abs_error)
relative_error = FuncBenchmarks.calc_relative_error(ref, out)
relative_errors.append(relative_error)
return abs_errors, relative_errors
def _call_layer(self, layer):
"""Call supported layers"""
if layer == "conv1d":
x, x_enc, y, y_enc = self.random_conv1d_inputs()
elif layer == "conv2d":
x, x_enc, y, y_enc = self.random_conv2d_inputs()
else:
raise ValueError(f"{layer} not supported")
ref = getattr(torch.nn.functional, layer)(x, y)
out_enc = getattr(x_enc, layer)(y_enc)
return ref, out_enc
def save(self, path):
if self.device.type == "cuda":
csv_path = os.path.join(path, "func_benchmarks_cuda.csv")
else:
csv_path = os.path.join(path, "func_benchmarks.csv")
self.df.to_csv(csv_path, index=False)
def get_results(self):
return self.df
def run(self):
"""Runs and stores benchmarks in self.df"""
runtimes, runtimes_enc = self.get_runtimes()
abs_errors, relative_errors = self.get_errors()
self.df = pd.DataFrame.from_dict(
{
"function": FuncBenchmarks.UNARY
+ FuncBenchmarks.BINARY
+ FuncBenchmarks.LAYERS,
"runtime": [r.mid for r in runtimes],
"runtime Q1": [r.q1 for r in runtimes],
"runtime Q3": [r.q3 for r in runtimes],
"runtime crypten": [r.mid for r in runtimes_enc],
"runtime crypten Q1": [r.q1 for r in runtimes_enc],
"runtime crypten Q3": [r.q3 for r in runtimes_enc],
"total abs error": abs_errors,
"average relative error": relative_errors,
}
)
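# Standalone usage sketch (assumes a single-party run with crypten already
# initialized, as done in main() below):
#
#   crypten.init()
#   func_bench = FuncBenchmarks()
#   func_bench.run()
#   print(func_bench.get_results())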
class ModelBenchmarks:
"""Benchmarks runtime and accuracy of crypten models
Models are benchmarked on synthetically generated
Gaussian clusters for binary classification; ResNet
benchmarks use image data.
Args:
device (str or torch.device): device to run the benchmarks on
advanced_models (bool): whether to include advanced models (e.g. ResNets)
"""
def __init__(self, device="cpu", advanced_models=False):
self.device = torch.device(device)
self.df = None
self.models = models.MODELS
if not advanced_models:
self.remove_advanced_models()
def __repr__(self):
if self.df is not None:
return self.df.to_string(index=False, justify="left")
return "No Model Benchmarks"
def remove_advanced_models(self):
"""Removes advanced models from instance"""
self.models = list(filter(lambda x: not x.advanced, self.models))
@time_me(n_loops=3)
def train(self, model, x, y, epochs, lr, loss):
"""Trains PyTorch model
Args:
model (PyTorch model): model to be trained
x (torch.tensor): inputs
y (torch.tensor): targets
epochs (int): number of training epochs
lr (float): learning rate
loss (str): type of loss to use for training
Returns:
model with updated weights
"""
assert isinstance(model, torch.nn.Module), "must be a PyTorch model"
criterion = getattr(torch.nn, loss)()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
for _ in range(epochs):
model.zero_grad()
output = model(x)
loss = criterion(output, y)
loss.backward()
optimizer.step()
return model
@time_me(n_loops=3)
def train_crypten(self, model, x, y, epochs, lr, loss):
"""Trains crypten encrypted model
Args:
model (CrypTen model): model to be trained
x (crypten.tensor): inputs
y (crypten.tensor): targets
epochs (int): number of training epochs
lr (float): learning rate
loss (str): type of loss to use for training
Returns:
model with updated weights
"""
assert isinstance(model, crypten.nn.Module), "must be a CrypTen model"
criterion = getattr(crypten.nn, loss)()
for _ in range(epochs):
model.zero_grad()
output = model(x)
loss = criterion(output, y)
loss.backward()
model.update_parameters(lr)
return model
def time_training(self):
"""Returns training time per epoch for plain text and CrypTen"""
runtimes = []
runtimes_enc = []
for model in self.models:
x, y = model.data.x, model.data.y
x, y = x.to(device=self.device), y.to(device=self.device)
model_plain = model.plain
if hasattr(model_plain, "to"):
model_plain = model_plain.to(self.device)
runtime, _ = self.train(model_plain, x, y, 1, model.lr, model.loss)
runtimes.append(runtime)
if model.advanced:
y = model.data.y_onehot.to(self.device)
x_enc = crypten.cryptensor(x)
y_enc = crypten.cryptensor(y)
model_crypten = model.crypten
if hasattr(model_crypten, "to"):
model_crypten = model_crypten.to(self.device)
model_enc = model_crypten.encrypt()
runtime_enc, _ = self.train_crypten(
model_enc, x_enc, y_enc, 1, model.lr, model.loss
)
runtimes_enc.append(runtime_enc)
return runtimes, runtimes_enc
@time_me(n_loops=3)
def predict(self, model, x):
y = model(x)
return y
def time_inference(self):
"""Returns inference time for plain text and CrypTen"""
runtimes = []
runtimes_enc = []
for model in self.models:
x = model.data.x.to(self.device)
model_plain = model.plain
if hasattr(model_plain, "to"):
model_plain = model_plain.to(self.device)
runtime, _ = self.predict(model_plain, x)
runtimes.append(runtime)
model_crypten = model.crypten
if hasattr(model_crypten, "to"):
model_crypten = model_crypten.to(self.device)
model_enc = model_crypten.encrypt()
x_enc = crypten.cryptensor(x)
runtime_enc, _ = self.predict(model_enc, x_enc)
runtimes_enc.append(runtime_enc)
return runtimes, runtimes_enc
@staticmethod
def calc_accuracy(output, y, threshold=0.5):
"""Computes percent accuracy
Args:
output (torch.tensor): model output
y (torch.tensor): true label
threshold (float): classification threshold
Returns (float): percent accuracy
"""
output, y = output.cpu(), y.cpu()
predicted = (output > threshold).float()
correct = (predicted == y).sum().float()
accuracy = float((correct / y.shape[0]).cpu().numpy())
return accuracy
def evaluate(self):
"""Evaluates accuracy of crypten versus plain text models"""
accuracies, accuracies_crypten = [], []
for model in self.models:
model_plain = model.plain
if hasattr(model_plain, "to"):
model_plain = model_plain.to(self.device)
x, y = model.data.x, model.data.y
x, y = x.to(device=self.device), y.to(device=self.device)
_, model_plain = self.train(
model_plain, x, y, model.epochs, model.lr, model.loss
)
x_test = model.data.x_test.to(device=self.device)
y_test = model.data.y_test.to(device=self.device)
accuracy = ModelBenchmarks.calc_accuracy(model_plain(x_test), y_test)
accuracies.append(accuracy)
model_crypten = model.crypten
if hasattr(model_crypten, "to"):
model_crypten = model_crypten.to(self.device)
model_crypten = model_crypten.encrypt()
if model.advanced:
y = model.data.y_onehot.to(self.device)
x_enc = crypten.cryptensor(x)
y_enc = crypten.cryptensor(y)
_, model_crypten = self.train_crypten(
model_crypten, x_enc, y_enc, model.epochs, model.lr, model.loss
)
x_test_enc = crypten.cryptensor(x_test)
output = model_crypten(x_test_enc).get_plain_text()
accuracy = ModelBenchmarks.calc_accuracy(output, y_test)
accuracies_crypten.append(accuracy)
return accuracies, accuracies_crypten
def save(self, path):
if self.device.type == "cuda":
csv_path = os.path.join(path, "model_benchmarks_cuda.csv")
else:
csv_path = os.path.join(path, "model_benchmarks.csv")
self.df.to_csv(csv_path, index=False)
def get_results(self):
return self.df
def run(self):
"""Runs and stores benchmarks in self.df"""
training_runtimes, training_runtimes_enc = self.time_training()
inference_runtimes, inference_runtimes_enc = self.time_inference()
accuracies, accuracies_crypten = self.evaluate()
model_names = [model.name for model in self.models]
training_times_both = training_runtimes + training_runtimes_enc
inference_times_both = inference_runtimes + inference_runtimes_enc
half_n_rows = len(training_runtimes)
self.df = pd.DataFrame.from_dict(
{
"model": model_names + model_names,
"seconds per epoch": [t.mid for t in training_times_both],
"seconds per epoch q1": [t.q1 for t in training_times_both],
"seconds per epoch q3": [t.q3 for t in training_times_both],
"inference time": [t.mid for t in inference_times_both],
"inference time q1": [t.q1 for t in inference_times_both],
"inference time q3": [t.q3 for t in inference_times_both],
"is plain text": [True] * half_n_rows + [False] * half_n_rows,
"accuracy": accuracies + accuracies_crypten,
}
)
self.df = self.df.sort_values(by="model")
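# Note (illustrative): the resulting dataframe stacks plain-text rows first and
# CrypTen rows second, distinguished by the "is plain text" column, so each
# model contributes two rows that end up adjacent after the sort by model name.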
def get_args():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(description="Benchmark Functions")
parser.add_argument(
"--path",
"-p",
type=str,
required=False,
default=None,
help="path to save function benchmarks",
)
parser.add_argument(
"--only-functions",
"-f",
required=False,
default=False,
action="store_true",
help="run only function benchmarks",
)
parser.add_argument(
"--world-size",
"-w",
type=int,
required=False,
default=1,
help="world size for number of parties",
)
parser.add_argument(
"--device",
"-d",
required=False,
default="cpu",
help="the device to run the benchmarks",
)
parser.add_argument(
"--multi-gpu",
"-mg",
required=False,
default=False,
action="store_true",
help="use different gpu for each party. Will override --device if selected",
)
parser.add_argument(
"--ttp",
"-ttp",
required=False,
default=False,
action="store_true",
help="initialize a trusted third party (TTP) as beaver triples' provider, world_size should be greater than 2",
)
parser.add_argument(
"--advanced-models",
required=False,
default=False,
action="store_true",
help="run advanced model (resnet, transformer, etc.) benchmarks",
)
args = parser.parse_args()
return args
def multiprocess_caller(args):
"""Runs multiparty benchmarks and prints/saves from source 0"""
rank = comm.get().get_rank()
if args.multi_gpu:
assert (
args.world_size >= torch.cuda.device_count()
), f"Got {args.world_size} parties, but only {torch.cuda.device_count()} GPUs found"
device = torch.device(f"cuda:{rank}")
else:
device = torch.device(args.device)
benchmarks = [
FuncBenchmarks(device=device),
ModelBenchmarks(device=device, advanced_models=args.advanced_models),
]
if args.only_functions:
benchmarks = [FuncBenchmarks(device=device)]
for benchmark in benchmarks:
benchmark.run()
rank = comm.get().get_rank()
if rank == 0:
pd.set_option("display.precision", 3)
print(benchmark)
if args.path:
benchmark.save(args.path)
def main():
"""Runs benchmarks and saves if path is provided"""
crypten.init()
args = get_args()
device = torch.device(args.device)
if not hasattr(crypten.nn.Module, "to") or not hasattr(crypten.mpc.MPCTensor, "to"):
if device.type == "cuda":
print(
"GPU computation is not supported for this version of CrypTen, benchmark will be skipped"
)
return
benchmarks = [
FuncBenchmarks(device=device),
ModelBenchmarks(device=device, advanced_models=args.advanced_models),
]
if args.only_functions:
benchmarks = [FuncBenchmarks(device=device)]
if args.world_size > 1:
if args.ttp:
crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedThirdParty)
launcher = multiprocess_launcher.MultiProcessLauncher(
args.world_size, multiprocess_caller, fn_args=args
)
launcher.start()
launcher.join()
launcher.terminate()
else:
pd.set_option("display.precision", 3)
for benchmark in benchmarks:
benchmark.run()
print(benchmark)
if args.path:
benchmark.save(args.path)
if __name__ == "__main__":
main()
| CrypTen-main | benchmarks/benchmark.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Contains models used for benchmarking
"""
from dataclasses import dataclass
from typing import Any
import crypten
import torch
import torch.nn as nn
from torchvision import models
try:
from . import data
except ImportError:
# direct import if relative fails
import data
N_FEATURES = 20
@dataclass
class Model:
name: str
plain: torch.nn.Module
crypten: crypten.nn.Module
# must contain x, y, x_test, y_test attributes
data: Any
epochs: int
lr: float
loss: str
advanced: bool
class LogisticRegression(torch.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear = torch.nn.Linear(n_features, 1)
def forward(self, x):
return torch.sigmoid(self.linear(x))
class LogisticRegressionCrypTen(crypten.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear = crypten.nn.Linear(n_features, 1)
def forward(self, x):
return self.linear(x).sigmoid()
class FeedForward(torch.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear1 = torch.nn.Linear(n_features, n_features // 2)
self.linear2 = torch.nn.Linear(n_features // 2, n_features // 4)
self.linear3 = torch.nn.Linear(n_features // 4, 1)
def forward(self, x):
out = torch.relu(self.linear1(x))
out = torch.relu(self.linear2(out))
out = torch.sigmoid(self.linear3(out))
return out
class FeedForwardCrypTen(crypten.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear1 = crypten.nn.Linear(n_features, n_features // 2)
self.linear2 = crypten.nn.Linear(n_features // 2, n_features // 4)
self.linear3 = crypten.nn.Linear(n_features // 4, 1)
def forward(self, x):
out = (self.linear1(x)).relu()
out = (self.linear2(out)).relu()
out = (self.linear3(out)).sigmoid()
return out
class ResNet(nn.Module):
def __init__(self, n_layers=18):
super().__init__()
assert n_layers in [18, 34, 50]
self.model = getattr(models, "resnet{}".format(n_layers))(pretrained=True)
def forward(self, x):
return self.model(x)
class ResNetCrypTen(crypten.nn.Module):
def __init__(self, n_layers=18):
super().__init__()
assert n_layers in [18, 34, 50]
model = getattr(models, "resnet{}".format(n_layers))(pretrained=True)
dummy_input = torch.rand([1, 3, 224, 224])
self.model = crypten.nn.from_pytorch(model, dummy_input)
def forward(self, x):
return self.model(x)
MODELS = [
Model(
name="logistic regression",
plain=LogisticRegression(),
crypten=LogisticRegressionCrypTen(),
data=data.GaussianClusters(),
epochs=50,
lr=0.1,
loss="BCELoss",
advanced=False,
),
Model(
name="feedforward neural network",
plain=FeedForward(),
crypten=FeedForwardCrypTen(),
data=data.GaussianClusters(),
epochs=50,
lr=0.1,
loss="BCELoss",
advanced=False,
),
Model(
name="resnet18",
plain=ResNet(n_layers=18),
crypten=ResNetCrypTen(n_layers=18),
data=data.Images(),
epochs=2,
lr=0.1,
loss="CrossEntropyLoss",
advanced=True,
),
Model(
name="resnet34",
plain=ResNet(n_layers=34),
crypten=ResNetCrypTen(n_layers=34),
data=data.Images(),
epochs=2,
lr=0.1,
loss="CrossEntropyLoss",
advanced=True,
),
Model(
name="resnet50",
plain=ResNet(n_layers=50),
crypten=ResNetCrypTen(n_layers=50),
data=data.Images(),
epochs=2,
lr=0.1,
loss="CrossEntropyLoss",
advanced=True,
),
]
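# Note (illustrative): ModelBenchmarks in benchmark.py consumes this list via
# models.MODELS and, unless --advanced-models is passed, filters out entries
# with advanced=True (the ResNet variants above).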
| CrypTen-main | benchmarks/models.py |
CrypTen-main | benchmarks/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A script to run historical benchmarks.
- writes monthly data to 'dash_app/data/'
- example: 'dash_app/data/2019-10-26/func_benchmarks.csv'
- example: 'dash_app/data/2019-10-26/model_benchmarks.csv'
- overwrite option
- script requires ability to 'git clone'
To run:
python run_historical_benchmarks.py
# overwrite existing data directories
python run_historical_benchmarks.py --overwrite
"""
import argparse
import datetime
import os
import shutil
import subprocess
from dateutil.relativedelta import relativedelta
def parse_args():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(description="Run Historical Benchmarks")
parser.add_argument(
"--overwrite",
required=False,
default=False,
action="store_true",
help="overwrite existing data directories",
)
parser.add_argument(
"--cuda-toolkit-version",
required=False,
default="10.1",
help="build pytorch with the corresponding version of cuda-toolkit",
)
args = parser.parse_args()
return args
def get_dates(day=26):
"""Generate dates to run benchmarks
Returns: list of strings in year-month-day format.
Example: ["2020-01-26", "2019-12-26"]
"""
dates = []
today = datetime.date.today()
end = datetime.date(2019, 10, day)
one_month = relativedelta(months=+1)
if today.day >= 26:
start = datetime.date(today.year, today.month, day)
else:
start = datetime.date(today.year, today.month, day) - one_month
while start >= end:
dates.append(start.strftime("%Y-%m-%d"))
start -= one_month
return dates
args = parse_args()
overwrite = args.overwrite
cuda_version = "".join(args.cuda_toolkit_version.split("."))
dates = get_dates()
PATH = os.getcwd()
# clone
subprocess.call(
"cd /tmp && git clone https://github.com/facebookresearch/CrypTen.git", shell=True
)
# create venv
subprocess.call("cd /tmp && python3 -m venv .venv", shell=True)
venv = "cd /tmp && . .venv/bin/activate && "
# install PyTorch
subprocess.call(
f"{venv} pip3 install onnx==1.6.0 tensorboard pandas sklearn", shell=True
)
stable_url = "https://download.pytorch.org/whl/torch_stable.html"
pip_torch = f"pip install torch==1.5.1+cu{cuda_version} torchvision==0.6.1+cu{cuda_version} -f https://download.pytorch.org/whl/torch_stable.html"
subprocess.call(f"{venv} {pip_torch} -f {stable_url}", shell=True)
modes = {"1pc": "", "2pc": "--world-size=2"}
for date in dates:
path_exists = os.path.exists(f"dash_app/data/{date}/func_benchmarks.csv")
if not overwrite and path_exists:
continue
# checkout closest version before date
subprocess.call(
f"cd /tmp/CrypTen && "
+ f"git checkout `git rev-list -n 1 --before='{date} 01:01' master`",
shell=True,
)
for mode, arg in modes.items():
subprocess.call(venv + "pip3 install CrypTen/.", shell=True)
subprocess.call(f"echo Generating {date} Benchmarks for {mode}", shell=True)
path = os.path.join(PATH, f"dash_app/data/{date}", mode)
subprocess.call(f"mkdir -p {path}", shell=True)
subprocess.call(
venv + f"cd {PATH} && python3 benchmark.py -p '{path}' {arg}", shell=True
)
subprocess.call(
venv + f"cd {PATH} && python3 benchmark.py -p '{path}' -d 'cuda' {arg}",
shell=True,
)
# clean up
shutil.rmtree("/tmp/.venv", ignore_errors=True)
shutil.rmtree("/tmp/CrypTen", ignore_errors=True)
| CrypTen-main | benchmarks/run_historical_benchmarks.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Profiler with snakeviz for probing inference / training call stack
Run via Jupyter
"""
from benchmark import ModelBenchmarks
# get_ipython().run_line_magic("load_ext", "snakeviz")
model_benchmarks = ModelBenchmarks()
# for logistic regression select 0
model = model_benchmarks.MODELS[1]
print(model.name)
model_crypten = model.crypten(model_benchmarks.n_features).encrypt()
# profile training
# get_ipython().run_cell_magic(
# "snakeviz", "", "\nmodel_benchmarks.train_crypten(model_crypten)"
# )
# profile inference
x_enc = model_benchmarks.x_enc
model_crypten.train = False
# get_ipython().run_cell_magic("snakeviz", "", "\n\nmodel_crypten(x_enc)")
| CrypTen-main | benchmarks/profiler.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Contains data used for training / testing model benchmarks
"""
import os
from pathlib import Path
import crypten
import PIL
import torch
import torch.nn.functional as F
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from torchvision import transforms
class GaussianClusters:
"""Generates Glussian clusters for binary classes"""
def __init__(self, n_samples=5000, n_features=20):
self.n_samples = n_samples
self.n_features = n_features
x, x_test, y, y_test = GaussianClusters.generate_data(n_samples, n_features)
self.x, self.y = x, y
self.x_test, self.y_test = x_test, y_test
@staticmethod
def generate_data(n_samples, n_features):
"""Generates Glussian clusters for binary classes
Args:
n_samples (int): number of samples
n_features (int): number of features
Returns: torch tensors with inputs and labels
"""
x, y = make_classification(
n_samples=n_samples,
n_features=n_features,
# by default, 2 features are redundant
n_informative=n_features - 2,
n_classes=2,
)
x = torch.tensor(x).float()
y = torch.tensor(y).float().unsqueeze(-1)
return train_test_split(x, y)
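# Illustrative shapes, assuming the defaults above and scikit-learn's default
# 25% test split: x is (3750, 20), x_test is (1250, 20), and the labels are
# column vectors of matching length.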
class Images:
def __init__(self):
self.x = self.preprocess_image()
# image net 1k classes
class_id = 463
self.y = torch.tensor([class_id]).long()
self.y_onehot = F.one_hot(self.y, 1000)
self.x_test, self.y_test = self.x, self.y
def preprocess_image(self):
"""Preprocesses sample image"""
path = os.path.dirname(os.path.realpath(__file__))
filename = "dog.jpg"
input_image = PIL.Image.open(Path(os.path.join(path, filename)))
preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)
return input_batch
| CrypTen-main | benchmarks/data.py |
CrypTen-main | benchmarks/dash_app/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
import dash
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from load_data import get_aggregated_data, get_available_dates
from plotly.subplots import make_subplots
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# load data using relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
available_dates = get_available_dates(DATA_PATH)
subdirs = ["1pc", "2pc"]
func_df, model_df = get_aggregated_data(DATA_PATH, subdirs)
colors_discrete = px.colors.qualitative.Set2
template = "simple_white"
# Since we're adding callbacks to elements that don't exist in the app.layout,
# Dash will raise an exception to warn us that we might be
# doing something wrong.
# In this case, we're adding the elements through a callback, so we can ignore
# the exception.
app.config.suppress_callback_exceptions = True
app.layout = html.Div(
[dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
)
index_page = html.Div(
children=[
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url("crypten-icon.png"),
id="plotly-image",
style={
"height": "60px",
"width": "auto",
"margin-bottom": "25px",
},
)
],
className="one-third column",
),
html.Div(
[
html.Div(
[
html.H2("CrypTen", style={"margin-bottom": "0px"}),
html.H4("Benchmarks", style={"margin-top": "0px"}),
]
)
],
className="one-half column",
id="title",
),
html.Div(
[
html.A(
html.Button("Compare Dates", id="learn-more-button"),
href="/compare",
)
],
className="one-third column",
id="button",
),
],
id="header",
className="row flex-display",
style={"margin-bottom": "25px"},
),
dcc.Tabs(
[
dcc.Tab(label="1 party", value="1pc"),
dcc.Tab(label="2 party", value="2pc"),
],
id="benchmark-tabs",
value="1pc",
),
html.Div(
[
dcc.Dropdown(
id="select_date",
options=[
{"label": date, "value": date}
for date in sorted(available_dates)
],
value=sorted(available_dates)[-1],
),
html.Div(
[
html.H3("Functions"),
dcc.Markdown(
"""To reproduce or view assumptions see
[benchmarks](
https://github.com/facebookresearch/CrypTen/blob/master/benchmarks/benchmark.py#L68)
"""
),
html.H5("Runtimes"),
dcc.Markdown(
"""
* function runtimes are averaged over 10 runs using a random tensor of size (100, 100).
* `max` and `argmax` are excluded as they take considerably longer.
* As of 02/25/2020, `max` / `argmax` take 3min 13s Β± 4.73s
""",
className="bullets",
),
]
),
html.Div(
[
html.Div(
[dcc.Graph(id="func-runtime-crypten")],
className="six columns",
),
html.Div(
[dcc.Graph(id="func-runtime-crypten-v-plain")],
className="six columns",
),
],
className="row",
),
html.H5("Errors"),
dcc.Markdown(
"""
* function errors are over the domain (0, 100] with step size 0.01
* exp, sin, and cos are over the domain (0, 10) with step size 0.001
""",
className="bullets",
),
html.Div(
[
html.Div(
[dcc.Graph(id="func-abs-error")], className="six columns"
),
html.Div(
[dcc.Graph(id="func-relative-error")],
className="six columns",
),
],
className="row",
),
html.Div(
[
html.H3("Models"),
dcc.Markdown(
"""
For model details or to reproduce see
[models](https://github.com/facebookresearch/CrypTen/blob/master/benchmarks/models.py)
and
[training details](
https://github.com/facebookresearch/CrypTen/blob/master/benchmarks/benchmark.py#L293).
* trained on Gaussian clusters for binary classification
* uses SGD with 5k samples, 20 features, over 20 epochs, and 0.1 learning rate
* feedforward has three hidden layers with intermediary RELU and
final sigmoid activations
* note benchmarks run with world size 1 using CPython
""",
className="bullets",
),
dcc.Dropdown(
id="select_comparison",
options=[
{"label": comp, "value": comp}
for comp in [
"CPU vs GPU",
"CPU vs Plaintext",
"GPU vs Plaintext",
]
],
value="CPU vs GPU",
),
html.Div(
[
html.Div(
[dcc.Graph(id="model-training-time")],
className="six columns",
),
html.Div(
[dcc.Graph(id="model-inference-time")],
className="six columns",
),
html.Div(
[dcc.Graph(id="model-accuracy")],
className="six columns",
),
],
className="row",
),
]
),
]
),
],
id="mainContainer",
style={"display": "flex", "flex-direction": "column"},
)
comparison_layout = html.Div(
[
html.Div(id="compare"),
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url("crypten-icon.png"),
id="plotly-image",
style={
"height": "60px",
"width": "auto",
"margin-bottom": "25px",
},
)
],
className="one-third column",
),
html.Div(
[
html.Div(
[
html.H2("CrypTen", style={"margin-bottom": "0px"}),
html.H4("Benchmarks", style={"margin-top": "0px"}),
]
)
],
className="one-half column",
id="title",
),
html.Div(
[
html.A(
html.Button("Benchmarks", id="learn-more-button"), href="/"
)
],
className="one-third column",
id="button",
),
],
id="header",
className="row flex-display",
style={"margin-bottom": "25px"},
),
html.Div(
[
html.H6("Previous Date"),
dcc.Dropdown(
id="start_date",
options=[
{"label": date, "value": date} for date in available_dates
],
value=sorted(available_dates)[0],
),
html.H6("Current Date"),
dcc.Dropdown(
id="end_date",
options=[
{"label": date, "value": date} for date in available_dates
],
value=sorted(available_dates)[-1],
),
html.Div(
[
html.H3("Functions"),
dcc.Dropdown(
options=[
{"label": func, "value": func}
for func in func_df["function"].unique()
],
multi=True,
value="sigmoid",
id="funcs",
),
dcc.Markdown(
"""
* function runtimes are averaged over 10 runs using a random tensor of size (100, 100).
* `max` and `argmax` are excluded as they take considerably longer.
* As of 02/25/2020, `max` / `argmax` take 3min 13s Β± 4.73s
""",
className="bullets",
),
]
),
html.Div(
[
html.Div(
[dcc.Graph(id="runtime-diff")], className="six columns"
),
html.Div([dcc.Graph(id="error-diff")], className="six columns"),
],
className="row",
),
html.Div(
[
html.Br(),
html.Br(),
html.Br(),
html.H4("Historical"),
html.Div(
[dcc.Graph(id="runtime-timeseries")],
className="six columns",
),
html.Div(
[dcc.Graph(id="error-timeseries")], className="six columns"
),
],
className="row",
),
]
),
]
)
@app.callback(
Output("func-runtime-crypten", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_runtime_crypten(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df["runtime in seconds"] = filter_df["runtime crypten"]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="runtime in seconds",
y="function",
color="device",
orientation="h",
error_x="runtime crypten error plus",
error_x_minus="runtime crypten error minus",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten",
barmode="group",
)
fig.update_layout(height=500)
return fig
@app.callback(
Output("func-runtime-crypten-v-plain", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_runtime_crypten_v_plain(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="runtime gap",
y="function",
color="device",
orientation="h",
error_x="runtime gap error plus",
error_x_minus="runtime gap error minus",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten vs. Plaintext",
barmode="group",
)
fig.update_layout(height=500)
return fig
@app.callback(
Output("func-abs-error", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_abs_error(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="total abs error",
text="total abs error",
color="device",
log_x=True,
y="function",
orientation="h",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten Absolute Error",
barmode="group",
)
fig.update_traces(texttemplate="%{text:.1f}", textposition="outside")
fig.update_layout(height=500)
return fig
@app.callback(
Output("func-relative-error", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_relative_error(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="average relative error",
text="average relative error",
y="function",
color="device",
orientation="h",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten Relative Error",
barmode="group",
)
fig.update_traces(texttemplate="%{text:%}", textposition="outside")
fig.update_layout(height=500)
return fig
def process_comparison_options(filter_df, option):
color = "type"
if option == "CPU vs Plaintext":
filter_df = filter_df[filter_df["device"] == "cpu"]
filter_df["type"] = np.where(
filter_df["is plain text"], "Plain Text", "CrypTen"
)
elif option == "GPU vs Plaintext":
filter_df = filter_df[filter_df["device"] == "gpu"]
if not filter_df.empty:
filter_df["type"] = np.where(
filter_df["is plain text"], "Plain Text", "CrypTen"
)
elif option == "CPU vs GPU":
filter_df = filter_df[~filter_df["is plain text"]]
color = "device"
return filter_df, color
def render_emtpy_figure():
return {
"layout": {
"xaxis": {"visible": False},
"yaxis": {"visible": False},
"annotations": [
{
"text": "No matching data found",
"xref": "paper",
"yref": "paper",
"showarrow": False,
"font": {"size": 28},
}
],
}
}
@app.callback(
Output("model-training-time", "figure"),
[
Input("select_date", "value"),
Input("benchmark-tabs", "value"),
Input("select_comparison", "value"),
],
)
def update_training_time(selected_date, mode, comp_opt):
try:
filter_df = model_df[model_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df, color = process_comparison_options(filter_df, comp_opt)
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="seconds per epoch",
text="seconds per epoch",
y="model",
color=color,
orientation="h",
barmode="group",
color_discrete_sequence=colors_discrete,
template=template,
title="Model Training Time",
)
fig.update_layout(xaxis={"range": [0, filter_df["seconds per epoch"].max() * 1.1]})
fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
return fig
@app.callback(
Output("model-inference-time", "figure"),
[
Input("select_date", "value"),
Input("benchmark-tabs", "value"),
Input("select_comparison", "value"),
],
)
def update_inference_time(selected_date, mode, comp_opt):
try:
filter_df = model_df[model_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df, color = process_comparison_options(filter_df, comp_opt)
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="inference time",
text="inference time",
y="model",
color=color,
orientation="h",
barmode="group",
color_discrete_sequence=colors_discrete,
template=template,
title="Model Inference Time",
)
fig.update_layout(
xaxis={"range": [0, filter_df["inference time"].max() * 1.1]},
xaxis_title="inference time in seconds",
)
fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
return fig
@app.callback(
Output("model-accuracy", "figure"),
[
Input("select_date", "value"),
Input("benchmark-tabs", "value"),
Input("select_comparison", "value"),
],
)
def update_model_accuracy(selected_date, mode, comp_opt):
try:
filter_df = model_df[model_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df, color = process_comparison_options(filter_df, comp_opt)
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="accuracy",
text="accuracy",
y="model",
color=color,
orientation="h",
barmode="group",
color_discrete_sequence=colors_discrete,
template=template,
title="Model Accuracy",
)
fig.update_layout(xaxis={"range": [0, 1.0]})
fig.update_traces(texttemplate="%{text:%}", textposition="outside")
return fig
@app.callback(
Output("runtime-diff", "figure"),
[Input("start_date", "value"), Input("end_date", "value"), Input("funcs", "value")],
)
def update_runtime_diff(start_date, end_date, funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filter_df = func_df[func_df["mode"] == "1pc"]
func_df_cpu = filter_df[filter_df["device"] == "cpu"]
start_df = func_df_cpu[func_df_cpu["date"] == start_date]
end_df = func_df_cpu[func_df_cpu["date"] == end_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = make_subplots(
rows=len(funcs), cols=1, specs=[[{"type": "domain"}] for _ in range(len(funcs))]
)
for i, func in enumerate(funcs):
runtime = end_df[end_df["function"] == func]["runtime crypten"]
runtime_prev = start_df[start_df["function"] == func]["runtime crypten"]
func_text = func.capitalize()
fig.add_trace(
go.Indicator(
mode="number+delta",
value=float(runtime),
title={
"text": f"{func_text}<br><span style='font-size:0.8em;color:gray'>"
+ "runtime in seconds</span><br>"
},
delta={
"reference": float(runtime_prev),
"relative": True,
"increasing": {"color": "#ff4236"},
"decreasing": {"color": "#008000"},
},
),
row=i + 1,
col=1,
)
fig.update_layout(height=300 * len(funcs))
return fig
@app.callback(
Output("error-diff", "figure"),
[Input("start_date", "value"), Input("end_date", "value"), Input("funcs", "value")],
)
def update_error_diff(start_date, end_date, funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filter_df = func_df[func_df["mode"] == "1pc"]
func_df_cpu = filter_df[filter_df["device"] == "cpu"]
start_df = func_df_cpu[func_df_cpu["date"] == start_date]
end_df = func_df_cpu[func_df_cpu["date"] == end_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = make_subplots(
rows=len(funcs), cols=1, specs=[[{"type": "domain"}] for _ in range(len(funcs))]
)
for i, func in enumerate(funcs):
error = end_df[end_df["function"] == func]["total abs error"]
error_prev = start_df[start_df["function"] == func]["total abs error"]
func_text = func.capitalize()
fig.add_trace(
go.Indicator(
mode="number+delta",
value=float(error),
title={
"text": f"{func_text}<br><span style='font-size:0.8em;color:gray'>"
+ "total abs error</span><br>"
},
delta={
"reference": float(error_prev),
"relative": True,
"increasing": {"color": "#ff4236"},
"decreasing": {"color": "#008000"},
},
),
row=i + 1,
col=1,
)
fig.update_layout(height=300 * len(funcs))
return fig
@app.callback(Output("runtime-timeseries", "figure"), [Input("funcs", "value")])
def update_runtime_timeseries(funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filtered_df = func_df[func_df["function"].isin(funcs)]
filtered_df.sort_values("date", inplace=True)
except KeyError:
return render_emtpy_figure()
fig = px.line(
filtered_df, x="date", y="runtime crypten", template=template, color="function"
)
return fig
@app.callback(Output("error-timeseries", "figure"), [Input("funcs", "value")])
def update_error_timeseries(funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filtered_df = func_df[func_df["function"].isin(funcs)]
filtered_df.sort_values("date", inplace=True)
except KeyError:
return render_emtpy_figure()
fig = px.line(
filtered_df, x="date", y="total abs error", template=template, color="function"
)
return fig
@app.callback(
dash.dependencies.Output("page-content", "children"),
[dash.dependencies.Input("url", "pathname")],
)
def display_page(pathname):
"""Routes to page based on URL"""
if pathname == "/compare":
return comparison_layout
else:
return index_page
if __name__ == "__main__":
app.run_server(debug=True)
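# Usage note (illustrative): running `python app.py` starts the Dash dev server
# (http://127.0.0.1:8050 by default) serving the benchmark dashboard; the
# "/compare" route renders the date-comparison layout defined above.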
| CrypTen-main | benchmarks/dash_app/app.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pandas as pd
def get_aggregated_data(base_dir, subdirs):
"""Aggregate dataframe for model and func benchmarks assumining directory is structured as
DATA_PATH
|_2020-02-20
|_subdir1
|_func_benchmarks.csv
|_model_benchmarks.csv
|_func_benchmarks_cuda.csv (optional)
|_model_benchmarks_cuda.csv (optional)
|_subdir2
...
Args:
base_dir (pathlib.path): path containing month subdirectories
subdirs (list): a list of all subdirectories to aggregate dataframes from
Returns: tuple of pd.DataFrames containing func and model benchmarks with dates
"""
available_dates = get_available_dates(base_dir)
func_df, model_df = pd.DataFrame(), pd.DataFrame()
for subdir in subdirs:
func_df_cpu, model_df_cpu = read_subdir(base_dir, available_dates, subdir)
func_df_gpu, model_df_gpu = read_subdir(
base_dir, available_dates, subdir, cuda=True
)
tmp_func_df = pd.concat([func_df_cpu, func_df_gpu])
tmp_model_df = pd.concat([model_df_cpu, model_df_gpu])
tmp_func_df["mode"] = subdir
tmp_model_df["mode"] = subdir
func_df = func_df.append(tmp_func_df)
model_df = model_df.append(tmp_model_df)
return func_df, model_df
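# Usage sketch (mirrors app.py, where DATA_PATH points at dash_app/data):
#
#   func_df, model_df = get_aggregated_data(DATA_PATH, ["1pc", "2pc"])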
def load_df(path, cuda=False):
"""Load dataframe for model and func benchmarks assumining directory is structured as
path
|_func_benchmarks.csv
|_model_benchmarks.csv
|_func_benchmarks_cuda.csv (optional)
|_model_benchmarks_cuda.csv (optional)
Args:
path (str): path containing model and func benchmarks
cuda (bool) : if set to true, read the corresponding func and model benchmarks for cuda
Returns: tuple of pd.DataFrames containing func and model benchmarks with dates
"""
postfix = "_cuda" if cuda else ""
func_path = os.path.join(path, f"func_benchmarks{postfix}.csv")
model_path = os.path.join(path, f"model_benchmarks{postfix}.csv")
func_df, model_df = pd.DataFrame(), pd.DataFrame()
if os.path.exists(func_path):
func_df = pd.read_csv(func_path)
if os.path.exists(model_path):
model_df = pd.read_csv(model_path)
return func_df, model_df
def read_subdir(base_dir, dates, subdir="", cuda=False):
"""Builds dataframe for model and func benchmarks assuming directory is structured as
DATA_PATH
|_2020-02-20
|_subdir
|_func_benchmarks.csv
|_model_benchmarks.csv
|_func_benchmarks_cuda.csv (optional)
|_model_benchmarks_cuda.csv (optional)
Args:
base_dir (pathlib.path): path containing month subdirectories
dates (list of str): containing dates / subdirectories available
subdir (str): string indicating the name of the subdirectory to read benchmarks from
cuda (bool) : if set to true, read the corresponding func and model benchmarks for cuda
Returns: tuple of pd.DataFrames containing func and model benchmarks with dates
"""
func_df, model_df = pd.DataFrame(), pd.DataFrame()
device = "gpu" if cuda else "cpu"
for date in dates:
path = os.path.join(base_dir, date, subdir)
tmp_func_df, tmp_model_df = load_df(path, cuda=cuda)
set_metadata(tmp_func_df, date, device)
set_metadata(tmp_model_df, date, device)
func_df = func_df.append(tmp_func_df)
model_df = model_df.append(tmp_model_df)
if not func_df.empty:
func_df = compute_runtime_gap(func_df)
func_df = add_error_bars(func_df)
return func_df, model_df
def get_available_dates(data_dir):
"""Returns list of available dates in DATA_PATH directory"""
available_dates = []
for sub_dir in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, sub_dir)):
available_dates.append(sub_dir)
return available_dates
def set_metadata(df, date, device):
"""Set the device and date attribute for the dataframe"""
df["date"] = date
df["device"] = device
def compute_runtime_gap(func_df):
"""Computes runtime gap between CrypTen and Plain Text"""
func_df["runtime gap"] = func_df["runtime crypten"] / func_df["runtime"]
func_df["runtime gap Q1"] = func_df["runtime crypten Q1"] / func_df["runtime"]
func_df["runtime gap Q3"] = func_df["runtime crypten Q3"] / func_df["runtime"]
return func_df
def add_error_bars(func_df):
"""Adds error bars for plotting based on Q1 and Q3"""
columns = ["runtime crypten", "runtime gap"]
for col in columns:
func_df = calc_error_bar(func_df, col)
return func_df
def calc_error_bar(df, column_name):
"""Adds error plus and minus for plotting"""
error_plus = df[column_name + " Q3"] - df[column_name]
error_minus = df[column_name] - df[column_name + " Q1"]
df[column_name + " error plus"] = error_plus
df[column_name + " error minus"] = error_minus
return df
| CrypTen-main | benchmarks/dash_app/load_data.py |
CrypTen-main | configs/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import uuid
from argparse import ArgumentParser, REMAINDER
"""
Wrapper to launch MPC scripts as multiple processes.
"""
def main():
args = parse_args()
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["WORLD_SIZE"] = str(args.world_size)
processes = []
# Use random file so multiple jobs can be run simultaneously
INIT_METHOD = "file:///tmp/crypten-rendezvous-{}".format(uuid.uuid1())
for rank in range(0, args.world_size):
# each process's rank
current_env["RANK"] = str(rank)
current_env["RENDEZVOUS"] = INIT_METHOD
# spawn the processes
cmd = [args.training_script] + args.training_script_args
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
returncode=process.returncode, cmd=process.args
)
def parse_args():
"""
Helper function parsing the command line options
"""
parser = ArgumentParser(
description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"parties for MPC scripts"
)
# Optional arguments for the launch helper
parser.add_argument(
"--world_size",
type=int,
default=1,
help="The number of parties to launch." "Each party acts as its own process",
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
if __name__ == "__main__":
main()
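# A minimal invocation sketch; the script path and arguments below are
# illustrative only and assume a training script that calls crypten.init():
#
#   python scripts/distributed_launcher.py --world_size 2 \
#       examples/mpc_linear_svm/launcher.py --features 50 --epochs 50
#
# Each spawned process receives WORLD_SIZE, RANK and the shared file-based
# RENDEZVOUS address in its environment, as set in main() above.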
| CrypTen-main | scripts/distributed_launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file is a tool to run MPC distributed training over AWS.
To run distributed training, multiple AWS instances first need to be created
with a public AMI "Deep Learning AMI (Ubuntu) Version 24.0":
$ aws ec2 run-instances \
--image-id ami-0ddba16a97b1dcda5 \
--count 2 \
--instance-type t2.micro \
--key-name fair-$USER \
--tag-specifications "ResourceType=instance,Tags=[{Key=fair-user,Value=$USER}]"
Two EC2 instances will be created by the command line shown above. Assume
the ids of the two instances created are i-038dd14b9383b9d79 and
i-08f057b9c03d4a916 (the ids used in the examples below). Next, PyTorch and
CrypTen must be properly installed on every instance.
Then the following command lines can run the mpc_linear_svm example on the two
EC2 instances created above:
$ python3 crypten/scripts/aws_launcher.py \
--SSH_keys=/home/$USER/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=crypten/examples/mpc_linear_svm/mpc_linear_svm.py \
crypten/examples/mpc_linear_svm/launcher.py \
--features 50 \
--examples 100 \
--epochs 50 \
--lr 0.5 \
--skip_plaintext
If you want to train with AWS instances located at multiple regions, then you would need
to provide ssh_key_file for each instance:
$ python3 crypten/scripts/aws_launcher.py \
--regions=us-east-1,us-west-1 \
--SSH_keys=/home/$USER/.aws/east.pem,/home/$USER/.aws/west.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=crypten/examples/mpc_linear_svm/mpc_linear_svm.py \
crypten/examples/mpc_linear_svm/launcher.py \
--features 50 \
--examples 100 \
--epochs 50 \
--lr 0.5 \
--skip_plaintext
"""
import concurrent.futures
import configparser
import os
import sys
import time
import uuid
import warnings
from argparse import ArgumentParser, REMAINDER
from pathlib import Path
import boto3
import paramiko
def get_instances(ec2, instance_ids):
instances = list(
ec2.instances.filter(Filters=[{"Name": "instance-id", "Values": instance_ids}])
)
return instances
def connect_to_instance(instance, keypath, username, http_proxy=None):
print(f"Connecting to {instance.id}...")
ip_address = instance.public_ip_address
if http_proxy:
# paramiko.ProxyCommand does not do string substitution for %h %p,
# so 'nc --proxy-type http --proxy fwdproxy:8080 %h %p' would not work!
proxy = paramiko.ProxyCommand(
f"nc --proxy-type http --proxy {http_proxy} {ip_address} {22}"
)
proxy.settimeout(300)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 20
while retries > 0:
try:
client.connect(
ip_address,
username=username,
key_filename=keypath,
timeout=10,
sock=proxy if http_proxy else None,
)
print(f"Connected to {instance.id}")
break
except Exception as e:
print(f"Exception: {e} Retrying...")
retries -= 1
time.sleep(10)
return client
def add_prefix_each_line(prefix, str):
lines = [f"{prefix}{line}" for line in str.split("\n")]
return "\n".join(lines)
def run_command(instance, client, cmd, environment=None, inputs=None):
stdin, stdout, stderr = client.exec_command(
cmd, get_pty=True, environment=environment
)
if inputs:
for inp in inputs:
stdin.write(inp)
def read_lines(fin, fout, line_head):
line = ""
while not fin.channel.exit_status_ready():
line += fin.read(1).decode("utf8")
if line.endswith("\n"):
print(f"{line_head}{line[:-1]}", file=fout)
line = ""
if line:
# print what remains in line buffer, in case fout does not
# end with '\n'
print(f"{line_head}{line[:-1]}", file=fout)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as printer:
printer.submit(read_lines, stdout, sys.stdout, f"[{instance} STDOUT] ")
printer.submit(read_lines, stderr, sys.stderr, f"[{instance} STDERR] ")
def upload_file(instance_id, client, localpath, remotepath):
ftp_client = client.open_sftp()
print(f"Uploading `{localpath}` to {instance_id}...")
ftp_client.put(localpath, remotepath)
ftp_client.close()
print(f"`{localpath}` uploaded to {instance_id}.")
def main():
args = parse_args()
cf = configparser.ConfigParser()
cf.read(args.credentials)
warnings.filterwarnings(
"ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>"
)
regions = args.regions.split(",")
instance_ids = args.instances.split(",")
ssh_key_files = args.ssh_key_file.split(",")
instances = []
if len(regions) > 1:
print("Multiple regions detected")
assert len(instance_ids) == len(
ssh_key_files
), "{} instance ids are provided, but {} SSH keys found.".format(
len(instance_ids), len(ssh_key_files)
)
assert len(instance_ids) == len(
regions
), "{} instance ids are provided, but {} regions found.".format(
len(instance_ids), len(regions)
)
for i, region in enumerate(regions):
session = boto3.session.Session(
aws_access_key_id=cf["default"]["aws_access_key_id"],
aws_secret_access_key=cf["default"]["aws_secret_access_key"],
region_name=region,
)
ec2 = session.resource("ec2")
instance = get_instances(ec2, [instance_ids[i]])
instances += instance
else:
session = boto3.session.Session(
aws_access_key_id=cf["default"]["aws_access_key_id"],
aws_secret_access_key=cf["default"]["aws_secret_access_key"],
region_name=regions[0],
)
ec2 = session.resource("ec2")
instances = get_instances(ec2, instance_ids)
assert (
len(ssh_key_files) == 1
), "1 region is detected, but {} SSH keys found.".format(len(ssh_key_files))
ssh_key_files = [ssh_key_files[0] for _ in range(len(instances))]
assert len(instance_ids) == len(
instances
), "{} instance ids are provided, but {} found.".format(
len(instance_ids), len(instances)
)
# Only print the public IP addresses of the instances.
# Then do nothing else and return.
if args.only_show_instance_ips:
for instance in instances:
print(instance.public_ip_address)
return
world_size = len(instances)
print(f"Running world size {world_size} with instances: {instances}")
master_instance = instances[0]
# Key: instance id; value: paramiko.SSHClient object.
client_dict = {}
for i, instance in enumerate(instances):
client = connect_to_instance(
instance, ssh_key_files[i], args.ssh_user, args.http_proxy
)
client_dict[instance.id] = client
assert os.path.exists(
args.training_script
), f"File `{args.training_script}` does not exist"
file_paths = args.aux_files.split(",") if args.aux_files else []
for local_path in file_paths:
assert os.path.exists(local_path), f"File `{local_path}` does not exist"
remote_dir = f"aws-launcher-tmp-{uuid.uuid1()}"
script_basename = os.path.basename(args.training_script)
remote_script = os.path.join(remote_dir, script_basename)
# Upload files to all instances concurrently.
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as uploaders:
for instance_id, client in client_dict.items():
run_command(instance_id, client, f"mkdir -p {remote_dir}")
uploaders.submit(
upload_file, instance_id, client, args.training_script, remote_script
)
for local_path in file_paths:
uploaders.submit(
upload_file,
instance_id,
client,
local_path,
os.path.join(remote_dir, os.path.basename(local_path)),
)
for instance_id, client in client_dict.items():
run_command(instance_id, client, f"chmod +x {remote_script}")
run_command(instance_id, client, f"ls -al {remote_dir}")
environment = {
"WORLD_SIZE": str(world_size),
"RENDEZVOUS": "env://",
"MASTER_ADDR": master_instance.private_ip_address,
"MASTER_PORT": str(args.master_port),
}
with concurrent.futures.ThreadPoolExecutor(max_workers=world_size) as executor:
rank = 0
for instance_id, client in client_dict.items():
environment["RANK"] = str(rank)
# TODO: Although paramiko.SSHClient.exec_command() can accept
# an argument `environment`, it seems not to take effect in
            # practice. It might be because "Servers may silently reject
# some environment variables" according to paramiko document.
# As a workaround, here all environment variables are explicitly
# exported.
environment_cmd = "; ".join(
[f"export {key}={value}" for (key, value) in environment.items()]
)
prepare_cmd = f"{args.prepare_cmd}; " if args.prepare_cmd else ""
cmd = "{}; {} {} {} {}".format(
environment_cmd,
f"cd {remote_dir} ;",
prepare_cmd,
f"./{script_basename}",
" ".join(args.training_script_args),
)
print(f"Run command: {cmd}")
executor.submit(run_command, instance_id, client, cmd, environment)
rank += 1
# Cleanup temp dir.
for instance_id, client in client_dict.items():
run_command(instance_id, client, f"rm -rf {remote_dir}")
client.close()
def parse_args():
"""
Helper function parsing the command line options
"""
parser = ArgumentParser(
description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"parties for MPC scripts on AWS"
)
parser.add_argument(
"--credentials",
type=str,
default=f"{Path.home()}/.aws/credentials",
help="Credentials used to access AWS",
)
parser.add_argument(
"--only_show_instance_ips",
action="store_true",
default=False,
help="Only show public IPs of the given instances."
"No other actions will be done",
)
parser.add_argument("--regions", type=str, default="us-west-2", help="AWS Region")
parser.add_argument(
"--instances",
type=str,
required=True,
help="The comma-separated ids of AWS instances",
)
parser.add_argument(
"--master_port",
type=int,
default=29500,
help="The port used by master instance " "for distributed training",
)
parser.add_argument(
"--ssh_key_file",
type=str,
required=True,
help="Path to the RSA private key file " "used for instance authentication",
)
parser.add_argument(
"--ssh_user",
type=str,
default="ubuntu",
help="The username to ssh to AWS instance",
)
parser.add_argument(
"--http_proxy",
type=str,
default=None,
help="If not none, use the http proxy specified "
"(e.g., fwdproxy:8080) to ssh to AWS instance",
)
parser.add_argument(
"--aux_files",
type=str,
default=None,
help="The comma-separated paths of additional files "
" that need to be transferred to AWS instances. "
"If more than one file needs to be transferred, "
"the basename of any two files can not be the "
"same.",
)
parser.add_argument(
"--prepare_cmd",
type=str,
default="",
help="The command to run before running distribute "
"training for prepare purpose, e.g., setup "
"environment, extract data files, etc.",
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single machine training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
if __name__ == "__main__":
main()
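# A rough sketch of what each remote party ends up executing, with values
# filled in from the environment dict and command string assembled in main();
# the angle-bracket placeholders are illustrative:
#
#   export WORLD_SIZE=<#instances>; export RENDEZVOUS=env://; \
#   export MASTER_ADDR=<master private IP>; export MASTER_PORT=29500; \
#   export RANK=<party rank>; cd <remote_dir>; <prepare_cmd>; \
#   ./<training_script> <training_script_args>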
| CrypTen-main | scripts/aws_launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torch
from torchvision import datasets, transforms
def _get_norm_mnist(dir, reduced=None, binary=False):
"""Downloads and normalizes mnist"""
mnist_train = datasets.MNIST(dir, download=True, train=True)
mnist_test = datasets.MNIST(dir, download=True, train=False)
# compute normalization factors
data_all = torch.cat([mnist_train.data, mnist_test.data]).float()
data_mean, data_std = data_all.mean(), data_all.std()
tensor_mean, tensor_std = data_mean.unsqueeze(0), data_std.unsqueeze(0)
# normalize
mnist_train_norm = transforms.functional.normalize(
mnist_train.data.float(), tensor_mean, tensor_std
)
mnist_test_norm = transforms.functional.normalize(
mnist_test.data.float(), tensor_mean, tensor_std
)
# change all nonzero labels to 1 if binary classification required
if binary:
mnist_train.targets[mnist_train.targets != 0] = 1
mnist_test.targets[mnist_test.targets != 0] = 1
# create a reduced dataset if required
if reduced is not None:
mnist_norm = (mnist_train_norm[:reduced], mnist_test_norm[:reduced])
mnist_labels = (mnist_train.targets[:reduced], mnist_test.targets[:reduced])
else:
mnist_norm = (mnist_train_norm, mnist_test_norm)
mnist_labels = (mnist_train.targets, mnist_test.targets)
return mnist_norm, mnist_labels
def split_features(
split=0.5, dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Splits features between Party 1 and Party 2"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
num_features = mnist_train_norm.shape[1]
split_point = int(split * num_features)
party1_train = mnist_train_norm[:, :, :split_point]
party2_train = mnist_train_norm[:, :, split_point:]
party1_test = mnist_test_norm[:, :, :split_point]
party2_test = mnist_test_norm[:, :, split_point:]
torch.save(party1_train, os.path.join(dir, party1 + "_train.pth"))
torch.save(party2_train, os.path.join(dir, party2 + "_train.pth"))
torch.save(party1_test, os.path.join(dir, party1 + "_test.pth"))
torch.save(party2_test, os.path.join(dir, party2 + "_test.pth"))
torch.save(mnist_train_labels, os.path.join(dir, "train_labels.pth"))
torch.save(mnist_test_labels, os.path.join(dir, "test_labels.pth"))
def split_observations(
split=0.5, dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Splits observations between Party 1 and Party 2"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
num_train_obs = mnist_train_norm.shape[0]
obs_train_split = int(split * num_train_obs)
num_test_obs = mnist_test_norm.shape[0]
obs_test_split = int(split * num_test_obs)
party1_train = mnist_train_norm[:obs_train_split, :, :]
party2_train = mnist_train_norm[obs_train_split:, :, :]
party1_test = mnist_test_norm[:obs_test_split, :, :]
party2_test = mnist_test_norm[obs_test_split:, :, :]
torch.save(party1_train, os.path.join(dir, party1 + "_train.pth"))
torch.save(party2_train, os.path.join(dir, party2 + "_train.pth"))
torch.save(party1_test, os.path.join(dir, party1 + "_test.pth"))
torch.save(party2_test, os.path.join(dir, party2 + "_test.pth"))
party1_train_labels = mnist_train_labels[:obs_train_split]
party1_test_labels = mnist_test_labels[:obs_test_split]
party2_train_labels = mnist_train_labels[obs_train_split:]
party2_test_labels = mnist_test_labels[obs_test_split:]
torch.save(party1_train_labels, os.path.join(dir, party1 + "_train_labels.pth"))
torch.save(party1_test_labels, os.path.join(dir, party1 + "_test_labels.pth"))
torch.save(party2_train_labels, os.path.join(dir, party2 + "_train_labels.pth"))
torch.save(party2_test_labels, os.path.join(dir, party2 + "_test_labels.pth"))
def split_features_v_labels(
dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Gives Party 1 features and Party 2 labels"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
torch.save(mnist_train_norm, os.path.join(dir, party1 + "_train.pth"))
torch.save(mnist_test_norm, os.path.join(dir, party1 + "_test.pth"))
torch.save(mnist_train_labels, os.path.join(dir, party2 + "_train_labels.pth"))
torch.save(mnist_test_labels, os.path.join(dir, party2 + "_test_labels.pth"))
def split_train_v_test(
dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Gives Party 1 training data and Party 2 the test data"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
torch.save(mnist_train_norm, os.path.join(dir, party1 + "_train.pth"))
torch.save(mnist_test_norm, os.path.join(dir, party2 + "_test.pth"))
torch.save(mnist_train_labels, os.path.join(dir, party1 + "_train_labels.pth"))
torch.save(mnist_test_labels, os.path.join(dir, party2 + "_test_labels.pth"))
def main():
parser = argparse.ArgumentParser("Split data for use in Tutorials")
parser.add_argument(
"--option",
type=str,
choices={"features", "data", "features_v_labels", "train_v_test"},
)
parser.add_argument("--ratio", type=float, default=0.72)
parser.add_argument("--name_party1", type=str, default="alice")
parser.add_argument("--name_party2", type=str, default="bob")
parser.add_argument("--dest", type=str, default="/tmp")
parser.add_argument("--reduced", type=int, default=None)
parser.add_argument("--binary", action="store_true")
args = parser.parse_args()
if args.option == "features":
split_features(
split=args.ratio,
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
elif args.option == "data":
split_observations(
split=args.ratio,
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
elif args.option == "features_v_labels":
split_features_v_labels(
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
elif args.option == "train_v_test":
split_train_v_test(
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
else:
raise ValueError("Invalid split option")
if __name__ == "__main__":
main()
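# A minimal usage sketch; the destination directory and party names below are
# just the defaults declared in main():
#
#   python tutorials/mnist_utils.py --option features --ratio 0.5 \
#       --reduced 100 --binary --dest /tmp
#
# This writes alice_train.pth / bob_train.pth (plus the matching *_test.pth
# tensors and label files) under /tmp for use in the CrypTen tutorials.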
| CrypTen-main | tutorials/mnist_utils.py |
#! /usr/bin/env python
import sys
if len(sys.argv) != 4:
print 'Wrong number of arguments'
print 'USAGE: ./compute_stats_helper.py $label_tsvfile $prediction_tsvfile $confidence'
exit(1)
label_filename = sys.argv[1]
pred_filename = sys.argv[2]
confidence = float(sys.argv[3])
# read label filename
labels = {}
id2labeler = {}
labelers = set()
with open(label_filename) as f:
for i, line in enumerate(f):
line = line.split('\t')
relationid = line[0].lower().strip()
is_correct = line[1].lower().strip()
labeler = line[2].strip()
id2labeler[relationid] = labeler
if labeler not in labelers:
labelers.add(labeler)
if is_correct == 't':
labels[relationid] = True
elif is_correct == 'f':
labels[relationid] = False
# read predictions
predictions = {}
with open(pred_filename) as f:
for i, line in enumerate(f):
line = line.split('\t')
relationid = line[0].lower().strip()
expectation = float(line[1].strip())
predictions[relationid] = expectation
# Evaluate number of true/false positives/negative
true_positives_total = 0
true_negatives_total = 0
false_positives_total = 0
false_negatives_total = 0
# evaluate number of true/false positives/negatives per labeler
labeler2FP = {}
labeler2TP = {}
labeler2FN = {}
labeler2TN = {}
for labeler in labelers:
labeler2FP[labeler] = 0
labeler2TP[labeler] = 0
labeler2FN[labeler] = 0
labeler2TN[labeler] = 0
# Count
for label_id in labels:
# if the labeled mention is in the prediction set
if label_id in predictions:
if labels[label_id] and predictions[label_id] >= confidence:
true_positives_total += 1
labeler2TP[id2labeler[label_id]] += 1
elif (not labels[label_id]) and predictions[label_id] >= confidence:
false_positives_total += 1
labeler2FP[id2labeler[label_id]] += 1
elif labels[label_id] and not predictions[label_id] >= confidence:
false_negatives_total += 1
labeler2FN[id2labeler[label_id]] += 1
else:
true_negatives_total += 1
labeler2TN[id2labeler[label_id]] += 1
# if the labeled mention is not in the prediction set (was ruled out)
else:
if labels[label_id]:
# was true but rejected
false_negatives_total += 1
labeler2FN[id2labeler[label_id]] += 1
else:
true_negatives_total += 1
labeler2TN[id2labeler[label_id]] += 1
# Print the total ones
print '##### SUMMARY #####'
print 'True positives:\t'+str(true_positives_total)
print 'True negatives:\t'+str(true_negatives_total)
print 'False positives:\t'+str(false_positives_total)
print 'False negatives:\t'+str(false_negatives_total)
print '\n'
# compute precision, recall and F1
precision = float(true_positives_total)/float(true_positives_total+false_positives_total)
recall = float(true_positives_total)/float(true_positives_total+false_negatives_total)
F1_score = 2*precision*recall / (precision+recall)
print 'Precision:\t'+str(precision)
print 'Recall:\t'+str(recall)
print 'F1 score:\t'+str(F1_score)
print '\n'
# Now do the same for every labeler
for labeler in labelers:
print '##### '+labeler+' #####'
print 'True positives:\t'+str(labeler2TP[labeler])
print 'True negatives:\t'+str(labeler2TN[labeler])
print 'False positives:\t'+str(labeler2FP[labeler])
print 'False negatives:\t'+str(labeler2FN[labeler])
print '\n'
# test for precision for robustness
if labeler2TP[labeler] + labeler2FP[labeler] > 0:
precision = float(labeler2TP[labeler])/float(labeler2TP[labeler]+labeler2FP[labeler])
else:
precision = 'N/A (not enough examples)'
# test for recall for robustness
if labeler2TP[labeler] + labeler2FN[labeler] > 0:
recall = float(labeler2TP[labeler])/float(labeler2TP[labeler]+labeler2FN[labeler])
else:
recall = 'N/A (not enough examples)'
# test for F1 for robustness
if (labeler2TP[labeler] + labeler2FP[labeler] > 0) and (labeler2TP[labeler] + labeler2FN[labeler] > 0) and precision+recall > 0 :
F1_score = 2*precision*recall / (precision+recall)
else:
F1_score = 'N/A (not enough examples)'
print 'Precision:\t'+str(precision)
print 'Recall:\t'+str(recall)
print 'F1 score:\t'+str(F1_score)
print '\n'
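# A sketch of the expected inputs, with illustrative column values only:
#   label file rows:       <relation_id>\t<t|f>\t<labeler>
#   prediction file rows:  <relation_id>\t<expectation in [0,1]>
# A prediction counts as positive when its expectation >= the confidence
# threshold passed as the third command-line argument.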
| dd-genomics-master | results_log/compute_stats_helper.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id', 'int')
ddext.returns('wordidxs', 'int[]')
ddext.returns('mention_id', 'text')
ddext.returns('type', 'text')
ddext.returns('entity', 'text')
ddext.returns('words', 'text[]')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id, words, lemmas, poses, ners):
# TODO: currently we match only gene symbols and not phrases; consider matching phrases.
if 'genes' in SD:
genes = SD['genes']
else:
import os
APP_HOME = os.environ['GDD_HOME']
all_names = set()
dup_names = set()
all_synonyms = {}
dup_synonyms = set()
en_words = set([x.strip().lower() for x in open('%s/onto/dicts/english_words.tsv' % APP_HOME)])
gene_english = set([x.strip().lower() for x in open('%s/onto/manual/gene_english.tsv' % APP_HOME)])
gene_bigrams = set([x.strip().lower() for x in open('%s/onto/manual/gene_bigrams.tsv' % APP_HOME)])
gene_noisy = set([x.strip().lower() for x in open('%s/onto/manual/gene_noisy.tsv' % APP_HOME)])
gene_exclude = set([x.strip().lower() for x in open('%s/onto/manual/gene_exclude.tsv' % APP_HOME)])
SD['english'] = en_words
for line in open('%s/onto/data/genes.tsv' % APP_HOME):
#plpy.info(line)
name, synonyms, full_names = line.strip(' \r\n').split('\t')
synonyms = set(x.strip() for x in synonyms.split('|'))
synonyms.discard(name)
synonyms.discard('')
full_names = set(x.strip() for x in full_names.split('|'))
if name in all_names:
dup_names.add(name)
else:
all_names.add(name)
for s in synonyms:
if s in all_synonyms:
# we assign the synonym to the first name
dup_synonyms.add(s)
else:
all_synonyms[s] = name
plpy.info('===== DUPLICATE GENE NAMES')
plpy.info('\n'.join(sorted(dup_names)))
plpy.info('===== DUPLICATE GENE SYNONYMS')
plpy.info(sorted(dup_synonyms))
all_names -= gene_exclude
all_synonyms = {s: n for s, n in all_synonyms.iteritems() if s not in all_names and s not in gene_exclude}
genes = {
'names': all_names,
'synonyms': all_synonyms,
'names_lower': {x.lower(): x for x in all_names},
'synonyms_lower': {x.lower(): y for x, y in all_synonyms.iteritems()},
'exact_lower': set(x.lower() for x in gene_english | gene_bigrams | gene_noisy),
}
SD['genes'] = genes
for i in xrange(len(words)):
word = words[i]
if len(word) == 1:
continue
iword = word.lower()
match_type = None
if word in genes['names']:
match_type = 'NAME'
entity = word
elif word in genes['synonyms']:
match_type = 'SYN'
entity = genes['synonyms'][word]
else:
if iword in genes['exact_lower']:
continue
elif iword in genes['names_lower']:
match_type = 'iNAME'
entity = genes['names_lower'][iword]
elif iword in genes['synonyms_lower']:
match_type = 'iSYN'
entity = genes['synonyms_lower'][iword]
else:
continue
truth = True
# a two-letter capital word
if len(word) == 2 and word.isupper() and word.isalpha():
has_pub_date = 'DATE' in ners and 'NUMBER' in ners
# is or right next to a person/organization word
for j in xrange(max(0, i - 1), min(i + 2, len(words))):
if has_pub_date and ners[j] in ('PERSON', 'ORGANIZATION'):
truth = False
break
else:
truth = None
mid = '%s_%s_%d_1' % (doc_id, sent_id, i)
yield doc_id, sent_id, [i], mid, match_type, entity, [word], truth
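# Each yielded tuple follows the ddext.returns() schema declared in init();
# an illustrative (made-up) row would be
#   ('doc123', 7, [12], 'doc123_7_12_1', 'NAME', 'BRCA1', ['BRCA1'], True)
# i.e. doc_id, sent_id, wordidxs, mention_id, match type, canonical gene
# entity, matched words, and the (possibly None) supervision label.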
| dd-genomics-master | xapp/code/gene_mentions.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id', 'int')
ddext.returns('wordidxs', 'int[]')
ddext.returns('mention_id', 'text')
ddext.returns('type', 'text')
ddext.returns('entity', 'text')
ddext.returns('words', 'text[]')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id, words, lemmas, poses, ners):
if 'diseases' in SD:
trie = SD['trie']
diseases = SD['diseases']
diseases_bad = SD['diseases_bad']
genes = SD['genes']
delim_re = SD['delim_re']
else:
import os
APP_HOME = os.environ['GDD_HOME']
import re
diseases = {}
all_diseases = [x.strip().split('\t', 1) for x in open('%s/onto/data/all_diseases.tsv' % APP_HOME)]
diseases_en = set([x.strip() for x in open('%s/onto/data/all_diseases_en.tsv' % APP_HOME)])
diseases_en_good = set([x.strip() for x in open('%s/onto/manual/disease_en_good.tsv' % APP_HOME)])
diseases_bad = set([x.strip() for x in open('%s/onto/manual/disease_bad.tsv' % APP_HOME)])
SD['diseases_bad'] = diseases_bad
diseases_exclude = diseases_bad | diseases_en - diseases_en_good
delim_re = re.compile('[^\w-]+') # NOTE: this also removes apostrophe
SD['delim_re'] = delim_re
diseases_norm = {}
trie = {} # special key '$' means terminal nodes
for phrase, ids in all_diseases:
if phrase in diseases_exclude:
continue
diseases[phrase] = ids
phrase_norm = delim_re.sub(' ', phrase).strip()
# print phrase_norm
tokens = phrase_norm.split()
node = trie
for w in tokens:
if w not in node:
node[w] = {}
node = node[w]
if '$' not in node:
node['$'] = []
node['$'].append((ids, phrase))
if phrase_norm not in diseases_norm:
diseases_norm[phrase_norm] = ids
else:
diseases_norm[phrase_norm] = '|'.join(sorted(set(ids.split('|')) | set(diseases_norm[phrase_norm].split('|'))))
SD['diseases'] = diseases
SD['trie'] = trie
genes = set()
for line in open('%s/onto/data/genes.tsv' % APP_HOME):
#plpy.info(line)
name, synonyms, full_names = line.strip(' \r\n').split('\t')
synonyms = set(synonyms.split('|'))
genes.add(name.lower())
for s in synonyms:
genes.add(s.lower())
SD['genes'] = genes
# TODO: currently we do ignore-case exact match for single words; consider stemming.
# TODO: currently we do exact phrase matches; consider emitting partial matches.
for i in xrange(len(words)):
word = words[i]
iword = word.lower()
# single-token mention
if iword in diseases:
truth = True
mtype = 'ONE'
# http://www.ncbi.nlm.nih.gov/pubmed/23271346
# SCs for Stem Cells
# HFs for hair follicles
if word[-1] == 's' and word[:-1].isupper():
truth = False
mtype = 'PLURAL'
elif iword in genes:
truth = None
mtype = 'GSYM'
entity = iword
mid = '%s_%s_%d_1' % (doc_id, sent_id, i)
yield doc_id, sent_id, [i], mid, mtype, entity, [word], truth
# multi-token mentions
node = trie
depth = 0
for j in xrange(i, len(words)):
word = words[j]
iword = word.lower()
sword = delim_re.sub(' ', iword).strip()
if not sword:
if j == i:
break
continue
if sword in node:
node = node[sword]
depth += 1
if '$' in node and depth > 1:
for k, (ids, phrase) in enumerate(node['$']):
if phrase in diseases_bad:
continue
entity = phrase
mid = '%s_%s_%d_%d' % (doc_id, sent_id, i, j - i + 1)
if len(node['$']) > 1:
mid += '~' + str(k + 1)
wordids = range(i, j + 1)
yield doc_id, sent_id, wordids, mid, 'PHRASE', entity, words[i: j + 1], True
else:
break
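# The trie above maps token sequences to disease ids, with '$' marking a
# terminal node. As a hypothetical example, a two-token phrase such as
# "hair follicles" would be stored roughly as
#   {'hair': {'follicles': {'$': [('<ids>', 'hair follicles')]}}}
# so the scan in run() emits a PHRASE mention once it has walked from 'hair'
# to 'follicles' and found '$' at depth > 1.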
| dd-genomics-master | xapp/code/pheno_mentions.py |
import ddext
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.input('dep_paths', 'text[]')
ddext.input('dep_parents', 'int[]')
ddext.input('wordidxs', 'int[]')
ddext.input('relation_id', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('wordidxs_2', 'int[]')
ddext.returns('doc_id', 'text')
ddext.returns('relation_id', 'text')
ddext.returns('feature', 'text')
def run(doc_id, sent_id, words, lemmas, poses, ners, dep_paths, dep_parents, wordidxs, relation_id, wordidxs_1, wordidxs_2):
try:
import ddlib
except:
import os
DD_HOME = os.environ['DEEPDIVE_HOME']
from sys import path
path.append('%s/ddlib' % DD_HOME)
import ddlib
obj = dict()
obj['lemma'] = []
obj['words'] = []
obj['ner'] = []
obj['pos'] = []
obj['dep_graph'] = []
for i in xrange(len(words)):
obj['lemma'].append(lemmas[i])
obj['words'].append(words[i])
obj['ner'].append(ners[i])
obj['pos'].append(poses[i])
obj['dep_graph'].append(
str(int(dep_parents[i])) + "\t" + dep_paths[i] + "\t" + str(i))
word_obj_list = ddlib.unpack_words(
obj, lemma='lemma', pos='pos', ner='ner', words='words', dep_graph='dep_graph')
gene_span = ddlib.get_span(wordidxs_1[0], len(wordidxs_1))
pheno_span = ddlib.get_span(wordidxs_2[0], len(wordidxs_2))
features = set()
for feature in ddlib.get_generic_features_relation(word_obj_list, gene_span, pheno_span):
features.add(feature)
for feature in features:
yield doc_id, relation_id, feature
| dd-genomics-master | xapp/code/pair_features.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id_1', 'int')
ddext.input('mention_id_1', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('words_1', 'text[]')
ddext.input('entity_1', 'text')
ddext.input('type_1', 'text')
ddext.input('correct_1', 'boolean')
ddext.input('sent_id_2', 'int')
ddext.input('mention_id_2', 'text')
ddext.input('wordidxs_2', 'int[]')
ddext.input('words_2', 'text[]')
ddext.input('entity_2', 'text')
ddext.input('type_2', 'text')
ddext.input('correct_2', 'boolean')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id_1', 'int')
ddext.returns('sent_id_2', 'int')
ddext.returns('relation_id', 'text')
ddext.returns('type', 'text')
ddext.returns('mention_id_1', 'text')
ddext.returns('mention_id_2', 'text')
ddext.returns('wordidxs_1', 'int[]')
ddext.returns('wordidxs_2', 'int[]')
ddext.returns('words_1', 'text[]')
ddext.returns('words_2', 'text[]')
ddext.returns('entity_1', 'text')
ddext.returns('entity_2', 'text')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id_1, mention_id_1, wordidxs_1, words_1, entity_1, mtype_1, correct_1, sent_id_2, mention_id_2, wordidxs_2, words_2, entity_2, mtype_2, correct_2):
if 'pos_pairs' in SD:
pos_pairs = SD['pos_pairs']
else:
import os
APP_HOME = os.environ['GDD_HOME']
pos_pairs = set()
gpheno = [x.strip().split('\t') for x in open('%s/onto/data/hpo_phenotype_genes.tsv' % APP_HOME)]
gdisease = [x.strip().split('\t') for x in open('%s/onto/data/hpo_disease_genes.tsv' % APP_HOME)]
for pheno, gene in gpheno + gdisease:
pos_pairs.add((gene, pheno))
SD['pos_pairs'] = pos_pairs
rid = '%s_%s_g%s_p%s' % (doc_id, sent_id_1,
'%d:%d' % (wordidxs_1[0], wordidxs_1[-1]),
'%d:%d' % (wordidxs_2[0], wordidxs_2[-1]),
)
truth = None
if correct_1 and correct_2:
gene = entity_1
for pheno in entity_2.split()[0].split('|'):
if (gene, pheno) in pos_pairs:
truth = True
elif correct_1 is False or correct_2 is False:
truth = False
yield (doc_id,
sent_id_1,
sent_id_2,
rid,
None,
mention_id_1,
mention_id_2,
wordidxs_1,
wordidxs_2,
words_1,
words_2,
entity_1,
entity_2,
truth
)
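# Distant-supervision logic above, summarized: a candidate is labeled True if
# both mentions are marked correct and (gene, hpo_id) appears in the
# HPO-derived pos_pairs set, False if either mention is marked False, and is
# otherwise left unlabeled (None).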
| dd-genomics-master | xapp/code/gene_pheno_pairs.py |
import ddext
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.input('dep_paths', 'text[]')
ddext.input('dep_parents', 'int[]')
ddext.input('mention_id', 'text')
ddext.input('wordidxs', 'int[]')
ddext.returns('doc_id', 'text')
ddext.returns('mention_id', 'text')
ddext.returns('feature', 'text')
def run(doc_id, sent_id, words, lemmas, poses, ners, dep_paths, dep_parents, mention_id, wordidxs):
try:
import ddlib
except:
import os
DD_HOME = os.environ['DEEPDIVE_HOME']
from sys import path
path.append('%s/ddlib' % DD_HOME)
import ddlib
def unpack_(begin_char_offsets, end_char_offsets, words, lemmas, poses, ners, dep_parents, dep_paths):
wordobjs = []
for i in range(0, len(words)):
wordobjs.append(ddlib.Word(
begin_char_offset=None,
end_char_offset=None,
word=words[i],
lemma=lemmas[i],
pos=poses[i],
ner='', # NER is noisy on medical docs
dep_par=dep_parents[i],
dep_label=dep_paths[i]))
return wordobjs
begin_char_offsets = None
end_char_offsets = None
sentence = unpack_(begin_char_offsets, end_char_offsets, words, lemmas,
poses, ners, dep_parents, dep_paths)
span = ddlib.Span(begin_word_id=wordidxs[0], length=len(wordidxs))
for feature in ddlib.get_generic_features_mention(sentence, span):
yield doc_id, mention_id, feature
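# Each yielded (doc_id, mention_id, feature) row is one generic ddlib feature
# for the mention span; the NER field is deliberately blanked above because
# NER is noisy on medical text, so features come from words, lemmas, POS tags
# and the dependency parse only.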
| dd-genomics-master | xapp/code/mention_features.py |
#!/usr/bin/env python
import sys
if len(sys.argv) != 2:
    print 'Wrong number of arguments'
    print 'USAGE: ./convert_old_gp_labels.py $old_labels_tsvfile'
    exit(1)
old_labels_fn = sys.argv[1]
with open(old_labels_fn) as f:
for i, line in enumerate(f):
line = line.split('\t')
doc_id = line[0].strip()
section_id = line[1].strip()
sentence_id = line[2].strip()
gene_idx = line[3].strip().strip('{}')
pheno_idx = line[4].strip().strip('{}')
pheno_idx = pheno_idx.split(',')
pheno_idx_form = '-'.join(pheno_idx)
relation_id = [doc_id, section_id, sentence_id, gene_idx, doc_id, section_id, sentence_id, pheno_idx_form]
relation_id = '_'.join(relation_id)
is_correct = line[5].strip()
labeler = line[6].split('_')[0]
print relation_id+'\t'+is_correct+'\t'+labeler
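# Illustrative transformation (values are made up): an old-format input line
#   doc1<TAB>body<TAB>3<TAB>{5}<TAB>{8,9}<TAB>t<TAB>alice_2015
# is printed as
#   doc1_body_3_5_doc1_body_3_8-9<TAB>t<TAB>alice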
| dd-genomics-master | labeling/convert_old_gp_labels.py |
#!/usr/bin/env python
import json
import sys
import os.path
# get the labeling version number
version = 0 # in case the file doesn't exist
if os.path.exists('version_labeling'):
with open('version_labeling') as f:
for i, line in enumerate(f):
if i == 0:
                version = line.strip()
else:
print 'version_labeling file doesn\'t exist'
print 'setting version to 0'
if __name__ == "__main__":
tags_file = sys.argv[1]
labeler = sys.argv[2]
with open(tags_file) as t:
results = json.load(t)
for key in results['by_key']:
if 'is_correct' in results['by_key'][key]:
rv = results['by_key'][key]['is_correct']
is_correct = None
if rv == True:
is_correct = 't'
if rv == False:
is_correct = 'f'
print '%s\t%s\t%s\t%s' % (key, is_correct, labeler,version)
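# The tags file is expected to look roughly like (key and values illustrative):
#   {"by_key": {"doc1_body_3_5": {"is_correct": true}, ...}}
# which yields output lines of the form
#   doc1_body_3_5<TAB>t<TAB><labeler><TAB><version>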
| dd-genomics-master | labeling/extract_gene_labels_from_json.py |
#! /usr/bin/env python
import json
import sys
import os.path
# get the labeling version number
version = 0 # in case the file doesn't exist
if os.path.exists('version_labeling'):
with open('version_labeling') as f:
for i, line in enumerate(f):
if i == 0:
                version = line.strip()
else:
print 'version_labeling file doesn\'t exist'
print 'setting version to 0'
if __name__ == "__main__":
tags_file = sys.argv[1]
labeler = sys.argv[2]
with open(tags_file) as t:
results = json.load(t)
for key in results['by_key']:
if 'is_correct' in results['by_key'][key]:
rv = results['by_key'][key]['is_correct']
type_value = None
if u'Association' in results['by_key'][key] and results['by_key'][key]['Association'] == True:
type_value = None
rv = False
elif u'Causation' in results['by_key'][key] and results['by_key'][key]['Causation']:
type_value = 'causation'
elif u'association' in results['by_key'][key] and results['by_key'][key]['association'] == True:
type_value = None
rv = False
elif u'causation' in results['by_key'][key] and results['by_key'][key]['causation'] == True:
type_value = 'causation'
is_correct = None
if rv == True:
is_correct = 't'
if rv == False:
is_correct = 'f'
if is_correct == 't':
if type_value is not None:
print '%s\t%s\t%s\t%s' % (key, is_correct, labeler, version)
elif is_correct == 'f':
print '%s\t%s\t%s\t%s' % (key, is_correct, labeler, version)
| dd-genomics-master | labeling/extract_genepheno_causation_labels_from_json.py |
#!/usr/bin/env python
import json
import sys
import os.path
# get the labeling version number
version = 0 # in case the file doesn't exist
if os.path.exists('version_labeling'):
with open('version_labeling') as f:
for i, line in enumerate(f):
if i == 0:
                version = line.strip()
else:
print 'version_labeling file doesn\'t exist'
print 'setting version to 0'
if __name__ == "__main__":
tags_file = sys.argv[1]
labeler = sys.argv[2]
with open(tags_file) as t:
results = json.load(t)
for key in results['by_key']:
if 'is_correct' in results['by_key'][key]:
rv = results['by_key'][key]['is_correct']
is_correct = None
if rv == True:
is_correct = 't'
if rv == False:
is_correct = 'f'
print '%s\t%s\t%s\t%s' % (key, is_correct, labeler,version)
| dd-genomics-master | labeling/extract_pheno_labels_from_json.py |
#!/usr/bin/env python
import sys
if len(sys.argv) != 2:
    print 'Wrong number of arguments'
    print 'USAGE: ./convert_old_g_labels.py $old_labels_tsvfile'
    exit(1)
old_labels_fn = sys.argv[1]
with open(old_labels_fn) as f:
for i, line in enumerate(f):
line = line.split('\t')
doc_id = line[0].strip()
section_id = line[1].strip()
sentence_id = line[2].strip()
gene_idx = line[3].strip().strip('{}')
pheno_idx = line[4].strip().strip('{}')
pheno_idx = pheno_idx.split(',')
pheno_idx_form = '-'.join(pheno_idx)
relation_id = [doc_id, section_id, sentence_id, gene_idx, doc_id, section_id, sentence_id, pheno_idx_form]
relation_id = '_'.join(relation_id)
is_correct = line[5].strip()
labeler = line[6].split('_')[0]
print relation_id+'\t'+is_correct+'\t'+labeler
| dd-genomics-master | labeling/convert_old_g_labels.py |
#! /usr/bin/env python
import json
import sys
import os.path
# get the labeling version number
version = 0 # in case the file doesn't exist
if os.path.exists('version_labeling'):
with open('version_labeling') as f:
for i, line in enumerate(f):
if i == 0:
                version = line.strip()
else:
print 'version_labeling file doesn\'t exist'
print 'setting version to 0'
if __name__ == "__main__":
tags_file = sys.argv[1]
labeler = sys.argv[2]
with open(tags_file) as t:
results = json.load(t)
for key in results['by_key']:
if 'is_correct' in results['by_key'][key]:
rv = results['by_key'][key]['is_correct']
type_value = None
if u'Causation' in results['by_key'][key] and results['by_key'][key]['Causation'] == True:
type_value = None
rv = False
elif u'Association' in results['by_key'][key] and results['by_key'][key]['Association']:
type_value = 'association'
elif u'causation' in results['by_key'][key] and results['by_key'][key]['causation'] == True:
type_value = None
rv = False
elif u'association' in results['by_key'][key] and results['by_key'][key]['association'] == True:
type_value = 'association'
is_correct = None
if rv == True:
is_correct = 't'
if rv == False:
is_correct = 'f'
if is_correct == 't':
if type_value is not None:
print '%s\t%s\t%s\t%s' % (key, is_correct, labeler, version)
elif is_correct == 'f':
print '%s\t%s\t%s\t%s' % (key, is_correct, labeler, version)
| dd-genomics-master | labeling/extract_genepheno_association_labels_from_json.py |
#!/usr/bin/env python
import extractor_util as util
from collections import namedtuple
import os
import ddlib
import config
import sys
import re
parser = util.RowParser([
('relation_id', 'text'),
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('gene_mention_id', 'text'),
('gene_wordidxs', 'int[]'),
('pheno_mention_id', 'text'),
('pheno_wordidxs', 'int[]'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]')])
fr = config.GENE_PHENO['F']
Feature = namedtuple('Feature', ['doc_id', 'section_id', 'relation_id', 'name'])
bad_features = ['STARTS_WITH_CAPITAL_.*', 'NGRAM_1_\[to\]',
'NGRAM_1_\[a\]','NGRAM_1_\[and\]', 'NGRAM_1_\[in\]', 'IS_INVERTED',
'NGRAM_1_\[or\]', 'NGRAM_1_\[be\]', 'NGRAM_1_\[with\]',
'NGRAM_1_\[have\]', 'NER_SEQ_\[[O ]*\]$', 'W_NER_L_1_R_1_[\[\]O_]*$',
'LENGTHS_[0_1]', 'W_NER_L_[0-9]_R_[0-9]_[\[\] O_]*$',
'LENGTHS_\[[0-9]_[0-9]\]', 'NGRAM_2_\[have be\]', 'NGRAM_1_\[_\]',
'NGRAM_1_\[for\]', 'NGRAM_1_\[cause\]', 'KW_IND_\[cause\]',
'NGRAM_1_\[responsible\]', 'INV_KW_IND_\[inheritance\]',
'NGRAM_1_\[patient\]', 'KW_IND_\[mutation\]', 'NGRAM_2_\[cause of\]',
'NGRAM_1_\[result\]', 'NGRAM_2_\[result in\]', 'NGRAM_2_\[to cause\]',
'NER_SEQ_.*', 'NGRAM_1_\[of\]', 'NGRAM_2_\[as a\]', 'NGRAM_1_\[as\]',
'KW_IND_.*', 'IN_DICT_.*'
]
inv_bad_features = []
for f in bad_features:
inv_bad_features.append('INV_' + f)
bad_features.extend(inv_bad_features)
def create_ners_between(gene_wordidxs, pheno_wordidxs, ners):
if gene_wordidxs[0] < pheno_wordidxs[0]:
start = max(gene_wordidxs) + 1
end = min(pheno_wordidxs) - 1
else:
start = max(pheno_wordidxs) + 1
end = min(gene_wordidxs) - 1
prefix = 'NERS_BETWEEN_'
nonnull_ners = []
for i in xrange(start, end + 1):
ner = ners[i]
if ner != 'O':
nonnull_ners.append('[' + ner + ']')
rv = prefix + '_'.join(nonnull_ners)
return [rv]
non_alnum = re.compile('[\W_]+')
def take_feature(feat):
take = True
for bad_feature_pattern in bad_features:
# warning, match matches only from start of string
if re.match(bad_feature_pattern, feat):
take = False
break
return take
def get_sublists(lst, min_len):
for length in xrange(min_len, len(lst)):
for i in xrange(len(lst) - length + 1):
yield lst[i:i + length]
def get_my_window_features(row, l, r, prefix):
if r - l > 0:
words = [row.words[i].lower() for i in xrange(l, r+1)]
lemmas = [row.lemmas[i] for i in xrange(l, r+1)]
#ners = [row.ners[i] for i in xrange(l, r+1)]
for sublist in get_sublists(words, 2):
yield prefix + 'WORD_[' + '_'.join(sublist) + ']'
for sublist in get_sublists(lemmas, 2):
yield prefix + 'LEMMA_[' + '_'.join(sublist) + ']'
#for sublist in get_sublists(ners):
# yield left + 'NER_[' + '_'.join(sublist) + ']'
def get_features_around(row, wordidxs, prefix):
l1 = max(0, min(wordidxs) - 4)
r1 = max(0, min(wordidxs) - 1)
for feat in get_my_window_features(row, l1, r1, prefix + 'L_'):
yield feat
l2 = min(len(row.words) - 1, max(wordidxs) + 1)
r2 = min(len(row.words) - 1, max(wordidxs) + 4)
for feat in get_my_window_features(row, l2, r2, prefix + 'R_'):
yield feat
def get_cross_features_in(row, wordidxs1, wordidxs2, prefix):
if max(wordidxs1) <= min(wordidxs2):
l_wordidxs = wordidxs1
r_wordidxs = wordidxs2
else:
l_wordidxs = wordidxs2
r_wordidxs = wordidxs1
l = max(l_wordidxs) + 1
r = min(r_wordidxs) - 1
for i in xrange(l+1, r):
words1 = [row.words[j].lower() for j in xrange(l, i)]
lemmas1 = [row.lemmas[j] for j in xrange(l, i)]
words2 = [row.words[j].lower() for j in xrange(i, r+1)]
lemmas2 = [row.lemmas[j] for j in xrange(i, r+1)]
for sublist1 in get_sublists(words1, 1):
for sublist2 in get_sublists(words2, 1):
yield prefix + 'WORD_[' + '_'.join(sublist1) + ']_[' + '_'.join(sublist2) + ']'
for sublist1 in get_sublists(lemmas1, 1):
for sublist2 in get_sublists(lemmas2, 1):
yield prefix + 'LEMMA_[' + '_'.join(sublist1) + ']_[' + '_'.join(sublist2) + ']'
def get_custom_features(row, dds):
phrase = ' '.join(row.words)
lemma_phrase = ' '.join(row.lemmas)
global_sentence_patterns = fr['global-sent-words']
for p in global_sentence_patterns:
if re.findall(p, phrase) or re.findall(p, lemma_phrase):
yield 'GLOB_SENT_PATTERN_%s' % (non_alnum.sub('_', p))
for feat in get_features_around(row, row.gene_wordidxs, 'GENE_'):
yield feat
for feat in get_features_around(row, row.pheno_wordidxs, 'PHENO_'):
yield feat
# for feat in get_cross_features_in(row, row.gene_wordidxs, row.pheno_wordidxs, 'CROSS_'):
# yield feat
def get_features_for_candidate(row):
"""Extract features for candidate mention- both generic ones from ddlib & custom features"""
features = []
f = Feature(doc_id=row.doc_id, section_id=row.section_id, relation_id=row.relation_id, name=None)
dds = util.create_ddlib_sentence(row)
# (1) GENERIC FEATURES from ddlib
gene_span = ddlib.Span(begin_word_id=row.gene_wordidxs[0], length=len(row.gene_wordidxs))
pheno_span = ddlib.Span(begin_word_id=row.pheno_wordidxs[0], length=len(row.pheno_wordidxs))
for feat in ddlib.get_generic_features_relation(dds, gene_span, pheno_span):
if take_feature(feat):
features.append(f._replace(name=feat))
features.extend([f._replace(name=feat) for feat in get_custom_features(row, dds)])
# these seem to be hurting (?)
# start_span = ddlib.Span(begin_word_id=0, length=4)
# for feat in ddlib.get_generic_features_mention(dds, start_span, length_bin_size=2):
# features.append(f._replace(name='START_SENT_%s' % feat))
# WITH these custom features, I get a little LESS precision and a little MORE recall (!)
# features += [f._replace(name=feat) for feat in create_ners_between(row.gene_wordidxs, row.pheno_wordidxs, row.ners)]
return features
# Helper for loading in manually defined keywords
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
if __name__ == '__main__':
ddlib.load_dictionary_map(fr['synonyms'])
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_candidate)
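# take_feature() drops any feature whose name matches one of the bad_features
# patterns anchored at the start of the string, for example:
#   take_feature('NGRAM_1_[to]')      -> False  (matches 'NGRAM_1_\[to\]')
#   take_feature('INV_NGRAM_1_[to]')  -> False  (auto-generated INV_ variant)
#   take_feature('NGRAM_2_[lead to]') -> True   (no blacklist pattern matches)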
| dd-genomics-master | code/genepheno_extract_features.py |
#!/usr/bin/env python
import extractor_util as util
from collections import namedtuple
import os
import sys
import ddlib
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('mention_id', 'text'),
('mention_wordidxs', 'int[]')])
OPTS = config.VARIANT['F']
def get_features_for_candidate(row):
"""Extract features for candidate mention- both generic ones from ddlib & custom features"""
features = []
dds = util.create_ddlib_sentence(row)
# (1) GENERIC FEATURES from ddlib
span = ddlib.Span(begin_word_id=row.mention_wordidxs[0], length=len(row.mention_wordidxs))
features += [(row.doc_id, row.section_id, row.mention_id, feat) \
for feat in ddlib.get_generic_features_mention(dds, span)]
# (2) Add the closest verb by raw distance
if OPTS.get('closest-verb'):
verb_idxs = [i for i,p in enumerate(row.poses) if p.startswith("VB")]
if len(verb_idxs) > 0:
dists = filter(lambda d : d[0] > 0, \
[(min([abs(i-j) for j in row.mention_wordidxs]), i) for i in verb_idxs])
if len(dists) > 0:
verb = row.lemmas[min(dists)[1]]
features.append((row.doc_id, row.section_id, row.mention_id, 'NEAREST_VERB_[%s]' % (verb,)))
return features
# Load in manually defined keywords
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
if __name__ == '__main__':
# XXX TODO Johannes: let's look into keywords for variants, maybe
# if OPTS.get('sentence-kws'):
# ddlib.load_dictionary(onto_path('manual/pheno_sentence_keywords.tsv'), dict_id='pheno_kws')
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_candidate)
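# With OPTS['closest-verb'] enabled, the nearest verb by raw token distance is
# emitted as an extra feature; e.g. for a hypothetical sentence
# "... this variant disrupts splicing ..." with the mention on "variant", the
# extractor would yield NEAREST_VERB_[disrupt] alongside the generic ddlib
# mention features.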
| dd-genomics-master | code/variant_extract_features.py |
import collections
import extractor_util as util
import data_util as dutil
import dep_util as deps
import random
import re
import sys
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('relation_id', 'text'),
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('gene_mention_id', 'text'),
('gene_name', 'text'),
('gene_wordidxs', 'int[]'),
('gene_is_correct', 'boolean'),
('pheno_mention_id', 'text'),
('pheno_entity', 'text'),
('pheno_wordidxs', 'int[]'),
('pheno_is_correct', 'boolean'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]')])
# This defines the output Relation object
Relation = collections.namedtuple('Relation', [
'dd_id',
'relation_id',
'doc_id',
'section_id',
'sent_id',
'gene_mention_id',
'gene_name',
'gene_wordidxs',
'pheno_mention_id',
'pheno_entity',
'pheno_wordidxs',
'is_correct',
'relation_supertype',
'relation_subtype'])
HPO_DAG = dutil.read_hpo_dag()
def replace_opts(opts, replaceList):
ret = {}
for name in opts:
strings = opts[name]
for (pattern, subst) in replaceList:
if name.endswith('rgx'):
subst = re.escape(subst)
strings = [s.replace(pattern, subst) for s in strings]
ret[name] = strings
return ret
def read_supervision():
"""Reads genepheno supervision data (from charite)."""
supervision_pairs = set()
with open('%s/onto/manual/charite_supervision.tsv' % util.APP_HOME) as f:
for line in f:
hpo_id, gene_name = line.strip().split('\t')
if hpo_id in HPO_DAG.edges:
hpo_ids = [hpo_id] + [parent for parent in HPO_DAG.edges[hpo_id]]
else:
hpo_ids = [hpo_id]
for h in hpo_ids:
supervision_pairs.add((h, gene_name))
return supervision_pairs
# count_g_or_p_false_none = 0
# count_adjacent_false_none = 0
CACHE = {}
def gp_between(gene_wordidxs, pheno_wordidxs, ners):
if gene_wordidxs[0] < pheno_wordidxs[0]:
start = max(gene_wordidxs) + 1
end = min(pheno_wordidxs) - 1
else:
start = max(pheno_wordidxs) + 1
end = min(gene_wordidxs) - 1
found_g = False
found_p = False
for i in xrange(start, end+1):
ner = ners[i]
if ner == 'NERGENE':
found_g = True
if ner == 'NERPHENO':
found_p = True
return found_g and found_p
charite_pos = 0
between_neg = 0
def config_supervise(r, row, pheno_entity, gene_name, gene, pheno,
phrase, between_phrase, \
lemma_phrase, between_phrase_lemmas, dep_dag, \
dep_path_between, gene_wordidxs,
charite_pairs, charite_allowed, VALS, SR):
global charite_pos
global between_neg
if SR.get('phrases-in-between'):
opts = SR['phrases-in-between']
orig_opts = opts.copy()
opts = replace_opts(opts, [('{{G}}', gene), ('{{P}}', pheno)])
for name, val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(between_phrase, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
return r._replace(is_correct=val, relation_supertype='PHRASE_BETWEEN_%s' % name, relation_subtype=non_alnum.sub('_', match))
match = util.rgx_mult_search(between_phrase_lemmas, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
return r._replace(is_correct=val, relation_supertype='PHRASE_BETWEEN_%s' % name, relation_subtype=non_alnum.sub('_', match))
if SR.get('phrases-in-sent'):
opts = SR['phrases-in-sent']
orig_opts = opts.copy()
opts = replace_opts(opts, [('{{G}}', gene), ('{{P}}', pheno)])
for name, val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(phrase, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
return r._replace(is_correct=val, relation_supertype='PHRASE_%s' % name, relation_subtype=non_alnum.sub('_', match))
match = util.rgx_mult_search(lemma_phrase, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
return r._replace(is_correct=val, relation_supertype='PHRASE_%s' % name, relation_subtype=non_alnum.sub('_', match))
if SR.get('primary-verb-modifiers') and dep_dag:
opts = SR['primary-verb-modifiers']
if dep_path_between:
verbs_between = [i for i in dep_path_between if row.poses[i].startswith("VB")]
if len(verbs_between) > 0:
for name, val in VALS:
mod_words = [i for i, x in enumerate(row.lemmas) if x in opts[name]]
mod_words += [i for i, x in enumerate(row.dep_paths) if x in opts['%s-dep-tag' % name]]
d = dep_dag.path_len_sets(verbs_between, mod_words)
if d and d < opts['max-dist'] + 1:
subtype = 'ModWords: ' + ' '.join([str(m) for m in mod_words]) + ', VerbsBetween: ' + ' '.join([str(m) for m in verbs_between]) + ', d: ' + str(d)
return r._replace(is_correct=val, relation_supertype='PRIMARY_VB_MOD_%s' % name, relation_subtype=non_alnum.sub('_', subtype))
if SR.get('dep-lemma-connectors') and dep_dag:
opts = SR['dep-lemma-connectors']
for name, val in VALS:
if dep_path_between:
connectors = [i for i, x in enumerate(row.lemmas) \
if i in dep_path_between and x in opts[name]]
if len(connectors) > 0:
return r._replace(is_correct=val,
relation_supertype='DEP_LEMMA_CONNECT_%s' % name,
relation_subtype=non_alnum.sub('_',
' '.join([str(x) for x in connectors])))
if SR.get('dep-lemma-neighbors') and dep_dag:
opts = SR['dep-lemma-neighbors']
for name, val in VALS:
for entity in ['g', 'p']:
lemmas = [i for i, x in enumerate(row.lemmas) if x in opts['%s-%s' % (name, entity)]]
d = dep_dag.path_len_sets(gene_wordidxs, lemmas)
if d and d < opts['max-dist'] + 1:
subtype = ' '.join([str(l) for l in lemmas]) + ', d: ' + str(d)
return r._replace(is_correct=val,
relation_supertype='DEP_LEMMA_NB_%s_%s' % (name, entity),
relation_subtype=non_alnum.sub('_', subtype))
if ('neg', False) in VALS:
if gp_between(row.gene_wordidxs, row.pheno_wordidxs, row.ners):
return r._replace(is_correct=False, relation_supertype='NEG_GP_BETWEEN')
if charite_allowed:
if SR.get('charite-all-pos-words'):
opts = SR['charite-all-pos-words']
match = util.rgx_mult_search(phrase + ' ' +
lemma_phrase, [],
opts, [], opts, flags=re.I)
if match and (pheno_entity, gene_name) in charite_pairs:
if not gp_between(row.gene_wordidxs, row.pheno_wordidxs, row.ners):
charite_pos += 1
return r._replace(is_correct=True, relation_supertype='CHARITE_SUP_WORDS',
relation_subtype=non_alnum.sub('_', match))
else:
return r._replace(is_correct=False, relation_supertype='CHARITE_NEG_GP_BETWEEN',
relation_subtype=non_alnum.sub('_', match))
return None
non_alnum = re.compile('[\W_]+')
def create_supervised_relation(row, superv_diff, SR, HF, charite_pairs, charite_allowed):
"""
Given a Row object with a sentence and several gene and pheno objects, create and
supervise a Relation output object for the ith gene and jth pheno objects
    Note: returns a single Relation object (or None if the candidate is filtered out)
Also includes an input for d = pos - neg supervision count, for neg supervision
"""
gene_mention_id = row.gene_mention_id
gene_name = row.gene_name
gene_wordidxs = row.gene_wordidxs
gene_is_correct = row.gene_is_correct
pheno_mention_id = row.pheno_mention_id
pheno_entity = row.pheno_entity
pheno_wordidxs = row.pheno_wordidxs
pheno_is_correct = row.pheno_is_correct
gene = row.gene_name
pheno = ' '.join([row.words[i] for i in row.pheno_wordidxs])
sv_synonyms = SR['sv_synonyms']
phrase = ' '.join(row.words)
lemma_phrase = ' '.join(row.lemmas)
b = sorted([gene_wordidxs[0], gene_wordidxs[-1], pheno_wordidxs[0], pheno_wordidxs[-1]])[1:-1]
assert b[0] + 1 < len(row.words), str((b[0] + 1, len(row.words), row.doc_id, row.section_id, row.sent_id, str(row.words)))
assert b[1] < len(row.words), str((b[1], len(row.words), row.doc_id, row.section_id, row.sent_id, str(row.words)))
between_phrase = ' '.join(row.words[i] for i in range(b[0] + 1, b[1]))
between_phrase_lemmas = ' '.join(row.lemmas[i] for i in range(b[0] + 1, b[1]))
# Create a dependencies DAG for the sentence
dep_dag = deps.DepPathDAG(row.dep_parents, row.dep_paths, row.words, max_path_len=HF['max-dep-path-dist'])
relation_id = '%s_%s' % (gene_mention_id, pheno_mention_id)
r = Relation(None, relation_id, row.doc_id, row.section_id, row.sent_id, gene_mention_id, gene_name, \
gene_wordidxs, pheno_mention_id, pheno_entity, pheno_wordidxs, None, None, None)
path_len_sets = dep_dag.path_len_sets(gene_wordidxs, pheno_wordidxs)
if not path_len_sets:
if SR.get('bad-dep-paths'):
return r._replace(is_correct=False, relation_supertype='BAD_OR_NO_DEP_PATH')
else:
return None
dep_path_between = frozenset(dep_dag.min_path_sets(gene_wordidxs, pheno_wordidxs)) if dep_dag else None
# distant supervision rules & hyperparameters
# NOTE: see config.py for all documentation & values
# global count_g_or_p_false_none
# global count_adjacent_false_none
if SR.get('g-or-p-false'):
opts = SR['g-or-p-false']
# The following line looks like it was written by a prosimian, but it is actually correct.
# Do not mess with the logic unless you know what you're doing.
# (Consider that Boolean variables can and will take the value ``None'' in this language.)
"""The above comment might be necessary for a Eukaryota, otherwise hopefully the below is self-explanatory"""
if gene_is_correct == False or pheno_is_correct == False:
if random.random() < opts['diff'] * superv_diff or random.random() < opts['rand']:
return r._replace(is_correct=False, relation_supertype='G_ANDOR_P_FALSE', relation_subtype='gene_is_correct: %s, pheno_is_correct: %s' % (gene_is_correct, pheno_is_correct))
else:
# count_g_or_p_false_none += 1
return None
if SR.get('adjacent-false'):
if re.search(r'[a-z]{3,}', between_phrase, flags=re.I) is None:
if random.random() < 0.5 * superv_diff or random.random() < 0.01:
st = non_alnum.sub('_', between_phrase)
return r._replace(is_correct=False, relation_supertype='G_P_ADJACENT', relation_subtype=st)
else:
# count_adjacent_false_none += 1
return None
gene_name = row.gene_name
VALS = [('neg', False)]
rv = config_supervise(r, row, pheno_entity, gene_name, gene, pheno,
phrase, between_phrase, \
lemma_phrase, between_phrase_lemmas, dep_dag, \
dep_path_between, gene_wordidxs,
charite_pairs, charite_allowed, VALS, SR)
if rv is not None:
return rv
VALS = [('pos', True)]
rv = config_supervise(r, row, pheno_entity, gene_name, gene, pheno,
phrase, between_phrase, \
lemma_phrase, between_phrase_lemmas, dep_dag, \
dep_path_between, gene_wordidxs,
charite_pairs, charite_allowed, VALS, SR)
if rv is not None:
return rv
# Return GP relation object
return r
def supervise(supervision_rules, hard_filters, charite_allowed):
# print >> sys.stderr, supervision_rules
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
pos_count = 0
neg_count = 0
# load in static data
CACHE['example-trees'] = {}
if charite_allowed:
CHARITE_PAIRS = read_supervision()
else:
CHARITE_PAIRS = []
for line in sys.stdin:
row = parser.parse_tsv_row(line)
relation = create_supervised_relation(row, superv_diff=pos_count - neg_count, SR=supervision_rules, HF=hard_filters,
charite_pairs=CHARITE_PAIRS, charite_allowed=charite_allowed)
if relation:
if relation.is_correct == True:
pos_count += 1
elif relation.is_correct == False:
neg_count += 1
util.print_tsv_output(relation)
# sys.stderr.write('count_g_or_p_false_none: %s\n' % count_g_or_p_false_none)
# sys.stderr.write('count_adjacent_false_none: %s\n' % count_adjacent_false_none)
| dd-genomics-master | code/genepheno_supervision_util.py |
# -*- coding: utf-8 -*-
# CONFIG
# The master configuration file for candidate extraction, distant supervision and feature
# extraction hyperparameters / configurations
import sys
import copy
if sys.version_info < (2, 7):
assert False, "Need Python version 2.7 at least"
BOOL_VALS = [('neg', False), ('pos', True)]
GENE_ACRONYMS = {
'vals' : BOOL_VALS,
# # Features
'F' : {
},
'HF' : {},
'SR' : {
'levenshtein_cutoff' : 0.2
}
}
NON_GENE_ACRONYMS = {
'vals' : BOOL_VALS,
# # Features
'F' : {
},
'HF' : {},
'SR' : {
'levenshtein_cutoff' : 0.2,
'short-words': { 'the', 'and', 'or', 'at', 'in', 'see', 'as',
'an', 'data', 'for', 'not', 'our', 'ie', 'to',
'eg', 'one', 'age', 'on', 'center', 'right', 'left',
'from', 'based', 'total', 'via', 'but', 'resp', 'no' },
'manual-pairs' : { ('FRAXA') : ['fragile X'],
('IL1', 'IL2', 'IL3', 'IL4', 'IL5', 'EL1', 'EL2', 'EL3', 'EL4', 'EL5') :
[ 'intracellular loop', 'extracellular loop'],
('CNF') : ['Finnish type'],
('SRN1') : ['nephrotic', 'segmental'],
('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'L1',
'L2', 'L3', 'L4', 'L5', 'S1', 'S2', 'S3', 'S4', 'S5') :
['vertebrae', 'spine', 'fusion', 'spina'],
('LCA10') : ['congenital amaurosis'],
('AR-JP') : [ 'parkinsonism' ],
('OCRL') : [ 'oculocerebrorenal syndrome of Lowe' ],
('PPD') : ['pallidopyramidal degeneration'],
('HDR') : ['sensorineural deafness']}
}
}
PHENO_ACRONYMS = {
'vals' : BOOL_VALS,
# # Features
'F' : {
},
'HF' : {},
'SR' : {
'bad-pheno-names': ['MIM'],
'difflib.pheno_cutoff' : 0.8,
'short-words': { 'the', 'and', 'or', 'at', 'in',
'see', 'as', 'an', 'data', 'for', 'not',
'our', 'ie', 'to', 'eg', 'one', 'age',
'on', 'center', 'right', 'left', 'from',
'based', 'total', 'via', 'but', 'resp', 'no' },
'rand-negs': True
},
}
# ## GENE
GENE = {
'vals' : BOOL_VALS,
# # Hard Filters (for candidate extraction)
'HF' : {
# Restricting the ENSEMBL mapping types we consider
# Types: CANONICAL_SYMBOL, NONCANONICAL_SYMBOL, REFSEQ
'ensembl-mapping-types' : ['CANONICAL_SYMBOL', 'NONCANONICAL_SYMBOL', 'ENSEMBL_ID', 'REFSEQ'],
'min-word-len': {
'CANONICAL_SYMBOL' : 2,
'NONCANONICAL_SYMBOL' : 3,
'ENSEMBL_ID' : 3,
'REFSEQ' : 3
},
'require-one-letter': True
},
# # Supervision Rules
'SR' : {
# Label some P mentions based on the toks / phrases that follow
'bad-genes': ['^ANOVA', '^MRI$', '^CO2$', '^gamma$', '^spatial$', '^tau$', '^Men$', \
'^ghrelin$', '^MIM$', '^NHS$', '^STD$', '^hole$', '^SDS$', '^p[0-9][0-9]$',
'^FTDP-17$', '^activin$', '^cbl[A-Z]$', '^LQT[0-9]*$'],
'manual-bad' : { ('FRAXA') : ['fragile X'],
('IL1', 'IL2', 'IL3', 'IL4', 'IL5', 'EL1', 'EL2', 'EL3', 'EL4', 'EL5') :
[ 'intracellular loop', 'extracellular loop'],
('GAA', 'AAA', 'AAG', 'GTA', 'AGA', 'ACT', 'TGT', 'ACT', 'GCG', 'GCA', 'GCT', 'CAT', 'CGA',
'AGT', 'ACG', 'GAT', 'GAA', 'AGT', 'CAC', 'AAT', 'TAT', 'TGC') : ['repeat', 'triplet'],
('TG', 'CG', 'AC', 'GA', 'GC', 'CT', 'TC') : ['repeat'],
('CNF') : ['Finnish type'],
('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'L1',
'L2', 'L3', 'L4', 'L5', 'S1', 'S2', 'S3', 'S4', 'S5') :
['vertebrae', 'spine', 'fusion', 'spina'],
('LCA10') : ['congenital amaurosis'],
('GAN') : [ 'primer' ],
('AR-JP') : [ 'parkinsonism' ],
('SRN1') : ['nephrotic', 'segmental'],
('OCA1') : ['oculocutaneous albinism'],
('PPD') : ['pallidopyramidal degeneration']},
'post-neighbor-match' : {
# 'pos' : ['_ mutation', 'mutation', '_ mutations', 'mutations', 'mutant', \
# 'mutants', 'gene', 'exon', 'residue', 'residues', 'coding', \
# 'isoform', 'isoforms', 'deletion', 'mRNA', 'homozyous'],
'pos': ['gene'],
'neg' : ['+', 'pathway', 'inhibitor', 'inhibitors', 'cell', 'cells', 'syndrome', 'domain'],
'pos-rgx' : [],
      # NOTE: the single-character '≤ \d+' variant is still missing here and should be added
      # alongside the ASCII '<=' form below
      'neg-rgx' : [r'cell(s|\slines?)', '< \d+', '<= \d+', '≥ \d+', '> \d+', '>= \d+']
},
'pre-neighbor-match' : {
'pos' : ['gene', 'mutations in'],
'neg' : ['encoding'],
'pos-rgx': [],
'neg-rgx': []
},
'neighbor-match': {
'pos' : ['mutations'],
'neg' : ['protein'],
'pos-rgx': [],
'neg-rgx': []
},
'phrases-in-sent': {
'pos' : [],
'neg' : ['serum', 'level', 'elevated', 'plasma',
'accumulate', 'accumulation', 'deposition'],
'pos-rgx': [],
'neg-rgx': []
},
'pubmed-paper-genes-true' : True,
'complicated-gene-names-true': True,
# all canonical and noncanonical
'all-symbols-true': False,
'all-canonical-true': True,
'rand-negs': True
},
# # Features
'F' : {
}
}
# ## VARIANT
VARIANT = {
'vals' : BOOL_VALS,
'HF' : {},
'SR' : {},
'F' : {}
}
# ## GENE-VARIANT
GENE_VARIANT = {
'vals' : BOOL_VALS,
'HF' : {}
}
# ## PHENO
PHENO = {
'vals' : BOOL_VALS,
# # Hard Filters (for candidate extraction)
'HF' : {
# Maximum n-gram length to consider
'max-len' : 8,
# Minimum word length to consider as a non-stopword
'min-word-len' : 3,
# Do not extract mentions which contain these toks- e.g. split on these
'split-list' : [',', ';'],
# Also split on above a certain number of consecutive stopwords
'split-max-stops' : 2,
# Consider permuted matches to HPO words
'permuted' : True,
    # Consider exact matches with one omitted *interior* word
'omitted-interior' : True,
'rand-negs' : True,
'disallowed-phrases' : ['cancer', 'carcinoma']
},
# # Supervision Rules
'SR' : {
'bad-pheno-names': [],
'bad-phenos': ['HP:0001677', 'HP:0002092', 'HP:0100753', 'HP:0002511'],
# Label some P mentions based on the toks / phrases that follow
'post-match' : {
'pos' : [],
'neg' : [],
'pos-rgx' : [],
'neg-rgx' : [r'cell(s|\slines?)']
},
# Supervise with MeSH- optionally consider more specific terms also true (recommended)
'mesh-supervise' : True,
'mesh-specific-true' : True,
# Subsample exact matches which are also english words
'exact-english-word' : {
'p' : 0.1
},
# Get random negative examples which are phrases of consecutive non-stopwords w certain
# POS tags
'rand-negs' : {
'pos-tag-rgx' : r'NN.?|JJ.?'
}
},
# # Features
'F' : {
# Add the closest verb by raw distance
'closest-verb' : True,
# Incorporate user keyword dictionaries via DDLIB
'sentence-kws' : True
}
}
# ## GENE-PHENO
GENE_PHENO = {
# # Hard Filters (for candidate extraction)
'HF' : {
# Upper-bound the max min-dependency-path length between G and P
'max-dep-path-dist' : 20,
},
'vals' : BOOL_VALS,
'SR': {
# Whether to include GP pairs with no or long dep path links as neg. supervision (vs. skip)
'bad-dep-paths' : False,
# Subsample GP pairs where G and/or P is neg. example as neg. GP supervision
'g-or-p-false' : {'diff' : 0, 'rand' : 0},
# Supervise G adjacent to P as false
'adjacent-false' : True,
# Supervise as T/F based on phrases (exact or regex) only between the G and P mentions
'phrases-in-between' : False,
# Try to find the verb connecting the G and P, and supervise based on modifiers
# (e.g. negation, etc) of this verb
'primary-verb-modifiers' : {
'max-dist' : 1,
'pos' : [],
'neg' : [ # 'might'
],
'pos-dep-tag' : [],
'neg-dep-tag' : ['neg']
},
# Supervise GP pairs as T/F based on dependency-path neighbor lemmas of G and P
'dep-lemma-neighbors' : {
'max-dist' : 1,
'pos-g' : [],
'pos-p' : [],
'neg-g' : ['express',
'expression',
'coexpression',
'coexpress',
'co-expression',
'co-express',
'overexpress',
'overexpression',
'over-expression',
'over-express',
'somatic',
'infection',
'interacts',
'regulate',
'up-regulate',
'upregulate',
'down-regulate',
'downregulate',
'production',
'product',
'increased',
'increase',
'increas',
'exclude',
'inclusion',
],
'neg-p' : ['without', 'except']
},
# Label T all GP pairs in Charite dataset (and that haven't already been labeled T/F)
'charite-all-pos-words': ['(disrupt|mutat|delet|duplicat|truncat|SNP).*(caus|responsible for)',
'{{P}}.*secondary to.*{{G}}',
'identified.*(mutat|delet|duplicat|truncat|SNP).*{{G}}.*{{P}}',
'mutations.*{{G}}.*reported.*{{P}}',
'identified.*{{G}}.*(mutat|delet|duplicat|truncat|SNP).*{{P}}',
'{{P}}.*result.*from.*{{G}}',
'{{P}}.*caused by.*{{G}}',
'{{G}}.*result.*in.*{{P}}',
'{{G}}.*presenting.*{{P}}',
'{{G}}.*characterized.*by{{P}}',
'{{G}}.*patients.*with.*{{P}}',
'{{P}}.*due to.*{{G}}'],
# Supervise GP pairs based on words (e.g. esp verbs) on the min dep path connecting them
'dep-lemma-connectors' : {
'pos' : [],
'neg' : []
},
'phrases-in-sent' : {
'pos' : [],
'neg' : ['possible association',
'to investigate',
'could reveal',
'to determine',
'could not determine',
'unclear',
'hypothesize',
'to evaluate',
'plasma',
'expression',
'to detect',
'mouse',
'mice',
'to find out',
'inconclusive',
'further analysis',
'but not',
#'deficiency',
'activity',
'unravel',
'fish',
'sheep',
'cattle',
'dachshund',
'plant',
'algorithm',
'odds ratio',
],
'pos-rgx' : [],
'neg-rgx' : [
'{{P}}.*not.*cause.*{{G}}',
'{{G}}.*not.*cause.*{{P}}',
'\?\s*$',
'to determine',
'to evaluate',
'to investigate',
'we investigated',
'we examined',
'were examined',
'to examine',
'to test',
'to clarify',
'we requested',
'to study',
'indicating that',
'analysis.*was performed',
'\d+ h ',
'to assess',
'^\s*here we define',
'whether',
'unlikely.*{{G}}.*{{P}}',
'{{P}}.*not due to.*{{G}}',
'{{G}}.*unlikely.*cause.*{{P}}',
'{{P}}.*unlikely.*cause.*{{G}}',
'{{G}}.*excluded.*cause.*{{P}}',
'{{P}}.*excluded.*cause.*{{G}}',
'{{G}}.*can.*mimic.*{{P}}',
'{{G}}.*linked to.*{{P}}',
'attracted.*interest',
'{{P}}.*, while.*{{G}}',
'{{P}}.*, whereas.*{{G}}',
'{{G}}.*, while.*{{P}}',
'{{G}}.*, whereas.*{{P}}',
'{{G}}.*proposed.*{{P}}',
'target',
'{{G}}.*to determine.*{{P}}',
'{{G}}.*caus.*deregulation.*{{P}}',
'{{P}}.*caus.*deregulation.*{{G}}',
'dysregulation of.*{{G}}',
'{{G}}.*modifier of.*gene.*{{P}}',
'{{G}}.*except.*{{P}}',
'{{G}}.*excluded.*cause.*{{P}}',
'{{G}}.*should.*considered.*{{P}}',
'{{G}} deficiency',
'upregulation of.*{{G}}',
'without {{P}}',
]
},
'example-sentences': {
'pos': [],
'neg': []
},
'sv_synonyms': {'disease': set(['disease', 'disorder']),
'mutation': set(['mutat', 'polymorphism', 'delet', 'duplicat',
'truncat', 'SNP']),
'patient': set(['case', 'patient', 'subject', 'family', 'boy', 'girl']),
'present': set(['present', 'display', 'characterize']),
'mut_type': set(['nonsense', 'missense', 'frameshift']),
'identify': set(['identify', 'report', 'find', 'detect']),
'cause': set(['caus', 'result']),
'inheritance': set(['recessive', 'dominant'])},
},
# # Features
'F' : {
'synonyms': {'disease': set(['disease', 'disorder']),
'mutation': set(['mutation', 'missense', 'polymorphism', 'deletion', 'duplication',
'truncation', 'SNP', 'frameshift', 'nonsense']),
'patient': set(['case', 'patient', 'subject', 'family', 'boy', 'girl']),
'present': set(['present', 'display', 'characterize']),
'mut_type': set(['nonsense', 'missense', 'frameshift']),
'identify': set(['identify', 'report', 'find', 'detect']),
'cause': set(['cause', 'result']),
'inheritance': set(['recessive', 'dominant'])},
'global-sent-words' : ['to determine',
'to evaluate',
'to investigate',
'we investigated',
'we examined',
'were examined',
'to examine',
'to test',
'to clarify',
'we requested',
'to study',
'fish',
'sheep',
'cattle',
'dachshund',
'plant',
'mice',
'mouse',
'but not',
],
},
}
CAUSATION_SR = {
'example-sentences': {
'pos': [('onto/manual/true_causation_sentences1.tsv', 18),
('onto/manual/true_causation_sentences2.tsv', 27)],
'neg': []
},
# Supervise as T/F based on phrases (exact or regex) anywhere in sentence
'phrases-in-sent' : {
'pos' : [],
'neg' : [ # 'risk',
'variance',
'gwas',
'association study',
'possible association',
'to investigate',
'could reveal',
'to determine',
'unclear',
'hypothesize',
'to evaluate',
'plasma',
'expression',
'to detect',
'to find out',
'inconclusive',
'further analysis',
'association',
],
'pos-rgx' : ['(disrupt|expan.*repeat|mutat|delet|duplicat|truncat|SNP|polymorphism).*{{G}}.*cause.*{{P}}',
'(disrupt|expan.*repeat|mutat|delet|duplicat|truncat|SNP|polymorphism).*{{G}}.*(in|described).*patient.*{{P}}',
'{{G}}.*(in|described).*patient.*(expan.*repeat|mutat|delet|duplicat|truncat|SNP|polymorphism).*{{P}}',
'{{G}}.*present with.*{{P}}.*',
'(single nucleotide polymorphisms|SNPs) in {{G}}.*cause.*{{P}}',
'(mutation|deletion).*{{G}}.*described.*patients.*{{P}}',
'{{P}}.*secondary to.*{{G}}',
'identified.*mutations.*{{G}}.*{{P}}',
'mutations.*{{G}}.*reported.*{{P}}',
'identified.*{{G}}.*mutations.*{{P}}',
'{{P}}.*consequence of.*{{G}}',
'{{P}}.*caused by.*{{G}}',
'{{P}}.*result.*from.*{{G}}',
'{{P}}.*caused by.*{{G}}',
'{{G}}.*result.*in.*{{P}}',
'{{G}}.*cause.*of.*{{P}}',
],
'neg-rgx' : [
'{{G}}.*associated.*{{P}}',
'{{P}}.*associated.*{{G}}',
'associated.*{{G}}.*with.*{{P}}',
'associated with.*{{P}}'],
},
# Supervise GP pairs based on words (e.g. esp verbs) on the min dep path connecting them
'dep-lemma-connectors' : {
'pos': [],
'neg' : []
},
}
def extend(map1, map2):
rv = {}
for item in map1:
value = map1[item]
if isinstance(value, dict):
if item in map2:
rv[item] = extend(map1[item], map2[item])
else:
rv[item] = map1[item]
else:
rv[item] = map1[item]
if item in map2:
for v in map2[item]:
if v not in rv[item]:
rv[item].append(v)
return rv
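# NOTE: extend() recursively merges nested dicts and appends any missing list elements from map2
# into map1's lists *in place* (rv reuses map1's sub-objects), and keys that appear only in map2
# are ignored. For example:
#   extend({'a': {'x': [1]}}, {'a': {'x': [2]}})  ->  {'a': {'x': [1, 2]}}
# This is why GENE_PHENO is deep-copied below before being extended with CAUSATION_SR.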
GENE_PHENO_CAUSATION = copy.deepcopy(GENE_PHENO)
GENE_PHENO_CAUSATION['SR'] = extend(GENE_PHENO_CAUSATION['SR'], CAUSATION_SR)
# ## GENE-VARIANT-PHENO
GENE_VARIANT_PHENO = {
# # Hard Filters (for candidate extraction)
'HF' : {
# Upper-bound the max min-dependency-path length between G and P
'max-dep-path-dist' : 10,
# Only consider the closest GP pairs for duplicate GP pairs
'take-best-only-dups' : False,
# Only consider the closest GP pairs by dep-path distance such that all G,P are covered
'take-best-only' : False
},
# # Supervision Rules
'SR' : {
# Subsample GP pairs where G and/or P is neg. example as neg. GP supervision
'gv-or-p-false' : {'diff' : 0, 'rand' : 0.01},
# Supervise with ClinVar
'clinvar-sup' : True
},
# # Features
'F' : {}
}
| dd-genomics-master | code/config.py |
#!/usr/bin/env python
import extractor_util as util
from collections import namedtuple
import os
import sys
import ddlib
parser = util.RowParser([
('relation_id', 'text'),
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('genevar_mention_id', 'text'),
('genevar_wordidxs', 'int[]'),
('pheno_mention_id', 'text'),
('pheno_wordidxs', 'int[]'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]')])
Feature = namedtuple('Feature', ['doc_id', 'section_id', 'relation_id', 'name'])
def get_features_for_candidate(row):
"""Extract features for candidate mention- both generic ones from ddlib & custom features"""
features = []
f = Feature(doc_id=row.doc_id, section_id=row.section_id, relation_id=row.relation_id, name=None)
dds = util.create_ddlib_sentence(row)
# (1) GENERIC FEATURES from ddlib
genevar_span = ddlib.Span(begin_word_id=row.genevar_wordidxs[0], length=len(row.genevar_wordidxs))
pheno_span = ddlib.Span(begin_word_id=row.pheno_wordidxs[0], length=len(row.pheno_wordidxs))
features += [f._replace(name=feat) \
for feat in ddlib.get_generic_features_relation(dds, genevar_span, pheno_span)]
return features
if __name__ == '__main__':
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_candidate)
| dd-genomics-master | code/variantpheno_extract_features.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import extractor_util as util
import data_util as dutil
import random
import re
import os
import sys
import string
import config
import dep_util as deps
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]')])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'entity',
'variant_type',
'ivsNum',
'pos',
'posPlus',
'fromPos',
'toPos',
'seq',
'fromSeq',
'toSeq',
'words',
'is_correct'])
### CANDIDATE EXTRACTION ###
HF = config.VARIANT['HF']
### VARIANT ###
a = r'[cgrnm]'
i = r'IVS'
b = r'ATCGatcgu'
s1 = r'0-9\_\.\:'
s2 = r'\/>\?\(\)\[\]\;\:\*\_\-\+0-9'
s3 = r'\/><\?\(\)\[\]\;\:\*\_\-\+0-9'
b1 = r'[%s]' % b
bs1 = r'[%s%s]' % (b,s1)
bs2 = r'[%s %s]' % (b,s2)
bs3 = r'[%s %s]' % (b,s3)
c1 = r'(inv|del|ins|dup|tri|qua|con|delins|indel)'
c2 = r'(del|ins|dup|tri|qua|con|delins|indel)'
c3 = r'([Ii]nv|[Dd]el|[Ii]ns|[Dd]up|[Tt]ri|[Qq]ua|[Cc]on|[Dd]elins|[Ii]ndel|fsX|fsx|fs)'
p = r'CISQMNPKDTFAGHLRWVEYX'
ps2 = r'[%s %s]' % (p, s2)
ps3 = r'[%s %s]' % (p, s3)
d = '[ATCGRYUatgc]'
aa_long_to_short = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
aa_camel = {}
for aa in aa_long_to_short:
aa_camel[aa[0] + aa[1].lower() + aa[2].lower()] = aa_long_to_short[aa]
aal = '(' + '|'.join([x for x in aa_long_to_short] + [x for x in aa_camel]) + ')'
# regexes from tmVar paper
# See Table 3 in http://bioinformatics.oxfordjournals.org/content/early/2013/04/04/bioinformatics.btt156.full.pdf
def comp_gv_rgx():
# A bit silly, but copy from pdf wasn't working, and this format is simple to copy & debug...
# regexes correspond to gene ('g') or protein ('p') variants
GV_RGXS = [
(r'(%s\.%s+%s%s*)' % (a,bs3,c1,bs1), 'g'),
(r'(IVS%s+%s%s*)' % (bs3,c2,bs1), 'g'),
(r'((%s\.|%s)%s+)' % (a,i,bs2), 'g'),
(r'((%s\.)?%s[0-9]+%s)' % (a,b1,b1), 'g'),
(r'([0-9]+%s%s*)' % (c2,b1), 'g'),
(r'([p]\.%s+%s%s*)' % (ps3,c3,ps3), 'p'),
(r'([p]\.%s+)' % ps2, 'p'),
(r'([p]\.[A-Z][a-z]{0,2}[\W\-]{0,1}[0-9]+[\W\-]{0,1}([A-Z][a-z]{0,2}|(fs|fsx|fsX)))', 'p')]
# Just return as one giant regex for now
return [gvr[0] for gvr in GV_RGXS]
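# Illustrative (non-exhaustive) examples of strings the patterns above are meant to pick up,
# following the tmVar-style notation: DNA-level variants such as 'c.35delG', 'c.76_78delACT'
# or 'IVS2+1G>A', and protein-level variants such as 'p.R117H' or 'p.Arg117His'.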
def extract_candidate_mentions(row, gv_rgxs):
mentions = []
covered = []
for i,word in enumerate(row.words):
if i in covered:
continue
for gv_rgx in gv_rgxs:
gv_rgx = '^(%s)$' % gv_rgx
if re.match(r'[cngrmp]\.|IVS.*', word):
for j in reversed(range(i+1, min(i+7, len(row.words)))):
words = row.words[i:j]
if re.match(gv_rgx, ''.join(words), flags=re.I):
mentions.append(Mention(
dd_id=None,
doc_id=row.doc_id,
section_id=row.section_id,
sent_id=row.sent_id,
wordidxs=range(i,j),
mention_id='%s_%s_%s_%s_%s_GV' % (row.doc_id, row.section_id, row.sent_id, i, j),
mention_supertype='GV_RGX_MATCH_%d' % (j - i),
mention_subtype=gv_rgx.replace('|', '/').replace('\\', '/'),
entity=''.join(words),
variant_type=None,
ivsNum=None,
pos=None,
posPlus=None,
fromPos=None,
toPos=None,
seq=None,
fromSeq=None,
toSeq=None,
words=words,
is_correct=True))
covered.extend(range(i,j))
break
else:
continue
break
return mentions
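# extract_relative_coords (below) re-parses a mention's entity string to fill in the structured
# fields. Illustrative expectations: an entity like 'c.76A>G' should come back as
# variant_type='coding_SNP' with pos='76', fromSeq='A', toSeq='G', and 'c.76_78delACT' as
# variant_type='coding_del' with fromPos='76', toPos='78', seq='ACT'; entities that match none
# of the patterns are returned unchanged.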
def extract_relative_coords(mention):
  m = re.match(r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))([\+\-\*][0-9]+)?(%s)[->/→](%s)' % (d, d), mention.entity)
if m:
if mention.entity.startswith('c.'):
vtype = 'coding_range_mut'
elif mention.entity.startswith('g.'):
vtype = 'gene_range_mut'
elif mention.entity.startswith('r.'):
vtype = 'RNA_range_mut'
elif mention.entity.startswith('n.'):
vtype = 'noncoding_range_mut'
elif mention.entity.startswith('m.'):
vtype = 'mitochondrial_range_mut'
else:
      vtype = 'DNA_range_mut'
fromPos = m.group(2)
toPos = m.group(4)
fromSeq = m.group(6)
toSeq = m.group(7)
mention = mention._replace(variant_type = vtype, fromPos = fromPos, toPos = toPos, posPlus = m.group(5), fromSeq = fromSeq, toSeq = toSeq)
return mention
m = re.match(r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))?([\+\-\*][0-9]+)?(%s)(%s+)?' % (c3, d), mention.entity)
if m:
mtype = m.group(6)
if mention.entity.startswith('c.'):
vtype = 'coding_%s' % mtype
elif mention.entity.startswith('g.'):
vtype = 'gene_%s' % mtype
elif mention.entity.startswith('r.'):
vtype = 'RNA_%s' % mtype
elif mention.entity.startswith('n.'):
vtype = 'noncoding_%s' % mtype
elif mention.entity.startswith('m.'):
vtype = 'mitochondrial_%s' % mtype
else:
vtype = 'DNA_%s' % mtype
fromPos = m.group(2)
toPos = m.group(4)
seq = m.group(8)
if seq:
seq = seq.upper()
if toPos is not None and m.group(3) != '':
mention = mention._replace(variant_type = vtype, fromPos = fromPos, toPos = toPos, posPlus = m.group(5), seq = seq)
else:
mention = mention._replace(variant_type = vtype, pos = fromPos, posPlus = m.group(5), seq = seq)
return mention
  m = re.match(r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)[->/→](%s)' % (d, d), mention.entity)
if m:
if mention.entity.startswith('c.'):
vtype = 'coding_SNP'
if mention.entity.startswith('g.'):
vtype = 'gene_SNP'
if mention.entity.startswith('r.'):
vtype = 'RNA_SNP'
if mention.entity.startswith('n.'):
vtype = 'noncoding_SNP'
if mention.entity.startswith('m.'):
vtype = 'mitochondrial_SNP'
mention = mention._replace(variant_type = vtype, pos = m.group(1), posPlus = m.group(2), fromSeq = m.group(3).upper(), toSeq = m.group(4).upper())
return mention
m = re.match(r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)' % (d), mention.entity)
if m:
if mention.entity.startswith('c.'):
vtype = 'coding_SNP_from_U'
if mention.entity.startswith('g.'):
vtype = 'gene_SNP_from_U'
if mention.entity.startswith('r.'):
vtype = 'RNA_SNP_from_U'
if mention.entity.startswith('n.'):
vtype = 'noncoding_SNP_from_U'
if mention.entity.startswith('m.'):
vtype = 'mitochondrial_SNP_from_U'
mention = mention._replace(variant_type = vtype, pos = m.group(1), posPlus = m.group(2), fromSeq = 'U', toSeq = m.group(3).upper())
return mention
  m = re.match(r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s)-*[>/→](%s)' % (d, d), mention.entity)
if m:
vtype = 'IVS_SNP'
ivsNum = m.group(1)
mention = mention._replace(variant_type = vtype, ivsNum = ivsNum, posPlus = m.group(2), fromSeq = m.group(3).upper(), toSeq = m.group(4).upper())
return mention
m = re.match(r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s?)(%s+)' % (c3, d), mention.entity)
if m:
mtype = m.group(3)
if mtype == '' or mtype is None:
mtype = 'mutation'
vtype = 'IVS_%s_from_U' % mtype
ivsNum = m.group(1)
toSeq = m.group(5)
if toSeq:
toSeq = toSeq.upper()
mention = mention._replace(variant_type = vtype, ivsNum = ivsNum, posPlus = m.group(2), fromSeq = 'U', toSeq = toSeq)
return mention
m = re.match(r'^p\.(([%s])|%s)([0-9]+)(([%s])|%s)' % (p, aal, p, aal), mention.entity)
if m:
fromSeq = m.group(1)
if fromSeq.upper() in aa_long_to_short:
fromSeq = aa_long_to_short[fromSeq.upper()]
toSeq = m.group(5)
if toSeq.upper() in aa_long_to_short:
toSeq = aa_long_to_short[toSeq.upper()]
mention = mention._replace(variant_type = 'protein_SAP', pos = m.group(4), fromSeq = fromSeq, toSeq = toSeq)
return mention
m = re.match(r'^p\.(([%s])|%s)([0-9]+)[_]+(([%s])|%s)([0-9]+)(%s)' % (p, aal, p, aal, c3), mention.entity)
if m:
fromSeq = m.group(1)
toSeq = m.group(5)
fromPos = m.group(4)
toPos = m.group(8)
mtype = m.group(9)
if fromSeq.upper() in aa_long_to_short:
fromSeq = aa_long_to_short[fromSeq.upper()]
if toSeq.upper() in aa_long_to_short:
toSeq = aa_long_to_short[toSeq.upper()]
mention = mention._replace(variant_type = 'protein_%s' % mtype, fromSeq = fromSeq, toSeq = toSeq, fromPos = fromPos, toPos = toPos)
return mention
m = re.match(r'^p\.(([%s])|%s)([0-9]+)(%s)' % (p, aal, c3), mention.entity)
if m:
seq = m.group(1)
mtype = m.group(5)
if seq.upper() in aa_long_to_short:
seq = aa_long_to_short[seq.upper()]
    mention = mention._replace(variant_type = 'protein_%s_mut' % mtype, pos = m.group(4), seq = seq)
return mention
m = re.match(r'^(%s)([0-9]+)(%s)' % (d, d), mention.entity)
if m:
mention = mention._replace(variant_type = 'DNA_SNP', pos = m.group(2), fromSeq = m.group(1), toSeq = m.group(3))
return mention
return mention
if __name__ == '__main__':
GV_RGXs = comp_gv_rgx()
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# Find candidate mentions & supervise
mentions_without_coords = extract_candidate_mentions(row, GV_RGXs)
mentions = []
for mention_without_coords in mentions_without_coords:
mention = extract_relative_coords(mention_without_coords)
mentions.append(mention)
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/variant_extract_candidates.py |
#!/usr/bin/env python
import collections
import extractor_util as util
import re
import sys
CACHE = dict() # Cache results of disk I/O
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text'),
('dep_paths', 'text'),
('dep_parents', 'text'),
('gene_wordidxs', 'int[][]'),
('gene_supertypes', 'text[]'),
('pheno_wordidxs', 'int[][]'),
('pheno_supertypes', 'text[]')])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'doc_id',
'section_id',
'sent_id',
'words',
'words_ner',
'lemmas',
'lemmas_ner',
'poses',
'ners',
'dep_paths',
'dep_parents'])
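# create_ners (below) builds an NER-tagged copy of the sentence for kept mentions only (random
# negatives, BAD_* and 'O' supertypes are skipped): the first word of each kept mention gets an
# 'NERPHENO'/'NERGENE' tag in the ners column, and every word of the mention is rewritten to
# 'NERPHENO'/'nerpheno' or 'NERGENE'/'nergene' in the word/lemma copies, with the word copy
# keeping the phenotype tag when a token belongs to both.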
def create_ners(row):
m = Mention(row.doc_id, row.section_id, row.sent_id, '|^|'.join(row.words), None, \
'|^|'.join(row.lemmas), None, row.poses, None, row.dep_paths, \
row.dep_parents)
words_ner = [word for word in row.words]
lemmas_ner = [lemma for lemma in row.lemmas]
ners = ['O' for _ in xrange(len(row.words))]
for i, wordidxs in enumerate(row.pheno_wordidxs):
pheno_supertype = row.pheno_supertypes[i]
if re.findall('RAND_NEG', pheno_supertype) or \
re.findall('BAD', pheno_supertype) or pheno_supertype == 'O':
continue
ners[wordidxs[0]] = 'NERPHENO'
for wordidx in wordidxs:
words_ner[wordidx] = 'NERPHENO'
lemmas_ner[wordidx] = 'nerpheno'
for i, wordidxs in enumerate(row.gene_wordidxs):
gene_supertype = row.gene_supertypes[i]
if gene_supertype == 'BAD_GENE' or gene_supertype == 'MANUAL_BAD' or gene_supertype == 'RAND_WORD_NOT_GENE_SYMBOL' \
or gene_supertype == 'ABBREVIATION' or gene_supertype == 'ALL_UPPER_NOT_GENE_SYMBOL' or gene_supertype == 'O':
continue
ners[wordidxs[0]] = 'NERGENE'
for wordidx in wordidxs:
if words_ner[wordidx] != 'NERPHENO':
words_ner[wordidx] = 'NERGENE'
lemmas_ner[wordidx] = 'nergene'
return m._replace(ners='|^|'.join(ners), words_ner='|^|'.join(words_ner),
lemmas_ner='|^|'.join(lemmas_ner))
if __name__ == '__main__':
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
pos_count = 0
neg_count = 0
for line in sys.stdin:
row = parser.parse_tsv_row(line)
out_row = create_ners(row)
util.print_tsv_output(out_row)
| dd-genomics-master | code/sentences_input_ner_extraction.py |
"""Miscellaneous shared tools for maniuplating data used in the UDFs"""
from collections import defaultdict, namedtuple
import extractor_util as util
import os
import re
import sys
APP_HOME = os.environ['GDD_HOME']
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
class Dag:
"""Class representing a directed acyclic graph."""
def __init__(self, nodes, edges):
self.nodes = nodes
self.node_set = set(nodes)
self.edges = edges # edges is dict mapping child to list of parents
self._has_child_memoizer = defaultdict(dict)
def has_child(self, parent, child):
"""Check if child is a child of parent."""
if child not in self.node_set:
raise ValueError('"%s" not in the DAG.' % child)
if parent not in self.node_set:
raise ValueError('"%s" not in the DAG.' % parent)
if child == parent:
return True
if child in self._has_child_memoizer[parent]:
return self._has_child_memoizer[parent][child]
for node in self.edges[child]:
if self.has_child(parent, node):
self._has_child_memoizer[parent][child] = True
return True
self._has_child_memoizer[parent][child] = False
return False
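# Minimal illustration of the Dag API (hypothetical ids): given
#   dag = Dag(['HP:1', 'HP:2'], {'HP:1': [], 'HP:2': ['HP:1']})
# dag.has_child('HP:1', 'HP:2') is True (HP:2 descends from HP:1), dag.has_child('HP:2', 'HP:1')
# is False, a node counts as its own descendant, and unknown ids raise ValueError.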
def read_hpo_dag():
with open('%s/onto/data/hpo_phenotypes.tsv' % APP_HOME) as f:
nodes = []
edges = {}
for line in f:
toks = line.strip(' \r\n').split('\t')
child = toks[0]
nodes.append(child)
parents_str = toks[5]
if parents_str:
edges[child] = parents_str.split('|')
else:
edges[child] = []
return Dag(nodes, edges)
def get_hpo_phenos(hpo_dag, parent='HP:0000118', exclude_parents=['HP:0002664', 'HP:0002527', 'HP:0012125']):
"""Get only the children of 'Phenotypic Abnormality' (HP:0000118), excluding all children of neoplasm (0002664) and falls (0002527) and prostate cancer(HP:0012125). Now also excluding coronary artery disease, alzheimer's and pulmonary artery hypertension, and schizophrenia"""
return [hpo_term for hpo_term in hpo_dag.nodes
if (hpo_dag.has_child(parent, hpo_term)
and all([not hpo_dag.has_child(p, hpo_term) for p in exclude_parents]))]
def read_hpo_synonyms(idx=2):
syn_dict = dict()
with open('%s/onto/data/hpo_phenotypes.tsv' % APP_HOME) as f:
for line in f:
toks = line.strip(' \r\n').split('\t')
node = toks[0]
syn_str = toks[idx]
syn_dict[node] = syn_str.split('|')
return syn_dict
def load_hgvs_to_hpo():
hgvs_to_hpo = defaultdict(set)
with open(onto_path('data/hgvs_to_hpo.tsv'), 'rb') as f:
for line in f:
hgvs_id, hpo_id = line.strip().split('\t')
      hgvs_to_hpo[hgvs_id].add(hpo_id)
return hgvs_to_hpo
def load_pmid_to_hpo():
"""Load map from Pubmed ID to HPO term (via MeSH)"""
pmid_to_hpo = defaultdict(set)
# XXX HACK Johannes. TODO. Get rid of this file, load the full table into the database
with open(onto_path('data/hpo_to_pmid_via_mesh_with_doi.tsv')) as f:
for line in f:
hpo_id, pmid = line.strip().split('\t')
pmid_to_hpo[pmid].add(hpo_id)
return pmid_to_hpo
def get_pubmed_id_for_doc(doc_id):
"""Because our doc_id is currently just the PMID, and we intend to KEEP it this way, return the doc_id here"""
return doc_id
def read_doi_to_pmid():
"""Reads map from DOI to PMID, for PLoS docuemnts."""
doi_to_pmid = dict()
with open('%s/onto/data/plos_doi_to_pmid.tsv' % APP_HOME) as f:
for line in f:
doi, pmid = line.strip().split('\t')
doi_to_pmid[doi] = pmid
return doi_to_pmid
def gene_symbol_to_ensembl_id_map():
"""Maps a gene symbol from CHARITE -> ensembl ID"""
with open('%s/onto/data/ensembl_genes.tsv' % util.APP_HOME) as f:
eid_map = defaultdict(set)
for line in f:
eid_canonical, gene_name, mapping_type = line.rstrip('\n').split('\t')
eid = eid_canonical.split(':')[0]
canonical_name = eid_canonical.split(':')[1]
eid_map[gene_name].add((eid, canonical_name, mapping_type))
# XXX HACK Johannes: Maybe we shouldn't decide on case sensitivity in this helper method (?)
# if mapping_type == 'CANONICAL_SYMBOL':
# eid_map[gene_name.lower()].add((eid, canonical_name, mapping_type))
return eid_map
def get_parents(bottom_id, dag, root_id='HP:0000118'):
if bottom_id == root_id:
return set([bottom_id])
rv = set()
if bottom_id in dag.edges:
for parent in dag.edges[bottom_id]:
rv |= get_parents(parent, dag)
rv.add(bottom_id)
return rv
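# Rough behaviour of get_parents (above): it walks the child->parents edges transitively, so in a
# DAG where HP:3 -> HP:2 -> HP:0000118 the call get_parents('HP:3', dag) returns
# set(['HP:3', 'HP:2', 'HP:0000118']) -- the term itself plus all ancestors up to the root
# (hypothetical IDs for illustration).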
| dd-genomics-master | code/data_util.py |
#!/usr/bin/env python
from collections import namedtuple
import extractor_util as util
import os
import sys
import ddlib
import re
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('mention_id', 'text'),
('mention_type', 'text'),
('short_wordidxs', 'int[]'),
('long_wordidxs', 'int[]')])
Feature = namedtuple('Feature', ['doc_id', 'section_id', 'mention_id', 'name'])
def get_features_for_row(row):
OPTS = config.NON_GENE_ACRONYMS['F']
features = []
f = Feature(doc_id=row.doc_id, section_id=row.section_id, mention_id=row.mention_id, name=None)
# (1) Get generic ddlib features
sentence = util.create_ddlib_sentence(row)
allWordIdxs = row.short_wordidxs + row.long_wordidxs
start = min(allWordIdxs)
  length = max(allWordIdxs) - start + 1  # span length should also cover the last word index
span = ddlib.Span(begin_word_id=start, length=length)
generic_features = [f._replace(name=feat) for feat in ddlib.get_generic_features_mention(sentence, span)]
# Optionally filter out some generic features
if OPTS.get('exclude_generic'):
generic_features = filter(lambda feat : not feat.startswith(tuple(OPTS['exclude_generic'])), generic_features)
features += generic_features
return features
if __name__ == '__main__':
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_row)
| dd-genomics-master | code/non_gene_acronyms_extract_features.py |
#!/usr/bin/env python
'''Link abbreviations to their full names
Based on
A Simple Algorithm for Identifying Abbreviations Definitions in Biomedical Text
A. Schwartz and M. Hearst
Biocomputing, 2003, pp 451-462.
# License: GNU General Public License, see http://www.clips.ua.ac.be/~vincent/scripts/LICENSE.txt
'''
__author__ = 'Vincent Van Asch, Johannes Birgmeier'
import re
def get_candidates(sentence):
rv = []
if '-LRB-' in sentence:
# XXX HACK (?) Johannes
# The original version checks some sentence properties here first:
# balanced parentheses and that the first paren is a left one
# We don't care that much since we only admit one-word abbrevs
close_idx = -1
while 1:
# Look for parenOpen parenthesis
try:
open_idx = close_idx + 1 + sentence[close_idx + 1:].index('-LRB-')
except ValueError:
break
close_idx = open_idx + 2
# XXX HACK (?) Johannes
# The original version picks up acronyms that include parentheses and multiple words.
# Since there are no such gene abbreviations, such words won't be confused for being
# genes anyways, so I just stop after the first word in the parenthesis
start = open_idx + 1
stop = start + 1
if start >= len(sentence):
break
abbrev = sentence[start]
if conditions(abbrev):
rv.append((start, stop, abbrev))
return rv
def conditions(string):
if re.match('([A-Za-z]\. ?){2,}', string.lstrip()):
return True
if len(string) < 2 or len(string) > 10:
return False
if len(string.split()) > 2:
return False
if not re.search('[A-Za-z]', string):
return False
if not string[0].isalnum():
return False
return True
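# Rough behaviour of conditions(): candidates of 2-10 characters, at most two tokens, containing
# at least one letter and starting with an alphanumeric are accepted (e.g. 'BRCA1' -> True),
# while one-character or purely non-alphabetic strings are rejected (e.g. 'x' -> False,
# '--' -> False); dotted initialisms like 'U. S. A.' pass early via the first regex.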
def get_def((start_abbrev, stop_abbrev, abbrev), sentence, stop_last_abbrev):
':type candidate: (int, int, list[str])'
':type sentence: list[str]'
start_tokens = stop_last_abbrev + 1
# Take the tokens in front of the candidate
tokens = [word.lower() for word in sentence[start_tokens:start_abbrev - 1]]
# the char that we are looking for
key = abbrev[0].lower()
if len(tokens) == 0:
raise ValueError('[SUP] Not enough keys')
# Count the number of tokens that start with the same character as the
# candidate
first_chars = [t[0] for t in tokens if len(t) > 0]
def_freq = first_chars.count(key)
candidate_freq = abbrev.lower().count(key)
# Look for the list of tokens in front of candidate that
# have a sufficient number of tokens starting with key
if candidate_freq <= def_freq:
# we should at least have a good number of starts
count = 0
start = 0
start_idx = len(first_chars) - 1
while count < candidate_freq:
if abs(start) > len(first_chars):
raise ValueError('not found')
start -= 1
# Look up key in the definition
try:
start_idx = first_chars.index(key, len(first_chars) + start)
except ValueError:
pass
# Count the number of keys in definition
count = first_chars[start_idx:].count(key)
# We found enough keys in the definition so return the definition as a
# definition candidate
string = sentence[start_idx + start_tokens:start_abbrev - 1]
rv = (start_idx + start_tokens, start_abbrev - 1, string)
return rv
else:
raise ValueError(
'[SUP] Not enough keys')
def def_selection((start_def, stop_def, definition), (start_abbrev, stop_abbrev, abbrev)):
if abbrev.lower() in [word.lower() for word in definition]:
raise ValueError('[SUP] Abbrv = full word of def')
def_str = ' '.join(definition)
if len(def_str) < len(abbrev):
raise ValueError('[SUP] Abbrv longer than def')
s_idx = -1
l_word_idx = -1
l_char_idx = -1
stop_def = -1
# find all except the first char of the abbreviation
while 1:
assert s_idx < 0
assert s_idx >= -len(abbrev), (s_idx, abbrev)
short_char = abbrev[s_idx].lower()
if len(definition) == 0:
raise ValueError('[SUP] definition candidate is empty')
assert -l_word_idx <= len(definition), (definition, l_word_idx, len(definition))
if len(definition[l_word_idx]) == 0:
l_char_idx = -1
l_word_idx -= 1
if -l_word_idx >= (len(definition) + 1):
raise ValueError('[SUP] Abbrv not in def')
continue
assert l_word_idx < 0
assert -l_word_idx <= len(definition), (len(definition), l_word_idx)
assert l_char_idx < 0
assert -l_char_idx <= len(definition[l_word_idx]), (len(definition[l_word_idx]), l_char_idx)
long_char = definition[l_word_idx][l_char_idx].lower()
if short_char == long_char:
if stop_def == -1:
stop_def = start_def + len(definition) + l_word_idx + 1
s_idx -= 1
if (l_char_idx == -1 * len(definition[l_word_idx])):
l_char_idx = -1
l_word_idx -= 1
else:
l_char_idx -= 1
if -l_word_idx >= (len(definition) + 1):
raise ValueError('[SUP] Abbrv not in def')
if -s_idx == len(abbrev):
break;
# find the first char of the abbreviation as the first char of a word
while 1:
if len(definition[l_word_idx]) == 0:
l_word_idx -= 1
if -l_word_idx >= (len(definition) + 1):
raise ValueError('[SUP] Abbrv not in def')
assert s_idx == -1 * len(abbrev)
short_char = abbrev[s_idx].lower()
long_char = definition[l_word_idx][0].lower()
if short_char == long_char:
break;
l_word_idx -= 1
if -l_word_idx >= (len(definition) + 1):
raise ValueError('[SUP] Abbrv not in def')
definition = definition[len(definition) + l_word_idx:stop_def]
tokens = len(definition)
length = len(abbrev)
if tokens > min([length + 5, length * 2]):
raise ValueError('[SUP] length constraint violation')
# Do not return definitions that contain unbalanced parentheses
if definition.count('-LRB-') != definition.count('-RRB-'):
raise ValueError(
'[SUP] Unbalanced paren in def')
return (start_def, stop_def, definition)
def getabbreviations(sentence, abbrev_index=None):
rv = []
try:
if not abbrev_index:
abbrevs = get_candidates(sentence)
else:
if conditions(sentence[abbrev_index]):
abbrevs = [(abbrev_index, abbrev_index+1, sentence[abbrev_index])]
else:
abbrevs = []
except ValueError, e:
# sys.stderr.write('Abbreviation detection: omitting sentence\n')
# sys.stderr.write('Reason: %s\n' % e.args[0])
return rv
last_stop_abbrev = -1
for abbrev in abbrevs:
try:
definition = get_def(abbrev, sentence, last_stop_abbrev)
# sys.stderr.write(abbrev[2] + '=' + str(definition) + '\n')
except ValueError, e:
# sys.stderr.write('Omitting abbreviation candidate %s\n' % abbrev[2])
# sys.stderr.write('Reason: %s\n' % e.args[0])
if e.args[0].startswith('[SUP]'):
start_fake_def = max(abbrev[0] - 2 - len(abbrev[2]), 0)
stop_fake_def = abbrev[0] - 2
rv.append((False, abbrev, (start_fake_def, stop_fake_def,
sentence[start_fake_def:stop_fake_def]), e.args[0]))
else:
try:
definition = def_selection(definition, abbrev)
except ValueError, e:
# sys.stderr.write('Omitting abbreviation candidate %s\n' % abbrev[2])
# sys.stderr.write('Reason: %s\n' % e.args[0])
if e.args[0].startswith('[SUP]'):
start_fake_def = max(abbrev[0] - 2 - len(abbrev[2]), 0)
stop_fake_def = abbrev[1] - 2
rv.append((False, abbrev, (start_fake_def, stop_fake_def,
sentence[start_fake_def:stop_fake_def]), e.args[0]))
else:
rv.append((True, abbrev, definition, ''))
last_stop_abbrev = abbrev[1]
return rv
| dd-genomics-master | code/abbreviations.py |
#! /usr/bin/env python
import collections
import extractor_util as util
import data_util as dutil
import dep_util as deps
import random
import re
import sys
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('relation_id', 'text'),
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('gene_mention_id', 'text'),
('gene_name', 'text'),
('gene_wordidxs', 'int[]'),
('gene_is_correct', 'boolean'),
('pheno_mention_id', 'text'),
('pheno_entity', 'text'),
('pheno_wordidxs', 'int[]'),
('pheno_is_correct', 'boolean'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]')])
# This defines the output Relation object
Feature = collections.namedtuple('Feature', ['doc_id', 'section_id', 'relation_id', 'name'])
HPO_DAG = dutil.read_hpo_dag()
def replace_opts(opts, replaceList):
ret = {}
for name in opts:
strings = opts[name]
for (pattern, subst) in replaceList:
if name.endswith('rgx'):
subst = re.escape(subst)
strings = [s.replace(pattern, subst) for s in strings]
ret[name] = strings
return ret
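# Example of replace_opts with a hypothetical replaceList=[('{{G}}', 'BRCA1'), ('{{P}}', 'breast cancer')]:
# an entry {'neg-rgx': ['{{G}}.*not.*cause.*{{P}}']} becomes {'neg-rgx': ['BRCA1.*not.*cause.*breast\ cancer']}
# (substitutions into *-rgx keys are re.escape()d, hence the escaped space), while plain keys such
# as 'neg' receive the raw replacement text.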
CACHE = {}
def gp_between(gene_wordidxs, pheno_wordidxs, ners):
if gene_wordidxs[0] < pheno_wordidxs[0]:
start = max(gene_wordidxs) + 1
end = min(pheno_wordidxs) - 1
else:
start = max(pheno_wordidxs) + 1
end = min(gene_wordidxs) - 1
found_g = False
found_p = False
for i in xrange(start, end+1):
ner = ners[i]
if ner == 'NERGENE':
found_g = True
if ner == 'NERPHENO':
found_p = True
return found_g and found_p
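# gp_between returns True only when, strictly between the gene span and the pheno span of this
# candidate pair, there is at least one other NERGENE-tagged word and at least one other
# NERPHENO-tagged word, i.e. a different gene-pheno pairing sits in the middle; the rules below
# use this as negative evidence (the NEG_GP_BETWEEN feature).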
def config_supervise(r, row, pheno_entity, gene_name, gene, pheno,
phrase, between_phrase, lemma_phrase, between_phrase_lemmas,
dep_dag, dep_path_between, gene_wordidxs, VALS, SR):
if SR.get('phrases-in-between'):
opts = SR['phrases-in-between']
orig_opts = opts.copy()
opts = replace_opts(opts, [('{{G}}', gene), ('{{P}}', pheno)])
for name, val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(between_phrase, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
yield r._replace(name='PHRASE_BETWEEN_%s_%s' % (name, non_alnum.sub('_', match)))
match = util.rgx_mult_search(between_phrase_lemmas, opts[name], opts['%s-rgx' % name],
orig_opts[name], orig_opts['%s-rgx' % name], flags=re.I)
if match:
yield r._replace(name='PHRASE_BETWEEN_%s_%s' % (name, non_alnum.sub('_', match)))
if SR.get('phrases-in-sent'):
opts = SR['phrases-in-sent']
orig_opts = opts.copy()
opts = replace_opts(opts, [('{{G}}', gene), ('{{P}}', pheno)])
for name, val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(phrase, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
yield r._replace(name='PHRASE_%s_%s' % (name, non_alnum.sub('_', match)))
match = util.rgx_mult_search(lemma_phrase, opts[name], opts['%s-rgx' % name], orig_opts[name],
orig_opts['%s-rgx' % name], flags=re.I)
if match:
yield r._replace(name='PHRASE_%s_%s' % (name, non_alnum.sub('_', match)))
if SR.get('primary-verb-modifiers') and dep_dag:
opts = SR['primary-verb-modifiers']
if dep_path_between:
verbs_between = [i for i in dep_path_between if row.poses[i].startswith("VB")]
if len(verbs_between) > 0:
for name, val in VALS:
mod_words = [i for i, x in enumerate(row.lemmas) if x in opts[name]]
mod_words += [i for i, x in enumerate(row.dep_paths) if x in opts['%s-dep-tag' % name]]
d = dep_dag.path_len_sets(verbs_between, mod_words)
if d and d < opts['max-dist'] + 1:
subtype = 'ModWords: ' + ' '.join([str(m) for m in mod_words]) + ', VerbsBetween: ' + ' '.join([str(m) for m in verbs_between]) + ', d: ' + str(d)
yield r._replace(name='PRIMARY_VB_MOD_%s_%s' % (name, non_alnum.sub('_', subtype)))
if SR.get('dep-lemma-connectors') and dep_dag:
opts = SR['dep-lemma-connectors']
for name, val in VALS:
if dep_path_between:
connectors = [i for i, x in enumerate(row.lemmas) \
if i in dep_path_between and x in opts[name]]
if len(connectors) > 0:
yield r._replace(name='DEP_LEMMA_CONNECT_%s_%s' % (name, non_alnum.sub('_',
' '.join([str(x) for x in connectors]))))
if SR.get('dep-lemma-neighbors') and dep_dag:
opts = SR['dep-lemma-neighbors']
for name, val in VALS:
for entity in ['g', 'p']:
lemmas = [i for i, x in enumerate(row.lemmas) if x in opts['%s-%s' % (name, entity)]]
d = dep_dag.path_len_sets(gene_wordidxs, lemmas)
if d and d < opts['max-dist'] + 1:
subtype = ' '.join([str(l) for l in lemmas]) + ', d: ' + str(d)
yield r._replace(name='DEP_LEMMA_NB_%s_%s_%s' % (name,
entity,
non_alnum.sub('_', subtype)))
if ('neg', False) in VALS:
if gp_between(row.gene_wordidxs, row.pheno_wordidxs, row.ners):
yield r._replace(name='NEG_GP_BETWEEN')
yield r._replace(name='DEFAULT_FEATURE')
non_alnum = re.compile('[\W_]+')
def create_supervised_relation(row, SR, HF):
gene_wordidxs = row.gene_wordidxs
gene_is_correct = row.gene_is_correct
pheno_entity = row.pheno_entity
pheno_wordidxs = row.pheno_wordidxs
pheno_is_correct = row.pheno_is_correct
gene = row.gene_name
pheno = ' '.join([row.words[i] for i in row.pheno_wordidxs])
phrase = ' '.join(row.words)
lemma_phrase = ' '.join(row.lemmas)
b = sorted([gene_wordidxs[0], gene_wordidxs[-1], pheno_wordidxs[0], pheno_wordidxs[-1]])[1:-1]
assert b[0] + 1 < len(row.words), str((b[0] + 1, len(row.words), row.doc_id, row.section_id, row.sent_id, str(row.words)))
assert b[1] < len(row.words), str((b[1], len(row.words), row.doc_id, row.section_id, row.sent_id, str(row.words)))
between_phrase = ' '.join(row.words[i] for i in range(b[0] + 1, b[1]))
between_phrase_lemmas = ' '.join(row.lemmas[i] for i in range(b[0] + 1, b[1]))
dep_dag = deps.DepPathDAG(row.dep_parents, row.dep_paths, row.words, max_path_len=HF['max-dep-path-dist'])
r = Feature(row.doc_id, row.section_id, row.relation_id, None)
path_len_sets = dep_dag.path_len_sets(gene_wordidxs, pheno_wordidxs)
if not path_len_sets:
if SR.get('bad-dep-paths'):
yield r._replace(name='BAD_OR_NO_DEP_PATH')
dep_path_between = dep_dag.min_path_sets(gene_wordidxs, pheno_wordidxs) if dep_dag else None
if SR.get('g-or-p-false'):
if gene_is_correct == False or pheno_is_correct == False:
yield r._replace(name='G_ANDOR_P_FALSE_%s_%s' % (str(gene_is_correct), str(pheno_is_correct)))
if SR.get('adjacent-false'):
if re.search(r'[a-z]{3,}', between_phrase, flags=re.I) is None:
st = non_alnum.sub('_', between_phrase)
yield r._replace(name='G_P_ADJACENT_%s' % st)
gene_name = row.gene_name
VALS = [('neg', False)]
for rv in config_supervise(r, row, pheno_entity, gene_name, gene, pheno,
phrase, between_phrase, \
lemma_phrase, between_phrase_lemmas, dep_dag, \
dep_path_between, gene_wordidxs,
VALS, SR):
yield rv
VALS = [('pos', True)]
for rv in config_supervise(r, row, pheno_entity, gene_name, gene, pheno,
phrase, between_phrase, \
lemma_phrase, between_phrase_lemmas, dep_dag, \
dep_path_between, gene_wordidxs,
VALS, SR):
yield rv
def featurize(supervision_rules, hard_filters):
for line in sys.stdin:
row = parser.parse_tsv_row(line)
for rv in create_supervised_relation(row, SR=supervision_rules, HF=hard_filters):
util.print_tsv_output(rv)
if __name__ == '__main__':
sr = config.GENE_PHENO_CAUSATION['SR']
hf = config.GENE_PHENO_CAUSATION['HF']
featurize(sr, hf)
| dd-genomics-master | code/genepheno_extract_features2.py |
#!/usr/bin/env python
from collections import defaultdict, namedtuple
import sys
import re
import os
import random
from itertools import chain
import extractor_util as util
import data_util as dutil
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('pa_abbrevs', 'text[]'),
('pheno_entities', 'text[]'),
('pa_section_ids', 'text[]'),
('pa_sent_ids', 'int[]')])
ExpandedRow = namedtuple('ExpandedRow', [
'doc_id',
'section_id',
'sent_id',
'words',
'lemmas',
'poses',
'ners',
'pa_abbrev',
'pheno_entity',
'pa_section_id',
'pa_sent_id'])
# This defines the output Mention object
Mention = namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'entity',
'words',
'is_correct'])
def expand_array_rows(array_row):
for i, pa_abbrev in enumerate(array_row.pa_abbrevs):
row = ExpandedRow(doc_id = array_row.doc_id,
section_id = array_row.section_id,
sent_id = array_row.sent_id,
words = array_row.words,
lemmas = array_row.lemmas,
poses = array_row.poses,
ners = array_row.ners,
pa_abbrev = pa_abbrev,
pheno_entity = array_row.pheno_entities[i],
pa_section_id = array_row.pa_section_ids[i],
pa_sent_id = array_row.pa_sent_ids[i])
yield row
### CANDIDATE EXTRACTION ###
SR = config.PHENO_ACRONYMS['SR']
def extract_candidate_mentions(row):
"""Extracts candidate phenotype mentions from an input row object"""
mentions = []
for i, word in enumerate(row.words):
if word == row.pa_abbrev:
mention_id = '%s_%s_%d_%d' % \
(row.doc_id, \
row.section_id, \
row.sent_id, \
i)
subtype = '%s_%s_%d_%s' % (row.doc_id, row.pa_section_id, row.pa_sent_id, row.pa_abbrev)
m = Mention(None, row.doc_id, row.section_id, row.sent_id,
[i], mention_id, "ABBREV", subtype, row.pheno_entity,
[word], True)
mentions.append(m)
return mentions
def generate_rand_negatives(row, pos, neg):
mentions = []
for i, word in enumerate(row.words):
if neg >= pos:
break
if word == row.pa_abbrev:
continue
if word.isupper() and word.strip() != '-LRB-' and word.strip() != '-RRB-':
mention_id = '%s_%s_%d_%d' % \
(row.doc_id, \
row.section_id, \
row.sent_id, \
i)
subtype = '%s_%s_%d_%s' % (row.doc_id, row.pa_section_id, row.pa_sent_id, row.pa_abbrev)
      m = Mention(None, row.doc_id, row.section_id, row.sent_id,
                  [i], mention_id, 'ABBREV_RAND_NEG', subtype, None, [word], False)
      mentions.append(m)
      neg += 1
return mentions
if __name__ == '__main__':
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
pos = 0
neg = 0
# Read TSV data in as Row objects
for line in sys.stdin:
array_row = parser.parse_tsv_row(line)
abbrevs = set()
for row in expand_array_rows(array_row):
if row.pa_abbrev in abbrevs:
continue
abbrevs.add(row.pa_abbrev)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# find candidate mentions & supervise
mentions = extract_candidate_mentions(row)
pos += len(mentions)
if SR.get('rand-negs'):
negs = generate_rand_negatives(row, pos, neg)
neg += len(negs)
mentions.extend(negs)
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/pheno_acronyms_to_mentions.py |
#!/usr/bin/env python
import collections
import extractor_util as util
import data_util as dutil
import dep_util as deps
import os
import random
import re
import sys
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('gene_mention_id', 'text'),
('gene_name', 'text'),
('gene_wordidxs', 'int[]'),
('gene_is_correct', 'boolean'),
('pheno_mention_id', 'text'),
('pheno_entity', 'text'),
('pheno_wordidxs', 'int[]'),
('pheno_is_correct', 'boolean')])
# This defines the output Relation object
Relation = collections.namedtuple('Relation', [
'dd_id',
'relation_id',
'doc_id',
'section_id',
'sent_id',
'gene_mention_id',
'gene_name',
'gene_wordidxs',
'gene_is_correct',
'pheno_mention_id',
'pheno_entity',
'pheno_wordidxs',
'pheno_is_correct'])
### CANDIDATE EXTRACTION ###
def extract_candidate_relations(row):
"""
  Given a row object with a sentence and one gene and one phenotype mention candidate, decide
  whether to keep the gene-phenotype pair as a candidate relation (dropping overlapping mentions
  and pairs whose minimum dependency-path distance exceeds the hard-filter limit)
"""
HF = config.GENE_PHENO['HF']
relations = []
# Create a dependencies DAG for the sentence
dep_dag = deps.DepPathDAG(row.dep_parents, row.dep_paths, row.words, max_path_len=HF['max-dep-path-dist'])
  # Build the single G-P pair for this row (one gene mention and one pheno mention)
  rid = '%s_%s' % (row.gene_mention_id, row.pheno_mention_id)
r = Relation(None, rid, row.doc_id, row.section_id, row.sent_id, \
row.gene_mention_id, row.gene_name, \
row.gene_wordidxs, row.gene_is_correct, \
row.pheno_mention_id, row.pheno_entity, \
row.pheno_wordidxs, row.pheno_is_correct)
# Do not consider overlapping mention pairs
if len(set(r.gene_wordidxs).intersection(r.pheno_wordidxs)) > 0:
return []
# Get the min path length between any of the g / p phrase words
d = dep_dag.path_len_sets(r.gene_wordidxs, r.pheno_wordidxs)
if d is not None:
if d > HF['max-dep-path-dist']:
return []
return [r]
if __name__ == '__main__':
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# find candidate mentions
relations = extract_candidate_relations(row)
# print output
for relation in relations:
util.print_tsv_output(relation)
| dd-genomics-master | code/genepheno_extract_candidates.py |
#!/usr/bin/env python
import collections
import os
import sys
import abbreviations
import config
import extractor_util as util
import levenshtein
CACHE = dict() # Cache results of disk I/O
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('gene_wordidx_array', 'int[]')])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'short_wordidxs',
'long_wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'abbrev_word',
'definition_words',
'is_correct'])
### CANDIDATE EXTRACTION ###
# HF = config.NON_GENE_ACRONYMS['HF']
SR = config.NON_GENE_ACRONYMS['SR']
def contains_sublist(lst, sublst):
if len(lst) < len(sublst):
return False, None
n = len(sublst)
for i in xrange(len(lst) - n + 1):
if sublst == lst[i:i+n]:
return True, (i,i+n)
return False, None
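# contains_sublist does a linear scan for an exact contiguous sub-sequence and returns the
# half-open index range on success, e.g. contains_sublist(['a', 'b', 'c', 'd'], ['b', 'c'])
# -> (True, (1, 3)), and (False, None) when the sub-list does not occur.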
def detect_manual(words, gene_idx):
gene_name = words[gene_idx]
manual_pairs = SR['manual-pairs']
rv = []
for names in manual_pairs:
definitions = manual_pairs[names]
if gene_name in names:
for definition in definitions:
def_lst = [d.lower() for d in definition.split(' ')]
contains, def_boundaries = contains_sublist([w.lower() for w in words], def_lst)
if contains:
# print >>sys.stderr, (contains, def_boundaries, def_lst)
def_start, def_stop = def_boundaries
definition = words[def_start:def_stop]
abbrev = gene_name
abbrev_start = gene_idx
abbrev_stop = gene_idx + 1
rv.append(((abbrev_start, abbrev_stop, abbrev), (def_start, def_stop, definition)))
return rv
def extract_candidate_mentions(row, pos_count, neg_count):
mentions = []
if abbreviations.conditions(row.words[row.gene_wordidx]):
for (is_correct, abbrev, definition, detector_message) in abbreviations.getabbreviations(row.words, abbrev_index=row.gene_wordidx):
m = create_supervised_mention(row, is_correct, abbrev, definition, detector_message, pos_count, neg_count)
if m:
mentions.append(m)
for (abbrev, definition) in detect_manual(row.words, row.gene_wordidx):
m = create_supervised_mention(row, True, abbrev, definition, 'MANUAL', pos_count, neg_count)
if m:
mentions.append(m)
return mentions
### DISTANT SUPERVISION ###
VALS = config.NON_GENE_ACRONYMS['vals']
def create_supervised_mention(row, is_correct,
(start_abbrev, stop_abbrev, abbrev),
(start_definition, stop_definition,
definition), detector_message, pos_count,
neg_count):
assert stop_abbrev == start_abbrev + 1
mid = '%s_%s_%s_%s' % (row.doc_id, row.section_id, row.sent_id, start_abbrev)
gene_to_full_name = CACHE['gene_to_full_name']
include = None
if is_correct:
    # JB: super ugly, who cares
if detector_message == 'MANUAL':
m = Mention(None, row.doc_id, row.section_id,
row.sent_id, [i for i in xrange(start_abbrev, stop_abbrev + 1)],
[i for i in xrange(start_definition, stop_definition + 1)],
mid, 'MANUAL', None, abbrev, definition, is_correct);
return m
supertype = 'TRUE_DETECTOR'
subtype = None
elif is_correct is False:
supertype = 'FALSE_DETECTOR'
subtype = detector_message
else:
supertype = 'DETECTOR_OMITTED_SENTENCE'
subtype = None
include = False
# print >>sys.stderr, supertype
if is_correct and include is not False:
if abbrev in gene_to_full_name:
full_gene_name = gene_to_full_name[abbrev]
ld = levenshtein.levenshtein(full_gene_name.lower(), ' '.join(definition).lower())
fgl = len(full_gene_name)
dl = len(' '.join(definition))
if dl >= fgl*0.75 and dl <= fgl*1.25 and float(ld) \
/ len(' '.join(definition)) <= SR['levenshtein_cutoff']:
is_correct = False
supertype = 'FALSE_DEFINITION_IS_GENE_FULL'
# print >>sys.stderr, supertype
subtype = full_gene_name + '; LD=' + str(ld)
include = False
if include is not False and is_correct and len(definition) == 1 and definition[0] in gene_to_full_name:
is_correct = False
supertype = 'FALSE_DEFINITION_IS_GENE_ABBREV'
# print >>sys.stderr, supertype
subtype = None
if include is not False and is_correct and abbrev in SR['short-words']:
is_correct = False
supertype = 'FALSE_SHORT_WORD'
# print >>sys.stderr, supertype
subtype = None
# print >>sys.stderr, (include, is_correct, neg_count, pos_count)
if include is True or (include is not False and is_correct is True or (is_correct is False and neg_count < pos_count)):
    # we're never going to do inference on these mentions, so most of this bookkeeping is unnecessary
m = Mention(None, row.doc_id, row.section_id,
row.sent_id, [i for i in xrange(start_abbrev, stop_abbrev + 1)],
[i for i in xrange(start_definition, stop_definition + 1)],
mid, supertype, subtype, abbrev, definition, is_correct);
else:
m = None
return m
def read_gene_to_full_name():
rv = {}
with open(onto_path('data/gene_names.tsv')) as f:
for line in f:
parts = line.split('\t')
assert len(parts) == 2, parts
geneAbbrev = parts[0].strip()
geneFullName = parts[1].strip()
      # :type geneFullName: str
geneFullName = geneFullName.replace('(', '-LRB-')
geneFullName = geneFullName.replace(')', '-RRB-')
assert geneAbbrev not in rv, geneAbbrev
rv[geneAbbrev] = geneFullName
return rv
if __name__ == '__main__':
# load static data
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
CACHE['gene_to_full_name'] = read_gene_to_full_name()
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
pos_count = 0
neg_count = 0
for line in sys.stdin:
row = parser.parse_tsv_row(line)
#Specific to ddlog, add two conditions that are not possible directly in the sql query.
row.gene_wordidx = row.gene_wordidx_array[0]
# print >> sys.stderr, 'patate'
# print >> sys.stderr, row
if '-LRB-' not in row.words[row.gene_wordidx - 1]:
continue
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# Find candidate mentions & supervise
mentions = extract_candidate_mentions(row, pos_count, neg_count)
pos_count += len([m for m in mentions if m.is_correct])
neg_count += len([m for m in mentions if m.is_correct is False])
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/non_gene_acronyms_extract_candidates.py |
#! /usr/bin/env python
import collections
import extractor_util as eutil
import sys
from dep_alignment.alignment_util import row_to_canonical_match_tree, DepParentsCycleException, OverlappingCandidatesException, RootException
from dep_alignment.multi_dep_alignment import MultiDepAlignment
import os
import random
import time
# This defines the Row object that we read in to the extractor
parser = eutil.RowParser([
('relation_id', 'text'),
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('gene_mention_id', 'text'),
('gene_name', 'text'),
('gene_wordidxs', 'int[]'),
('gene_is_correct', 'boolean'),
('pheno_mention_id', 'text'),
('pheno_entity', 'text'),
('pheno_wordidxs', 'int[]'),
('pheno_is_correct', 'boolean'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('ners', 'text')])
ds_parser = eutil.RowParser([
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('gene_wordidxs', 'int[]'),
('pheno_wordidxs', 'int[]')])
# This defines the output Relation object
Relation = collections.namedtuple('Relation', [
'dd_id',
'relation_id',
'doc_id',
'section_id',
'sent_id',
'gene_mention_id',
'gene_name',
'gene_wordidxs',
'pheno_mention_id',
'pheno_entity',
'pheno_wordidxs',
'is_correct',
'relation_supertype',
'relation_subtype',
'features',
'matching_scores',
'rescores'])
def read_candidate(row):
gene_mention_id = row.gene_mention_id
gene_name = row.gene_name
gene_wordidxs = row.gene_wordidxs
pheno_mention_id = row.pheno_mention_id
pheno_entity = row.pheno_entity
pheno_wordidxs = row.pheno_wordidxs
relation_id = '%s_%s' % (gene_mention_id, pheno_mention_id)
r = Relation(None, relation_id, row.doc_id, row.section_id, row.sent_id, gene_mention_id, gene_name, \
gene_wordidxs, pheno_mention_id, pheno_entity, pheno_wordidxs, None, None, None, [], None, None)
return r
def get_example_tree(example_sentences_filename, synonyms):
app_home = os.environ['APP_HOME']
match_trees = []
with open(app_home + '/' + example_sentences_filename) as f:
for line in f:
row = ds_parser.parse_tsv_row(line)
try:
match_trees.append(row_to_canonical_match_tree(row, [row.gene_wordidxs, row.pheno_wordidxs]))
except (DepParentsCycleException, OverlappingCandidatesException, RootException):
continue
mt_root1, match_tree1 = match_trees[0]
for mt_root0, match_tree0 in match_trees[1:]:
mda = MultiDepAlignment(mt_root0, match_tree0, mt_root1, match_tree1, 2, synonyms)
mt_root1, match_tree1 = mda.get_match_tree()
return mt_root1, match_tree1
def print_example_tree(example_sentences_filename, synonyms):
app_home = os.environ['APP_HOME']
match_trees = []
with open(app_home + '/' + example_sentences_filename) as f:
for line in f:
row = ds_parser.parse_tsv_row(line)
try:
match_trees.append(row_to_canonical_match_tree(row, [row.gene_wordidxs, row.pheno_wordidxs]))
except (DepParentsCycleException, OverlappingCandidatesException, RootException):
continue
mt_root1, match_tree1 = match_trees[0]
for mt_root0, match_tree0 in match_trees[1:]:
mda = MultiDepAlignment(mt_root0, match_tree0, mt_root1, match_tree1, 2, synonyms)
mt_root1, match_tree1 = mda.get_match_tree()
mda.print_match_tree(sys.stderr)
def get_score(genepheno_row, example_tree_root, example_tree, synonyms, rescores):
row = genepheno_row
try:
mt_root2, match_tree2 = row_to_canonical_match_tree(row, [row.gene_wordidxs, row.pheno_wordidxs])
assert len(match_tree2) <= len(row.words) + 1, (len(row.words), len(match_tree2), row.words, match_tree2)
except (DepParentsCycleException, OverlappingCandidatesException, RootException):
return -1, -1
mda = MultiDepAlignment(example_tree_root, example_tree, mt_root2, match_tree2, 2, synonyms)
score1 = mda.overall_score()
score2 = mda.rescore(rescores)
return score1, score2
if __name__ == '__main__':
app_home = os.environ['APP_HOME']
match_trees = []
with open(app_home + '/match_paths/match_paths%d.txt' % random.randint(0, 100000), 'a') as match_path_file:
with open(app_home + '/true_causation_sentences1.tsv') as f:
for line in f:
row = ds_parser.parse_tsv_row(line)
try:
match_trees.append(row_to_canonical_match_tree(row, [row.gene_wordidxs, row.pheno_wordidxs]))
except (DepParentsCycleException, OverlappingCandidatesException, RootException):
continue
mt_root1, match_tree1 = match_trees[0]
for mt_root0, match_tree0 in match_trees[1:]:
# for i, mc in enumerate(match_tree1):
# print >>sys.stderr, str(i+1) + ": " + str(mc)
mda = MultiDepAlignment(mt_root0, match_tree0, mt_root1, match_tree1, 2, \
[set(['disease', 'disorder']), \
set(['mutation', 'variant', 'allele', 'polymorphism']), \
set(['case', 'patient', 'subject', 'family', 'boy', 'girl']), \
set(['present', 'display', 'characterize']), \
set(['nonsense', 'missense', 'frameshift']), \
set(['identify', 'report', 'find', 'detect']), \
set(['cause', 'associate', 'link', 'lead']),
set(['mutation', 'inhibition']), \
set(['recessive', 'dominant'])])
mt_root1, match_tree1 = mda.get_match_tree()
# mt_root1, match_tree1 = match_trees[0]
mda.print_match_tree(match_path_file)
lc = 0
start_time = time.time()
for line in sys.stdin:
lc += 1
row = parser.parse_tsv_row(line)
if row.gene_is_correct == False or row.pheno_is_correct == False:
continue
try:
mt_root2, match_tree2 = row_to_canonical_match_tree(row, [row.gene_wordidxs, row.pheno_wordidxs])
assert len(match_tree2) <= len(row.words) + 1, (len(row.words), len(match_tree2), row.words, match_tree2)
except (DepParentsCycleException, OverlappingCandidatesException, RootException):
continue
matching_scores = []
rescores = []
# for (mt_root1, match_tree1) in match_trees:
mda = MultiDepAlignment(mt_root1, match_tree1, mt_root2, match_tree2, 2, \
[set(['disease', 'disorder']), \
set(['mutation', 'variant', 'allele', 'polymorphism', \
'SNP', 'truncation', 'deletion', 'duplication']), \
set(['case', 'patient']), \
set(['identify', 'report', 'find', 'detect']), \
set(['cause', 'associate', 'link', 'lead', 'result']),
set(['mutation', 'inhibition', 'deficiency'])])
# mda.print_matched_lemmas(match_path_file)
print >>match_path_file, ' '.join(row.words)
mda.print_match_tree(match_path_file)
score1 = mda.overall_score()
score2 = mda.rescore([(set(['cause', 'lead', 'result']), set(['associate', 'link']), -50),
(set(['mutation']), set(['inhibition', 'deficiency']), -50)])
r = read_candidate(row)
matching_scores.append(int(score1))
rescores.append(int(score1 + score2))
# end for
eutil.print_tsv_output(r._replace(matching_scores=matching_scores, rescores=rescores))
end_time = time.time()
if lc != 0:
print >>sys.stderr, "Number of lines: %d, time per line: %f seconds" % (lc, (end_time - start_time) / (float(lc)))
| dd-genomics-master | code/genepheno_sv_new.py |
#! /usr/bin/env python
import sys
import config
import genepheno_supervision_util as sv
if __name__ == '__main__':
sr = config.GENE_PHENO_CAUSATION['SR']
hf = config.GENE_PHENO_CAUSATION['HF']
sv.supervise(sr, hf, charite_allowed=True)
| dd-genomics-master | code/genepheno_causation_supervision.py |
#! /usr/bin/env python
import sys
import config
import genepheno_supervision_util as sv
if __name__ == '__main__':
sr = config.GENE_PHENO_CAUSATION['SR']
hf = config.GENE_PHENO_CAUSATION['HF']
sv.supervise(sr, hf, charite_allowed=False)
| dd-genomics-master | code/genepheno_causation_supervision_no_charite.py |
#!/usr/bin/env python
from collections import defaultdict, namedtuple
import sys
import re
import os
import random
from itertools import chain
import extractor_util as util
import data_util as dutil
import config
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]')])
# This defines the output Mention object
Mention = namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'entity',
'words',
'is_correct'])
### CANDIDATE EXTRACTION ###
HF = config.PHENO['HF']
SR = config.PHENO['SR']
def enrich_phenos(rows):
ret = []
for row in rows:
hpoid, phrase, entry_type = [x.strip() for x in row]
ret.append([hpoid, phrase, entry_type])
new_pheno = ''
    # elif chain: strip only the longest matching prefix
    if phrase.lower().startswith('abnormality of the'):
      new_pheno = (phrase[len('abnormality of the') + 1:]).strip()
    elif phrase.lower().startswith('abnormality of'):
      new_pheno = (phrase[len('abnormality of') + 1:]).strip()
    elif phrase.lower().startswith('abnormal'):
      new_pheno = (phrase[len('abnormal') + 1:]).strip()
if len(new_pheno) > 0:
if len(new_pheno.split()) > 1:
ret.append([hpoid, new_pheno, 'MORPHED'])
aplasias = ['abnormality', 'abnormalities', 'physiology', \
'morphology', 'dysplasia', 'hypoplasia', 'aplasia', \
'hyperplasia']
next_pheno = new_pheno
for aplasia in aplasias:
if new_pheno.endswith(aplasia):
next_pheno = (new_pheno[:-len(aplasia)]).strip()
for aplasia in aplasias:
ret.append([hpoid, next_pheno + ' ' + aplasia, 'MORPHED'])
new_ret = []
for row in ret:
hpoid, pheno, entry_type = [x.strip() for x in row]
words = pheno.split()
for word in words:
# just assuming that only one slash occurs per line
if '/' in word:
nword = []
nword.append(word.split('/')[0])
nword.append(word.split('/')[1])
        # emit one SLASHED variant for each side of the slash
        new_ret.append([hpoid, pheno.replace(word, nword[0]), 'SLASHED'])
        new_ret.append([hpoid, pheno.replace(word, nword[1]), 'SLASHED'])
ret = ret + new_ret
return ret
def load_pheno_terms():
  """
  Load phenotypes (as phrases + as frozensets to allow permutations)
  Output a dict with pheno phrases as keys, and a dict with pheno sets as keys
  """
  phenos = {}
  pheno_sets = {}
# [See onto/prep_pheno_terms.py]
# Note: for now, we don't distinguish between lemmatized / exact
rows = [line.split('\t') for line in open(onto_path('manual/pheno_terms.tsv'), 'rb')]
rows = enrich_phenos(rows)
for row in rows:
hpoid, phrase, entry_type = [x.strip() for x in row]
if hpoid in hpo_phenos:
if phrase in phenos:
phenos[phrase].append((hpoid, entry_type))
else:
phenos[phrase] = [(hpoid, entry_type)]
phrase_bow = frozenset(phrase.split())
if phrase_bow in pheno_sets:
pheno_sets[phrase_bow].append((hpoid, entry_type))
else:
pheno_sets[phrase_bow] = [(hpoid, entry_type)]
return phenos, pheno_sets
allowed_diseases = [line.strip() for line in open(onto_path('manual/allowed_omim_ps.tsv'))]
def load_disease_terms():
diseases = {}
disease_sets = {}
rows = [line.split('\t') for line in open(onto_path('manual/disease_terms.tsv'), 'rb')]
for row in rows:
omimid, phrase, entry_type = [x.strip() for x in row]
if omimid in allowed_diseases:
if phrase in diseases:
diseases[phrase].append((omimid, entry_type))
else:
diseases[phrase] = [(omimid, entry_type)]
phrase_bow = frozenset(phrase.split())
if phrase_bow in disease_sets:
disease_sets[phrase_bow].append((omimid, entry_type))
else:
disease_sets[phrase_bow] = [(omimid, entry_type)]
return diseases, disease_sets
def keep_word(w):
return (w.lower() not in STOPWORDS and len(w) > HF['min-word-len'] - 1)
def extract_candidate_mentions(row, terms, term_sets):
"""Extracts candidate phenotype mentions from an input row object"""
mentions = []
# First we initialize a list of indices which we 'split' on,
# i.e. if a window intersects with any of these indices we skip past it
split_indices = set()
# split on certain characters / words e.g. commas
split_indices.update([i for i,w in enumerate(row.words) if w in HF['split-list']])
# split on segments of more than M consecutive skip words
seq = []
for i,w in enumerate(row.words):
if not keep_word(w):
seq.append(i)
else:
if len(seq) > HF['split-max-stops']:
split_indices.update(seq)
seq = []
# Next, pass a window of size n (dec.) over the sentence looking for candidate mentions
for n in reversed(range(1, min(len(row.words), HF['max-len'])+1)):
for i in range(len(row.words)-n+1):
wordidxs = range(i,i+n)
words = [re.sub(r'\W+', ' ', w.lower()) for w in row.words[i:i+n]]
lemmas = [re.sub(r'\W+', ' ', w.lower()) for w in row.lemmas[i:i+n]]
# skip this window if it intersects with the split set
if not split_indices.isdisjoint(wordidxs):
continue
# skip this window if it is sub-optimal: e.g. starts with a skip word, etc.
if not all(map(keep_word, [words[0], lemmas[0], words[-1], lemmas[-1]])):
continue
# Note: we filter stop words coordinated between word and lemma lists
# (i.e. if lemmatized version of a word is stop word, it should be stop word too)
# This also keeps these filtered lists in sync!
ws, lws = zip(*[(words[k], lemmas[k]) for k in range(n) if keep_word(words[k]) and keep_word(lemmas[k])])
# (1) Check for exact match (including exact match of lemmatized / stop words removed)
# If found add to split list so as not to consider subset phrases
p, lp = map(' '.join, [ws, lws])
if p in terms or lp in terms:
entities = terms[p] if p in terms else terms[lp]
for (entity, entry_type) in entities:
mentions.append(create_supervised_mention(row, wordidxs, entity, entry_type + '_EXACT'))
split_indices.update(wordidxs)
continue
# (2) Check for permuted match
# Note: avoid repeated words here!
if HF['permuted']:
ps, lps = map(frozenset, [ws, lws])
if (len(ps)==len(ws) and ps in term_sets) or (len(lps)==len(lws) and lps in term_sets):
entities = term_sets[ps] if ps in term_sets else term_sets[lps]
for (entity, entry_type) in entities:
mentions.append(create_supervised_mention(row, wordidxs, entity, entry_type + '_PERM'))
continue
      # (3) Check for an exact match with one omitted (interior) word/lemma
      # Note: only consider omitting non-stop words!
if HF['omitted-interior']:
if len(ws) > 2:
for omit in range(1, len(ws)-1):
p, lp = [' '.join([w for i,w in enumerate(x) if i != omit]) for x in [ws, lws]]
if p in terms or lp in terms:
entities = terms[p] if p in terms else terms[lp]
for (entity, entry_type) in entities:
mentions.append(create_supervised_mention(row, wordidxs, entity, entry_type + '_OMIT_%s' % omit))
return mentions
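# Worked sketch (hypothetical sentence and term dictionary): assuming ',' is in
# HF['split-list'] and 'renal cyst' maps to an HPO id in `terms`, then for
#   words = ['Patients', 'show', 'renal', 'cyst', ',', 'and', 'anemia']
# the comma index lands in split_indices, so no window may span it; the size-2 window
# over indices (2, 3) matches 'renal cyst' exactly, yields a '*_EXACT' supervised
# mention, and indices 2-3 are added to split_indices so sub-phrases are skipped.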
### DISTANT SUPERVISION ###
VALS = config.PHENO['vals']
def create_supervised_mention(row, idxs, entity=None, mention_supertype=None, mention_subtype=None):
"""Given a Row object consisting of a sentence, create & supervise a Mention output object"""
words = [row.words[i] for i in idxs]
idxs_strs = [str(i) for i in idxs]
mid = '%s_%s_%s_%s' % (row.doc_id, row.section_id, row.sent_id, '-'.join(idxs_strs))
m = Mention(None, row.doc_id, row.section_id, row.sent_id, idxs, mid, mention_supertype, mention_subtype, entity, words, None)
if SR.get('post-match'):
opts = SR['post-match']
phrase_post = " ".join(row.words[idxs[-1]:])
for name,val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(phrase_post, opts[name], opts['%s-rgx' % name], opts[name], opts['%s-rgx' % name], flags=re.I)
if match:
return m._replace(is_correct=val, mention_supertype='%s_POST_MATCH_%s_%s' % (mention_supertype, name, val), mention_subtype=match)
if SR.get('bad-pheno-names'):
if ' '.join(words) in SR['bad-pheno-names']:
return m._replace(is_correct=False, mention_supertype='%s_BAD_PHENO_NAME' % mention_supertype)
if SR.get('bad-phenos'):
if entity in SR['bad-phenos']:
return m._replace(is_correct=False, mention_supertype='%s_BAD_PHENO_ENTITY' % mention_supertype)
if SR.get('mesh-supervise'):
pubmed_id = dutil.get_pubmed_id_for_doc(row.doc_id)
if pubmed_id and pubmed_id in PMID_TO_HPO:
if entity in PMID_TO_HPO[pubmed_id]:
return m._replace(is_correct=True, mention_supertype='%s_MESH_SUPERV' % mention_supertype, mention_subtype=str(pubmed_id) + ' ::: ' + str(entity))
# If this is more specific than MeSH term, also consider true.
elif SR.get('mesh-specific-true') and entity in hpo_dag.node_set:
for parent in PMID_TO_HPO[pubmed_id]:
if hpo_dag.has_child(parent, entity):
return m._replace(is_correct=True, mention_supertype='%s_MESH_CHILD_SUPERV' % mention_supertype, mention_subtype=str(parent) + ' -> ' + str(entity))
phrase = " ".join(words).lower()
if mention_supertype == 'EXACT':
if SR.get('exact-english-word') and \
len(words) == 1 and phrase in ENGLISH_WORDS and random.random() < SR['exact-english-word']['p']:
return m._replace(is_correct=True, mention_supertype='%s_EXACT_AND_ENGLISH_WORD' % mention_supertype, mention_subtype=phrase)
else:
return m._replace(is_correct=True, mention_supertype='%s_NON_EXACT_AND_ENGLISH_WORD' % mention_supertype, mention_subtype=phrase)
# Else default to existing values / NULL
return m
### RANDOM NEGATIVE SUPERVISION ###
def generate_rand_negatives(s, candidates):
"""Generate some negative examples in 1:1 ratio with positive examples"""
negs = []
n_negs = len([c for c in candidates if c.is_correct])
if n_negs == 0:
return negs
# pick random noun / adj phrases which do not overlap with candidate mentions
covered = set(chain.from_iterable([m.wordidxs for m in candidates]))
idxs = set([i for i in range(len(s.words)) if re.match(SR['rand-negs']['pos-tag-rgx'], s.poses[i])])
for i in range(n_negs):
x = sorted(list(idxs - covered))
if len(x) == 0:
break
ridxs = [random.randint(0, len(x)-1)]
while random.random() > 0.5:
j = ridxs[-1]
if j + 1 < len(x) and x[j+1] == x[j] + 1:
ridxs.append(j+1)
else:
break
wordidxs = [x[j] for j in ridxs]
mtype = 'RAND_NEG'
wordidxs_strs = [str(i) for i in wordidxs]
mid = '%s_%s_%s_%s' % (s.doc_id, s.section_id, str(s.sent_id), '-'.join(wordidxs_strs))
negs.append(
Mention(dd_id=None, doc_id=s.doc_id, section_id=s.section_id, sent_id=s.sent_id, wordidxs=wordidxs,
mention_id=mid, mention_supertype=mtype, mention_subtype=None, entity=None, words=[s.words[i] for i in wordidxs],
is_correct=False))
for i in wordidxs:
covered.add(i)
return negs
if __name__ == '__main__':
# Load static dictionaries
# TODO: any simple ways to speed this up?
STOPWORDS = frozenset([w.strip() for w in open(onto_path('manual/stopwords.tsv'), 'rb')])
ENGLISH_WORDS = frozenset([w.strip() for w in open(onto_path('data/english_words.tsv'), 'rb')])
hpo_dag = dutil.read_hpo_dag()
hpo_phenos = set(dutil.get_hpo_phenos(hpo_dag))
if SR.get('mesh-supervise'):
    # unnecessary for now --- our doc id is currently the pmid; hopefully this will never be needed again
# DOI_TO_PMID = dutil.read_doi_to_pmid()
PMID_TO_HPO = dutil.load_pmid_to_hpo()
PHENOS, PHENO_SETS = load_pheno_terms()
DISEASES, DISEASE_SETS = load_disease_terms()
# Read TSV data in as Row objects
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# find candidate mentions & supervise
disease_mentions = extract_candidate_mentions(row, DISEASES, DISEASE_SETS)
pheno_mentions = extract_candidate_mentions(row, PHENOS, PHENO_SETS)
dwi = [d.wordidxs for d in disease_mentions]
pheno_mentions_2 = []
for p in pheno_mentions:
if p.wordidxs not in dwi:
pheno_mentions_2.append(p)
mentions = disease_mentions + pheno_mentions_2
if SR.get('rand-negs'):
mentions += generate_rand_negatives(row, mentions)
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/pheno_extract_candidates.py |
#!/usr/bin/env python
import collections
import extractor_util as util
import data_util as dutil
import random
import re
import os
import sys
import string
import config
import dep_util as deps
CACHE = dict() # Cache results of disk I/O
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]')])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mapping_type',
'mention_supertype',
'mention_subtype',
'gene_name',
'words',
'is_correct'])
### CANDIDATE EXTRACTION ###
HF = config.GENE['HF']
SR = config.GENE['SR']
def read_pubmed_to_genes():
"""NCBI provides a list of articles (PMIDs) that discuss a particular gene (Entrez IDs).
These provide a nice positive distant supervision set, as mentions of a gene name in
an article about that gene are likely to be true mentions.
This returns a dictionary that maps from Pubmed ID to a set of ENSEMBL genes mentioned
in that article.
"""
pubmed_to_genes = collections.defaultdict(set)
with open('%s/onto/data/pmid_to_ensembl.tsv' % util.APP_HOME) as f:
for line in f:
pubmed,gene = line.rstrip('\n').split('\t')
pubmed_to_genes[pubmed].add(gene)
return pubmed_to_genes
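# Usage sketch (hypothetical IDs, not taken from the real TSV): each line of
# pmid_to_ensembl.tsv is '<pubmed_id>\t<ensembl_gene>', so after loading,
#   pubmed_to_genes = read_pubmed_to_genes()
#   pubmed_to_genes['12345678']  # -> e.g. set(['ENSG00000141510'])
# (defaultdict(set): unknown PMIDs simply yield an empty set)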
def select_mapping_type(mapping_types):
mapping_order = HF['ensembl-mapping-types']
for m in mapping_order:
if m in mapping_types:
return m
assert False, ','.join(mapping_types)
def extract_candidate_mentions(row):
gene_name_to_genes = CACHE['gene_name_to_genes']
mentions = []
for i, word in enumerate(row.words):
if HF['require-one-letter']:
if not re.match(r'.*[a-zA-Z].*', word):
continue
if (word in gene_name_to_genes):
matches = gene_name_to_genes[word]
mapping_types = set()
for (eid, canonical_name, mapping_type) in matches:
mapping_types.add(mapping_type)
mapping_type = select_mapping_type(mapping_types)
if len(word) >= HF['min-word-len'][mapping_type]:
m = create_supervised_mention(row, i, gene_name=word, mapping_type=mapping_type)
if m:
mentions.append(m)
return mentions
def contains_sublist(lst, sublst):
if len(lst) < len(sublst):
return False, None
n = len(sublst)
for i in xrange(len(lst) - n + 1):
if sublst == lst[i:i+n]:
return True, (i,i+n)
return False, None
def detect_manual(gene_name, words):
manual_pairs = SR['manual-bad']
for names in manual_pairs:
definitions = manual_pairs[names]
if gene_name in names:
for definition in definitions:
def_lst = [d.lower() for d in definition.split(' ')]
contains, def_boundaries = contains_sublist([w.lower() for w in words], def_lst)
if contains:
def_start, def_stop = def_boundaries
definition = words[def_start:def_stop]
return ' '.join(definition)
return None
### DISTANT SUPERVISION ###
VALS = config.GENE['vals']
def create_supervised_mention(row, i, gene_name=None, mapping_type=None, mention_supertype=None, mention_subtype=None):
"""Given a Row object consisting of a sentence, create & supervise a Mention output object"""
word = row.words[i]
mid = '%s_%s_%s_%s' % (row.doc_id, row.section_id, row.sent_id, i)
m = Mention(None, row.doc_id, row.section_id, row.sent_id, [i], mid, mapping_type, mention_supertype, mention_subtype, gene_name, [word], None)
dep_dag = deps.DepPathDAG(row.dep_parents, row.dep_paths, row.words)
phrase = ' '.join(row.words)
lemma_phrase = ' '.join(row.lemmas)
if SR.get('post-neighbor-match') and i < len(row.words) - 1:
opts = SR['post-neighbor-match']
post_neighbor = row.words[i+1]
for name,val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(post_neighbor, opts[name], opts['%s-rgx' % name], opts[name], opts['%s-rgx' % name], flags=re.I)
if match:
return m._replace(is_correct=val, mention_supertype='POST_MATCH_%s_%s' % (name, val), mention_subtype=match)
if SR.get('bad-genes'):
if i > 0:
prev_word = row.words[i-1]
else:
prev_word = ''
if i < len(row.words) - 1:
next_word = row.words[i+1]
else:
next_word = ''
if next_word != 'gene' and prev_word != 'gene':
for bad_gene in SR['bad-genes']:
if re.search(bad_gene, gene_name):
return m._replace(is_correct=False, mention_supertype='BAD_GENE')
if SR.get('manual-bad'):
detected = detect_manual(gene_name, row.words)
if detected is not None:
return m._replace(is_correct=False, mention_supertype='MANUAL_BAD', mention_subtype=detected)
if SR.get('pre-neighbor-match') and i > 0:
opts = SR['pre-neighbor-match']
pre_neighbor = row.words[i-1]
for name,val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(pre_neighbor, opts[name], opts['%s-rgx' % name],
opts[name], opts['%s-rgx' % name], flags=re.I)
if match:
return m._replace(is_correct=val, mention_supertype='PRE_NEIGHBOR_MATCH_%s_%s' % (name, val), mention_subtype=match)
if SR.get('phrases-in-sent'):
opts = SR['phrases-in-sent']
for name,val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(phrase + ' ' + lemma_phrase, opts[name], opts['%s-rgx' % name],
opts[name], opts['%s-rgx' % name], flags=re.I)
if match:
# backslashes cause postgres errors in postgres 9
return m._replace(is_correct=val, mention_supertype='PHRASE_%s' % name, mention_subtype=match.replace('\\', '/'))
## DS RULE: matches from papers that NCBI annotates as being about the mentioned gene are likely true.
if SR['pubmed-paper-genes-true']:
pubmed_to_genes = CACHE['pubmed_to_genes']
pmid = dutil.get_pubmed_id_for_doc(row.doc_id)
if pmid and gene_name:
for (mention_ensembl_id, canonical_name, mapping_type) in CACHE['gene_name_to_genes'][gene_name]:
if mention_ensembl_id in pubmed_to_genes.get(pmid, {}):
return m._replace(is_correct=True, mention_supertype='%s_NCBI_ANNOTATION_TRUE' % mention_supertype, mention_subtype=mention_ensembl_id)
break
## DS RULE: Genes on the gene list with complicated names are probably good for exact matches.
if SR['complicated-gene-names-true']:
if m.mapping_type in HF['ensembl-mapping-types']:
if re.match(r'[a-zA-Z]{3}[a-zA-Z]*\d+\w*', word):
return m._replace(is_correct=True, mention_supertype='COMPLICATED_GENE_NAME')
if SR['all-canonical-true']:
if m.mapping_type == 'CANONICAL_SYMBOL':
return m._replace(is_correct=True, mention_supertype='%s_ALL_TRUE' % m.mention_supertype)
if SR['all-symbols-true']:
if m.mapping_type in HF['ensembl-mapping-types']:
return m._replace(is_correct=True, mention_supertype='%s_ALL_TRUE' % m.mention_supertype)
if SR.get('neighbor-match'):
opts = SR['neighbor-match']
for name,val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
for neighbor_idx in dep_dag.neighbors(i):
neighbor = row.words[neighbor_idx]
match = util.rgx_mult_search(neighbor, opts[name], opts['%s-rgx' % name], opts[name], opts['%s-rgx' % name], flags=re.I)
if match:
return m._replace(is_correct=val, mention_supertype='NEIGHBOR_MATCH_%s_%s' % (name, val), mention_subtype='Neighbor: ' + neighbor + ', match: ' + match)
return m
def get_negative_mentions(row, mentions, d, per_row_max=2):
"""Generate random / pseudo-random negative examples, trying to keep set approx. balanced"""
negs = []
if d < 0:
return negs
existing_mention_idxs = [m.wordidxs[0] for m in mentions]
for i, word in enumerate(row.words):
if len(negs) > d or len(negs) > per_row_max:
return negs
# skip if an existing mention
if i in existing_mention_idxs:
continue
# Make a template mention object- will have mention_id opt with gene_name appended
mid = '%s_%s_%d_%s' % (row.doc_id, row.section_id, row.sent_id, i)
m = Mention(dd_id=None, doc_id=row.doc_id, section_id=row.section_id, sent_id=row.sent_id, wordidxs=[i], mention_id=mid, mapping_type=None, mention_supertype="RANDOM_NEGATIVE",mention_subtype=None, gene_name=None, words=[word], is_correct=None)
# Non-match all uppercase negative supervision
if word==word.upper() and len(word)>2 and word.isalnum() and not unicode(word).isnumeric():
if random.random() < 0.01*d:
negs.append(m._replace(mention_supertype='ALL_UPPER_NOT_GENE_SYMBOL', is_correct=False))
# Random negative supervision
elif random.random() < 0.005*d:
negs.append(m._replace(mention_supertype='RAND_WORD_NOT_GENE_SYMBOL', is_correct=False))
return negs
if __name__ == '__main__':
# load static data
CACHE['gene_name_to_genes'] = dutil.gene_symbol_to_ensembl_id_map()
CACHE['pubmed_to_genes'] = read_pubmed_to_genes()
# unnecessary currently b/c our doc id is the pmid, thankfully
# CACHE['doi_to_pmid'] = dutil.read_doi_to_pmid()
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
pos_count = 0
neg_count = 0
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# Find candidate mentions & supervise
mentions = extract_candidate_mentions(row)
pos_count += len([m for m in mentions if m.is_correct])
neg_count += len([m for m in mentions if m.is_correct is False])
# add negative supervision
if pos_count > neg_count and SR['rand-negs']:
negs = get_negative_mentions(row, mentions, pos_count - neg_count)
neg_count += len(negs)
mentions += negs
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/gene_extract_candidates.py |
#!/usr/bin/env python
import extractor_util as util
import os
import ddlib
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('mention_id', 'text'),
('mention_wordidxs', 'int[]')])
OPTS = config.PHENO['F']
def get_features_for_candidate(row):
"""Extract features for candidate mention- both generic ones from ddlib & custom features"""
features = []
dds = util.create_ddlib_sentence(row)
# (1) GENERIC FEATURES from ddlib
span = ddlib.Span(begin_word_id=row.mention_wordidxs[0], length=len(row.mention_wordidxs))
features += [(row.doc_id, row.section_id, row.mention_id, feat) \
for feat in ddlib.get_generic_features_mention(dds, span)]
# (2) Add the closest verb by raw distance
if OPTS.get('closest-verb'):
verb_idxs = [i for i,p in enumerate(row.poses) if p.startswith("VB")]
if len(verb_idxs) > 0:
dists = filter(lambda d : d[0] > 0, \
[(min([abs(i-j) for j in row.mention_wordidxs]), i) for i in verb_idxs])
if len(dists) > 0:
verb = row.lemmas[min(dists)[1]]
features.append((row.doc_id, row.section_id, row.mention_id, 'NEAREST_VERB_[%s]' % (verb,)))
return features
# Load in manually defined keywords
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
if __name__ == '__main__':
if OPTS.get('sentence-kws'):
ddlib.load_dictionary(onto_path('manual/pheno_sentence_keywords.tsv'), dict_id='pheno_kws')
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_candidate)
| dd-genomics-master | code/pheno_extract_features.py |
#!/usr/bin/env python
import collections
import extractor_util as util
import data_util as dutil
import dep_util as deps
import os
import random
import re
import sys
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('gene_section_id', 'text'),
('gene_sent_id', 'int'),
('variant_section_id', 'text'),
('variant_sent_id', 'int'),
('gene_words', 'text[]'),
('gene_lemmas', 'text[]'),
('gene_poses', 'text[]'),
('gene_dep_paths', 'text[]'),
('gene_dep_parents', 'int[]'),
('variant_words', 'text[]'),
('variant_lemmas', 'text[]'),
('variant_poses', 'text[]'),
('variant_dep_paths', 'text[]'),
('variant_dep_parents', 'int[]'),
('gene_mention_ids', 'text[]'),
('gene_names', 'text[]'),
('gene_wordidxs', 'int[][]'),
('gene_is_corrects', 'boolean[]'),
('variant_mention_ids', 'text[]'),
('variant_entities', 'text[]'),
('variant_wordidxs', 'int[][]'),
('variant_is_corrects', 'boolean[]')])
# This defines the output Relation object
Relation = collections.namedtuple('Relation', [
'dd_id',
'relation_id',
'doc_id',
'gene_section_id',
'gene_sent_id',
'variant_section_id',
'variant_sent_id',
'gene_mention_id',
'gene_name',
'gene_wordidxs',
'gene_is_correct',
'variant_mention_id',
'variant_entity',
'variant_wordidxs',
'variant_is_correct',
'is_correct',
'relation_supertype',
'relation_subtype'])
### CANDIDATE EXTRACTION ###
def extract_candidate_relations(row):
# HF = config.GENE_VARIANT['HF']
relations = []
# dep_dag = deps.DepPathDAG(row.dep_parents, row.dep_paths, row.words, max_path_len=HF['max-dep-path-dist'])
for i,gid in enumerate(row.gene_mention_ids):
for j,pid in enumerate(row.variant_mention_ids):
r = create_relation(row, i, j)
relations.append(r)
return relations
def create_relation(row, i, j):
gene_mention_id = row.gene_mention_ids[i]
gene_name = row.gene_names[i]
gene_wordidxs = row.gene_wordidxs[i]
gene_is_correct = row.gene_is_corrects[i]
variant_mention_id = row.variant_mention_ids[j]
variant_entity = row.variant_entities[j]
variant_wordidxs = row.variant_wordidxs[j]
variant_is_correct = row.variant_is_corrects[j]
# XXX HACK Johannes: Just set every possible variant-gene pair to true for the moment
is_correct = True
supertype = 'DEFAULT_EVERYTHING_TRUE'
subtype = None
relation_id = '%s_%s' % (gene_mention_id, variant_mention_id)
r = Relation(None, relation_id, row.doc_id, row.gene_section_id, \
row.gene_sent_id, row.variant_section_id, row.variant_sent_id, \
gene_mention_id, gene_name, gene_wordidxs, \
gene_is_correct, variant_mention_id, variant_entity, \
variant_wordidxs, variant_is_correct, is_correct, supertype, subtype)
return r
if __name__ == '__main__':
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# find candidate mentions
relations = extract_candidate_relations(row)
# print output
for relation in relations:
util.print_tsv_output(relation)
| dd-genomics-master | code/genevariant_extract_candidates.py |
#!/usr/bin/env python
import collections
import os
import sys
import abbreviations
import config
import extractor_util as util
import levenshtein
import data_util as dutil
CACHE = dict() # Cache results of disk I/O
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('wordidxs', 'int[]'),
('mention_ids', 'text[]'),
('supertypes', 'text[]'),
('subtypes', 'text[]'),
('entities', 'text[]'),
('words', 'text[]'),
('is_corrects', 'boolean[]'),])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'supertype',
'subtype',
'entity',
'words',
'is_correct'])
hpo_dag = dutil.read_hpo_dag()
def pheno_is_child_of(child_entity, parent_entity):
child_parent_ids = dutil.get_parents(child_entity, hpo_dag)
return parent_entity in child_parent_ids
def filter_phenos(row):
doc_id = None
section_id = None
sent_id = None
wordidxs = None
words = None
cands = []
for i in xrange(len(row.mention_ids)):
if doc_id is None:
doc_id = row.doc_id
assert doc_id == row.doc_id
if section_id is None:
section_id = row.section_id
assert section_id == row.section_id
if sent_id is None:
sent_id = row.sent_id
assert sent_id == row.sent_id
if wordidxs is None:
wordidxs = row.wordidxs
assert wordidxs == row.wordidxs
if words is None:
words = row.words
assert words == row.words
mention_id = row.mention_ids[i]
supertype = row.supertypes[i]
subtype = row.subtypes[i]
entity = row.entities[i]
is_correct = row.is_corrects[i]
current_pheno = Mention(None, doc_id, section_id, sent_id, wordidxs, mention_id, supertype, subtype, entity, words, is_correct)
found = False
for i in xrange(len(cands)):
cand = cands[i]
if pheno_is_child_of(cand.entity, current_pheno.entity):
found = True
break
if pheno_is_child_of(current_pheno.entity, cand.entity):
cands[i] = current_pheno
found = True
break
if not found:
cands.append(current_pheno)
return cands
if __name__ == '__main__':
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# Find candidate mentions & supervise
mentions = filter_phenos(row)
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/pheno_mentions_remove_super_dag_phenos.py |
#! /usr/bin/env python
import config
import sys
if __name__ == "__main__":
disallowed_phrases = config.PHENO['HF']['disallowed-phrases']
for line in sys.stdin:
take = True
for dp in disallowed_phrases:
if dp in line.lower():
take = False
break
if take:
sys.stdout.write(line)
| dd-genomics-master | code/create_allowed_diseases_list.py |
"""Miscellaneous shared tools for extractors."""
import os
import re
import sys
import ddlib
import traceback
FIX_DEP_PARENTS = True
def rgx_comp(strings=[], rgxs=[]):
r = r'|'.join(re.escape(w) for w in strings)
if len(rgxs) > 0:
if len(strings) > 0:
r += r'|'
r += r'(' + r')|('.join(rgxs) + r')'
return r
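# Usage sketch (hypothetical patterns): plain strings are re.escape'd, regexes are
# wrapped in groups, and everything is OR'd into one pattern, e.g.
#   rgx_comp(strings=['a+b'], rgxs=[r'\d+'])  # -> r'a\+b|(\d+)'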
# XXX HACK Johannes: Catching regex exceptions and then continuing is not the nicest way to handle this,
# but after a single regex error in the middle of a 5-hour run once caused the whole extractor to fail,
# I'd rather have the extractor continue with a wrong value at that single position ...
def rgx_mult_search(phrase, strings, rgxs, orig_strings, orig_rgxs, flags=re.I):
for i, s in enumerate(strings):
try:
regex = re.escape(s)
if re.search(regex, phrase, flags):
return orig_strings[i]
except Exception:
traceback.print_exc()
print regex
for i, s in enumerate(rgxs):
try:
regex = s
if re.search(regex, phrase, flags):
return orig_rgxs[i]
except Exception:
traceback.print_exc()
print regex
return None
# HACK[Alex]: this is probably justified but a bit hacky still...
def skip_row(row):
"""
  Dynamically filter out sentences that we currently want to skip, uniformly across all extractors.
  NOTE: we could do this as a preprocessing step, but since this is a bit of a hack it should be
  more transparent here...
  Assumes the Row object has words and poses attributes.
"""
  # Hack[Alex]: upper limit for sentence length, specifically to deal with preprocessing errors
if len(row.words) > 90:
return True
# Require that there is a verb in the sentence
try:
if not any(pos.startswith("VB") for pos in row.poses):
return True
except AttributeError:
pass
# Filter out by certain specific identifying tokens
exclude_strings = ['http://', 'https://']
exclude_patterns = ['\w+\.(com|org|edu|gov)']
for ex in [re.escape(s) for s in exclude_strings] + exclude_patterns:
for word in row.words:
assert isinstance(ex, basestring), str(ex)
assert isinstance(word, basestring), str(row.words) + '\n' + str(row.lemmas) + '\n' + str(word)
if re.search(ex, word, re.I | re.S):
return True
return False
APP_HOME = os.environ['GDD_HOME']
def print_error(err_string):
"""Function to write to stderr"""
sys.stderr.write("ERROR[UDF]: " + str(err_string) + "\n")
def tsv_string_to_list(s, func=lambda x : x, sep='|^|'):
"""Convert a TSV string from the sentences_input table to a list,
optionally applying a fn to each element"""
if s.strip() == "":
return []
# Auto-detect separator
if re.search(r'^\{|\}$', s):
split = re.split(r'\s*,\s*', re.sub(r'^\{\s*|\s*\}$', '', s))
else:
split = s.split(sep)
# split and apply function
return [func(x) for x in split]
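# Usage sketch (hypothetical inputs): both the '|^|'-delimited DeepDive format and raw
# Postgres array syntax are handled, e.g.
#   tsv_string_to_list('foo|^|bar|^|baz')    # -> ['foo', 'bar', 'baz']
#   tsv_string_to_list('{1,2,3}', func=int)  # -> [1, 2, 3]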
def tsv_string_to_listoflists(s, func=lambda x : x, sep1='|^|', sep2='|~|'):
"""Convert a TSV string from sentences_input table to a list of lists"""
return tsv_string_to_list(s, func=lambda x : tsv_string_to_list(x, func=func, sep=sep1), sep=sep2)
class Row:
def __str__(self):
return '<Row(' + ', '.join("%s=%s" % x for x in self.__dict__.iteritems()) + ')>'
def __repr__(self):
return str(self)
def bool_parser(b):
b = b.strip()
if b == 't':
return True
elif b == 'f':
return False
elif b == 'NULL' or b == '\\N':
return None
else:
raise Exception("Unrecognized bool type in RowParser:bool_parser: %s" % b)
# NOTE: array_to_string doesn't work well for bools! Just pass psql array out!
RP_PARSERS = {
'text' : lambda x : str(x.replace('\n', ' ')),
'text[]' : lambda x : tsv_string_to_list(x.replace('\n', ' ')),
'int' : lambda x : int(x.strip()),
'int[]' : lambda x : tsv_string_to_list(x, func=lambda x: int(x.strip())),
'int[][]' : lambda x : tsv_string_to_listoflists(x, func=lambda x: int(x.strip())),
'boolean' : lambda x : bool_parser(x),
'boolean[]' : lambda x : tsv_string_to_list(x, func=bool_parser)
}
class RowParser:
"""
Initialized with a list of duples (field_name, field_type)- see RP_PARSERS dict
Is a factory for simple Row class parsed from e.g. tsv input lines
"""
def __init__(self, fields):
self.fields = fields
def parse_tsv_row(self, line):
row = Row()
cols = line.split('\t')
for i, col in enumerate(cols):
field_name, field_type = self.fields[i]
if RP_PARSERS.has_key(field_type):
val = RP_PARSERS[field_type](col)
if FIX_DEP_PARENTS:
if field_name == 'dep_parents' and field_type == 'int[]':
for i in xrange(0, len(val)):
val[i] -= 1
else:
raise Exception("Unsupported type %s for RowParser class- please add.")
setattr(row, field_name, val)
return row
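# Usage sketch (hypothetical fields and input line): a RowParser is constructed from
# (field_name, field_type) pairs and turns one TSV line into a Row object, e.g.
#   p = RowParser([('doc_id', 'text'), ('sent_id', 'int'), ('words', 'text[]')])
#   r = p.parse_tsv_row('doc1\t3\tThe|^|cat|^|sat')
#   r.doc_id, r.sent_id, r.words  # -> ('doc1', 3, ['The', 'cat', 'sat'])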
def create_ddlib_sentence(row):
"""Create a list of ddlib.Word objects from input row."""
sentence = []
for i, word in enumerate(row.words):
sentence.append(ddlib.Word(
begin_char_offset=None,
end_char_offset=None,
word=word,
lemma=row.lemmas[i],
pos=row.poses[i],
ner=row.ners[i],
dep_par=row.dep_parents[i],
dep_label=row.dep_paths[i]))
return sentence
def pg_array_escape(tok):
"""
Escape a string that's meant to be in a Postgres array.
We double-quote the string and escape backslashes and double-quotes.
"""
return '"%s"' % str(tok).replace('\\', '\\\\').replace('"', '\\\\"')
def list_to_pg_array(l):
"""Convert a list to a string that PostgreSQL's COPY FROM understands."""
return '{%s}' % ','.join(pg_array_escape(x) for x in l)
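# Usage sketch (hypothetical values): elements are double-quoted with backslashes and
# quotes escaped so that Postgres COPY FROM can ingest them, e.g.
#   list_to_pg_array(['a', 'b c'])  # -> '{"a","b c"}'
#   list_to_pg_array([1, 2])        # -> '{"1","2"}'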
def print_tsv_output(out_record):
"""Print a tuple as output of TSV extractor."""
values = []
for x in out_record:
if isinstance(x, list) or isinstance(x, tuple):
cur_val = list_to_pg_array(x)
elif x is None:
cur_val = '\N'
else:
cur_val = x
values.append(cur_val)
print '\t'.join(str(x) for x in values)
def run_main_tsv(row_parser, row_fn):
"""
Runs through lines in sys.stdin, applying row_fn(row_parser(line))
Assumes that this outputs a list of rows, which get printed out in tsv format
Has standard error handling for malformed rows- optimally row_fn returns object with pretty print
"""
for line in sys.stdin:
for line_out in row_fn(row_parser(line)):
print_tsv_output(line_out)
| dd-genomics-master | code/extractor_util.py |
'''
Created on Aug 5, 2015
@author: jbirgmei
'''
# from wikipedia. let's hope it works
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
if __name__ == "__main__":
print levenshtein("asdf", "assdf")
print levenshtein("asdf", "asdf")
print levenshtein("asdf", "qwer") | dd-genomics-master | code/levenshtein.py |
#! /usr/bin/env python
import sys
from data_util import get_hpo_phenos, get_parents, read_hpo_dag, read_hpo_synonyms
if __name__ == "__main__":
hpo_dag = read_hpo_dag()
names = read_hpo_synonyms(1)
synonyms = read_hpo_synonyms()
allowed_phenos = set(get_hpo_phenos(hpo_dag))
for hpo_id in allowed_phenos.copy():
parent_ids = get_parents(hpo_id, hpo_dag) # includes the original hpo_id
assert hpo_id in parent_ids
if 'HP:0000118' not in parent_ids:
sys.stderr.write('"{0}": not a phenotypic abnormality\n'.format(hpo_id.strip()))
continue
parent_ids.remove('HP:0000118')
for parent_id in parent_ids:
allowed_phenos.add(parent_id)
for hpo_id in allowed_phenos:
print "%s\t%s\t%s" % (hpo_id, '|^|'.join(names[hpo_id]), '|^|'.join(synonyms[hpo_id]))
| dd-genomics-master | code/create_allowed_phenos_list.py |
from collections import defaultdict
# TODO: handle negations (neg, advmod + neg word) specially!
# See: http://nlp.stanford.edu/software/dependencies_manual.pdf
MAX_PATH_LEN = 100
class DepPathDAG:
def __init__(self, dep_parents, dep_paths, words, max_path_len=None, no_count_tags=('conj',), no_count_words=('_','*',)):
self.max_path_len = max_path_len
self.no_count_tags = tuple(no_count_tags)
self.no_count_words = no_count_words
self.words = words
self.edges = defaultdict(list)
self.edge_labels = {}
for i,dp in enumerate(dep_parents):
if dp >= 0:
self.edges[i].append(dp)
self.edges[dp].append(i)
self.edge_labels[(i,dp)] = dep_paths[i]
self.edge_labels[(dp,i)] = dep_paths[i]
def _path_len(self, path):
"""Get the length of a list of nodes, skipping counting of certain dep path types"""
l = 1
for i in range(len(path)-1):
if self.edge_labels[(path[i], path[i+1])].startswith(self.no_count_tags):
continue
if self.words[i] in self.no_count_words:
continue
l += 1
return l
def min_path(self, i, j, path=None):
if path is None:
path = []
if self.max_path_len and len(path) > self.max_path_len:
return None
min_path_len = MAX_PATH_LEN
min_path = None
for node in self.edges[i]:
if node in path:
continue
elif node == j:
return [j]
else:
p = self.min_path(node, j, path+[i])
if p and self._path_len(p) < min_path_len:
min_path = [node] + p
min_path_len = self._path_len(min_path)
return min_path
def min_path_sets(self, idx, jdx):
"""Return the minimum path between the closest members of two sets of indexes"""
min_path_len = None
min_path = None
if len(idx) == 0 or len(jdx) == 0:
return min_path
for i in idx:
for j in jdx:
p = self.min_path(i,j)
l = self._path_len(p) if p else None
if l and (min_path_len is None or l < min_path_len):
min_path_len = l
min_path = p
return min_path
def path_len(self, i, j):
"""Get the 'path length' i.e. the length of the min path between i and j"""
min_path = self.min_path(i, j)
return self._path_len(min_path) if min_path else None
def path_len_sets(self, idx, jdx):
"""Return the path length (length of minimum path) between the closest
members of two sets of indexes"""
min_path = self.min_path_sets(idx,jdx)
return self._path_len(min_path) if min_path else None
def neighbors(self, idx):
"""Return the indices or neighboring words (0-indexed return value)"""
rv = []
for i in xrange(0, len(self.edges)):
if idx in self.edges[i]:
rv.append(i)
return rv
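# Usage sketch (toy sentence; dep_parents are 0-indexed with -1 for the root, as after
# the RowParser dep_parents fix-up): for "mutations cause disease" with both nouns
# attached to the verb,
#   dag = DepPathDAG(dep_parents=[1, -1, 1],
#                    dep_paths=['nsubj', 'root', 'dobj'],
#                    words=['mutations', 'cause', 'disease'])
#   dag.min_path(0, 2)           # -> [1, 2] (the path runs through the verb)
#   dag.path_len_sets([0], [2])  # -> 2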
| dd-genomics-master | code/dep_util.py |
#! /usr/bin/env python
import dep_util
import extractor_util as util
import sys
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'text'),
('dep_parents', 'int[]'),
('dep_paths', 'text[]'),
('words', 'text[]')])
if __name__ == "__main__":
for line in sys.stdin:
row = parser.parse_tsv_row(line)
dpd = dep_util.DepPathDAG(row.dep_parents, row.dep_paths, row.words)
for i in xrange(0, len(row.words)):
sys.stderr.write(str((i, row.words[i], dpd.neighbors(i), [row.words[i] for i in dpd.neighbors(i)])) + '\n')
| dd-genomics-master | code/test_nlp.py |
#!/usr/bin/env python
import collections
import os
import sys
import abbreviations
import config
import extractor_util as util
import levenshtein
CACHE = dict() # Cache results of disk I/O
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('pheno_wordidxs', 'int[]'),
('entity', 'text')])
# This defines the output Mention object
Mention = collections.namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'short_wordidxs',
'long_wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'abbrev_word',
'definition_words',
'entity',
'is_correct'])
### CANDIDATE EXTRACTION ###
# HF = config.PHENO_ACRONYMS['HF']
SR = config.PHENO_ACRONYMS['SR']
def extract_candidate_mentions(row, pos_count, neg_count):
mentions = []
if max(row.pheno_wordidxs) + 2 < len(row.words) and len(row.words[max(row.pheno_wordidxs) + 2]) > 0:
for (is_correct, abbrev, definition, detector_message) in abbreviations.getabbreviations(row.words, abbrev_index=max(row.pheno_wordidxs) + 2):
m = create_supervised_mention(row, is_correct, abbrev, definition, detector_message, pos_count, neg_count)
if m:
mentions.append(m)
return mentions
### DISTANT SUPERVISION ###
VALS = config.PHENO_ACRONYMS['vals']
def create_supervised_mention(row, is_correct,
(start_abbrev, stop_abbrev, abbrev),
(start_definition, stop_definition,
definition), detector_message, pos_count,
neg_count):
assert stop_abbrev == start_abbrev + 1
mid = '%s_%s_%s_%s' % (row.doc_id, row.section_id, row.sent_id, start_abbrev)
include = None
if is_correct:
supertype = 'TRUE_DETECTOR'
subtype = None
elif is_correct is False:
supertype = 'FALSE_DETECTOR'
subtype = detector_message
else:
supertype = 'DETECTOR_OMITTED_SENTENCE'
subtype = None
include = False
if include is not False and is_correct and abbrev.islower():
is_correct = False
supertype = 'FALSE_ALL_LOWERCASE'
subtype = None
if include is not False and is_correct and abbrev in SR['short-words']:
is_correct = False
supertype = 'FALSE_SHORT_WORD'
subtype = None
if include is not False and is_correct and abbrev in SR['bad-pheno-names']:
is_correct = False
supertype = 'FALSE_BAD_PHENO_NAME'
subtype = None
if include is True or (include is not False and (is_correct is True or (is_correct is False and neg_count < pos_count))):
m = Mention(None, row.doc_id, row.section_id,
row.sent_id, [i for i in xrange(start_abbrev, stop_abbrev + 1)],
[i for i in xrange(start_definition, stop_definition + 1)],
mid, supertype, subtype, abbrev, definition, row.entity.strip(), is_correct);
else:
m = None
return m
if __name__ == '__main__':
# load static data
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
# generate the mentions, while trying to keep the supervision approx. balanced
# print out right away so we don't bloat memory...
pos_count = 0
neg_count = 0
for line in sys.stdin:
row = parser.parse_tsv_row(line)
try:
if '-LRB-' not in row.words[row.pheno_wordidxs[len(row.pheno_wordidxs)-1] + 1]:
continue
except:
pass
#print >> sys.stderr, 'error in condition for extractor pheno_acronyms extract candidates'
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# Find candidate mentions & supervise
mentions = extract_candidate_mentions(row, pos_count, neg_count)
pos_count += len([m for m in mentions if m.is_correct])
neg_count += len([m for m in mentions if m.is_correct is False])
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | code/pheno_acronyms_extract_candidates.py |
#!/usr/bin/env python
from collections import namedtuple
import extractor_util as util
import os
import sys
import ddlib
import re
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('mention_id', 'text'),
('mention_type', 'text'),
('short_wordidxs', 'int[]'),
('long_wordidxs', 'int[]')])
Feature = namedtuple('Feature', ['doc_id', 'section_id', 'mention_id', 'name'])
def get_features_for_row(row):
OPTS = config.PHENO_ACRONYMS['F']
features = []
f = Feature(doc_id=row.doc_id, section_id=row.section_id, mention_id=row.mention_id, name=None)
# (1) Get generic ddlib features
sentence = util.create_ddlib_sentence(row)
allWordIdxs = row.short_wordidxs + row.long_wordidxs
start = min(allWordIdxs)
length = max(allWordIdxs) - start
span = ddlib.Span(begin_word_id=start, length=length)
assert len(span) > 0, row
assert start+length < len(row.words), (start+length, len(row.words), row)
generic_features = [f._replace(name=feat) for feat in ddlib.get_generic_features_mention(sentence, span)]
# Optionally filter out some generic features
if OPTS.get('exclude_generic'):
    generic_features = filter(lambda feat : not feat.name.startswith(tuple(OPTS['exclude_generic'])), generic_features)
features += generic_features
return features
if __name__ == '__main__':
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_row)
| dd-genomics-master | code/pheno_acronyms_extract_features.py |
#!/usr/bin/env python
from collections import namedtuple
import extractor_util as util
import ddlib
import re
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('mention_id', 'text'),
('mention_type', 'text'),
('mention_wordidxs', 'int[]')])
Feature = namedtuple('Feature', ['doc_id', 'section_id', 'mention_id', 'name'])
ENSEMBL_TYPES = ['NONCANONICAL', 'CANONICAL', 'REFSEQ']
def get_custom_features(row):
gene_word = row.words[row.mention_wordidxs[0]]
if re.match('^[ATGCN]{1,5}$', gene_word):
yield 'GENE_ONLY_BASES'
def get_features_for_row(row):
#OPTS = config.GENE['F']
features = []
f = Feature(doc_id=row.doc_id, section_id=row.section_id, mention_id=row.mention_id, name=None)
# (1) Get generic ddlib features
sentence = util.create_ddlib_sentence(row)
span = ddlib.Span(begin_word_id=row.mention_wordidxs[0], length=len(row.mention_wordidxs))
generic_features = [f._replace(name=feat) for feat in ddlib.get_generic_features_mention(sentence, span)]
features += generic_features
features += [f._replace(name=feat) for feat in get_custom_features(row)]
# (2) Include gene type as a feature
# Note: including this as feature creates massive overfitting, for obvious reasons
# We need neg supervision of canonical & noncanonical symbols, then can / should try adding this feature
"""
for t in ENSEMBL_TYPES:
if re.search(re.escape(t), row.mention_type, flags=re.I):
features.append(f._replace(name='GENE_TYPE[%s]' % t))
break
"""
return features
if __name__ == '__main__':
util.run_main_tsv(row_parser=parser.parse_tsv_row, row_fn=get_features_for_row)
| dd-genomics-master | code/gene_extract_features.py |
#!/usr/bin/env python
import collections
import extractor_util as util
import data_util as dutil
import dep_util as deps
import os
import random
import re
import sys
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('dep_paths', 'text[]'),
('dep_parents', 'int[]'),
('genevar_mention_ids', 'text[]'),
('genevar_entities', 'text[]'),
('genevar_wordidxs', 'int[][]'),
('genevar_is_corrects', 'boolean[]'),
('pheno_mention_ids', 'text[]'),
('pheno_entities', 'text[]'),
('pheno_wordidxs', 'int[][]'),
('pheno_is_corrects', 'boolean[]')])
# This defines the output Relation object
Relation = collections.namedtuple('Relation', [
'dd_id',
'relation_id',
'doc_id',
'section_id',
'sent_id',
'genevar_mention_id',
'genevar_entity',
'genevar_wordidxs',
'genevar_is_correct',
'pheno_mention_id',
'pheno_entity',
'pheno_wordidxs',
'pheno_is_correct',
'is_correct',
'supertype',
'subtype'])
### CANDIDATE EXTRACTION ###
def extract_candidate_relations(row):
"""
Given a row object having a sentence and some associated N genevar and M phenotype mention
candidates, pick a subset of the N*M possible genevar-phenotype relations to return as
candidate relations
"""
HF = config.GENE_VARIANT_PHENO['HF']
relations = []
# Create a dependencies DAG for the sentence
dep_dag = deps.DepPathDAG(row.dep_parents, row.dep_paths, row.words, max_path_len=HF['max-dep-path-dist'])
# Create the list of possible G,P pairs with their dependency path distances
pairs = []
for i,gid in enumerate(row.genevar_mention_ids):
for j,pid in enumerate(row.pheno_mention_ids):
# Do not consider overlapping mention pairs
if len(set(row.genevar_wordidxs[i]).intersection(row.pheno_wordidxs[j])) > 0:
continue
# Get the min path length between any of the g / p phrase words
l = dep_dag.path_len_sets(row.genevar_wordidxs[i], row.pheno_wordidxs[j])
pairs.append([l, i, j])
# Select which of the pairs will be considered
pairs.sort()
seen_g = {}
seen_p = {}
seen_pairs = {}
for p in pairs:
d, i, j = p
# If the same entity occurs several times in a sentence, only take best one
if HF.get('take-best-only-dups'):
e = '%s_%s' % (row.genevar_entities[i], row.pheno_entities[j])
if e in seen_pairs and d > seen_pairs[e]:
continue
else:
seen_pairs[e] = d
# Only take the set of best pairs which still provides coverage of all entities
if HF.get('take-best-only'):
if (i in seen_g and seen_g[i] < d) or (j in seen_p and seen_p[j] < d):
continue
seen_g[i] = d
seen_p[j] = d
r = create_supervised_relation(row, i, j, dep_dag)
if r:
relations.append(r)
return relations
def create_supervised_relation(row, i, j, dep_dag=None):
"""
Given a Row object with a sentence and several genevar and pheno objects, create
a supervised Relation output object for the ith genevar and jth pheno objects
"""
SR = config.GENEVAR_PHENO['SR']
gvid = row.genevar_mention_ids[i]
gv_entity = row.genevar_entities[i]
gv_wordidxs = row.genevar_wordidxs[i]
gv_is_correct = row.genevar_is_corrects[i]
pid = row.pheno_mention_ids[j]
p_entity = row.pheno_entities[j]
p_wordidxs = row.pheno_wordidxs[j]
p_is_correct = row.pheno_is_corrects[j]
relation_id = '%s_%s' % (gvid, pid)
r = Relation(None, relation_id, row.doc_id, row.section_id, row.sent_id, gvid, gv_entity, \
gv_wordidxs, gv_is_correct, pid, p_entity, p_wordidxs, p_is_correct, None, None, None)
if SR.get('gv-or-p-false'):
opts = SR['gv-or-p-false']
if gv_is_correct == False or p_is_correct == False:
if random.random() < opts['rand']:
return r._replace(is_correct=False, supertype='GV_ANDOR_P_FALSE', subtype='gv_is_correct: %s, p_is_correct: %s' % (gv_is_correct, p_is_correct))
else:
return None
if SR.get('clinvar-sup'):
if p_entity in CLINVAR_SUP[gv_entity]:
return r._replace(is_correct=True, supertype='CLINVAR_SUP')
return r
if __name__ == '__main__':
CLINVAR_SUP = dutil.load_hgvs_to_hpo()
for line in sys.stdin:
row = parser.parse_tsv_row(line)
relations = extract_candidate_relations(row)
for relation in relations:
util.print_tsv_output(relation)
| dd-genomics-master | code/variantpheno_extract_candidates.py |
#! /usr/bin/env python
#
# This file contains the generic features library that is included with ddlib.
#
# The three functions that a user should want to use are load_dictionary,
# get_generic_features_mention, and get_generic_features_relation.
# All the rest should be considered more or less private, except perhaps the
# get_sentence method, which is actually just a wrapper around unpack_words.
#
# Matteo, December 2014
#
from dd import dep_path_between_words, materialize_span, Span, unpack_words
MAX_KW_LENGTH = 3
dictionaries = dict()
def load_dictionary(filename, dict_id="", func=lambda x: x):
"""Load a dictionary to be used for generic features.
Returns the id used to identify the dictionary.
Args:
filename: full path to the dictionary. The dictionary is actually a set
of words, one word per line.
dict_id: (optional) specify the id to be used to identify the
dictionary. By default it is a sequential number.
func: (optional) A function to be applied to each row of the file
"""
if dict_id == "":
dict_id = str(len(dictionaries))
with open(filename, 'rt') as dict_file:
dictionary = set()
for line in dict_file:
dictionary.add(func(line.strip()))
dictionary = frozenset(dictionary)
dictionaries[str(dict_id)] = dictionary
return str(dict_id)
def load_dictionary_map(synonyms):
for key in synonyms:
syn = synonyms[key]
dictionaries[key] = frozenset(syn)
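# Illustrative usage sketch (comments only; added for documentation, not part
# of the original ddlib). The file path and dictionary ids below are
# hypothetical:
#
#   gene_dict_id = load_dictionary('/path/to/gene_names.txt', dict_id='genes',
#                                  func=lambda w: w.lower())
#   load_dictionary_map({'diseases': ['cancer', 'diabetes']})
#
# Once loaded, both dictionaries feed the KW_IND_*/IN_DICT_* features yielded
# by the generators below.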
def get_generic_features_mention(sentence, span, length_bin_size=5):
"""Yield 'generic' features for a mention in a sentence.
Args:
sentence: a list of Word objects
span: a Span namedtuple
length_bin_size: the size of the bins for the length feature
"""
# Mention sequence features (words, lemmas, ners, and poses)
for seq_feat in get_seq_features(sentence, span):
yield seq_feat
# Window (left and right, up to size 3, with combinations) around the
# mention
for window_feat in get_window_features(sentence, span):
yield window_feat
# Is (substring of) mention in a dictionary?
for dict_indicator_feat in get_dictionary_indicator_features(
sentence, span):
yield dict_indicator_feat
# Dependency path(s) from mention to keyword(s). Various transformations of
# the dependency path are done.
for (i, j) in get_substring_indices(len(sentence), MAX_KW_LENGTH):
if i >= span.begin_word_id and i < span.begin_word_id + span.length:
continue
if j > span.begin_word_id and j < span.begin_word_id + span.length:
continue
is_in_dictionary = False
for dict_id in dictionaries:
if " ".join(map(lambda x: str(x.lemma), sentence[i:j])) in \
dictionaries[dict_id]:
is_in_dictionary = True
yield "KW_IND_[" + dict_id + "]"
break
if is_in_dictionary:
kw_span = Span(begin_word_id=i, length=j-i)
for dep_path_feature in get_min_dep_path_features(
sentence, span, kw_span, "KW"):
yield dep_path_feature
# The mention starts with a capital
if sentence[span.begin_word_id].word[0].isupper():
yield "STARTS_WITH_CAPITAL"
# Length of the mention
length = len(" ".join(materialize_span(sentence, span, lambda x: x.word)))
bin_id = length // length_bin_size
length_feat = "LENGTH_" + str(bin_id)
yield length_feat
def get_generic_features_relation(sentence, span1, span2, length_bin_size=5):
"""Yield 'generic' features for a relation in a sentence.
Args:
sentence: a list of Word objects
span1: the first Span of the relation
span2: the second Span of the relation
length_bin_size: the size of the bins for the length feature
"""
# Check whether the order of the spans is inverted. We use this information
# to add a prefix to *all* the features.
order = sorted([
span1.begin_word_id, span1.begin_word_id + span1.length,
span2.begin_word_id, span2.begin_word_id + span2.length])
begin = order[0]
betw_begin = order[1]
betw_end = order[2]
end = order[3]
if begin == span2.begin_word_id:
inverted = "INV_"
yield "IS_INVERTED"
else:
inverted = ""
betw_span = Span(begin_word_id=betw_begin, length=betw_end - betw_begin)
covering_span = Span(begin_word_id=begin, length=end - begin)
# Words, Lemmas, Ners, and Poses sequence between the mentions
for seq_feat in get_seq_features(sentence, betw_span):
yield inverted + seq_feat
# Window feature (left and right, up to size 3, combined)
for window_feat in get_window_features(
sentence, covering_span, isolated=False):
yield inverted + window_feat
# Ngrams of up to size 3 between the mentions
for ngram_feat in get_ngram_features(sentence, betw_span):
yield inverted + ngram_feat
# Indicator features of whether the mentions are in dictionaries
found1 = False
for feat1 in get_dictionary_indicator_features(
sentence, span1, prefix=inverted + "IN_DICT"):
found1 = True
found2 = False
for feat2 in get_dictionary_indicator_features(
sentence, span2, prefix=""):
found2 = True
yield feat1 + feat2
if not found2:
yield feat1 + "_[_NONE]"
if not found1:
for feat2 in get_dictionary_indicator_features(
sentence, span2, prefix=""):
found2 = True
yield inverted + "IN_DICT_[_NONE]" + feat2
# Dependency path (and transformations) between the mention
for betw_dep_path_feature in get_min_dep_path_features(
sentence, span1, span2, inverted + "BETW"):
yield betw_dep_path_feature
# Dependency paths (and transformations) between the mentions and keywords
for (i, j) in get_substring_indices(len(sentence), MAX_KW_LENGTH):
if (i >= begin and i < betw_begin) or (i >= betw_end and i < end):
continue
if (j > begin and j <= betw_begin) or (j > betw_end and j <= end):
continue
is_in_dictionary = False
for dict_id in dictionaries:
if " ".join(map(lambda x: str(x.lemma), sentence[i:j])) in \
dictionaries[dict_id]:
is_in_dictionary = True
yield inverted + "KW_IND_[" + dict_id + "]"
break
if is_in_dictionary:
kw_span = Span(begin_word_id=i, length=j-i)
path1 = get_min_dep_path(sentence, span1, kw_span)
lemmas1 = []
labels1 = []
for edge in path1:
lemmas1.append(str(edge.word2.lemma))
labels1.append(edge.label)
both1 = []
for j in range(len(labels1)):
both1.append(labels1[j])
both1.append(lemmas1[j])
both1 = both1[:-1]
path2 = get_min_dep_path(sentence, span2, kw_span)
lemmas2 = []
labels2 = []
for edge in path2:
lemmas2.append(str(edge.word2.lemma))
labels2.append(edge.label)
both2 = []
for j in range(len(labels2)):
both2.append(labels2[j])
both2.append(lemmas2[j])
both2 = both2[:-1]
yield inverted + "KW_[" + " ".join(both1) + "]_[" + \
" ".join(both2) + "]"
yield inverted + "KW_L_[" + " ".join(labels1) + "]_[" + \
" ".join(labels2) + "]"
for j in range(1, len(both1), 2):
for dict_id in dictionaries:
if both1[j] in dictionaries[dict_id]:
both1[j] = "DICT_" + str(dict_id)
break # Picking up the first dictionary we find
for j in range(1, len(both2), 2):
for dict_id in dictionaries:
if both2[j] in dictionaries[dict_id]:
both2[j] = "DICT_" + str(dict_id)
break # Picking up the first dictionary we find
yield inverted + "KW_D_[" + " ".join(both1) + "]_[" + \
" ".join(both2) + "]"
# The mentions start with a capital letter
first_capital = sentence[span1.begin_word_id].word[0].isupper()
second_capital = sentence[span2.begin_word_id].word[0].isupper()
capital_feat = inverted + "STARTS_WITH_CAPITAL_[" + str(first_capital) + \
"_" + str(second_capital) + "]"
yield capital_feat
# The lengths of the mentions
first_length = len(" ".join(materialize_span(
sentence, span1, lambda x: str(x.word))))
second_length = len(" ".join(materialize_span(
sentence, span2, lambda x: str(x.word))))
first_bin_id = first_length // length_bin_size
second_bin_id = second_length // length_bin_size
length_feat = inverted + "LENGTHS_[" + str(first_bin_id) + "_" + \
str(second_bin_id) + "]"
yield length_feat
def get_substring_indices(_len, max_substring_len):
"""Yield the start-end indices for all substrings of a sequence with length
_len, up to length max_substring_len"""
for start in range(_len):
for end in reversed(range(start + 1, min(
_len, start + 1 + max_substring_len))):
yield (start, end)
def get_ngram_features(sentence, span, window=3):
"""Yields ngram features. These are all substrings of size up to window in
the part of the sentence covered by the span.
In a typical usage, the span covers the words between two mentions, so
this function returns all ngrams of size up to window between the two
mentions
Args:
sentence: a list of Word objects
span: the Span identifying the area for generating the substrings
window: maximum size of a substring
"""
for i in range(span.begin_word_id, span.begin_word_id + span.length):
for j in range(1, window + 1):
if i+j <= span.begin_word_id + span.length:
yield "NGRAM_" + str(j) + "_[" + " ".join(
map(lambda x: str(x.lemma), sentence[i:i+j])) + "]"
def get_min_dep_path(sentence, span1, span2):
"""Return the shortest dependency path between two Span objects
Args:
sentence: a list of Word objects
span1: the first Span
span2: the second Span
Returns: a list of DepEdge objects
"""
min_path = None
min_path_length = 200 # ridiculously high number?
for i in range(span1.begin_word_id, span1.begin_word_id + span1.length):
for j in range(
span2.begin_word_id, span2.begin_word_id + span2.length):
p = dep_path_between_words(sentence, i, j)
if len(p) < min_path_length:
                min_path_length = len(p)  # remember the best length so only strictly shorter paths replace it
                min_path = p
return min_path
def get_min_dep_path_features(sentence, span1, span2, prefix="BETW_"):
"""Yield the minimum dependency path features between two Span objects.
Various variants of the dependency path are yielded:
- using both labels and lemmas,
- using only labels
- using labels and lemmas, but with lemmas replaced by dict_id if the
lemma is in a dictionary
Args:
sentence: a list of Word objects
span1: the first Span
span2: the second Span
prefix: string prepended to all features
"""
min_path = get_min_dep_path(sentence, span1, span2)
if min_path:
min_path_lemmas = []
min_path_labels = []
for edge in min_path:
min_path_lemmas.append(str(edge.word2.lemma))
min_path_labels.append(str(edge.label))
both = []
for j in range(len(min_path_labels)):
both.append(min_path_labels[j])
both.append(min_path_lemmas[j])
both = both[:-1]
yield prefix + "_[" + " ".join(both) + "]"
yield prefix + "_L_[" + " ".join(min_path_labels) + "]"
for j in range(1, len(both), 2):
for dict_id in dictionaries:
if both[j] in dictionaries[dict_id]:
both[j] = "DICT_" + str(dict_id)
break # Picking up the first dictionary we find
yield prefix + "_D_[" + " ".join(both) + "]"
def get_seq_features(sentence, span):
"""Yield the sequence features in a Span
These include:
- words sequence in the span
- lemmas sequence in the span
- NER tags sequence in the span
- POS tags sequence in the span
Args:
sentence: a list of Word objects
span: the Span
"""
word_seq_feat = "WORD_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: x.word)) + "]"
yield word_seq_feat
lemma_seq_feat = "LEMMA_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: str(x.lemma))) + "]"
yield lemma_seq_feat
ner_seq_feat = "NER_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: str(x.ner))) + "]"
yield ner_seq_feat
pos_seq_feat = "POS_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: str(x.pos))) + "]"
yield pos_seq_feat
def get_window_features(
sentence, span, window=3, combinations=True, isolated=True):
"""Yield the window features around a Span
These are basically the n-grams around the span, up to a window of size
'window'
Args:
sentence: a list of Word objects
span: the span
window: the maximum size of the window
combinations: Whether to yield features that combine the windows on
the left and on the right
isolated: Whether to yield features that do not combine the windows on
the left and on the right
"""
span_end_idx = span.begin_word_id + span.length - 1
left_lemmas = []
left_ners = []
right_lemmas = []
right_ners = []
try:
for i in range(1, window + 1):
lemma = str(sentence[span.begin_word_id - i].lemma)
try:
float(lemma)
lemma = "_NUMBER"
except ValueError:
pass
left_lemmas.append(lemma)
left_ners.append(str(sentence[span.begin_word_id - i].ner))
except IndexError:
pass
left_lemmas.reverse()
left_ners.reverse()
try:
for i in range(1, window + 1):
lemma = str(sentence[span_end_idx + i].lemma)
try:
float(lemma)
lemma = "_NUMBER"
except ValueError:
pass
right_lemmas.append(lemma)
right_ners.append(str(sentence[span_end_idx + i].ner))
except IndexError:
pass
if isolated:
for i in range(len(left_lemmas)):
yield "W_LEFT_" + str(i+1) + "_[" + " ".join(left_lemmas[-i-1:]) + \
"]"
yield "W_LEFT_NER_" + str(i+1) + "_[" + " ".join(left_ners[-i-1:]) +\
"]"
for i in range(len(right_lemmas)):
yield "W_RIGHT_" + str(i+1) + "_[" + " ".join(right_lemmas[:i+1]) +\
"]"
yield "W_RIGHT_NER_" + str(i+1) + "_[" + \
" ".join(right_ners[:i+1]) + "]"
if combinations:
for i in range(len(left_lemmas)):
curr_left_lemmas = " ".join(left_lemmas[-i-1:])
try:
curr_left_ners = " ".join(left_ners[-i-1:])
except TypeError:
new_ners = []
for ner in left_ners[-i-1:]:
to_add = ner
if not to_add:
to_add = "None"
new_ners.append(to_add)
curr_left_ners = " ".join(new_ners)
for j in range(len(right_lemmas)):
curr_right_lemmas = " ".join(right_lemmas[:j+1])
try:
curr_right_ners = " ".join(right_ners[:j+1])
except TypeError:
new_ners = []
for ner in right_ners[:j+1]:
to_add = ner
if not to_add:
to_add = "None"
new_ners.append(to_add)
curr_right_ners = " ".join(new_ners)
yield "W_LEMMA_L_" + str(i+1) + "_R_" + str(j+1) + "_[" + \
curr_left_lemmas + "]_[" + curr_right_lemmas + "]"
yield "W_NER_L_" + str(i+1) + "_R_" + str(j+1) + "_[" + \
curr_left_ners + "]_[" + curr_right_ners + "]"
def get_dictionary_indicator_features(
sentence, span, window=3, prefix="IN_DICT"):
"""Yield the indicator features for whether a substring of the span is in
the dictionaries
Args:
sentence: a list of Word objects
span: the span
window: the maximum size of a substring
prefix: a string to prepend to all yielded features
"""
in_dictionaries = set()
for i in range(window + 1):
for j in range(span.length - i):
            phrase = " ".join(map(lambda x: str(x.lemma), sentence[span.begin_word_id + j:span.begin_word_id + j + i + 1]))  # index relative to the span, per the docstring
for dict_id in dictionaries:
if phrase in dictionaries[dict_id]:
in_dictionaries.add(dict_id)
for dict_id in in_dictionaries:
yield prefix + "_[" + str(dict_id) + "]"
# yield prefix + "_JOIN_[" + " ".join(
# map(lambda x: str(x), sorted(in_dictionaries))) + "]"
def dep_graph_parser_parenthesis(edge_str):
"""Given a string representing a dependency edge in the 'parenthesis'
format, return a tuple of (parent_index, edge_label, child_index).
Args:
edge_str: a string representation of an edge in the dependency tree, in
the format edge_label(parent_word-parent_index, child_word-child_index)
Returns:
tuple of (parent_index, edge_label, child_index)
"""
tokens = edge_str.split("(")
label = tokens[0]
tokens = tokens[1].split(", ")
parent = int(tokens[0].split("-")[-1]) - 1
child = int(",".join(tokens[1:]).split("-")[-1][:-1]) - 1
return (parent, label, child)
def dep_graph_parser_triplet(edge_str):
"""Given a string representing a dependency edge in the 'triplet' format,
return a tuple of (parent_index, edge_label, child_index).
Args:
edge_str: a string representation of an edge in the dependency tree
        in the format "parent_index\tlabel\tchild_index"
Returns:
tuple of (parent_index, edge_label, child_index)
"""
parent, label, child = edge_str.split()
# input edge used 1-based indexing
return (int(parent) - 1, label, int(child) - 1)
def dep_transform_parenthesis_to_triplet(edge_str):
"""Transform an edge representation from the parenthesis format to the
triplet format"""
parent, label, child = dep_graph_parser_parenthesis(edge_str)
return "\t".join((str(parent + 1), label, str(child + 1)))
def dep_transform_triplet_to_parenthesis(edge_str, parent_word, child_word):
"""Transform an edge representation from the triplet format to the
parenthesis format"""
parent, label, child = dep_graph_parser_triplet(edge_str)
return label + "(" + parent_word + "-" + str(parent + 1) + ", " + \
child_word + "-" + str(child + 1) + ")"
def dep_transform_test():
"""Test the transformation functions for the various dependency paths
formats"""
test = "a(b-1, c-2)"
transf = dep_transform_parenthesis_to_triplet(test)
assert transf == "1\ta\t2"
transf_back = dep_transform_triplet_to_parenthesis(transf, "b", "c")
assert transf_back == test
print("success")
def get_span(span_begin, span_length):
"""Return a Span object
Args:
span_begin: the index the Span begins at
span_length: the length of the span
"""
return Span(begin_word_id=span_begin, length=span_length)
def get_sentence(
begin_char_offsets, end_char_offsets, words, lemmas, poses,
dependencies, ners, dep_format_parser=dep_graph_parser_parenthesis):
"""Return a list of Word objects representing a sentence.
This is effectively a wrapper around unpack_words, but with a less
cumbersome interface.
Args:
begin_char_offsets: a list representing the beginning character offset
for each word in the sentence
end_char_offsets: a list representing the end character offset for each
word in the sentence
words: a list of the words in the sentence
lemmas: a list of the lemmas of the words in the sentence
poses: a list of the POS tags of the words in the sentence
dependencies: a list of the dependency path edges for the sentence
ners: a list of the NER tags of the words in the sentence
dep_format_parse: a function that takes as only argument an element of
dependencies (i.e., a dependency path edge) and returns a 3-tuple
(parent_index, label, child_index) representing the edge. Look at
the code for dep_graph_parser_parenthesis and
dep_graph_parser_triplet for examples.
"""
obj = dict()
obj['lemma'] = lemmas
obj['words'] = words
obj['ner'] = ners
obj['pos'] = poses
obj['dep_graph'] = dependencies
obj['ch_of_beg'] = begin_char_offsets
obj['ch_of_end'] = end_char_offsets
# list of Word objects
word_obj_list = unpack_words(
obj, character_offset_begin='ch_of_beg',
character_offset_end='ch_of_end', lemma='lemma', pos='pos',
ner='ner', words='words', dep_graph='dep_graph',
dep_graph_parser=dep_format_parser)
return word_obj_list
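# Illustrative end-to-end sketch (added for documentation, not part of the
# original ddlib). The toy sentence, its parse, and the mention span are all
# made up, and this helper is never called by the library itself.
def _example_mention_features():
    words = ['He', 'buys', 'apples']
    sentence = get_sentence(
        [0, 3, 8],             # begin_char_offsets
        [2, 7, 14],            # end_char_offsets
        words,
        ['he', 'buy', 'apple'],    # lemmas
        ['PRP', 'VBZ', 'NNS'],     # poses
        ['nsubj(buys-2, He-1)', 'root(ROOT-0, buys-2)', 'dobj(buys-2, apples-3)'],
        ['O', 'O', 'O'])           # ners
    span = get_span(2, 1)  # the single-word mention 'apples'
    return list(get_generic_features_mention(sentence, span))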
| dd-genomics-master | code/ddlib/gen_feats.py |
from collections import namedtuple,OrderedDict
import re
import sys
from inspect import isgeneratorfunction,getargspec
import csv
from StringIO import StringIO
def print_error(err_string):
"""Function to write to stderr"""
sys.stderr.write("ERROR[UDF]: " + str(err_string) + "\n")
BOOL_PARSER = {
't' : True,
'f' : False,
'NULL' : None,
'\\N' : None
}
TYPE_PARSERS = {
'text' : lambda x : str(x.replace('\n', ' ')),
'int' : lambda x : int(x.strip()),
'float' : lambda x : float(x.strip()),
    'boolean' : lambda x : BOOL_PARSER[x.lower().strip()]  # BOOL_PARSER is a dict lookup, not a callable
}
def parse_pgtsv_element(s, t, sep='|^|', sep2='|~|', d=0):
"""
Parse an element in psql-compatible tsv format, i.e. {-format arrays
based on provided type and type-parser dictionary
"""
# Quoting only will occur within a psql array with strings
quoted = (len(s) > 1 and s[0] == '"' and s[-1] == '"')
if quoted and d > 0:
if t == 'text':
s = s[1:-1]
else:
raise Exception("Type mismatch with quoted array element:\n%s" % s)
elif quoted and t != 'text':
raise Exception("Type mismatch with quoted array element:\n%s" % s)
# Interpret nulls correctly according to postgres convention
# Note for arrays: {,} --> NULLS, {"",""} --> empty strings
if s == '\\N':
return None
elif len(s) == 0 and (t != 'text' or (d > 0 and not quoted)):
return None
# Handle lists recursively
elif '[]' in t:
if s[0] == '{' and s[-1] == '}':
split = list(csv.reader(StringIO(s[1:-1])))[0]
else:
split = s.split(sep)
return [parse_pgtsv_element(ss, t[:-2], sep=sep2, d=d+1) for ss in split]
# Else parse using parser
else:
try:
parser = TYPE_PARSERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
return parser(s)
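# Illustrative examples (comments only; added for documentation, not part of
# the original module):
#   parse_pgtsv_element('\\N', 'text')          -> None   (Postgres NULL)
#   parse_pgtsv_element('{1,2,3}', 'int[]')     -> [1, 2, 3]
#   parse_pgtsv_element('{a,"b c"}', 'text[]')  -> ['a', 'b c']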
class Row:
def __str__(self):
return '<Row(' + ', '.join("%s=%s" % x for x in self.__dict__.iteritems()) + ')>'
def __repr__(self):
return str(self)
def _asdict(self):
return self.__dict__
class PGTSVParser:
"""
Initialized with a list of duples (field_name, field_type)
Is a factory for simple Row class
Parsed from Postgres-style TSV input lines
"""
def __init__(self, fields):
self.fields = fields
def parse_line(self, line):
row = Row()
attribs = line.rstrip().split('\t')
if len(attribs) != len(self.fields):
raise ValueError("Expected %(num_rows_declared)d attributes, but found %(num_rows_found)d in input row:\n%(row)s" % dict(
            num_rows_declared=len(self.fields), num_rows_found=len(attribs), row=line,  # report the raw input line, not the half-built Row
))
for i,attrib in enumerate(attribs):
field_name, field_type = self.fields[i]
setattr(row, field_name, parse_pgtsv_element(attrib, field_type))
return row
def parse_stdin(self):
for line in sys.stdin:
yield self.parse_line(line)
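# Illustrative sketch (added for documentation, not part of the original
# module). The field names and the sample line below are made up.
def _example_pgtsv_parser():
    example_parser = PGTSVParser([('doc_id', 'text'),
                                  ('wordidxs', 'int[]'),
                                  ('count', 'int')])
    row = example_parser.parse_line('doc1\t{1,2,3}\t7\n')
    # row.doc_id == 'doc1', row.wordidxs == [1, 2, 3], row.count == 7
    return row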
TYPE_CHECKERS = {
'text' : lambda x : type(x) == str,
'int' : lambda x : type(x) == int,
'float' : lambda x : type(x) == float,
'boolean' : lambda x : type(x) == bool
}
def print_pgtsv_element(x, n, t, d=0):
"""Checks element x against type string t, then prints in PG-TSV format if a match"""
# Handle NULLs first
if x is None:
if d == 0:
            return '\\N'  # the two characters backslash-N, Postgres's NULL marker
else:
return ''
# Handle lists recursively
if '[]' in t:
if not hasattr(x, '__iter__'):
raise ValueError("Mismatch between array type and non-iterable in output row:\n%s" % x)
else:
return '{%s}' % ','.join(print_pgtsv_element(e, n, t[:-2], d=d+1) for e in x)
    # Else check type & print, handling the special case of a string inside an array
try:
checker = TYPE_CHECKERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
if not checker(x):
raise Exception("Output column '%(name)s' of type %(declared_type)s has incorrect value of %(value_type)s: '%(value)s'" % dict(
name=n, declared_type=t, value_type=type(x), value=x,
))
if d > 0 and t == 'text':
        return '"%s"' % str(x).replace('\\', '\\\\').replace('"', '\\\\"')  # 'tok' was undefined here; 'x' is the element being printed
else:
return str(x)
class PGTSVPrinter:
"""
Initialized with a list of type strings
Prints out Postgres-format TSV output lines
"""
def __init__(self, fields):
self.fields = fields
def write(self, out):
if len(out) != len(self.fields):
raise ValueError("Expected %(num_rows_declared)d attributes, but found %(num_rows_found)d in output row:\n%(row)s" % dict(
num_rows_declared=len(self.fields), num_rows_found=len(out), row=out,
))
else:
print '\t'.join(print_pgtsv_element(x, n, t) for x,(n,t) in zip(out, self.fields))
# how to get types specified as default values of a function
def format_from_args_defaults_of(aFunctionOrFormat):
if hasattr(aFunctionOrFormat, '__call__'):
# TODO in Python3, support types in function annotations (PEP 3107: https://www.python.org/dev/peps/pep-3107/)
spec = getargspec(aFunctionOrFormat)
return zip(spec.args, spec.defaults)
else:
return aFunctionOrFormat
## function decorators to be used directly in UDF implementations
# decorators for input and output formats
def format_decorator(attrName):
def decorator(*name_type_pairs, **name_type_dict):
"""
When a function is decorated with this (e.g., @returns(...) or @over(...)
preceding the def line), the pairs of column name and type given as
arguments are kept as the function's attribute to supply other decorators,
such as @tsv_extractor, with information for deciding how to parse the
input lines or format the output lines.
"""
# check single argument case with a function or dict
if len(name_type_pairs) == 1:
if hasattr(name_type_pairs[0], '__call__'):
name_type_pairs = format_from_args_defaults_of(name_type_pairs[0])
elif type(name_type_pairs[0]) in [dict, OrderedDict]:
name_type_pairs = name_type_pairs[0]
# XXX @over(collection.OrderedDict(foo="type", bar="type", ...)) doesn't work
# as Python forgets the order when calling with keyword argument binding.
# merge dictionaries
name_type_pairs = list(name_type_pairs) + name_type_dict.items()
def decorate(f):
setattr(f, attrName, name_type_pairs)
return f
return decorate
return decorator
over = format_decorator("input_format")
returns = format_decorator("output_format")
# decorators that initiate the main extractor loop
def tsv_extractor(generator):
"""
When a generator function is decorated with this (i.e., @tsv_extractor
preceding the def line), standard input is parsed as Postgres-style TSV
(PGTSV) input rows, the function is applied to generate output rows, and then
checks that each line of this generator is in the output format before
printing back as PGTSV rows.
"""
# Expects the input and output formats to have been decorated with @over and @returns
try:
# @over has precedence over default values of function arguments
input_format = generator.input_format
except AttributeError:
input_format = format_from_args_defaults_of(generator)
try:
output_format = generator.output_format
# also support function argument defaults for output_format for symmetry
output_format = format_from_args_defaults_of(output_format)
except AttributeError:
raise ValueError("The function must be decorated with @returns")
# TODO or maybe just skip type checking if @returns isn't present?
# Check generator function
if not isgeneratorfunction(generator):
raise ValueError("The function must be a *generator*, i.e., use yield not return")
# Create the input parser
parser = PGTSVParser(input_format)
# Create the output parser
printer = PGTSVPrinter(output_format)
for row in parser.parse_stdin():
for out_row in generator(**row._asdict()):
printer.write(out_row)
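# Illustrative UDF sketch (comments only; added for documentation, not part of
# the original module). Applying @tsv_extractor runs the stdin-processing loop
# at decoration time, so the pattern below is the body of a standalone
# extractor script. The column names and types are hypothetical.
#
#   @tsv_extractor
#   @returns([('doc_id', 'text'), ('mention_id', 'text'), ('is_correct', 'boolean')])
#   @over([('doc_id', 'text'), ('words', 'text[]')])
#   def extract(doc_id=None, words=None):
#       if 'cancer' in words:
#           yield [doc_id, doc_id + '_m0', True]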
| dd-genomics-master | code/ddlib/util.py |
from dd import *
from gen_feats import *
from util import *
| dd-genomics-master | code/ddlib/__init__.py |
import sys
import collections
Word = collections.namedtuple('Word', ['begin_char_offset', 'end_char_offset', 'word', 'lemma', 'pos', 'ner', 'dep_par', 'dep_label'])
Span = collections.namedtuple('Span', ['begin_word_id', 'length'])
Sequence = collections.namedtuple('Sequence', ['is_inversed', 'elements'])
DepEdge = collections.namedtuple('DepEdge', ['word1', 'word2', 'label', 'is_bottom_up'])
def unpack_words(input_dict, character_offset_begin=None, character_offset_end=None, lemma=None,
pos=None, ner = None, words = None, dep_graph = None, dep_graph_parser = lambda x: x.split('\t')):
"""Return a list of Word objects representing a sentence
"""
array_character_offset_begin = input_dict[character_offset_begin] if character_offset_begin != None else ()
array_character_offset_end = input_dict[character_offset_end] if character_offset_end != None else ()
array_lemma = input_dict[lemma] if lemma != None else ()
array_pos = input_dict[pos] if pos != None else ()
array_ner = input_dict[ner] if ner != None else ()
array_words = input_dict[words] if words != None else ()
dep_graph = input_dict[dep_graph] if dep_graph != None else ()
dep_tree = {}
for path in dep_graph:
(parent, label, child) = dep_graph_parser(path)
parent, child = int(parent), int(child)
dep_tree[child] = {"parent":parent, "label":label}
if parent not in dep_tree: dep_tree[parent] = {"parent":-1, "label":"ROOT"}
ziped_tags = map(None, array_character_offset_begin, array_character_offset_end, array_lemma,
array_pos, array_ner, array_words)
wordobjs = []
for i in range(0,len(ziped_tags)):
if i not in dep_tree : dep_tree[i] = {"parent":-1, "label":"ROOT"}
wordobjs.append(Word(begin_char_offset=ziped_tags[i][0], end_char_offset=ziped_tags[i][1], lemma=ziped_tags[i][2], pos=ziped_tags[i][3],
ner=ziped_tags[i][4], word=ziped_tags[i][5],dep_par=dep_tree[i]["parent"], dep_label=dep_tree[i]["label"]))
return wordobjs
def log(obj):
"""Print the string form of an object to STDERR.
Args:
obj: The object that the user wants to log to STDERR.
"""
sys.stderr.write(obj.__str__() + "\n")
def materialize_span(words, span, func=lambda x:x):
"""Given a sequence of objects and a span, return the subsequence that corresponds to the span.
Args:
words: A sequence of objects.
span: A Span namedtuple
func: Optional function that will be applied to each element in the result subsequence.
"""
return map(func, words[span.begin_word_id:(span.begin_word_id+span.length)])
def _fe_seq_between_words(words, begin_idx, end_idx, func=lambda x:x):
if begin_idx < end_idx:
return Sequence(elements=map(func, words[begin_idx+1:end_idx]), is_inversed=False)
else:
return Sequence(elements=map(func, words[end_idx+1:begin_idx]), is_inversed=True)
def tokens_between_spans(words, span1, span2, func=lambda x:x):
"""Given a sequence of objects and two spans, return the subsequence that is between these spans.
Args:
words: A sequence of objects.
span1: A Span namedtuple
span2: A Span namedtuple
func: Optional function that will be applied to each element in the result subsequence.
Returns:
A Sequence namedtuple between these two spans. The "is_inversed" label is set
to be True if span1 is *AFTER* span 2.
"""
if span1.begin_word_id < span2.begin_word_id:
return _fe_seq_between_words(words, span1.begin_word_id+span1.length-1, span2.begin_word_id, func)
else:
return _fe_seq_between_words(words, span1.begin_word_id, span2.begin_word_id+span2.length-1, func)
def _path_to_root(words, word_idx):
rs = []
c_word_idx = word_idx
covered_indexes = set()
while True:
if c_word_idx in covered_indexes:
break
rs.append(words[c_word_idx])
covered_indexes.add(c_word_idx)
if words[c_word_idx].dep_par == -1 or words[c_word_idx].dep_par == c_word_idx:
break
c_word_idx = words[c_word_idx].dep_par
return rs
def dep_path_between_words(words, begin_idx, end_idx):
"""Given a sequence of Word objects and two indices, return the sequence of Edges
corresponding to the dependency path between these two words.
Args:
words: A sequence of Word objects.
span1: A word index
span2: A word index
Returns:
An Array of Edge objects, each of which corresponds to one edge on the dependency path.
"""
path_to_root1 = _path_to_root(words, begin_idx)
path_to_root2 = _path_to_root(words, end_idx)
common = set(path_to_root1) & set(path_to_root2)
#if len(common) == 0:
# raise Exception('Dep Path Must be Wrong: No Common Element Between Word %d & %d.' % (begin_idx, end_idx))
path = []
for word in path_to_root1:
if word in common: break
path.append(DepEdge(word1=word, word2=words[word.dep_par], label=word.dep_label, is_bottom_up=True))
path_right = []
for word in path_to_root2:
if word in common: break
path_right.append(DepEdge(word1=words[word.dep_par], word2=word, label=word.dep_label, is_bottom_up=False))
for e in reversed(path_right):
path.append(e)
return path
| dd-genomics-master | code/ddlib/dd.py |
import dependencies
import sys
def index_of_sublist(subl, l):
for i in range(len(l) - len(subl) + 1):
if subl == l[i:i + len(subl)]:
return i
def intersects(a1, a2):
for i in a1:
if i in a2:
return True
return False
def acyclic(a):
return len(a) == len(set(a))
def create_sentence_index(row):
sentence = {
'words' : row.words,
'lemmas' : row.lemmas,
'poses' : row.poses,
'dep_paths' : row.dep_paths,
'dep_parents' : [ p-1 for p in row.dep_parents]
}
parents, children = dependencies.build_indexes(sentence)
index = {
'sentence': sentence,
'parents': parents,
'children': children }
return index
def supervise(m, mention_parts, sentence_index, pos_phrases, neg_phrases, pos_patterns, neg_patterns, dicts):
labeling = None
sentence = sentence_index['sentence']
parents = sentence_index['parents']
children = sentence_index['children']
pos_patterns = [ p.split(' ') for p in pos_patterns ]
neg_patterns = [ p.split(' ') for p in neg_patterns ]
for p in neg_phrases:
start = index_of_sublist(p, sentence['words'])
if start is not None:
labeling = False
for p in pos_phrases:
start = index_of_sublist(p, sentence['words'])
if start is not None:
labeling = True
for p in neg_patterns:
if dependencies.match(sentence, p, mention_parts, parents, children, dicts):
labeling = False
if labeling is None:
for p in pos_patterns:
matches = dependencies.match(sentence, p, mention_parts, parents, children, dicts)
if matches:
print >>sys.stderr, 'pos matches: %s in %s' % (str(matches), str(sentence['words']))
labeling = True
return m._replace(is_correct=labeling)
def featurize(m, mention_parts, sentence_index,
feature_patterns, bad_features, dicts):
feature_patterns = [ p.split(' ') for p in feature_patterns ]
sentence = sentence_index['sentence']
parents = sentence_index['parents']
children = sentence_index['children']
flat_mention_parts = [i for sublist in mention_parts for i in sublist]
feature_set = set()
# find dependency path that covers wordidxs, lemmatize
# feature = ''
# for i in m.wordidxs:
# m.features.append(sentence['lemmas'][i])
# for p in parents[i]:
# path, parent = p
# if parent in m.wordidxs:
# feature = feature + sentence['lemmas'][i] + '<-' + path + '-' + sentence['lemmas'][parent] + '|||'
# if feature != '':
# m.features.append(feature)
feature_prefix = ''
for i in flat_mention_parts:
for c in children[i]:
path, child = c
if path == 'neg':
feature_prefix = 'NEGATED'
# for i in m.wordidxs:
# for c in children[i]:
# path, child = c
# if path == 'neg':
# feature_set.add('NEGATED')
# for i in m.wordidxs:
# if sentence['poses'][i] == 'NNS':
# feature_set.add('PLURAL')
# for i in m.wordidxs:
# for p in parents[i]:
# path, parent = p
# if sentence['poses'][parent] == 'NNS':
# feature_set.add('MOD_PLURAL')
def get_actual_dep_from_match(pattern, j, ma):
# determine the edge
edge_pattern = pattern[2 * j + 1]
if edge_pattern[:2] == '<-':
# print(str(ma[j]), file=sys.stderr)
# print(str(parents[ma[j]]), file=sys.stderr)
dep = '<-' + parents[ma[j]][0][0] + '-'
elif edge_pattern[len(edge_pattern) - 2:] == '->':
dep = '-' + parents[ma[j + 1]][0][0] + '->'
elif edge_pattern == '_':
dep = '_'
else:
print >> sys.stderr, 'ERROR: Unknown edge pattern'
return dep
# pattern = '__ <-prep_like- __ -nsubj-> __'.split(' ')
# for pattern in feature_patterns:
# for i in m.wordidxs:
# matches = []
# dependencies.match_i(sentence, i, pattern, parents, children, matches, [], dicts)
# for ma in matches:
# #feature = '__' + pattern[1]
# feature = sentence['lemmas'][ma[0]] + get_actual_dep_from_match(pattern, 0, ma)
# j = 1
# while j < len(ma):
# feature = feature + sentence['lemmas'][ma[j]]
# if 2*j + 1 < len(pattern):
# dep = get_actual_dep_from_match(pattern, 0, ma)
# feature = feature + dep
# j = j+1
# m.features.append(feature)
# pattern = '__ <-prep_like- __ -nsubj-> __'.split(' ')
for pattern in feature_patterns:
matches = dependencies.match(sentence, pattern, mention_parts, parents, children, dicts)
for ma in matches:
if acyclic(ma):
# feature = '__' + pattern[1]
feature = sentence['lemmas'][ma[0]] + ' ' + get_actual_dep_from_match(pattern, 0, ma)
j = 1
while j < len(ma):
feature = feature + ' ' + sentence['lemmas'][ma[j]]
if 2 * j + 1 < len(pattern):
dep = get_actual_dep_from_match(pattern, j, ma)
feature = feature + ' ' + dep
j = j + 1
feature_set.add(feature)
for f in feature_set:
if f in bad_features:
continue
m.features.append(feature_prefix + '_' + f)
return m
| dd-genomics-master | code/util/clf_util.py |
######################################################################################
# LATTICE - MEMEX plugins for latticelib
#
# latticelib is an extraction framework for quickly building extractors by
# specifying minimal target-specific code (candidate generation patterns and
# supervision rules). It ships with pre-built featurization code that covers
# many of the Memex flag extractors. The goal is to make it more general,
# powerful, fast to run, and easy to use.
#
# This file contains Memex-specific components for latticelib.
#
# For sample usage, see:
# udf/util/test/latticelib/module.py
# udf/extract_underage.py
#
######################################################################################
# Default dictionaries tailored for Memex. Will function in addition to the
# one in latticelib
default_dicts = {
'short_words': [
'the',
'and',
'or',
'at',
'in',
'see',
'as',
'an',
'data',
'for',
'not',
'our',
'ie',
'to',
'eg',
'one',
'age',
'on',
'center',
'right',
'left',
'from',
'based',
'total',
'via',
'but',
'resp',
'no',
],
'intensifiers': [
'very',
'really',
'extremely',
'exceptionally',
'incredibly',
'unusually',
'remarkably',
'particularly',
'absolutely',
'completely',
'quite',
'definitely',
'too',
],
'pos_certainty': [
'likely',
'possibly',
'definitely',
'absolutely',
'certainly',
],
'modal': [
'will',
'would',
'may',
'might',
],
'mutation': [
'mutation',
'variant',
'allele',
'deletion',
'duplication',
'truncation',
],
'levels': [
'serum',
'level',
'elevated',
'plasma',
    ],
    'expression': [
'express',
'expression',
'coexpression',
'coexpress',
'co-expression',
'co-express',
'overexpress',
'overexpression',
'over-expression',
'over-express',
'production',
'product',
'increased',
'increase',
'increas',
]
}
| dd-genomics-master | code/util/memex.py |
dd-genomics-master | code/util/__init__.py |
|
#! /usr/bin/python -m trace --trace --file /dev/stderr
######################################################################################
# LATTICE - Util functions for working with dependencies
#
# Usage:
#   Start by preparing a list of dep_patterns, e.g. "he <-nsubj- buy".
#   A pattern alternates word tokens and dependency edges and is split on
#   spaces before matching.
#
#   sentence is a dict that contains words, lemmas, poses, dep_paths and
#   dep_parents.
#
#   parents, children = build_indexes(sentence)
#
#   for p in dep_patterns:
#     matches = match(sentence, p.split(' '), mention_parts, parents, children, dicts)
#
#   Each m in matches is a list of the word ids that matched the pattern.
#   mention_parts is a list of candidate word-index lists consulted by
#   'cand[k]' tokens; see _example_match_usage at the bottom of this file.
#
######################################################################################
import sys
import re
def build_indexes(sentence):
l = len(sentence['dep_paths'])
parents = []
children = []
for i in range(0, l):
parents.append([])
children.append([])
for i in range(0, l):
p = sentence['dep_parents'][i]
t = sentence['dep_paths'][i]
children[p].append([t, i])
parents[i].append([t, p])
return [parents, children]
def match(sentence, path_arr, mention_parts, parents, children, dicts = {}):
matches = []
for i in range(0, len(sentence['words'])):
match_i(sentence, i, path_arr, mention_parts, parents, children, matches, [], dicts)
return matches
def token_match(sentence, i, pw, mention_parts, dicts):
#w = sentence['words'][i].lower()
w = sentence['lemmas'][i].lower()
t = pw.split('|')
#print(dicts, file=sys.stderr)
#print('.... checking ' + pw)
for s in t:
pair = s.split(':')
p = re.compile('cand\[(\d)\]')
m = p.match(pair[0])
if m:
cand_num = int(m.group(1))
cand_wordidxs = mention_parts[cand_num]
if i in cand_wordidxs:
return True
elif pair[0] == 'dic':
if not pair[1] in dicts:
print >>sys.stderr, 'ERROR: Dictionary ' + pair[1] + ' not found'
return False
if not w in dicts[pair[1]]:
return False
elif pair[0] == 'pos':
if not pair[1] == sentence['poses'][i]:
return False
elif pair[0] == 'reg':
if not re.match(pair[1], w):
return False
else:
print >>sys.stderr, 'ERROR: Predicate ' + pair[0] + ' unknown'
return False
return True
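# Note on the bracketed token syntax accepted above (added for documentation):
# a pattern token such as '[dic:diseases|pos:NN]' is a '|'-separated list of
# predicates that must all hold for the word (dictionary membership of the
# lowercased lemma, exact POS tag, regex match on the lemma), except that a
# 'cand[k]' predicate returns a match immediately when the word index belongs
# to the k-th candidate in mention_parts, and otherwise falls through to the
# remaining predicates.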
def match_i(sentence, i, path_arr, mention_parts, parents, children, matches, matched_prefix = [], dicts = {}):
assert len(path_arr) > 0, str(path_arr)
#w = sentence['lemmas'][i].lower()
#if len(path_arr) == 0:
# nothing to match anymore
#matches.append(matched_prefix)
#return True
pw = path_arr[0]
# __ is a wildcard matching every word
matched = False
if pw == '__':
matched = True
elif pw.startswith('[') and pw.endswith(']') and token_match(sentence, i, pw[1:-1], mention_parts, dicts):
matched = True
elif sentence['lemmas'][i].lower() == pw:
matched = True
if not matched:
return False
#if pw != '__' and w != pw:
# return
matched_prefix.append(i)
if len(path_arr) == 1:
matches.append(matched_prefix)
return True
# match dep
pd = path_arr[1]
found = False
if pd[:2] == '<-':
# left is child
dep_type = pd[2:-1]
for p in parents[i]:
#print(w + '\t<-' + p[0] + '-\t' + str(p[1]) + '\t' + sentence.words[p[1]].lemma, file=sys.stderr)
if p[0] == dep_type or dep_type == '__':
if match_i(sentence, p[1], path_arr[2:], mention_parts, parents, children, matches, list(matched_prefix), dicts):
found = True
elif pd[len(pd)-2:] == '->':
# left is parent
dep_type = pd[1:-2]
for c in children[i]:
#print(w + '\t-' + c[0] + '->\t' + str(c[1]) + '\t' + sentence.words[c[1]].lemma, file=sys.stderr)
if c[0] == dep_type or dep_type == '__':
if match_i(sentence, c[1], path_arr[2:], mention_parts, parents, children, matches, list(matched_prefix), dicts):
found = True
elif pd == '_':
if i+1 < len(sentence['lemmas']):
if match_i(sentence, i+1, path_arr[2:], mention_parts, parents, children, matches, list(matched_prefix), dicts):
found = True
return found
def enclosing_range(wordidxs):
m = wordidxs
if len(m) == 1:
m_from = m[0]
m_to = m[0] + 1
else:
if m[0] < m[1]:
m_from = m[0]
m_to = m[len(m)-1] + 1
else:
m_from = m[len(m)-1]
m_to = m[0]+1
return [ m_from, m_to ]
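# Illustrative sketch (added for documentation, not part of the original
# module); see the usage notes in the header. The toy sentence below is made
# up, and its root word points at itself to keep the example short.
def _example_match_usage():
  sentence = {
    'words': ['He', 'buys', 'apples'],
    'lemmas': ['he', 'buy', 'apple'],
    'poses': ['PRP', 'VBZ', 'NNS'],
    'dep_paths': ['nsubj', 'root', 'dobj'],
    'dep_parents': [1, 1, 1],
  }
  parents, children = build_indexes(sentence)
  pattern = 'he <-nsubj- buy'.split(' ')
  # Expected to return [[0, 1]]: word 0 ('He') matches 'he' via its nsubj
  # edge up to word 1 ('buys'), which matches 'buy'.
  return match(sentence, pattern, [], parents, children, dicts={})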
| dd-genomics-master | code/util/dependencies.py |
dd-genomics-master | code/dep_alignment/__init__.py |
|
#! /usr/bin/env python
def rc_to_match_tree(mixin, sent, cands, node, children, rv=None):
if rv is None:
rv = []
assert False, "TODO this method unfolds DAGS into trees, don't use it or fix it first"
mc = MatchCell(1)
rv.append(mc)
index = len(rv)
for i, cand in enumerate(cands):
if node == cand:
mc.cands = [i]
break
mc.lemmas = [mixin.get_lemma(sent, node)]
mc.words = [mixin.get_word(sent, node)]
mc.match_type = 'single_match'
mc.pos_tags = [mixin.get_pos_tag(sent, node)]
for c in children[node]:
if c == 0:
continue
c_index, _ = rc_to_match_tree(mixin, sent, cands, c, children, rv)
mc.children.append(c_index)
if not mc.children:
mc.children.append(0)
return index, rv
def sent_to_match_tree(sent, cands):
m = AlignmentMixin()
root = m.find_root(sent['dependencies'])
children = m.find_children(sent)
return rc_to_match_tree(m, sent, cands, root, children)
class OverlappingCandidatesException(Exception):
pass
def canonicalize_row(words, lemmas, poses, dep_paths, dep_parents, cands):
for i, cand_series1 in enumerate(cands):
for j, cand_series2 in enumerate(cands):
if i == j:
continue
if len(set(cand_series1).intersection(set(cand_series2))) != 0:
raise OverlappingCandidatesException
assert len(words) < 200, words
new_indices = [i for i in xrange(len(words))]
new_words = [None for _ in xrange(len(words))]
new_lemmas = [None for _ in xrange(len(words))]
new_poses = [None for _ in xrange(len(words))]
new_cands = []
for cand_series in cands:
first_cand_index = new_indices[cand_series[0]]
offset = len(cand_series) - 1
for i in cand_series:
new_indices[i] = first_cand_index
for i in xrange(max(cand_series)+1, len(new_indices)):
new_indices[i] -= offset
cand_series = sorted(list(cand_series))
cand_word_series = [words[i] for i in cand_series]
cand_word = '_'.join(cand_word_series)
new_words[first_cand_index] = cand_word
new_lemmas[first_cand_index] = cand_word.lower()
new_poses[first_cand_index] = 'NN'
new_cands.append(first_cand_index)
for i in xrange(len(words)):
if new_indices[i] in new_cands:
continue
new_words[new_indices[i]] = words[i]
new_lemmas[new_indices[i]] = lemmas[i]
new_poses[new_indices[i]] = poses[i]
new_words = [w for w in new_words if w is not None]
new_lemmas = [l for l in new_lemmas if l is not None]
new_poses = [p for p in new_poses if p is not None]
assert len(new_words) <= len(words) + 1
new_dep_parents_paths = [set() for _ in xrange(len(new_words))]
for i, parent in enumerate(dep_parents):
if new_indices[i] == new_indices[parent]:
# within candidate
pass
else:
new_dep_parents_paths[new_indices[i]].add((new_indices[parent], dep_paths[i]))
new_dep_parents = []
new_dep_paths = []
for parent_paths in new_dep_parents_paths:
no_minus_one_paths = [p for p in parent_paths if p[0] != -1]
if len(no_minus_one_paths) > 0:
new_dep_parents.append([p[0] for p in no_minus_one_paths])
new_dep_paths.append([p[1] for p in no_minus_one_paths])
else:
new_dep_parents.append([-1])
new_dep_paths.append([''])
return new_words, new_lemmas, new_poses, new_dep_paths, new_dep_parents, new_cands
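# Illustrative note (added for documentation, not part of the original
# module): with words = ['BRCA1', 'causes', 'breast', 'cancer'] and
# cands = [[0], [2, 3]], the candidate word indices 2 and 3 are collapsed
# into the single token 'breast_cancer' (lemma 'breast_cancer', POS 'NN'),
# giving new_words = ['BRCA1', 'causes', 'breast_cancer'] and
# new_cands = [0, 2]; the dependency arrays are remapped onto the merged
# indices accordingly.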
def parts_to_match_tree(words, lemmas, poses, children, node, cands, rv=None, folded=None):
if rv is None:
rv = []
if folded is None:
folded = {}
if node in folded:
assert node != 0
return folded[node], rv
mc = MatchCell(1)
rv.append(mc)
index = len(rv)
for i, cand in enumerate(cands):
if node == cand:
mc.cands = [i]
break
mc.lemmas = [lemmas[node-1]]
mc.words = [words[node-1]]
mc.match_type = 'single_match'
mc.pos_tags = [poses[node-1]]
assert node not in folded, (node, folded)
folded[node] = index
for c in children[node]:
assert c != node
if c == 0:
continue
c_index, _ = parts_to_match_tree(words, lemmas, poses, children, c, cands, rv, folded)
if c_index not in mc.children:
mc.children.append(c_index)
if not mc.children:
mc.children.append(0)
return index, rv
def parents_to_children(dep_parents):
children = [[] for _ in xrange(len(dep_parents) + 1)]
for node, parents in enumerate(dep_parents):
for p in parents:
if p != -1:
assert p+1 < len(children), (p, len(children), dep_parents)
children[p+1].append(node+1)
for i, c in enumerate(children):
if i == 0:
continue
if not c:
c.append(0)
return children
class RootException(Exception):
pass
def parents_find_root(dep_parents):
roots = []
for i, parents in enumerate(dep_parents):
if parents != [-1]:
continue
if not [p for p in dep_parents if (i in p)]:
continue
roots.append(i+1)
if len(roots) != 1:
raise RootException("have != 1 roots (%s) in sentence" % str(roots))
return roots[0]
class DepParentsCycleException(Exception):
pass
def incoming(node, children):
rv = []
for i, c in enumerate(children):
if node in c:
rv.append(i)
return rv
def acyclic(children):
children = [set(c) for c in children]
l = []
q = []
for node in xrange(len(children)):
if not incoming(node, children):
q.append(node)
while q:
n = q[0]
q = q[1:]
l.append(n)
for m in children[n]:
incom = incoming(m, children)
if incom == [n]:
q.append(m)
children[n].clear()
for c in children:
if c:
return False
return True
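# Illustrative note (added for documentation, not part of the original
# module): acyclic() peels nodes with no incoming edges, Kahn-style, from the
# child adjacency lists, e.g. acyclic([[1], [2], []]) is True (a simple chain
# 0 -> 1 -> 2) while acyclic([[1], [2], [0]]) is False (a 3-cycle).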
def row_to_canonical_match_tree(row, in_cands):
words, lemmas, poses, dep_paths, dep_parents = row.words, row.lemmas, row.poses, row.dep_paths, row.dep_parents
words, lemmas, poses, dep_paths, dep_parents, cands = canonicalize_row(words, lemmas, poses, dep_paths, dep_parents, in_cands)
# we are converting from 0 to 1-based now
cands = [c+1 for c in cands]
children = parents_to_children(dep_parents) # takes 0-based, returns 1-based
if not acyclic(children):
raise DepParentsCycleException()
root = parents_find_root(dep_parents) # takes 0-based, returns 1-based
return parts_to_match_tree(words, lemmas, poses, children, root, cands)
class MatchCell:
def __init__(self, size):
self.size = size
self.match_type = [None for _ in xrange(size)]
self.pos_tags = [None for _ in xrange(size)]
self.words = [None for _ in xrange(size)]
self.lemmas = [None for _ in xrange(size)]
self.cands = [None for _ in xrange(size)]
self.children = []
def __repr__(self):
return '[%s,%s,%s,%s,%s,%s,%s]' % (str(self.size), \
str(self.match_type), \
str(self.pos_tags), \
str(self.words), \
str(self.lemmas), \
str(self.cands), \
str(self.children))
class AlignmentMixin:
def find_root(self, dep):
for d in dep:
if d[1].lower() == "root-0":
d2split = d[2].split('-')
return int(d2split[len(d2split) - 1])
assert False
def find_children(self, sent):
dep = sent['dependencies']
rv = [ set() for i in xrange(len(sent['words']) + 1)]
for d in dep:
d1split = d[1].split('-')
from_index = int(d1split[len(d1split) - 1])
d2split = d[2].split('-')
to_index = int(d2split[len(d2split) - 1])
rv[from_index].add(to_index)
assert to_index <= len(sent['words'])
for i, c in enumerate(rv):
if len(c) == 0:
rv[i].add(0)
return rv
def in_dicts(self, w1, w2, dicts):
for d in dicts:
if w1 in d and w2 in d:
return True
return False
def get_word(self, sent, node):
return sent['words'][node - 1][0]
def get_lemma(self, sent, node):
return sent['words'][node - 1][1]["Lemma"]
def get_pos_tag(self, sent, node):
# HACK Johannes: take only first letter of POS tag for matching
return sent['words'][node - 1][1]["PartOfSpeech"][0] | dd-genomics-master | code/dep_alignment/alignment_util.py |
#! /usr/bin/env python
import numpy as np
from alignment_util import AlignmentMixin, MatchCell
import sys
import copy
class MultiDepAlignment(AlignmentMixin):
word_match_score = 5
dict_match_score = 5
lemma_match_score = 5
pos_tag_match_score = -4
skip_score = -3
mismatch_score = -5
cand_match_score = 15
short_words = set([',', '.', '-lrb-', '-rrb-', 'is', 'the', 'of', 'for', \
'with', 'on', 'to', 'from', 'in', 'a', 'an', 'at', 'and', 'by', 'be', 'we'])
def __init__(self, mt_root1, match_tree1, mt_root2, match_tree2, num_cands, dicts):
self.match_tree1 = match_tree1
self.match_tree2 = match_tree2
self.dicts = dicts
self.num_cands = num_cands
self.empty_cell1 = MatchCell(match_tree1[0].size)
self.empty_cell2 = MatchCell(match_tree2[0].size)
self.score_matrix = np.empty((len(match_tree1) + 1, len(match_tree2) + 1))
self.score_matrix[:] = np.inf
self.path_matrix = [[('_', 0) for _ in xrange(len(match_tree2) + 1)] for _ in xrange(len(match_tree1) + 1)]
for d in dicts:
assert isinstance(d, set)
self.mt_root1 = mt_root1
self.mt_root2 = mt_root2
self._h(self.mt_root1, self.mt_root2, forbidden1=set(), forbidden2=set())
def get_match_cell1(self, mt_node1):
if mt_node1 == 0:
return self.empty_cell1
else:
return self.match_tree1[mt_node1 - 1]
def get_match_cell2(self, mt_node2):
if mt_node2 == 0:
return self.empty_cell2
else:
return self.match_tree2[mt_node2 - 1]
def _match_score(self, mt_node1, mt_node2):
mc1 = self.get_match_cell1(mt_node1)
mc2 = self.get_match_cell2(mt_node2)
sum_score = 0
match_type = ''
for i in xrange(len(mc1.words)):
for j in xrange(len(mc2.words)):
if mc1.words[i] is None and mc2.words[j] is None:
match_type += '[gaps%d,%d]' % (i, j)
continue
if mc1.words[i] is None and mc2.words[j] is not None:
sum_score += self.skip_score
match_type += '[gap1%d,%d]' % (i, j)
continue
if mc1.words[i] is not None and mc2.words[j] is None:
sum_score += self.skip_score
match_type += '[gap2%d,%d]' % (i, j)
continue
broken = False
for k in xrange(self.num_cands):
if mc1.cands[i] == k and mc2.cands[j] == k:
sum_score += self.cand_match_score
match_type += '[cand%d,%d_%d]' % (i, j, k)
broken = True
break
if broken:
continue
if mc1.pos_tags[i] == mc2.pos_tags[j] \
and mc1.lemmas[i] in self.short_words and mc2.lemmas[j] in self.short_words:
match_type += '[short_word%d,%d]' % (i, j)
continue
if mc1.pos_tags[i] == mc2.pos_tags[j] and mc1.words[i] == mc2.words[j]:
match_type += '[word%d,%d]' % (i, j)
sum_score += self.word_match_score
continue
if mc1.pos_tags[i] == mc2.pos_tags[j] and mc1.lemmas[i] == mc2.lemmas[j]:
match_type += '[lemma%d,%d]' % (i, j)
sum_score += self.lemma_match_score
continue
if self.in_dicts(mc1.words[i], mc2.words[j], self.dicts):
match_type += '[word_dict%d,%d]' % (i, j)
sum_score += self.dict_match_score
continue
if self.in_dicts(mc1.lemmas[i], mc2.lemmas[j], self.dicts):
match_type += '[lemma_dict%d,%d]' % (i, j)
sum_score += self.dict_match_score
continue
if mc1.pos_tags[i] == mc2.pos_tags[j]:
match_type += '[pos_tags%d,%d]' % (i, j)
sum_score += self.pos_tag_match_score
continue
match_type += '[mis%d,%d]' % (i, j)
sum_score += self.mismatch_score
return sum_score, match_type + '_match'
def _balance_lists(self, lists1, lists2):
fake_guy_number = 0
while len(lists1) < len(lists2):
new_guy = 'fake_' + str(fake_guy_number)
fake_guy_number += 1
lists1[new_guy] = []
for girl in lists2:
lists1[new_guy].append((0, girl))
lists2[girl].append((-1000, new_guy))
def _sort_lists(self, lists):
for guy in lists:
sorted_list = sorted(lists[guy])[::-1] # sort in reverse score order
lists[guy] = sorted_list
def _stable_marriage(self, men_pref_lists, women_pref_lists):
self._balance_lists(men_pref_lists, women_pref_lists)
self._balance_lists(women_pref_lists, men_pref_lists)
assert len(women_pref_lists) == len(men_pref_lists)
self._sort_lists(men_pref_lists)
self._sort_lists(women_pref_lists)
proposal_order = copy.deepcopy(men_pref_lists)
unmatched_guys = set([guy for guy in men_pref_lists])
matching = {}
while unmatched_guys:
guy = iter(unmatched_guys).next()
girl = proposal_order[guy][0][1]
proposal_order[guy] = proposal_order[guy][1:]
if girl not in matching:
unmatched_guys -= set([guy])
matching[girl] = guy
else:
current_guy = matching[girl]
prefer_new_guy = None
for (_, m) in women_pref_lists[girl]:
if m == current_guy:
prefer_new_guy = False
break
if m == guy:
prefer_new_guy = True
break
assert prefer_new_guy is not None, (current_guy, guy, women_pref_lists[girl])
if prefer_new_guy:
unmatched_guys.add(current_guy)
unmatched_guys -= set([guy])
matching[girl] = guy
else:
pass
rv = []
assert len(matching) == len(women_pref_lists)
matched_guys = set()
for girl in matching:
guy = matching[girl]
assert guy not in matched_guys, (guy, matched_guys)
matched_guys.add(guy)
if isinstance(guy, int) and isinstance(girl, int):
rv.append((guy, girl))
elif isinstance(guy, int) and not isinstance(girl, int):
rv.append((guy, None))
elif not isinstance(guy, int) and isinstance(girl, int):
rv.append((None, girl))
else:
assert False
return rv
def _match(self, mt_node1, mt_node2, forbidden1, forbidden2):
if mt_node1 == 0 and mt_node2 == 0:
return 0, 'end', []
if mt_node1 == 0 and mt_node2 != 0:
return -1000, 'assert_false', []
if mt_node1 != 0 and mt_node2 == 0:
return -1000, 'assert_false', []
mc1 = self.get_match_cell1(mt_node1)
c1 = set(mc1.children)
assert mt_node1 not in c1, (mt_node1, c1)
mc2 = self.get_match_cell2(mt_node2)
c2 = set(mc2.children)
assert mt_node2 not in c2, (mt_node2, c2)
men_pref_lists = {}
women_pref_lists = {}
for i in c1:
men_pref_lists[i] = []
for j in c2:
women_pref_lists[j] = []
for i in c1:
for j in c2:
self._h(i, j, forbidden1, forbidden2)
men_pref_lists[i].append((self.score_matrix[i, j], j))
women_pref_lists[j].append((self.score_matrix[i, j], i))
outgoing = self._stable_marriage(men_pref_lists, women_pref_lists)
sum_score = 0
for (i, j) in outgoing:
assert i is None or i in mc1.children, i
assert j is None or j in mc2.children, j
# assert False, 'TODO test this! should we use the skip score for unassigned branches?'
if i is not None and j is not None:
sum_score += self.score_matrix[i, j]
elif i is not None or j is not None:
sum_score += self.skip_score
        # HACK (Johannes): unmatched branches are charged the skip score and
        # dropped here. If they should be kept instead, insert them into the
        # outgoing list and adapt the downstream code (especially
        # get_match_tree) to handle None nodes.
else:
assert False, (i, j)
direct_match_score, match_type = self._match_score(mt_node1, mt_node2)
# HACK Johannes: Because unmatched branches can be pruned in a multi-way match, this doesn't hold anymore:
# assert len([a[0] for a in outgoing if a[0] is not None]) == len(set([a[0] for a in outgoing if a[0] is not None]))
# assert len([a[1] for a in outgoing if a[1] is not None]) == len(set([a[1] for a in outgoing if a[1] is not None]))
assert len(outgoing) >= min(len(mc1.children), len(mc2.children))
for o1, o2 in outgoing:
# assert o1 is not None
# assert o2 is not None
if o1 is not None:
assert mt_node1 != o1, (mt_node1, o1, match_type, mc1.children)
if o2 is not None:
assert mt_node2 != o2, (mt_node2, o2, match_type, mc2.children)
return direct_match_score + sum_score, match_type, outgoing
def _skip1(self, mt_node1, mt_node2, forbidden1, forbidden2):
if mt_node1 == 0:
return -1000, 'assert_false', []
score_list = []
mc1 = self.get_match_cell1(mt_node1)
for i in mc1.children:
assert i != mt_node1
self._h(i, mt_node2, forbidden1, forbidden2)
score_list.append((self.score_matrix[i, mt_node2], i))
score_list = sorted(score_list)[::-1]
mc2 = self.get_match_cell2(mt_node2)
sum_score = 0
skip_type = ''
for j, word in enumerate(mc2.words):
if word is not None:
skip_type += '[skip%d]' % j
sum_score += self.skip_score
else:
skip_type += '[zero%d]' % j
assert score_list[0][1] != mt_node1, str((score_list[0][1], mt_node1))
return sum_score + score_list[0][0], skip_type + '_skip1', [(score_list[0][1], mt_node2)]
def _skip2(self, mt_node1, mt_node2, forbidden1, forbidden2):
if mt_node2 == 0:
return -1000, 'assert_false', []
score_list = []
mc2 = self.get_match_cell2(mt_node2)
for j in mc2.children:
assert j != mt_node2
self._h(mt_node1, j, forbidden1, forbidden2)
score_list.append((self.score_matrix[mt_node1, j], j))
score_list = sorted(score_list)[::-1]
mc1 = self.get_match_cell1(mt_node1)
sum_score = 0
skip_type = ''
for i, word in enumerate(mc1.words):
if word is not None:
skip_type += '[skip%d]' % i
sum_score += self.skip_score
else:
skip_type += '[zero%d]' % i
assert score_list[0][1] != mt_node2, str((mt_node2, score_list[0][1]))
return sum_score + score_list[0][0], skip_type + '_skip2', [(mt_node1, score_list[0][1])]
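  # Memoized recursion over node pairs: the score of aligning (mt_node1, mt_node2)
  # is the best of a direct match (_match), skipping a node of tree 1 (_skip1),
  # or skipping a node of tree 2 (_skip2). The winning move and its successor
  # pairs are stored in path_matrix so the alignment can be reconstructed.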
def _h(self, mt_node1, mt_node2, forbidden1, forbidden2):
forbidden1 = forbidden1.copy()
forbidden1.add(mt_node1)
forbidden2 = forbidden2.copy()
forbidden2.add(mt_node2)
if self.score_matrix[mt_node1, mt_node2] != np.inf:
return self.score_matrix[mt_node1, mt_node2]
m, match_type, cont_match = self._match(mt_node1, mt_node2, forbidden1, forbidden2)
assert mt_node1 not in [c[0] for c in cont_match]
assert mt_node2 not in [c[1] for c in cont_match]
l1, skip_type1, cont_skip1 = self._skip1(mt_node1, mt_node2, forbidden1, forbidden2)
assert (mt_node1, mt_node2) not in cont_skip1, (mt_node1, mt_node2, cont_skip1)
l2, skip_type2, cont_skip2 = self._skip2(mt_node1, mt_node2, forbidden1, forbidden2)
assert (mt_node1, mt_node2) not in cont_skip2, (mt_node1, mt_node2, cont_skip2)
score = max([m, l1, l2])
self.score_matrix[mt_node1, mt_node2] = score
if score == m:
self.path_matrix[mt_node1][mt_node2] = match_type, cont_match
for o1, o2 in cont_match:
assert o1 is None or o1 not in forbidden1
assert o2 is None or o2 not in forbidden2
elif score == l1:
self.path_matrix[mt_node1][mt_node2] = skip_type1, cont_skip1
for o1, _ in cont_skip1:
assert o1 not in forbidden1
elif score == l2:
self.path_matrix[mt_node1][mt_node2] = skip_type2, cont_skip2
for _, o2 in cont_skip2:
assert o2 not in forbidden2
else:
assert False
def _print_match_tree_recursive(self, stream, match_tree, index, indent):
assert 0 < index
assert index <= len(match_tree), (index, len(match_tree))
cell = match_tree[index - 1]
print >> stream, " " * indent + str(cell)
for c in cell.children:
if c != 0:
self._print_match_tree_recursive(stream, match_tree, c, indent + 4)
def print_match_tree(self, stream=sys.stdout):
root, match_tree = self.get_match_tree()
self._print_match_tree_recursive(stream, match_tree, root, 0)
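  # Rebuild the merged match tree by following path_matrix from the two roots,
  # folding repeated (node1, node2) pairs and emitting one combined MatchCell
  # (slots of tree 1 followed by slots of tree 2) per pair; 'single1'/'single2'
  # cells cover branches present in only one of the two trees.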
def get_match_tree(self, match_tree=-1, folded=-1, node1=-1, node2=-1, size1=-1, size2=-1, forbidden=-1):
if node1 == -1:
node1 = self.mt_root1
if node2 == -1:
node2 = self.mt_root2
if folded != -1 and (node1, node2) in folded:
if folded[(node1, node2)] == 0:
return 0, match_tree
else:
return folded[(node1, node2)], match_tree
if match_tree == -1:
match_tree = []
if folded == -1:
assert len(match_tree) == 0
folded = {}
folded[(0, 0)] = 0
folded[(0, None)] = 0
folded[(None, 0)] = 0
if forbidden == -1:
forbidden = []
if node1 is not None and node2 is not None:
mc1 = self.get_match_cell1(node1)
mc2 = self.get_match_cell2(node2)
size1 = mc1.size
size2 = mc2.size
mc = MatchCell(size1 + size2)
mc.cands[0:size1] = mc1.cands
mc.cands[size1:size1 + size2] = mc2.cands
mc.pos_tags[0:size1] = mc1.pos_tags
mc.pos_tags[size1:size1 + size2] = mc2.pos_tags
mc.words[0:size1] = mc1.words
mc.words[size1:size1 + size2] = mc2.words
mc.lemmas[0:size1] = mc1.lemmas
mc.lemmas[size1:size1 + size2] = mc2.lemmas
mc.children = []
instr, succ = self.path_matrix[node1][node2]
mc.match_type = instr
match_tree.append(mc)
index = len(match_tree)
folded[(node1, node2)] = index
      forbidden = [i for i in forbidden]  # copy the list so sibling branches don't share it
forbidden.append((node1, node2))
for o1, o2 in succ:
assert not instr.endswith('_match') or o1 is None or o1 in mc1.children, (o1, mc1.children)
assert not instr.endswith('_match') or o2 is None or o2 in mc2.children, (o2, mc2.children)
assert not instr.endswith('_skip2') or o1 == node1
assert not instr.endswith('_skip1') or o2 == node2
assert (o1, o2) not in forbidden, (o1, o2, forbidden)
child_root, _ = self.get_match_tree(match_tree=match_tree, folded=folded, \
node1=o1, node2=o2, size1=size1, size2=size2, \
forbidden=forbidden)
assert child_root >= 0
mc.children.append(child_root)
assert mc.children
return index, match_tree
elif node1 is None and node2 is not None:
mc2 = self.get_match_cell2(node2)
assert size1 != -1
assert size2 != -1
mc = MatchCell(size1 + size2)
mc.cands[0:size1] = [None for i in xrange(size1)]
mc.cands[size1:size1 + size2] = mc2.cands
mc.pos_tags[0:size1] = [None for i in xrange(size1)]
mc.pos_tags[size1:size1 + size2] = mc2.pos_tags
mc.words[0:size1] = [None for i in xrange(size1)]
mc.words[size1:size1 + size2] = mc2.words
mc.lemmas[0:size1] = [None for i in xrange(size1)]
mc.lemmas[size1:size1 + size2] = mc2.lemmas
mc.children = []
mc.match_type = 'single2'
match_tree.append(mc)
index = len(match_tree)
folded[(node1, node2)] = index
forbidden = [i for i in forbidden]
forbidden.append((node1, node2))
for c in mc2.children:
child_root, _ = self.get_match_tree(match_tree=match_tree, folded=folded, \
node1=None, node2=c, size1=size1, size2=size2, \
forbidden=forbidden)
assert child_root >= 0
mc.children.append(child_root)
assert mc.children, (node2, mc2.children)
return index, match_tree
elif node1 is not None and node2 is None:
mc1 = self.get_match_cell1(node1)
assert size1 != -1
assert size2 != -1
mc = MatchCell(size1 + size2)
mc.cands[0:size1] = mc1.cands
mc.cands[size1:size1 + size2] = [None for i in xrange(size2)]
mc.pos_tags[0:size1] = mc1.pos_tags
mc.pos_tags[size1:size1 + size2] = [None for i in xrange(size2)]
mc.words[0:size1] = mc1.words
mc.words[size1:size1 + size2] = [None for i in xrange(size2)]
mc.lemmas[0:size1] = mc1.lemmas
mc.lemmas[size1:size1 + size2] = [None for i in xrange(size2)]
mc.children = []
mc.match_type = 'single1'
match_tree.append(mc)
index = len(match_tree)
folded[(node1, node2)] = index
forbidden = [i for i in forbidden]
forbidden.append((node1, node2))
for c in mc1.children:
child_root, _ = self.get_match_tree(match_tree=match_tree, folded=folded, \
node1=c, node2=None, size1=size1, size2=size2, \
forbidden=forbidden)
assert child_root >= 0
mc.children.append(child_root)
assert mc.children
return index, match_tree
else:
assert False
def print_matched_lemmas(self, stream=sys.stdout, node1=-1, node2=-1, folded=-1):
if node1 == -1:
node1 = self.mt_root1
if node2 == -1:
node2 = self.mt_root2
if folded == -1:
folded = set()
if (node1, node2) in folded or node1 == 0 or node2 == 0:
return
folded.add((node1, node2))
mc1 = self.get_match_cell1(node1)
mc2 = self.get_match_cell2(node2)
instr, succ = self.path_matrix[node1][node2]
if instr.endswith('_match'):
print >> stream, "%s\t%s" % ('\t'.join(mc1.lemmas), '\t'.join(mc2.lemmas))
for (o1, o2) in succ:
self.print_matched_lemmas(stream, o1, o2, folded)
def overall_score(self):
return self.score_matrix[self.mt_root1, self.mt_root2]
def rescore(self, unscore_list, folded=-1, node1=-1, node2=-1):
if node1 == -1:
node1 = self.mt_root1
if node2 == -1:
node2 = self.mt_root2
if folded == -1:
folded = set()
if (node1, node2) in folded or node1 == 0 or node2 == 0 or node1 is None or node2 is None:
return 0
folded.add((node1, node2))
mc1 = self.get_match_cell1(node1)
mc2 = self.get_match_cell2(node2)
lemmas1 = mc1.lemmas
lemmas2 = mc2.lemmas
instr, succ = self.path_matrix[node1][node2]
rv = 0
if instr.endswith('_match'):
for s1, s2, penalty in unscore_list:
for l1 in s1:
for l2 in s2:
if l1 in lemmas1 and l2 in lemmas2:
rv += penalty
for (o1, o2) in succ:
rv += self.rescore(unscore_list, folded, o1, o2)
return rv
| dd-genomics-master | code/dep_alignment/multi_dep_alignment.py |
#! /usr/bin/env python
import sys
import glob
import os
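# Renumber a set of folders: for each name listed in the argument file (in order),
# find the matching folder (with or without an existing 'NN-' prefix) and rename
# it to 'NN-<name>' so the on-disk order follows the list.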
if __name__ == "__main__":
num = 0
if len(sys.argv) != 2:
print >>sys.stderr, 'Expecting list of folder names (without preceding numbers) in file as argument'
sys.exit(1)
with open(sys.argv[1]) as f:
for line in f:
assert num <= 99, 'Cannot handle more than 100 files (indices 00-99)'
filenames = glob.glob(line.strip())
assert len(filenames) <= 1, 'Multiple files of name %s' % line.strip()
if filenames:
filename = filenames[0]
stem = filename
else:
filenames = glob.glob('[0-9][0-9]-%s' % line.strip())
assert len(filenames) == 1, 'No file with name *-%s' % line.strip()
filename = filenames[0]
stem = filenames[0][3:]
new_filename = '%02d-%s' % (num, stem)
if new_filename != filename:
print 'Moving %s to %s' % (filename, new_filename)
os.rename(filename, new_filename)
else:
print 'Retaining filename %s' % filename
num += 1
| dd-genomics-master | snapshot-template/gill-reduced/number_folders.py |
#! /usr/bin/env python
import sys
import re
for line in sys.stdin:
print re.sub(r'\W+', ' ', line.strip())
| dd-genomics-master | onto/replace_non_alpha.py |
#! /usr/bin/env python
from xml.etree.ElementTree import ElementTree
import sys
import re
def attach_diseases(diseases, excludes):
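  # Split an OMIM title block (newline-separated, ';'-delimited names) into
  # individual disease names, normalize away punctuation, and skip any name
  # whose word set matches one of the excluded names.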
excludes = [set(d.strip().split()) for d in excludes]
for line in diseases.split('\n'):
names = line.strip().split(';')
for name in names:
# if 'GLOMERULOSCLEROSIS' in name:
# print >> sys.stderr, excludes
# print >> sys.stderr, set(re.sub(r'\W+', ' ', name.strip()).split(' '))
if len(name.strip()) > 0 and set(re.sub(r'\W+', ' ', name.strip()).split(' ')) not in excludes:
yield re.sub(r'\W+', ' ', name.strip())
if __name__ == "__main__":
filename = sys.argv[1]
doc = ElementTree(file=filename)
e = doc.findall('.//mimNumber')[0]
mim_number = e.text
names = []
alt_names = []
for e in doc.findall('.//preferredTitle'):
names += attach_diseases(e.text, [])
for e in doc.findall('.//alternativeTitles'):
alt_names += attach_diseases(e.text, names)
print "OMIM:%s\t%s\t%s" % (mim_number, '|^|'.join(names), '|^|'.join(alt_names))
| dd-genomics-master | onto/parse_diseases.py |
#! /usr/bin/env python
import sys
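# Convert a FASTA file of gene isoforms into TSV rows of the form
# "<transcript id>\t{<comma-separated sequence characters>}" (a Postgres array
# literal), with the transcript id parsed from the FASTA header.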
def main():
fname = sys.argv[1]
transcript = None
with open(fname) as f:
for line in f:
if line.startswith('>'):
if transcript:
print '%s\t{%s}' % (transcript, ','.join(sequence))
transcript = line.strip()[1:].split()[0].split('_')[2]
sequence = ''
continue
sequence += line.strip()
print '%s\t{%s}' % (transcript, ','.join(sequence))
if __name__ == "__main__":
main()
| dd-genomics-master | onto/geneIsoformsToTable.py |
#! /usr/bin/env python
import sys
def main():
fname = sys.argv[1]
transcript = None
with open(fname) as f:
for line in f:
if line.startswith('>'):
if transcript:
print '%s\t{%s}' % (transcript, ','.join(sequence))
transcript = line.strip()[1:]
sequence = ''
continue
sequence += line.strip()
print '%s\t{%s}' % (transcript, ','.join(sequence))
if __name__ == "__main__":
main()
| dd-genomics-master | onto/proteinIsoformsToTable.py |
import sys
import re
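# Join ClinVar variants to HPO phenotypes: map each OMIM disease id to its HPO
# terms (from hpo_disease_phenotypes.tsv), then for every ClinVar row that
# references an OMIM id, emit one "<HGVS variant>\t<HPO id>" line per phenotype.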
disease_to_hpos = {}
with open('data/hpo_disease_phenotypes.tsv', 'rb') as f:
for line in f.readlines():
source, source_id, name, name2, hpos = line.strip().split('\t')
if source == "OMIM":
disease_to_hpos[source_id] = hpos.split("|")
with open('raw/clinvar.tsv', 'rb') as f:
for line in f.readlines():
row = line.strip().split('\t')
pheno_ids = row[10]
try:
hgvs = [n.split(':')[1] for n in (row[18], row[19]) if len(n.strip()) > 0]
except IndexError:
#sys.stderr.write('\t'.join([row[18], row[19]])+'\n')
      hgvs = []  # malformed row: do not reuse HGVS names from the previous line
omim_match = re.search(r'OMIM:(\d+),', pheno_ids)
if omim_match:
omim_id = omim_match.group(1)
hpos = disease_to_hpos.get(omim_id)
if hpos:
for variant in hgvs:
for pheno in hpos:
print '%s\t%s' % (variant, pheno)
| dd-genomics-master | onto/join_clinvar_omim_hpo.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A constant-space parser for the GeneOntology OBO v1.2 format
Version 1.0
"""
from collections import defaultdict
__author__ = "Uli Koehler"
__copyright__ = "Copyright 2013 Uli Koehler"
__license__ = "Apache v2.0"
def processGOTerm(goTerm):
"""
In an object representing a GO term, replace single-element lists with
their only member.
Returns the modified object as a dictionary.
"""
    ret = dict(goTerm)  # input is a defaultdict; convert to a plain dict so missing keys are not silently created downstream
# for key, value in ret.iteritems():
# if len(value) == 1:
# ret[key] = value[0]
return ret
def parseGOOBO(filename):
"""
Parses a Gene Ontology dump in OBO v1.2 format.
    Yields each [Term] stanza as a dictionary mapping tags to lists of values.
Keyword arguments:
filename: The filename to read
"""
with open(filename, "r") as infile:
currentGOTerm = None
for line in infile:
line = line.strip()
if not line: continue #Skip empty
if line == "[Term]":
if currentGOTerm: yield processGOTerm(currentGOTerm)
currentGOTerm = defaultdict(list)
elif line == "[Typedef]":
#Skip [Typedef sections]
currentGOTerm = None
else: #Not [Term]
#Only process if we're inside a [Term] environment
if currentGOTerm is None: continue
key, sep, val = line.partition(":")
currentGOTerm[key].append(val.strip())
#Add last term
if currentGOTerm is not None:
yield processGOTerm(currentGOTerm)
if __name__ == "__main__":
"""Print out the number of GO objects in the given GO OBO file"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='The input file in GO OBO v1.2 format.')
args = parser.parse_args()
#Iterate over GO terms
termCounter = 0
for goTerm in parseGOOBO(args.infile):
termCounter += 1
print "Found %d GO terms" % termCounter | dd-genomics-master | onto/obo_parser.py |
#! /usr/bin/env python
import os
APP_HOME = os.environ['GDD_HOME']
import sys
sys.path.append('%s/code' % APP_HOME)
import data_util as dutil
### ATTENTION!!!! PLEASE PIPE THE OUTPUT OF THIS SCRIPT THROUGH sort | uniq !!! ###
### Doing it within python is a waste of resources. Linux does it much faster. ###
def get_parents(bottom_id, dag, root_id='HP:0000118'):
if bottom_id == root_id:
return set([bottom_id])
rv = set()
if bottom_id in dag.edges:
for parent in dag.edges[bottom_id]:
      rv |= get_parents(parent, dag, root_id)
rv.add(bottom_id)
return rv
if __name__ == '__main__':
hpo_dag = dutil.read_hpo_dag()
with open('%s/onto/data/hpo_phenotypes.tsv' % APP_HOME) as f:
for line in f:
toks = line.strip().split('\t')
hpo_id = toks[0]
pheno_name = toks[1]
parent_ids = get_parents(hpo_id, hpo_dag) # includes the original hpo_id
assert hpo_id in parent_ids
if 'HP:0000118' not in parent_ids:
continue
sys.stdout.write(hpo_id + '\t' + pheno_name + '\n')
sys.stdout.flush()
| dd-genomics-master | onto/load_hpo_abnormalities.py |
#! /usr/bin/env python
import sys
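# For OMIM entries that belong to a phenotypic series, move their alternative
# names up onto the series record; both the diseases file and the phenotypic
# series file are then rewritten in place.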
if len(sys.argv) != 4:
print >> sys.stderr, "usage: ./blah diseases_file ps_file ps_to_omim_file"
sys.exit(1)
diseases_filename = sys.argv[1]
ps_filename = sys.argv[2]
ps_to_omim_filename = sys.argv[3]
omim_to_ps = {}
ps_alt_names = {}
with open(ps_to_omim_filename) as f:
for line in f:
line = line.strip().split('\t')
omim_to_ps[line[1]] = line[0]
omim_names = {}
omim_alt_names = {}
with open(diseases_filename) as f:
for line in f:
line = line.strip().split('\t')
omim_id = line[0]
names = line[1]
if len(line) >= 3:
alt_names = line[2]
else:
alt_names = ''
omim_names[omim_id] = names
if omim_id in omim_to_ps:
omim_alt_names[omim_id] = ''
ps_id = omim_to_ps[omim_id]
if ps_id not in ps_alt_names:
ps_alt_names[ps_id] = []
if len(alt_names) > 0:
ps_alt_names[ps_id].extend(alt_names.split('|^|'))
else:
omim_alt_names[omim_id] = alt_names
ps_names = {}
with open(ps_filename) as f:
for line in f:
line = line.strip().split('\t')
ps_names[line[0]] = line[1]
with open(diseases_filename, 'w') as f:
f.seek(0)
for omim_id in omim_names:
names = omim_names[omim_id]
alt_names = omim_alt_names[omim_id]
if alt_names is not None:
print >> f, "%s\t%s\t%s" % (omim_id, names, alt_names)
else:
print >> f, "%s\t%s\t" % (omim_id, names)
with open(ps_filename, 'w') as f:
f.seek(0)
for ps_id in ps_names:
name = ps_names[ps_id]
if ps_id in ps_alt_names:
alt_names = '|^|'.join(ps_alt_names[ps_id])
else:
alt_names = ''
print >> f, "%s\t%s\t%s" % (ps_id, name, alt_names)
| dd-genomics-master | onto/omim_alt_names_to_series.py |
#! /usr/bin/env python
import os
APP_HOME = os.environ['GDD_HOME']
import sys
sys.path.append('%s/code' % APP_HOME)
import data_util as dutil
import argparse
### ATTENTION!!!! PLEASE PIPE THE OUTPUT OF THIS SCRIPT THROUGH sort | uniq !!! ###
### Doing it within python is a waste of resources. Linux does it much faster. ###
if __name__ == '__main__':
hpo_dag = dutil.read_hpo_dag()
parser = argparse.ArgumentParser()
parser.add_argument('--only-abnormalities', required=False, action="store_true")
args = parser.parse_args()
for line in sys.stdin:
toks = line.strip().split()
hpo_id = toks[0]
ensemble_gene = toks[1]
parent_ids = dutil.get_parents(hpo_id, hpo_dag) # includes the original hpo_id
assert hpo_id in parent_ids
if args.only_abnormalities:
if 'HP:0000118' not in parent_ids:
sys.stderr.write('"{0}": not a phenotypic abnormality\n'.format(hpo_id.strip()))
continue
parent_ids.remove('HP:0000118')
for parent_id in parent_ids:
sys.stdout.write('{0}\t{1}\n'.format(parent_id, ensemble_gene))
sys.stdout.flush()
| dd-genomics-master | onto/canonicalize_gene_phenotype.py |
"""
Output fields:
id, name, synonyms, related terms, alt IDs, parent, MeSh terms
"""
import argparse
from obo_parser import parseGOOBO
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='Input HPO file in OBO v1.2 format.')
parser.add_argument('outfile', help='Output TSV file name.')
args = parser.parse_args()
with open(args.outfile, 'w') as out:
for term in parseGOOBO(args.infile):
id = term['id'][0]
name = term['name'][0]
alt_ids = '|'.join(term['alt_id']) if 'alt_id' in term else ''
is_a = '|'.join(x.partition(' ')[0] for x in term['is_a']) if 'is_a' in term else ''
synonyms = set()
related = set()
mesh = set()
for s in term.get('synonym', []):
if ' EXACT [' in s:
synonyms.add(s.split(' EXACT [')[0].strip('" '))
else:
# RELATED, BROAD, etc.
related.add(s.split('" ')[0].strip('"'))
for n in term.get('xref', []):
if ' ' in n:
cur_syn = n.partition(' ')[-1].strip('" ')
#synonyms.add(cur_syn)
xref_id = n.split(' ')[0]
source = xref_id.split(':')[0]
if source == 'MeSH':
mesh.add(cur_syn)
synonyms.discard(name)
related.discard(name)
synonyms = '|'.join(sorted(synonyms)) if synonyms else ''
related = '|'.join(sorted(related)) if related else ''
mesh = '|'.join(sorted(mesh)) if mesh else ''
out.write('\t'.join([id, name.replace('\t', ' '), synonyms.replace('\t', ' '), related.replace('\t', ' '), alt_ids.replace('\t', ' '), is_a.replace('\t', ' '), mesh.replace('\t', ' ')]) + '\n')
| dd-genomics-master | onto/parse_hpo.py |
#!/usr/bin/env python
import sys
import re
import os
#from nltk.stem.snowball import SnowballStemmer
#from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
GDD_HOME = os.environ['GDD_HOME']
# [Alex 4/12/15]:
# This script is for preprocessing a dictionary of phenotype phrase - HPO code pairs to be used
# primarily in the candidate extraction stage of the phenotype pipeline
# Currently we take the list of phenotype names and synonym phrases and normalize (lower case,
# lemmatization, stop words removal, etc). Note that we also keep the exact form as well
# Choose which lemmatizer / stemmer to use
# Based on some rough testing:
# - WordNetLemmatizer has an error rate of ~10% wrt lemmatization of raw data in db (this is mostly verbs since we don't use POS tag info, and would be << 10% if only counting unique words)
# - SnowballStemmer is much faster but has ~30% error rate
# TODO: preprocess using Stanford CoreNLP lemmatizer for exact alignment w raw data?
lemmatizer = WordNetLemmatizer()
def lemmatize(w):
if w.isalpha():
return lemmatizer.lemmatize(w)
else:
# Things involving non-alphabetic characters, don't try to lemmatize
return w
STOPWORDS = [w.strip() for w in open('%s/onto/manual/stopwords.tsv' % (GDD_HOME,), 'rb')]
def normalize_phrase(p):
"""Lowercases, removes stop words, and lemmatizes inputted multi-word phrase"""
out = []
# split into contiguous alphanumeric segments, lower-case, filter stopwords, lemmatize
ws = [re.sub(r'[^a-z0-9]', '', w) for w in p.lower().split()]
ws = [w for w in ws if w not in STOPWORDS]
ws = [lemmatize(w) for w in ws]
out.append(' '.join(ws))
# if there's a comma, try permuting the order (some of these cases ommitted from HPO!)
if ',' in p:
cs = re.split(r'\s*,\s*', p.strip())
out += normalize_phrase(' '.join(cs[::-1]))
return out
def load_diseases(filename):
out = []
for line in open(filename):
row = line.split('\t')
omim_ps_id = row[0]
names = row[1].split('|^|')
alt_names = row[2].split('|^|')
forms = []
exact = []
for p in names:
if len(p.strip().lower()) > 0 and len(p.strip().split()) > 1:
exact.append(p.strip().lower())
forms.append((omim_ps_id, p.strip().lower(), 'EXACT'))
for p in alt_names:
if len(p.strip().lower()) > 0 and len(p.strip().split()) > 1:
exact.append(p.strip().lower())
forms.append((omim_ps_id, p.strip().lower(), 'EXACT'))
for p in exact:
forms += [(omim_ps_id, np.strip(), 'LEMMA') for np in normalize_phrase(p) if len(np.strip()) > 0 and len(np.strip().split()) > 1]
for f in forms:
k = f[0] + f[1]
if not seen.has_key(k):
seen[k] = 1
out.append(f)
return out
if __name__ == "__main__":
out_pheno = []
out_disease = []
seen = {}
load_data_tsv = lambda f : [line.split('\t') for line in open('%s/onto/data/%s' % (GDD_HOME, f), 'rb')]
for row in load_data_tsv('hpo_phenotypes.tsv'):
hpo_id = row[0]
exact = [row[1].lower()]
if len(row) > 2:
exact += [p.strip().lower() for p in row[2].split('|') if len(p.strip()) > 0]
forms = [(hpo_id, p, "EXACT") for p in exact]
for p in exact:
forms += [(hpo_id, np, "LEMMA") for np in normalize_phrase(p) if len(np.strip()) > 0]
for f in forms:
k = f[0] + f[1]
if not seen.has_key(k):
seen[k] = 1
out_pheno.append(f)
out_disease.extend(load_diseases('%s/onto/manual/phenotypic_series.tsv' % GDD_HOME))
out_disease.extend(load_diseases('%s/onto/manual/diseases.tsv' % GDD_HOME))
with open("%s/onto/manual/pheno_terms.tsv" % (GDD_HOME,), 'w') as f:
for o in out_pheno:
f.write('\t'.join(o))
f.write('\n')
with open("%s/onto/manual/disease_terms.tsv" % (GDD_HOME,), 'w') as f:
for o in out_disease:
f.write('\t'.join(o))
f.write('\n')
| dd-genomics-master | onto/prep_pheno_terms.py |
#! /usr/bin/env python
import fileinput
import sys
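# Clean up PubMed metadata rows (pmid, journal, year, ISSNs, MeSH terms):
# normalize the year field, falling back to any digits found in it (or the
# placeholder 2100), and record how the year was obtained.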
def getDigits(text):
c = ''
for i in text:
if i.isdigit():
c += i
if len(c) > 0:
return int(c)
return -1
if __name__ == "__main__":
for line in fileinput.input():
comps = line.strip().split('\t')
if len(comps) == 0:
continue
elif len(comps) == 6 or len(comps) == 7:
text_year = comps[2].strip()
pmid = comps[0]
source_name = comps[1]
issn_global = comps[3]
issn_print = comps[4]
issn_electronic = comps[5]
if len(comps) >= 7:
mesh_terms = comps[6]
else:
mesh_terms = ''
if pmid == 'null':
continue
if text_year == 'null':
year = 2100
year_status = 'null'
elif text_year.isdigit():
year = int(text_year)
year_status = 'ok'
else:
year = getDigits(text_year)
if year < 1850:
year = 2100
year_status = 'not ok'
else:
year_status = 'extracted'
else:
print line
continue
print '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (pmid, source_name, year, text_year, year_status, issn_global, issn_print, issn_electronic, mesh_terms)
| dd-genomics-master | parser/md_cleanup.py |
import json
import os
import re
import lxml.etree as et
class XMLTree:
"""
A generic tree representation which takes XML as input
Includes subroutines for conversion to JSON & for visualization based on js form
"""
def __init__(self, xml_root):
"""Calls subroutines to generate JSON form of XML input"""
self.root = xml_root
def to_str(self):
return et.tostring(self.root)
def sentence_to_xmltree(sentence_input, prune_root=True):
"""Transforms a util.SentenceInput object into an XMLTree"""
root = sentence_to_xmltree_sub(sentence_input, 0)
# Often the return tree will have several roots, where one is the actual root
# And the rest are just singletons not included in the dep tree parse...
# We optionally remove these singletons and then collapse the root if only one child left
if prune_root:
for c in root:
if len(c) == 0:
root.remove(c)
if len(root) == 1:
root = root.findall("./*")[0]
return XMLTree(root)
def sentence_to_xmltree_sub(s, rid=0):
"""Recursive subroutine to construct XML tree from util.SentenceInput object"""
i = rid - 1
attrib = {}
if i >= 0:
for k,v in filter(lambda x : type(x[1]) == list, s._asdict().iteritems()):
if v[i] is not None:
attrib[singular(k)] = str(v[i])
root = et.Element('node', attrib=attrib)
for i,d in enumerate(s.dep_parents):
if d == rid:
root.append(sentence_to_xmltree_sub(s, i+1))
return root
def singular(s):
"""Get singular form of word s (crudely)"""
return re.sub(r'e?s$', '', s, flags=re.I)
def html_table_to_xmltree(html):
"""HTML/XML table to XMLTree object"""
node = et.fromstring(re.sub(r'>\s+<', '><', html.strip()))
xml = html_table_to_xmltree_sub(node)
return XMLTree(xml)
def html_table_to_xmltree_sub(node):
"""
Take the XML/HTML table and convert each word in leaf nodes into its own node
Note: Ideally this text would be run through CoreNLP?
"""
# Split text into Token nodes
# NOTE: very basic token splitting here... (to run through CoreNLP?)
if node.text is not None:
for tok in re.split(r'\s+', node.text):
node.append(et.Element('token', attrib={'word':tok}))
# Recursively append children
for c in node:
node.append(html_table_to_xmltree_sub(c))
return node
| dd-genomics-master | dsr/tree_structs.py |
from collections import namedtuple
import re
def read_ptsv_element(x):
"""
Parse an element in psql-compatible tsv format, i.e. {-format arrays
Takes a string as input, handles float, int, str vals, and arrays of these types
"""
if len(x) == 0:
return None
if x[0] == '{':
return map(read_ptsv_element, re.split(r'\"?,\"?', re.sub(r'^\{\"?|\"?\}$', '', x)))
for type_fn in [int, float, str]:
try:
return type_fn(x)
except ValueError:
pass
raise ValueError("Type not recognized.")
def read_ptsv(line):
"""
Parse a line in psql-compatible tsv format
I.e. tab-separated with psql {-style arrays
"""
return map(read_ptsv_element, line.rstrip().split('\t'))
SentenceInput = namedtuple('SentenceInput', 'doc_id, sent_id, text, words, lemmas, poses, ners, char_idxs, dep_labels, dep_parents, word_idxs')
def load_sentences(f_path):
"""
Helper fn to load NLP parser output file as SentenceInput objects
"""
for line in open(f_path, 'rb'):
l = read_ptsv(line)
yield SentenceInput._make(l + [range(len(l[3]))])
def tag_candidate(root, words, cid):
"""
Hackey function to tag candidates in xml tree
Note for example that this will get messed up if the words comprising the candidate occur
elsewhere in the sentence also...
"""
for word in words:
root.findall(".//node[@word='%s']" % word)[0].set('cid', cid)
| dd-genomics-master | dsr/treedlib_util.py |
from IPython.core.display import display_html, HTML, display_javascript, Javascript
import json
import os
import re
import lxml.etree as et
class XMLTree:
"""
A generic tree representation which takes XML as input
Includes subroutines for conversion to JSON & for visualization based on js form
"""
def __init__(self, xml_root):
"""Calls subroutines to generate JSON form of XML input"""
self.root = xml_root
self.json = self._to_json(self.root)
# create a unique id for e.g. canvas id in notebook
self.id = str(abs(hash(self.to_str())))
def _to_json(self, root):
js = {
'attrib': dict(root.attrib),
'children': []
}
for i,c in enumerate(root):
js['children'].append(self._to_json(c))
return js
def to_str(self):
return et.tostring(self.root)
def render_tree(self):
"""
Renders d3 visualization of the d3 tree, for IPython notebook display
Depends on html/js files in vis/ directory, which is assumed to be in same dir...
"""
# TODO: Make better control over what format / what attributes displayed @ nodes!
# HTML
html = open('vis/tree-chart.html').read() % self.id
display_html(HTML(data=html))
# JS
JS_LIBS = ["http://d3js.org/d3.v3.min.js"]
js = open('vis/tree-chart.js').read() % (json.dumps(self.json), self.id)
display_javascript(Javascript(data=js, lib=JS_LIBS))
def sentence_to_xmltree(sentence_input, prune_root=True):
"""Transforms a util.SentenceInput object into an XMLTree"""
root = sentence_to_xmltree_sub(sentence_input, 0)
# Often the return tree will have several roots, where one is the actual root
# And the rest are just singletons not included in the dep tree parse...
# We optionally remove these singletons and then collapse the root if only one child left
if prune_root:
for c in root:
if len(c) == 0:
root.remove(c)
if len(root) == 1:
root = root.findall("./*")[0]
return XMLTree(root)
def sentence_to_xmltree_sub(s, rid=0):
"""Recursive subroutine to construct XML tree from util.SentenceInput object"""
i = rid - 1
attrib = {}
if i >= 0:
for k,v in filter(lambda x : type(x[1]) == list, s._asdict().iteritems()):
if v[i] is not None:
attrib[singular(k)] = str(v[i])
root = et.Element('node', attrib=attrib)
for i,d in enumerate(s.dep_parents):
if d == rid:
root.append(sentence_to_xmltree_sub(s, i+1))
return root
def singular(s):
"""Get singular form of word s (crudely)"""
return re.sub(r'e?s$', '', s, flags=re.I)
def html_table_to_xmltree(html):
"""HTML/XML table to XMLTree object"""
node = et.fromstring(re.sub(r'>\s+<', '><', html.strip()))
xml = html_table_to_xmltree_sub(node)
return XMLTree(xml)
def html_table_to_xmltree_sub(node):
"""
Take the XML/HTML table and convert each word in leaf nodes into its own node
Note: Ideally this text would be run through CoreNLP?
"""
# Split text into Token nodes
# NOTE: very basic token splitting here... (to run through CoreNLP?)
if node.text is not None:
for tok in re.split(r'\s+', node.text):
node.append(et.Element('token', attrib={'word':tok}))
# Recursively append children
for c in node:
node.append(html_table_to_xmltree_sub(c))
return node
| dd-genomics-master | dsr/tree_structs_ipynb.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import json
import sys
from nltk.stem import WordNetLemmatizer
import re
from nltk.corpus import stopwords
def load_gene_name_to_genes(ensembl_genes_path):
ret = {}
with open(ensembl_genes_path) as f:
for line in f:
eid = line.strip().split(':')[0]
canonical_name = (line.strip().split(':')[1]).split()[0]
name = line.strip().split()[1]
mapping_type = line.strip().split()[2]
ret[name] = (eid, canonical_name, mapping_type)
return ret
min_word_len = {'ENSEMBL_ID': 2, 'REFSEQ': 2, 'NONCANONICAL_SYMBOL': 4, 'CANONICAL_SYMBOL': 2}
bad_genes = ['ANOVA', 'MRI', 'CO2', 'gamma', 'spatial', 'tau', 'Men', 'ghrelin', 'MIM', 'NHS', 'STD', 'hole']
def comp_gene_rgxs(ensembl_genes_path):
gene_names = []
gene_name_to_genes = load_gene_name_to_genes(ensembl_genes_path)
for name in gene_name_to_genes:
if name in bad_genes:
continue
(eid, canonical_name, mapping_type) = gene_name_to_genes[name]
if mapping_type not in ['CANONICAL_SYMBOL', 'NONCANONICAL_SYMBOL']:
continue
if mapping_type == 'NONCANONICAL_SYMBOL':
min_len = 4
else:
min_len = 2
if len(name) < min_len:
continue
if not re.match(r'.*[a-zA-Z].*', name):
continue
gene_names.append('[\.,_ \(\)]' + name + '[\.,_ \(\)]')
return re.compile('(' + '|'.join(gene_names) + ')')
def replace_genes(content, genes_rgx):
return genes_rgx.sub(' ENSEMBLGENE ', content)
a = r'[cgrnm]'
i = r'IVS'
b = r'ATCGatcgu'
s1 = r'0-9\_\.\:'
s2 = r'\/>\?\(\)\[\]\;\:\*\_\-\+0-9'
s3 = r'\/><\?\(\)\[\]\;\:\*\_\-\+0-9'
b1 = r'[%s]' % b
bs1 = r'[%s%s]' % (b,s1)
bs2 = r'[%s %s]' % (b,s2)
bs3 = r'[%s %s]' % (b,s3)
c1 = r'(inv|del|ins|dup|tri|qua|con|delins|indel)'
c2 = r'(del|ins|dup|tri|qua|con|delins|indel)'
c3 = r'([Ii]nv|[Dd]el|[Ii]ns|[Dd]up|[Tt]ri|[Qq]ua|[Cc]on|[Dd]elins|[Ii]ndel|fsX|fsx|fs)'
p = r'CISQMNPKDTFAGHLRWVEYX'
ps2 = r'[%s %s]' % (p, s2)
ps3 = r'[%s %s]' % (p, s3)
d = '[ATCGRYUatgc]'
aa_long_to_short = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
aa_camel = {}
for aa in aa_long_to_short:
aa_camel[aa[0] + aa[1].lower() + aa[2].lower()] = aa_long_to_short[aa]
aal = '(' + '|'.join([x for x in aa_long_to_short] + [x for x in aa_camel]) + ')'
# regexes from tmVar paper
# See Table 3 in http://bioinformatics.oxfordjournals.org/content/early/2013/04/04/bioinformatics.btt156.full.pdf
def comp_gv_rgxs():
GV_RGXS = [
r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))([\+\-\*][0-9]+)?(%s)[->/β](%s)' % (d, d),
r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))?([\+\-\*][0-9]+)?(%s)(%s+)?' % (c3, d),
r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)[->/β](%s)' % (d, d),
r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)' % d,
r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s)-*[>/β](%s)' % (d, d),
r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s?)(%s+)' % (c3, d),
r'^p\.(([%s])|%s)([0-9]+)(([%s])|%s)' % (p, aal, p, aal),
r'^p\.(([%s])|%s)([0-9]+)[_]+(([%s])|%s)([0-9]+)(%s)' % (p, aal, p, aal, c3),
r'^p\.(([%s])|%s)([0-9]+)(%s)' % (p, aal, c3),
r'^(%s)([0-9]+)(%s)' % (d, d)
]
return re.compile('(' + '|'.join(GV_RGXS) + ')', flags=re.I)
gv_rgx = comp_gv_rgxs()
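# The compiled pattern is applied with gv_rgx.sub below; example mention strings
# these alternatives are written to recognize (standard HGVS-style notation):
# 'c.76A>T', 'IVS2+1G>A', 'p.R117H', 'p.Arg117His'.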
def replace_variants(content):
return gv_rgx.sub(' GENEVARIANT ', content)
lemmatizer = WordNetLemmatizer()
def lemmatize(content):
return [lemmatizer.lemmatize(s) for s in content]
no_alnum = re.compile(r'[\W_ ]+')
if __name__ == "__main__":
if len(sys.argv) != 4:
print >>sys.stderr, "need 3 args: symbol for file (NOT used for stdin), ensembl genes path, output path"
sys.exit(1)
pubmed = sys.argv[1]
ensembl_genes_path = sys.argv[2]
out_path = sys.argv[3]
gene_rgx = comp_gene_rgxs(ensembl_genes_path)
with open(out_path, 'w') as f:
ctr = -1
for line in sys.stdin:
ctr += 1
if ctr % 500 == 0:
print >>sys.stderr, "replacing %d lines in %s " % (ctr, pubmed)
item = json.loads(line)
pmid = item['doc-id']
content = item['content']
content = replace_genes(content, gene_rgx)
content = replace_variants(content)
content = [w for w in no_alnum.sub(' ', content).lower().split() if w not in stopwords.words('english')]
# content = no_alnum.sub(' ', content).lower()
print >>f, "%s\t%s" % (pmid, ' '.join(lemmatize(content)))
| dd-genomics-master | document_classifier/classification/lemmatize_gpv_stdin.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import json
import sys
from nltk.stem import WordNetLemmatizer
import re
from nltk.corpus import stopwords
def load_gene_name_to_genes(ensembl_genes_path):
ret = {}
with open(ensembl_genes_path) as f:
for line in f:
eid = line.strip().split(':')[0]
canonical_name = (line.strip().split(':')[1]).split()[0]
name = line.strip().split()[1]
mapping_type = line.strip().split()[2]
ret[name] = (eid, canonical_name, mapping_type)
return ret
min_word_len = {'ENSEMBL_ID': 2, 'REFSEQ': 2, 'NONCANONICAL_SYMBOL': 4, 'CANONICAL_SYMBOL': 2}
bad_genes = ['ANOVA', 'MRI', 'CO2', 'gamma', 'spatial', 'tau', 'Men', 'ghrelin', 'MIM', 'NHS', 'STD', 'hole']
def comp_gene_rgxs(ensembl_genes_path):
gene_names = []
gene_name_to_genes = load_gene_name_to_genes(ensembl_genes_path)
for name in gene_name_to_genes:
if name in bad_genes:
continue
(eid, canonical_name, mapping_type) = gene_name_to_genes[name]
if mapping_type not in ['CANONICAL_SYMBOL', 'NONCANONICAL_SYMBOL']:
continue
if mapping_type == 'NONCANONICAL_SYMBOL':
min_len = 4
else:
min_len = 2
if len(name) < min_len:
continue
if not re.match(r'.*[a-zA-Z].*', name):
continue
gene_names.append('[\.,_ \(\)]' + name + '[\.,_ \(\)]')
return re.compile('(' + '|'.join(gene_names) + ')')
def replace_genes(content, genes_rgx):
return genes_rgx.sub(' ENSEMBLGENE ', content)
a = r'[cgrnm]'
i = r'IVS'
b = r'ATCGatcgu'
s1 = r'0-9\_\.\:'
s2 = r'\/>\?\(\)\[\]\;\:\*\_\-\+0-9'
s3 = r'\/><\?\(\)\[\]\;\:\*\_\-\+0-9'
b1 = r'[%s]' % b
bs1 = r'[%s%s]' % (b,s1)
bs2 = r'[%s %s]' % (b,s2)
bs3 = r'[%s %s]' % (b,s3)
c1 = r'(inv|del|ins|dup|tri|qua|con|delins|indel)'
c2 = r'(del|ins|dup|tri|qua|con|delins|indel)'
c3 = r'([Ii]nv|[Dd]el|[Ii]ns|[Dd]up|[Tt]ri|[Qq]ua|[Cc]on|[Dd]elins|[Ii]ndel|fsX|fsx|fs)'
p = r'CISQMNPKDTFAGHLRWVEYX'
ps2 = r'[%s %s]' % (p, s2)
ps3 = r'[%s %s]' % (p, s3)
d = '[ATCGRYUatgc]'
aa_long_to_short = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
aa_camel = {}
for aa in aa_long_to_short:
aa_camel[aa[0] + aa[1].lower() + aa[2].lower()] = aa_long_to_short[aa]
aal = '(' + '|'.join([x for x in aa_long_to_short] + [x for x in aa_camel]) + ')'
# regexes from tmVar paper
# See Table 3 in http://bioinformatics.oxfordjournals.org/content/early/2013/04/04/bioinformatics.btt156.full.pdf
def comp_gv_rgxs():
GV_RGXS = [
r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))([\+\-\*][0-9]+)?(%s)[->/β](%s)' % (d, d),
r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))?([\+\-\*][0-9]+)?(%s)(%s+)?' % (c3, d),
r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)[->/β](%s)' % (d, d),
r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)' % d,
r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s)-*[>/β](%s)' % (d, d),
r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s?)(%s+)' % (c3, d),
r'^p\.(([%s])|%s)([0-9]+)(([%s])|%s)' % (p, aal, p, aal),
r'^p\.(([%s])|%s)([0-9]+)[_]+(([%s])|%s)([0-9]+)(%s)' % (p, aal, p, aal, c3),
r'^p\.(([%s])|%s)([0-9]+)(%s)' % (p, aal, c3),
r'^(%s)([0-9]+)(%s)' % (d, d)
]
return re.compile('(' + '|'.join(GV_RGXS) + ')', flags=re.I)
gv_rgx = comp_gv_rgxs()
def replace_variants(content):
return gv_rgx.sub(' GENEVARIANT ', content)
lemmatizer = WordNetLemmatizer()
def lemmatize(content):
return [lemmatizer.lemmatize(s) for s in content]
no_alnum = re.compile(r'[\W_ ]+')
if __name__ == "__main__":
if len(sys.argv) != 4:
print >>sys.stderr, "need 4 args: path to the full pubmed json, ensembl genes path, output path"
sys.exit(1)
pubmed = sys.argv[1]
ensembl_genes_path = sys.argv[2]
out_path = sys.argv[3]
gene_rgx = comp_gene_rgxs(ensembl_genes_path)
out = {}
with open(pubmed) as f:
ctr = -1
for line in f:
ctr += 1
if ctr % 500 == 0:
print >>sys.stderr, "replacing %d lines in %s " % (ctr, pubmed)
item = json.loads(line)
pmid = item['doc-id']
content = item['content']
content = replace_genes(content, gene_rgx)
content = replace_variants(content)
content = [w for w in no_alnum.sub(' ', content).lower().split() if w not in stopwords.words('english')]
if pmid in out:
out[pmid] += content
else:
out[pmid] = content
print >>sys.stderr, "Printing and lemmatizing"
with open(out_path, 'w') as f:
ctr = -1
for pmid in out:
ctr += 1
if ctr % 500 == 0:
print >>sys.stderr, "lemmatizing %d lines in %s " % (ctr, pubmed)
print >>f, "%s\t%s" % (pmid, ' '.join(lemmatize(out[pmid])))
| dd-genomics-master | document_classifier/classification/lemmatize_gpv.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import sys
from bunch import *
import numpy as np
import random
from sklearn.linear_model import LogisticRegression
from nltk.stem import WordNetLemmatizer
import re
import cPickle
def load_unlabeled_docs_processed(data_path):
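  # Read "pmid<TAB>lemmatized text" rows into a Bunch with parallel .pmids and
  # .data lists (no labels; this is the corpus to classify).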
b = Bunch()
b.data = []
b.pmids = []
with open(data_path) as f:
ctr = -1
for line in f:
ctr += 1
if ctr % 100000 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = line.strip().split('\t')
pmid = item[0]
data = item[1]
b.pmids.append(pmid)
b.data.append(data)
return b
if __name__ == "__main__":
if len(sys.argv) != 3:
print >>sys.stderr, "need 2 args: path to all (pubmed) TSV, output path"
sys.exit(1)
all_path = sys.argv[1]
with open('clf.pkl', 'rb') as f:
print >>sys.stderr, "Loading classifier for %s" % all_path
clf = cPickle.load(f)
with open('count_vect.pkl', 'rb') as f:
print >>sys.stderr, "Loading count vectorizer for %s" % all_path
count_vect = cPickle.load(f)
with open('tfidf_transformer.pkl', 'rb') as f:
print >>sys.stderr, "Loading tfidf transformer for %s" % all_path
tfidf_transformer = cPickle.load(f)
print >>sys.stderr, "Loading all docs"
docs_new = load_unlabeled_docs_processed(all_path)
print >>sys.stderr, "Number of docs: %d" % len(docs_new.data)
print >>sys.stderr, "Transforming new docs through count vectorization"
X_new_counts = count_vect.transform(docs_new.data)
print >>sys.stderr, "Transforming new docs through tf-idf"
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
print >>sys.stderr, "Predicting over new docs"
predicted = clf.predict(X_new_tfidf)
print >>sys.stderr, "Printing to %s" % sys.argv[2]
with open(sys.argv[2], 'w') as f:
for i, value in enumerate(predicted):
if value == 1:
print >>f, docs_new.pmids[i]
| dd-genomics-master | document_classifier/classification/classify.py |
#! /usr/bin/env python
import sys
if __name__ == "__main__":
cur_pmid = -1
cur_str = ''
for line in sys.stdin:
comps = line.strip().split('\t')
pmid = int(comps[0])
if pmid == cur_pmid:
cur_str += ' '
cur_str += comps[1]
else:
if cur_pmid != -1:
print "%s\t%s" % (cur_pmid, cur_str)
cur_pmid = pmid
cur_str = comps[1]
if cur_pmid != -1:
print "%s\t%s" % (cur_pmid, cur_str)
| dd-genomics-master | document_classifier/classification/merge_lines.py |
#!/usr/bin/env python
from collections import defaultdict, namedtuple
import sys
import re
import os
import random
from itertools import chain
import extractor_util as util
import data_util as dutil
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]')])
# This defines the output Mention object
Mention = namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'entity',
'words',
'is_correct'])
### CANDIDATE EXTRACTION ###
HF = config.PHENO['HF']
SR = config.PHENO['SR']
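# HF holds the hard-filter / candidate-window settings (max phrase length, split
# tokens, minimum word length, etc.); SR holds the distant-supervision rules
# (post-match patterns, MeSH supervision, random negatives) used below.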
def load_pheno_terms():
  """
  Load phenotypes (as phrases + as frozensets to allow permutations)
  Output a dict with pheno phrases as keys, and a dict with pheno sets as keys
  """
  phenos = {}
  pheno_sets = {}
# [See onto/prep_pheno_terms.py]
# Note: for now, we don't distinguish between lemmatized / exact
rows = [line.split('\t') for line in open(onto_path('data/pheno_terms.tsv'), 'rb')]
for row in rows:
hpoid, phrase, entry_type = [x.strip() for x in row]
if hpoid in hpo_phenos:
if phrase in phenos:
phenos[phrase].append(hpoid)
else:
phenos[phrase] = [hpoid]
phrase_bow = frozenset(phrase.split())
if phrase_bow in pheno_sets:
pheno_sets[phrase_bow].append(hpoid)
else:
pheno_sets[phrase_bow] = [hpoid]
return phenos, pheno_sets
def keep_word(w):
return (w.lower() not in STOPWORDS and len(w) > HF['min-word-len'] - 1)
def extract_candidate_mentions(row):
"""Extracts candidate phenotype mentions from an input row object"""
mentions = []
# First we initialize a list of indices which we 'split' on,
# i.e. if a window intersects with any of these indices we skip past it
split_indices = set()
# split on certain characters / words e.g. commas
split_indices.update([i for i,w in enumerate(row.words) if w in HF['split-list']])
# split on segments of more than M consecutive skip words
seq = []
for i,w in enumerate(row.words):
if not keep_word(w):
seq.append(i)
else:
if len(seq) > HF['split-max-stops']:
split_indices.update(seq)
seq = []
# Next, pass a window of size n (dec.) over the sentence looking for candidate mentions
for n in reversed(range(1, min(len(row.words), HF['max-len'])+1)):
for i in range(len(row.words)-n+1):
wordidxs = range(i,i+n)
words = [w.lower() for w in row.words[i:i+n]]
lemmas = [w.lower() for w in row.lemmas[i:i+n]]
# skip this window if it intersects with the split set
if not split_indices.isdisjoint(wordidxs):
continue
# skip this window if it is sub-optimal: e.g. starts with a skip word, etc.
if not all(map(keep_word, [words[0], lemmas[0], words[-1], lemmas[-1]])):
continue
# Note: we filter stop words coordinated between word and lemma lists
# (i.e. if lemmatized version of a word is stop word, it should be stop word too)
# This also keeps these filtered lists in sync!
ws, lws = zip(*[(words[k], lemmas[k]) for k in range(n) if keep_word(words[k]) and keep_word(lemmas[k])])
# (1) Check for exact match (including exact match of lemmatized / stop words removed)
# If found add to split list so as not to consider subset phrases
p, lp = map(' '.join, [ws, lws])
if p in PHENOS or lp in PHENOS:
entities = PHENOS[p] if p in PHENOS else PHENOS[lp]
for entity in entities:
mentions.append(create_supervised_mention(row, wordidxs, entity, 'EXACT'))
split_indices.update(wordidxs)
continue
# (2) Check for permuted match
# Note: avoid repeated words here!
if HF['permuted']:
ps, lps = map(frozenset, [ws, lws])
if (len(ps)==len(ws) and ps in PHENO_SETS) or (len(lps)==len(lws) and lps in PHENO_SETS):
entities = PHENO_SETS[ps] if ps in PHENO_SETS else PHENO_SETS[lps]
for entity in entities:
mentions.append(create_supervised_mention(row, wordidxs, entity, 'PERM'))
continue
# (3) Check for an exact match with one ommitted (interior) word/lemma
# Note: only consider ommiting non-stop words!
if HF['omitted-interior']:
if len(ws) > 2:
for omit in range(1, len(ws)-1):
p, lp = [' '.join([w for i,w in enumerate(x) if i != omit]) for x in [ws, lws]]
if p in PHENOS or lp in PHENOS:
entities = PHENOS[p] if p in PHENOS else PHENOS[lp]
for entity in entities:
mentions.append(create_supervised_mention(row, wordidxs, entity, 'OMIT_%s' % omit))
return mentions
### DISTANT SUPERVISION ###
VALS = config.PHENO['vals']
def create_supervised_mention(row, idxs, entity=None, mention_supertype=None, mention_subtype=None):
"""Given a Row object consisting of a sentence, create & supervise a Mention output object"""
words = [row.words[i] for i in idxs]
mid = '%s_%s_%s_%s_%s_%s_%s' % (row.doc_id, row.section_id, row.sent_id, idxs[0], idxs[-1], mention_supertype, entity)
m = Mention(None, row.doc_id, row.section_id, row.sent_id, idxs, mid, mention_supertype, mention_subtype, entity, words, None)
if SR.get('post-match'):
opts = SR['post-match']
phrase_post = " ".join(row.words[idxs[-1]:])
for name,val in VALS:
if len(opts[name]) + len(opts['%s-rgx' % name]) > 0:
match = util.rgx_mult_search(phrase_post, opts[name], opts['%s-rgx' % name], flags=re.I)
if match:
return m._replace(is_correct=val, mention_supertype='POST_MATCH_%s_%s' % (name, val), mention_subtype=match)
if SR.get('mesh-supervise'):
pubmed_id = dutil.get_pubmed_id_for_doc(row.doc_id)
if pubmed_id and pubmed_id in PMID_TO_HPO:
if entity in PMID_TO_HPO[pubmed_id]:
return m._replace(is_correct=True, mention_supertype='%s_MESH_SUPERV' % mention_supertype, mention_subtype=str(pubmed_id) + ' ::: ' + str(entity))
# If this is more specific than MeSH term, also consider true.
elif SR.get('mesh-specific-true') and entity in hpo_dag.node_set:
for parent in PMID_TO_HPO[pubmed_id]:
if hpo_dag.has_child(parent, entity):
return m._replace(is_correct=True, mention_supertype='%s_MESH_CHILD_SUPERV' % mention_supertype, mention_subtype=str(parent) + ' -> ' + str(entity))
phrase = " ".join(words).lower()
if mention_supertype == 'EXACT':
if SR.get('exact-english-word') and \
len(words) == 1 and phrase in ENGLISH_WORDS and random.random() < SR['exact-english-word']['p']:
return m._replace(is_correct=True, mention_supertype='EXACT_AND_ENGLISH_WORD', mention_subtype=phrase)
else:
return m._replace(is_correct=True, mention_supertype='NON_EXACT_AND_ENGLISH_WORD', mention_subtype=phrase)
# Else default to existing values / NULL
return m
### RANDOM NEGATIVE SUPERVISION ###
def generate_rand_negatives(s, candidates):
"""Generate some negative examples in 1:1 ratio with positive examples"""
negs = []
n_negs = len([c for c in candidates if c.is_correct])
if n_negs == 0:
return negs
# pick random noun / adj phrases which do not overlap with candidate mentions
covered = set(chain.from_iterable([m.wordidxs for m in candidates]))
idxs = set([i for i in range(len(s.words)) if re.match(SR['rand-negs']['pos-tag-rgx'], s.poses[i])])
for i in range(n_negs):
x = sorted(list(idxs - covered))
if len(x) == 0:
break
ridxs = [random.randint(0, len(x)-1)]
while random.random() > 0.5:
j = ridxs[-1]
if j + 1 < len(x) and x[j+1] == x[j] + 1:
ridxs.append(j+1)
else:
break
wordidxs = [x[j] for j in ridxs]
mtype = 'RAND_NEG'
mid = '%s_%s_%s_%s_%s_%s' % (s.doc_id, s.section_id, s.sent_id, wordidxs[0], wordidxs[-1], mtype)
negs.append(
Mention(dd_id=None, doc_id=s.doc_id, section_id=s.section_id, sent_id=s.sent_id, wordidxs=wordidxs,
mention_id=mid, mention_supertype=mtype, mention_subtype=None, entity=None, words=[s.words[i] for i in wordidxs],
is_correct=False))
for i in wordidxs:
covered.add(i)
return negs
if __name__ == '__main__':
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
# Load static dictionaries
# TODO: any simple ways to speed this up?
STOPWORDS = frozenset([w.strip() for w in open(onto_path('manual/stopwords.tsv'), 'rb')])
ENGLISH_WORDS = frozenset([w.strip() for w in open(onto_path('data/english_words.tsv'), 'rb')])
hpo_dag = dutil.read_hpo_dag()
hpo_phenos = set(dutil.get_hpo_phenos(hpo_dag))
if SR.get('mesh-supervise'):
# unnecessary and hope it will never be used again --- our doc id is the pmid currently
# DOI_TO_PMID = dutil.read_doi_to_pmid()
PMID_TO_HPO = dutil.load_pmid_to_hpo()
PHENOS, PHENO_SETS = load_pheno_terms()
# Read TSV data in as Row objects
for line in sys.stdin:
row = parser.parse_tsv_row(line)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# find candidate mentions & supervise
mentions = extract_candidate_mentions(row)
if SR.get('rand-negs'):
mentions += generate_rand_negatives(row, mentions)
# print output
for mention in mentions:
util.print_tsv_output(mention)
| dd-genomics-master | document_classifier/classification/pheno_extract_candidates.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import sys
from bunch import *
import numpy as np
import random
from sklearn.linear_model import LogisticRegression
from nltk.stem import WordNetLemmatizer
import re
import cPickle
def load_labeled_docs_processed(data_path, neg_factor=None):
b = Bunch()
b.data = []
b.pmids = []
b.target = []
poss = 0
negs = 0
include_pmids = set()
with open(data_path) as f:
ctr = -1
for line in f:
ctr += 1
if ctr % 100000 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = line.strip().split('\t')
pmid = item[0]
data = item[1]
label = int(item[2])
if label == 1 or neg_factor is None or negs <= poss * neg_factor or pmid in include_pmids:
include_pmids.add(pmid)
b.pmids.append(pmid)
b.data.append(data)
b.target.append(label)
if label == 1:
poss += 1
else:
negs += 1
return b
if __name__ == "__main__":
if len(sys.argv) != 2:
print >>sys.stderr, "need 1 arg: path to training (pubmed) TSV"
sys.exit(1)
training_path = sys.argv[1]
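  # neg_factor=1.2 caps the number of negative examples at roughly 1.2x the
  # number of positives while reading the labeled TSV.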
train_docs = load_labeled_docs_processed(training_path, 1.2)
pos_count = np.count_nonzero(train_docs.target)
print >>sys.stderr, "Number of positive training examples: %d" % pos_count
neg_count = len(train_docs.target) - pos_count
print >>sys.stderr, "Number of negative training examples: %d" % neg_count
count_vect = CountVectorizer(analyzer='word', ngram_range=(1,1))
print >>sys.stderr, "Count-vectorizing data"
X_train_counts = count_vect.fit_transform(train_docs.data)
tfidf_transformer = TfidfTransformer()
print >>sys.stderr, "Transforming word counts to tf-idf"
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
print >>sys.stderr, "Fitting classifier to data"
clf = LogisticRegression(penalty='l2', max_iter=1000)
clf.fit(X_train_tfidf, train_docs.target)
with open('clf.pkl', 'wb') as f:
cPickle.dump(clf, f)
with open('count_vect.pkl', 'wb') as f:
cPickle.dump(count_vect, f)
with open('tfidf_transformer.pkl', 'wb') as f:
cPickle.dump(tfidf_transformer, f)
| dd-genomics-master | document_classifier/classification/create_classifier.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import json
import sys
import re
if __name__ == "__main__":
  if len(sys.argv) != 3:
    print >>sys.stderr, "need 2 args: symbol for file (NOT used for stdin), output path"
    sys.exit(1)
  pubmed = sys.argv[1]
  out_path = sys.argv[2]
  with open(out_path, 'w') as f:
    ctr = -1
    for line in sys.stdin:
      ctr += 1
      if ctr % 500 == 0:
        print >>sys.stderr, "converting %d lines in %s " % (ctr, pubmed)
      item = json.loads(line)
      pmid = item['doc-id']
      content = item['content']
      print >>f, "%s\t%s" % (pmid, content)
| dd-genomics-master | document_classifier/classification/json_to_tsv.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import sys
from bunch import *
import numpy as np
import random
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
from nltk.stem import WordNetLemmatizer
import re
from nltk.corpus import stopwords
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
def load_unlabeled_docs_processed(data_path):
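  """Load an unlabeled TSV of (pmid, text) rows into a Bunch with parallel
  pmids/data lists."""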
b = Bunch()
b.data = []
b.pmids = []
with open(data_path) as f:
ctr = -1
for line in f:
ctr += 1
if ctr % 100000 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = line.strip().split('\t')
pmid = item[0]
data = item[1]
b.pmids.append(pmid)
b.data.append(data)
return b
def load_labeled_docs_processed(data_path, neg_factor=None):
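  """Load a labeled TSV of (pmid, text, label) rows into a Bunch.

  If neg_factor is given, negative examples are kept only while their count
  stays at or below roughly neg_factor times the number of positives seen so
  far; pmids that were already included keep all of their rows.
  """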
b = Bunch()
b.data = []
b.pmids = []
b.target = []
poss = 0
negs = 0
include_pmids = set()
with open(data_path) as f:
ctr = -1
for line in f:
ctr += 1
if ctr % 100000 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = line.strip().split('\t')
pmid = item[0]
data = item[1]
label = int(item[2])
if label == 1 or neg_factor is None or negs <= poss * neg_factor or pmid in include_pmids:
include_pmids.add(pmid)
b.pmids.append(pmid)
b.data.append(data)
b.target.append(label)
if label == 1:
poss += 1
else:
negs += 1
return b
if __name__ == "__main__":
if len(sys.argv) != 4:
print >>sys.stderr, "need 3 args: path to training (pubmed) TSV, path to testing (pubmed) TSV, path to random (pubmed) TSV"
sys.exit(1)
training_path = sys.argv[1]
testing_path = sys.argv[2]
random_path = sys.argv[3]
train_docs = load_labeled_docs_processed(training_path, 1.2)
pos_count = np.count_nonzero(train_docs.target)
print >>sys.stderr, "Number of positive training examples: %d" % pos_count
neg_count = len(train_docs.target) - pos_count
print >>sys.stderr, "Number of negative training examples: %d" % neg_count
count_vect = CountVectorizer(analyzer='word', ngram_range=(1,1))
print >>sys.stderr, "Count-vectorizing data"
X_train_counts = count_vect.fit_transform(train_docs.data)
tfidf_transformer = TfidfTransformer()
print >>sys.stderr, "Transforming word counts to tf-idf"
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# print >>sys.stderr, "Converting to dense matrix"
# X_train_tfidf_dense = X_train_tfidf.toarray()
print >>sys.stderr, "Fitting classifier to data"
# clf = MultinomialNB(alpha=1.0).fit(X_train_tfidf, train_docs.target) # poor performance
# clf = LogisticRegression().fit(X_train_tfidf, train_docs.target) # better performance but hoping for more
# SVMs with kernel too slow
# linear SVM like LogReg, but faster
# clf = Pipeline([('feature-selection', SelectFromModel(LinearSVC(penalty='l1', dual=False))),
# ('classification', GradientBoostingClassifier(n_estimators=100))]) # slow as number of estimators rises; but also 3% precision
# clf = Pipeline([('anova', SelectKBest(f_regression, k=5)), ('clf', GradientBoostingClassifier(n_estimators=100))]) # too slow
# clf = Pipeline([('feature-selection', SelectFromModel(LinearSVC(penalty='l1', dual=False))),
# ('classification', SVC())]) # SVC still too slow
# clf = Pipeline([('reduce-dim', PCA(n_components=100)),
# ('classification', SVC())]) # SVC (? or PCA??) still too slow
# PCA itself is far too slow! --- and apparently it doesn't help: linearsvc + pca 100 + gradient boosting 100 = worse than w/o pca
# clf = Pipeline([('feature-selection', SelectFromModel(LinearSVC(penalty='l1', dual=False))),
# ('classification', GradientBoostingClassifier(n_estimators=100))]) # this gives the old 3% and quite slow
# clf = AdaBoostClassifier(base_estimator=LogisticRegression(solver='sag', max_iter=1000), n_estimators=200) # fitting depends highly on n_estimators. at 100, 0.11 / 0.76 or so
clf = LogisticRegression(penalty='l2', max_iter=1000)
clf.fit(X_train_tfidf, train_docs.target)
print >>sys.stderr, "Loading test set"
test_docs = load_labeled_docs_processed(testing_path)
pos_count = np.count_nonzero(test_docs.target)
print >>sys.stderr, "Number of positive testing examples: %d" % pos_count
neg_count = len(test_docs.target) - pos_count
print >>sys.stderr, "Number of negative testing examples: %d" % neg_count
print >>sys.stderr, "Count-vectorizing test set"
X_test_counts = count_vect.transform(test_docs.data)
print >>sys.stderr, "Transforming test test to tf-idf"
X_test_tfidf = tfidf_transformer.transform(X_test_counts)
# print >>sys.stderr, "Converting to dense matrix"
# X_test_tfidf_dense = X_test_tfidf.toarray()
print >>sys.stderr, "Predicting over test set"
predicted = clf.predict(X_test_tfidf)
print >>sys.stderr, metrics.classification_report(test_docs.target, predicted, target_names=['uninteresting', 'OMIM'])
print >>sys.stderr, "Loading random docs"
docs_new = load_unlabeled_docs_processed(random_path)
print >>sys.stderr, "Number of new docs: %d" % len(docs_new.data)
print >>sys.stderr, "Transforming new docs through count vectorization"
X_new_counts = count_vect.transform(docs_new.data)
print >>sys.stderr, "Transforming new docs through tf-idf"
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
# print >>sys.stderr, "Converting to dense matrix"
# X_new_tfidf_dense = X_new_tfidf.toarray()
print >>sys.stderr, "Predicting over new docs"
predicted = clf.predict(X_new_tfidf)
for i, value in enumerate(predicted):
if value == 1:
print("%s\t%s" % (docs_new.pmids[i], docs_new.data[i].decode('utf-8').encode('ascii', 'ignore')))
| dd-genomics-master | document_classifier/classification/word_counter.py |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import word_counter
import sys
import re
from bunch import *
import numpy as np
import random
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from functools32 import lru_cache
from nltk.stem import PorterStemmer
def toBunch(mmap):
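  """Convert a {pmid: (label, text)} map into a Bunch with parallel
  data/pmids/target lists (target as a numpy array)."""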
rv = Bunch()
rv.data = []
rv.pmids = []
rv.target = []
for pmid in mmap:
rv.data.append(mmap[pmid][1])
rv.pmids.append(pmid)
rv.target.append(mmap[pmid][0])
rv.target = np.array(rv.target)
return rv
def lemmatize_select(lemmatize, word):
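  """Apply the given lemmatize function, leaving MeSH_ and placeholder tokens
  (GENEVARIANT, ENSEMBLGENE) untouched."""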
if word.startswith('MeSH_') or word == 'GENEVARIANT' or word == 'ENSEMBLGENE':
return word
return lemmatize(word)
sw = stopwords.words('english')
def lemmatize(bunch):
print >>sys.stderr, "Lemmatizing data"
new_data = []
  pattern = re.compile(r'[\W_]+')
# lemmatizer = WordNetLemmatizer()
lemmatizer = PorterStemmer()
# lemmatize = lru_cache(maxsize=5000)(lemmatizer.stem)
# lemmatize = lemmatizer.stem
lemmatize = lambda x : x.lower()
ctr = -1
for content in bunch.data:
ctr += 1
if ctr % 100 == 0:
print >>sys.stderr, "lemmatizing %d lines" % ctr
    stripped = pattern.sub(' ', content.strip())
split = stripped.split()
new_content = ' '.join([lemmatize_select(lemmatize, s) for s in split if s not in sw])
new_data.append(new_content)
bunch.data = new_data
return bunch
def make_mesh_terms(mesh):
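  """Turn a '|^|'-separated MeSH term string into space-separated MeSH_<term> tokens."""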
no_alnum = re.compile(r'[\W_]+')
return ' '.join(['MeSH_' + no_alnum.sub('_', term) for term in mesh.split('|^|')])
def load_gene_name_to_genes(ensembl_genes_path):
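  """Map each gene name to an (ensembl_id, canonical_name, mapping_type) tuple
  parsed from the ensembl genes file."""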
ret = {}
with open(ensembl_genes_path) as f:
for line in f:
eid = line.strip().split(':')[0]
canonical_name = (line.strip().split(':')[1]).split()[0]
name = line.strip().split()[1]
mapping_type = line.strip().split()[2]
ret[name] = (eid, canonical_name, mapping_type)
return ret
min_word_len = {'ENSEMBL_ID': 2, 'REFSEQ': 2, 'NONCANONICAL_SYMBOL': 4, 'CANONICAL_SYMBOL': 2}
bad_genes = ['ANOVA', 'MRI', 'CO2', 'gamma', 'spatial', 'tau', 'Men', 'ghrelin', 'MIM', 'NHS', 'STD', 'hole']
def comp_gene_rgx(ensembl_genes_path):
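  """Build a single regex that matches known gene symbols (canonical symbols of
  length >= 2, non-canonical symbols of length >= 4, containing at least one
  letter) delimited by punctuation or whitespace."""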
print >>sys.stderr, "Computing gene regex"
gene_names = []
gene_name_to_genes = load_gene_name_to_genes(ensembl_genes_path)
for name in gene_name_to_genes:
if name in bad_genes:
continue
(eid, canonical_name, mapping_type) = gene_name_to_genes[name]
if mapping_type not in ['CANONICAL_SYMBOL', 'NONCANONICAL_SYMBOL']:
continue
if mapping_type == 'NONCANONICAL_SYMBOL':
min_len = 4
else:
min_len = 2
if len(name) < min_len:
continue
if not re.match(r'.*[a-zA-Z].*', name):
continue
gene_names.append('[\.,_ \(\)]' + name + '[\.,_ \(\)]')
return re.compile('(' + '|'.join(gene_names) + ')')
def replace_genes(content, gene_rgx):
return gene_rgx.sub(' ENSEMBLGENE ', content)
a = r'[cgrnm]'
i = r'IVS'
b = r'ATCGatcgu'
s1 = r'0-9\_\.\:'
s2 = r'\/>\?\(\)\[\]\;\:\*\_\-\+0-9'
s3 = r'\/><\?\(\)\[\]\;\:\*\_\-\+0-9'
b1 = r'[%s]' % b
bs1 = r'[%s%s]' % (b,s1)
bs2 = r'[%s %s]' % (b,s2)
bs3 = r'[%s %s]' % (b,s3)
c1 = r'(inv|del|ins|dup|tri|qua|con|delins|indel)'
c2 = r'(del|ins|dup|tri|qua|con|delins|indel)'
c3 = r'([Ii]nv|[Dd]el|[Ii]ns|[Dd]up|[Tt]ri|[Qq]ua|[Cc]on|[Dd]elins|[Ii]ndel|fsX|fsx|fs)'
p = r'CISQMNPKDTFAGHLRWVEYX'
ps2 = r'[%s %s]' % (p, s2)
ps3 = r'[%s %s]' % (p, s3)
d = '[ATCGRYUatgc]'
aa_long_to_short = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
aa_camel = {}
for aa in aa_long_to_short:
aa_camel[aa[0] + aa[1].lower() + aa[2].lower()] = aa_long_to_short[aa]
aal = '(' + '|'.join([x for x in aa_long_to_short] + [x for x in aa_camel]) + ')'
# regexes from tmVar paper
# See Table 3 in http://bioinformatics.oxfordjournals.org/content/early/2013/04/04/bioinformatics.btt156.full.pdf
def comp_gv_rgxs():
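  """Compile the tmVar-derived regexes above into one case-insensitive,
  start-anchored pattern for genomic variant mentions (e.g. 'c.123A>G',
  'p.R117H')."""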
print >>sys.stderr, "Computing variants regex"
GV_RGXS = [
r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))([\+\-\*][0-9]+)?(%s)[->/β](%s)' % (d, d),
r'^([cgrnm]\.)?([0-9]+)([_]+([0-9]+))?([\+\-\*][0-9]+)?(%s)(%s+)?' % (c3, d),
r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)[->/β](%s)' % (d, d),
r'^[cgrnm]\.([0-9]+)?([\+\-\*][0-9]+)?(%s)' % d,
r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s)-*[>/β](%s)' % (d, d),
r'^IVS([0-9]*[abcd]?)([\+\-\*][0-9]+)?(%s?)(%s+)' % (c3, d),
r'^p\.(([%s])|%s)([0-9]+)(([%s])|%s)' % (p, aal, p, aal),
r'^p\.(([%s])|%s)([0-9]+)[_]+(([%s])|%s)([0-9]+)(%s)' % (p, aal, p, aal, c3),
r'^p\.(([%s])|%s)([0-9]+)(%s)' % (p, aal, c3),
r'^(%s)([0-9]+)(%s)' % (d, d)
]
return re.compile('(' + '|'.join(GV_RGXS) + ')', flags=re.I)
gv_rgx = comp_gv_rgxs()
def replace_variants(content):
return gv_rgx.sub(' GENEVARIANT ', content)
def load_unlabeled_docs(data_path, gene_rgx):
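  """Load pmid/content TSV rows, replace gene names and variant mentions with
  ENSEMBLGENE / GENEVARIANT placeholders, append MeSH_ tokens, and return a
  lemmatized Bunch."""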
no_alnum = re.compile(r'[\W_ ]+')
rv = {}
print >>sys.stderr, "Loading JSON data"
ctr = -1
with open(data_path) as f:
for line in f:
ctr += 1
if ctr % 100 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = line.strip().split('\t')
pmid = item[0]
content = item[1]
      content = content.replace('\n', ' ')
content = replace_genes(content, gene_rgx)
content = replace_variants(content)
content = no_alnum.sub(' ', content)
if len(item) >= 8:
mesh_terms = item[7]
else:
mesh_terms = ''
if pmid in rv:
rv[pmid] += ' ' + content
else:
rv[pmid] = content
rv[pmid] += ' ' + make_mesh_terms(mesh_terms)
b = Bunch()
b.data = []
b.pmids = []
for pmid in rv:
b.data.append(rv[pmid])
b.pmids.append(pmid)
return lemmatize(b)
if __name__ == "__main__":
if len(sys.argv) != 4:
print >>sys.stderr, "need 3 args: path to test pubmed TSV file to lemmatize etc, path to ensembl genes, output filename"
sys.exit(1)
testing_path = sys.argv[1]
ensembl_genes_path = sys.argv[2]
output_path = sys.argv[3]
global gene_rgx
gene_rgx = comp_gene_rgx(ensembl_genes_path)
print >>sys.stderr, "Loading test set"
test_docs = load_unlabeled_docs(testing_path, gene_rgx)
with open(output_path, 'w') as f:
for i, pmid in enumerate(test_docs.pmids):
print >>f, "%s\t%s" % (pmid, test_docs.data[i])
| dd-genomics-master | document_classifier/classification/preprocess_test_data.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
import re
def load_unlabeled_docs(data_path):
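  """Stream PubMed JSON lines from data_path and print tab-separated
  (pmid, content) rows, newlines stripped and ASCII-only, to stdout."""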
rv = {}
print >>sys.stderr, "Loading JSON data"
ctr = -1
with open(data_path) as f:
for line in f:
ctr += 1
if ctr % 100000 == 0:
print >>sys.stderr, "counting %d lines" % ctr
item = json.loads(line)
pmid = item['doc-id']
content = item['content']
      content = content.replace('\n', ' ')
print "%s\t%s" % (pmid, content.encode('ascii', 'ignore'))
if __name__ == "__main__":
if len(sys.argv) != 2:
print >>sys.stderr, "need 1 arg: path to json"
sys.exit(1)
path = sys.argv[1]
load_unlabeled_docs(path)
| dd-genomics-master | document_classifier/classification/joined_data/json_to_tsv.py |
#! /usr/bin/env python
import sys
import re
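# Convert a genomics sentence dump (pmid, journal, MeSH terms, sv, '|~^~|'-joined
# sentences, gene-mention indices, pheno-mention indices) into tab-separated
# (pmid, processed text, sv) rows, substituting ENSEMBLGENE / DETECTEDPHENO
# placeholders at the detected mention positions.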
if __name__ == "__main__":
no_alnum = re.compile(r'[\W_]+')
with open(sys.argv[2], 'w') as out_file:
with open(sys.argv[1]) as f:
for line in f:
comps = line.strip().split('\t')
pmid = comps[0]
journal = comps[1]
mesh_terms_string = comps[2]
sv = comps[3]
text = comps[4]
gm = comps[5]
pm = comps[6]
sentences = text.split('|~^~|')
gm_sentences = gm.split('|~^~|')
pm_sentences = pm.split('|~^~|')
mesh_terms = mesh_terms_string.split('|^|')
new_text = 'JOURNAL_' + no_alnum.sub('_', journal).strip() + ' ' + ' '.join(['MeSH_' + no_alnum.sub('_', x).strip() for x in mesh_terms]) + ' '
for i, sentence in enumerate(sentences):
words = sentence.split('|^|')
        if i >= len(gm_sentences):
          print >>sys.stderr, (pmid, i, gm_sentences)
          continue
gms_string = gm_sentences[i]
pms_string = pm_sentences[i]
if gms_string != 'N':
gms = gms_string.split('|^+^|')
for gm in [int(x) for x in gms]:
words[gm] = 'ENSEMBLGENE'
if pms_string != 'N':
pms = pms_string.split('|^+^|')
for pm in [int(x) for x in pms]:
words[pm] = 'DETECTEDPHENO'
new_text += ' ' + ' '.join(words)
print >>out_file, "%s\t%s\t%s" % (pmid, new_text, sv)
| dd-genomics-master | document_classifier/classification/processed/genomics_dump_to_processed.py |
"""Assess phenotype recall relative to known HPO-PMID map."""
import collections
import random
import sys
sys.path.append('../code')
import extractor_util as util
import data_util as dutil
NUM_ERRORS_TO_SAMPLE = 50
def main(id_file, candidate_file):
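  """Measure recall of extracted phenotype candidates against MeSH-derived HPO
  annotations for the given doc ids and print a sample of missed (pmid, hpo)
  pairs for error analysis."""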
# Load list of all pubmed IDs in the dataset
print >> sys.stderr, 'Loading list of pubmed IDs from doc ID list.'
doi_to_pmid = dutil.read_doi_to_pmid()
pmids_in_data = set()
num_docs = 0
with open(id_file) as f:
for line in f:
doc_id = line.strip()
pmid = dutil.get_pubmed_id_for_doc(doc_id, doi_to_pmid=doi_to_pmid)
if pmid:
pmids_in_data.add(pmid)
num_docs += 1
print >> sys.stderr, '%d/%d documents have PubMed IDs.' % (
len(pmids_in_data), num_docs)
# Load map from Pubmed ID to HPO term via MeSH
print >> sys.stderr, 'Loading supervision data via MeSH'
mesh_supervision = collections.defaultdict(set)
with open('%s/onto/data/hpo_to_pmid_via_mesh.tsv' % util.APP_HOME) as f:
for line in f:
hpo_id, pmid = line.strip().split('\t')
if pmid in pmids_in_data:
mesh_supervision[pmid].add(hpo_id)
# Identify all true pairs from MeSH
true_pairs = set()
for pmid in pmids_in_data:
for hpo in mesh_supervision[pmid]:
true_pairs.add((pmid, hpo))
# Load map from Pubmed ID to HPO term based on extracted candidates
print >> sys.stderr, 'Loading extracted pheno candidates'
candidates = collections.defaultdict(set)
with open(candidate_file) as f:
for line in f:
doc_id, hpo_id = line.strip().split('\t')
pmid = dutil.get_pubmed_id_for_doc(doc_id, doi_to_pmid=doi_to_pmid)
if pmid:
candidates[pmid].add(hpo_id)
# Load HPO DAG
# We say we found a HPO term if we find either the exact HPO term
# or one of its children
hpo_dag = dutil.read_hpo_dag()
# Determine which true pairs had candidate mentions for them
found_pairs = set()
missed_pairs = set()
for pmid, hpo in true_pairs:
found_hpo_ids = candidates[pmid]
for cand_hpo in found_hpo_ids:
if cand_hpo == '\N': continue
if hpo_dag.has_child(hpo, cand_hpo):
found_pairs.add((pmid, hpo))
break
else:
missed_pairs.add((pmid, hpo))
# Compute recall
num_true = len(true_pairs)
num_found = len(found_pairs)
print >> sys.stderr, 'Recall: %d/%d = %g' % (
num_found, num_true, float(num_found) / num_true)
# Compute other statistics
num_article = len(pmids_in_data)
num_annotated = sum(1 for x in pmids_in_data if len(mesh_supervision[x]) > 0)
print >> sys.stderr, '%d/%d = %g pubmed articles had HPO annotation' % (
num_annotated, num_article, float(num_annotated) / num_article)
# Read in HPO information
hpo_info_dict = dict()
with open('%s/onto/data/hpo_phenotypes.tsv' % util.APP_HOME) as f:
for line in f:
toks = line.strip('\r\n').split('\t')
hpo_id = toks[0]
hpo_info_dict[hpo_id] = toks[0:3]
# Sample some error cases
  missed_sample = random.sample(list(missed_pairs), NUM_ERRORS_TO_SAMPLE)
for pmid, hpo in missed_sample:
hpo_info = hpo_info_dict[hpo]
pubmed_url = 'http://www.ncbi.nlm.nih.gov/pubmed/%s' % pmid
hpo_url = 'www.human-phenotype-ontology.org/hpoweb/showterm?id=%s' % hpo
toks = [pubmed_url, hpo_url] + hpo_info
print '\t'.join(toks)
if __name__ == '__main__':
if len(sys.argv) < 3:
print >> sys.stderr, 'Usage: %s doc_ids.tsv candidates.tsv' % sys.argv[0]
print >> sys.stderr, ''
print >> sys.stderr, 'doc_ids.tsv should be list of doc ids'
print >> sys.stderr, ' e.g. /lfs/raiders2/0/robinjia/data/genomics_sentences_input_data/50k_doc_ids.tsv'
print >> sys.stderr, 'candidates.tsv should have rows doc_id, hpo_id.'
print >> sys.stderr, ' e.g. result of SELECT doc_id, entity FROM pheno_mentions'
print >> sys.stderr, ' or SELECT doc_id, entity FROM pheno_mentions_is_correct_inference WHERE expectation > 0.9'
sys.exit(1)
main(*sys.argv[1:])
| dd-genomics-master | eval/pheno_recall.py |
import collections
import random
import sys
sys.path.append('../code')
import extractor_util as util
import data_util as dutil
HPO_DAG = dutil.read_hpo_dag()
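# NOTE: EID_MAP (gene symbol -> list of Ensembl gene ids) is assumed to be
# loaded elsewhere; it is not defined in this stub.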
def read_supervision():
"""Reads genepheno supervision data (from charite)."""
supervision_pairs = set()
with open('%s/onto/data/hpo_phenotype_genes.tsv' % util.APP_HOME) as f:
for line in f:
hpo_id, gene_symbol = line.strip().split('\t')
# Canonicalize i.e. include all parents of hpo entities
hpo_ids = [hpo_id] + [parent for parent in HPO_DAG.edges[hpo_id]]
eids = EID_MAP[gene_symbol]
for h in hpo_ids:
for e in eids:
supervision_pairs.add((h,e))
return supervision_pairs
supervision_pairs = read_supervision()
# TODO: PIPE SQL IN HERE
"""
SELECT
  gene_entity,
  pheno_entity
FROM
genepheno_relations_is_correct_inference
WHERE
expectation >= 0.9;
"""
extracted_pairs = []
# TODO to compare / visualize:
# (1) overall number new extracted e.g.
print len(set(extracted_pairs).difference(supervision_pairs))
# (2) difference in coverage on gene axis e.g. something with
set([p[0] for p in extracted_pairs]).difference([p[0] for p in supervision_pairs])
# (3) difference in coverage on pheno axis e.g. something with
set([p[1] for p in extracted_pairs]).difference([p[1] for p in supervision_pairs])
| dd-genomics-master | eval/omim_coverage.py |
#! /usr/bin/env python
'''
Created on Aug 3, 2015
@author: jbirgmei
'''
import abbreviations
if __name__ == '__main__':
sentence = 'Scaffold proteins are abundant and essential components of the postsynaptic density -LRB- PSD -RRB- as well as I Hate JavaScript Proteins -LRB- IHJSP -RRB- , and a completely unrelated parenthesis -LRB- THIS -RRB- and a definition that could maybe fit but is way too long -LRB- AA -RRB- .'.split(' ')
print abbreviations.getabbreviations(sentence)
sentence = 'This sentence certainly contains no parentheses .'.split()
  print abbreviations.getabbreviations(sentence)
sentence = 'This sentence is botched -RRB- asdf -LRB-'.split()
print abbreviations.getabbreviations(sentence)
sentence = 'This sentence is botched -LRB- IB'.split()
print abbreviations.getabbreviations(sentence)
sentence = 'While|^|the|^|intestinal|^|stem|^|cells|^|-LRB-|^|ISCs|^|-RRB-|^|are|^|essential|^|for|^|the|^|proliferative|^|aspects|^|of|^|intestinal|^|homeostasis|^|--|^|,|^|the|^|enterocytes|^|-LRB-|^|ECs|^|-RRB-|^|form|^|the|^|first|^|line|^|of|^|defence|^|against|^|pathogens|^|and|^|stressors|^|.'.split('|^|')
print abbreviations.getabbreviations(sentence)
| dd-genomics-master | archived/test-abbreviations.py |