import pandas as pd
import numpy as np
__all__ = ['csv_node']
class csv_node(object):
def __init__(self, DataFrame, seed):
self.dataframe = DataFrame
self.seed = int(seed)
self.build_node()
@classmethod
def from_csv(klass, csv, seed=None):
dataframe = pd.read_csv(csv)
        if seed is None:
            # infer the seed from the filename, e.g. ".../predictions_1234.csv" -> "1234"
            seed = csv.split("/")[-1].split(".")[0].split("_")[-1]
return klass(dataframe, seed)
def build_node(self):
"""
        Build the dict of per-patient predictions
        from the underlying pd.DataFrame
"""
self.data = {}
self.PID = []
for i, row in self.dataframe.iterrows():
self.PID.append(row.PID)
self.data.update({row.PID: {"Y_TRUE": row.Y_TRUE,
"Y_PROBA": row.Y_PROBA,
"Y_PRED": row.Y_PRED}})
def extract(self, key, PIDs=None):
PIDs = self.PID if PIDs is None else PIDs
return np.array([self.data[pid][key] for pid in PIDs])
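# Usage sketch (illustrative only; the path and seed-suffixed filename are
# hypothetical -- any CSV with PID/Y_TRUE/Y_PROBA/Y_PRED columns works):
#
#   node = csv_node.from_csv("results/predictions_1234.csv")   # seed inferred as 1234
#   y_true = node.extract("Y_TRUE")                    # all patients, in PID order
#   y_proba = node.extract("Y_PROBA", node.PID[:10])   # first ten patients only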
# ==== ukb-cardiac-mri-master :: ukb/ensemble/csv_reader.py ====
from .csv_reader import *
from .voting import *
# ==== ukb-cardiac-mri-master :: ukb/ensemble/__init__.py ====
import glob
import numpy as np
import pandas as pd
from collections import OrderedDict
#from . import metrics
import metrics
from .csv_reader import csv_node
__all__ = ['tune_threshold',
'assemble_node',
'assemble_dev_threshold',
'metric_reading',
'Ensemble']
def tune_threshold(y_true, y_prob, metric="f1_score"):
if isinstance(metric, str):
metric = getattr(metrics, metric)
thresholds = np.arange(0.01, 1, 0.01)
best_score = 0.0
best_threshold = 0.5
for threshold in thresholds:
y_pred = np.array([1 if p > threshold else 0 for p in y_prob])
cur_score = metric(y_true, y_pred)
if cur_score > best_score:
best_score = cur_score
best_threshold = threshold
print("Tuned threshold: {:.4f}".format(best_threshold))
return best_threshold
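# Example (hedged sketch with toy arrays, not from the original repo): scans
# thresholds 0.01..0.99 and returns the one maximizing the given metric.
#
#   y_true = np.array([0, 0, 1, 1])
#   y_prob = np.array([0.2, 0.4, 0.35, 0.8])
#   t = tune_threshold(y_true, y_prob, metric="f1_score")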
def assemble_node(nodes, key="Y_PROBA", method="median", PIDs=None):
if isinstance(method, str):
method = getattr(np, method)
if PIDs is None:
PIDs = nodes[0].PID
probas = []
for pid in PIDs:
proba = method([x.data[pid][key] for x in nodes])
probas.append(proba)
return np.array(probas)
def assemble_dev_threshold(nodes, method="median", metric="f1_score", PIDs=None):
y_prob = assemble_node(nodes, key="Y_PROBA", method=method, PIDs=PIDs)
y_true = nodes[0].extract("Y_TRUE", PIDs)
threshold = tune_threshold(y_true, y_prob, metric)
return threshold
def metric_reading(y_true, y_pred, y_proba):
if isinstance(y_true, list):
readings = [metric_reading(y_true_, y_pred_, y_proba_)
for y_true_,y_pred_,y_proba_ in zip(y_true, y_pred, y_proba)]
return readings
else:
scores = metrics.classification_summary(y_true, y_pred, [0,1], y_proba, verbose=False)
reading = OrderedDict([('Pos.Acc',scores['pos_acc']*100.0),
('Neg.Acc',scores['neg_acc']*100.0),
('Precision',scores['precision']*100.0),
('Recall',scores['recall']*100.0),
('F1',scores['f1']*100.0),
('ROC',scores['roc']*100.0),
('PRC',scores['prc']*100.0),
('NDCG',scores['ndcg']*100.0),
('TP',scores['tp']),
('FP',scores['fp']),
('TN',scores['tn']),
('FN',scores['fn'])])
return reading
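# Example (toy arrays, hedged sketch): metric_reading returns an OrderedDict of
# percentage-scaled scores plus raw confusion counts, or a list of such dicts
# when given lists of label arrays.
#
#   reading = metric_reading(np.array([0, 1, 1, 0]),
#                            np.array([0, 1, 0, 0]),
#                            np.array([0.1, 0.9, 0.4, 0.2]))
#   print(reading["F1"], reading["TP"], reading["FN"])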
class Ensemble(object):
def __init__(self, results_csvs, dev_csvs, pids=None):
self.results_csvs = results_csvs
self.dev_csvs = dev_csvs
self.build(pids)
@classmethod
def from_keyword(klass, test_keyword, dev_keyword, pids=None):
test_csvs = glob.glob(test_keyword, recursive=True)
dev_csvs = glob.glob(dev_keyword, recursive=True)
return klass(test_csvs, dev_csvs, pids)
@classmethod
def from_folder(klass, results_folder, dev_folder, pids=None):
results_csvs = glob.glob("{}/**/predictions*.csv".format(results_folder), recursive=True)
dev_csvs = glob.glob("{}/**/predictions*.csv".format(dev_folder), recursive=True)
return klass(results_csvs, dev_csvs, pids)
def build(self, pids=None):
self.results = [csv_node.from_csv(x) for x in self.results_csvs]
self.devs = [csv_node.from_csv(x) for x in self.dev_csvs]
self.results = sorted(self.results, key=lambda x: x.seed)
self.devs = sorted(self.devs, key=lambda x: x.seed)
if pids is None:
self.pids = list(self.results[0].PID)
else:
self.pids = pids
try:
self.score_list = self.get_seeds_score_list()
self.score = True
        except Exception:
self.score = False
self.proba_list = self.get_seeds_proba_list()
self.pred_list = self.get_seeds_pred_list()
@property
def score_dataframe(self):
return pd.DataFrame(OrderedDict(self.score_list_head+self.score_list))
@property
def proba_dataframe(self):
return pd.DataFrame(OrderedDict(self.proba_list_head+self.proba_list))
@property
def pred_dataframe(self):
return pd.DataFrame(OrderedDict(self.pred_list_head+self.pred_list))
def get_df_by_seed(self, key="Y_PROBA"):
seeds = [x.seed for x in self.results]
probas = [x.extract(key, self.pids) for x in self.results]
df_dict = OrderedDict([("PID", self.pids)] + \
[("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)])
df = pd.DataFrame(df_dict)
return df
def get_score_by_seed(self, seed=0):
idx = [x.seed for x in self.results].index(seed)
node = self.results[idx]
y_true = node.extract("Y_TRUE")
y_pred = node.extract("Y_PRED")
y_proba = node.extract("Y_PROBA")
score = metric_reading(y_true, y_pred, y_proba)
return score
def score2pair(self, key, score):
val = ["{:.2f}".format(score[key]) for key in self.score_keys]
return (key, val)
def get_seeds_score_list(self):
seeds = [x.seed for x in self.results]
scores = [self.get_score_by_seed(x) for x in seeds]
self.score_keys = list(scores[0].keys())
self.score_list_head = [("Experiment", self.score_keys)]
df_list = []
for seed, score in zip(seeds, scores):
pair = self.score2pair("SEED_{}".format(seed), score)
df_list.append(pair)
mean_score = OrderedDict([(key, np.mean([score[key] for score in scores])) for key in self.score_keys])
std_score = OrderedDict([(key, np.std([score[key] for score in scores])) for key in self.score_keys])
df_list.append(self.score2pair("AVERAGE", mean_score))
df_list.append(self.score2pair("STD", std_score))
return df_list
def get_seeds_proba_list(self):
seeds = [x.seed for x in self.results]
probas = [x.extract("Y_PROBA", self.pids) for x in self.results]
self.proba_list_head = [("PID", self.pids)]
proba_list = [("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)]
return proba_list
def get_seeds_pred_list(self):
seeds = [x.seed for x in self.results]
preds = [x.extract("Y_PRED", self.pids) for x in self.results]
self.pred_list_head = [("PID", self.pids)]
pred_list = [("SEED_{}".format(seed), pred) for seed, pred in zip(seeds, preds)]
return pred_list
def median_vote(self, metric="f1_score"):
dev_threshold = assemble_dev_threshold(self.devs, method="median",
metric=metric, PIDs=self.devs[0].PID)
voted_y_proba = assemble_node(self.results, key="Y_PROBA",
method="median", PIDs=self.pids)
voted_y_pred = np.array([1 if p > dev_threshold else 0 for p in voted_y_proba])
y_true = self.results[0].extract("Y_TRUE", self.pids)
#df_dict = OrderedDict([("PID", self.pids),
# ("Y_PROBA", voted_y_proba),
# ("Y_PRED", voted_y_pred)])
#df = pd.DataFrame(df_dict)
proba_pair = ("MEDIAN", voted_y_proba)
self.proba_list.append(proba_pair)
proba_df = pd.DataFrame(OrderedDict(self.proba_list_head+[proba_pair]))
pred_pair = ("MEDIAN", voted_y_pred)
self.pred_list.append(pred_pair)
pred_df = pd.DataFrame(OrderedDict(self.pred_list_head+[pred_pair]))
if self.score:
score = metric_reading(y_true, voted_y_pred, voted_y_proba)
score_pair = self.score2pair("MEDIAN", score)
self.score_list.append(score_pair)
score_df = pd.DataFrame(OrderedDict(self.score_list_head+[score_pair]))
else:
score_df = None
return proba_df, pred_df, score_df
def mv_vote(self):
voted_y_proba = assemble_node(self.results, key="Y_PRED",
method="mean", PIDs=self.pids)
voted_y_pred = np.round(voted_y_proba)
y_true = self.results[0].extract("Y_TRUE", self.pids)
proba_pair = ("MV", voted_y_proba)
self.proba_list.append(proba_pair)
proba_df = pd.DataFrame(OrderedDict(self.proba_list_head+[proba_pair]))
pred_pair = ("MV", voted_y_pred)
self.pred_list.append(pred_pair)
pred_df = pd.DataFrame(OrderedDict(self.pred_list_head+[pred_pair]))
if self.score:
score = metric_reading(y_true, voted_y_pred, voted_y_proba)
score_pair = self.score2pair("MV", score)
self.score_list.append(score_pair)
score_df = pd.DataFrame(OrderedDict(self.score_list_head+[score_pair]))
else:
score_df = None
return proba_df, pred_df, score_df
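# Usage sketch (folder layout is hypothetical; from_folder globs
# "predictions*.csv" recursively under each directory):
#
#   ens = Ensemble.from_folder("results/test", "results/dev")
#   proba_df, pred_df, score_df = ens.median_vote(metric="f1_score")
#   proba_df2, pred_df2, score_df2 = ens.mv_vote()
#   ens.score_dataframe.to_csv("ensemble_scores.csv", index=False)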
# ==== ukb-cardiac-mri-master :: ukb/ensemble/voting.py ====
from .ukbb import UKBBCardiacMRI
from .ukbb import UKBBCardiacMRIMeta
from .ukbb import UKBBCardiacMRICache
from .ukbb import stratified_sample_dataset
from .cifar10 import CIFAR10
# ==== ukb-cardiac-mri-master :: ukb/dataloaders/__init__.py ====
from __future__ import print_function, division
import os
import sys
import logging
import numpy as np
import pandas as pd
from skimage.color import grey2rgb
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from collections import OrderedDict
from sklearn.preprocessing import OneHotEncoder
logger = logging.getLogger(__name__)
seriesMap = {
0 : "flow_250_tp_AoV_bh_ePAT@c_MAG",
1 : "flow_250_tp_AoV_bh_ePAT@c_P",
2 : "flow_250_tp_AoV_bh_ePAT@c"
}
AGE_MEAN = 55.360334580673566
AGE_STD = 7.569607026616798
class UKBBCardiacMRI(Dataset):
"""
UK Biobank cardiac MRI dataset
Load Numpy MRI sequence tensors with shape:
num_frames X width X height
TODO: Transformations are custom and are applied to each frame
"""
def __init__(self, csv_data, root_dir, series=0, N=30, image_type='grey',
preprocess=None, augmentation=None, postprocess=None,
rebalance=False, threshold=0.5, seed=4321,
sample=False, sample_type=0, sample_split=0.5, n_samples=100,
pos_samples=0, neg_samples=0, frame_label=False, rebalance_strategy="oversample",
semi=False, semi_dir=None, semi_csv=None):
# either load from CSV or just use the provided pandas dataframe
if frame_label:
csv_data = "{}/labels_frame.csv".format(root_dir)
self.labels = pd.read_csv(csv_data) if type(csv_data) is str else csv_data
self.root_dir = root_dir
self.series = series
self.preprocess = preprocess
self.augment = augmentation
self.postprocess = postprocess
self.image_type = image_type
self.N = N
self.frame_label = frame_label
self.rebalance_strategy = rebalance_strategy
self.semi = semi
self.semi_dir = semi_dir
self.semi_csv = semi_csv
np.random.seed(seed)
# remove any instances with label = 0.5
#df = [{"ID":x.ID,"LABEL":x.LABEL} for x in list(self.labels.itertuples()) if x.LABEL != 0.5]
#self.labels = pd.DataFrame(df)
# Sampling is computed first, then rebalancing if wanted :)
if sample:
pids, ys = zip(*[(x.ID,x.LABEL) for x in list(self.labels.itertuples())])
pids = np.array(pids)
ys = np.array(ys)
Content = OrderedDict([(str(key), np.array(getattr(self.labels, key))) for key in self.labels.columns if key != 'ID' and key != 'LABEL'])
if (sample_type == 0):
logger.info("Randomly Sampling dataset...\n" +
"\tNum Samples = {}\n".format(n_samples))
self.random_sample(pids, ys, n_samples, Content)
elif (sample_type == 1):
logger.info("Threshold Random Sampling dataset...\n" +
"\tSample Split = {}\n".format(sample_split) +
"\tThreshold = {}\n".format(threshold) +
"\tNum Samples = {}\n".format(n_samples))
self.threshold_random_sample(pids, ys, sample_split, threshold, n_samples, Content)
elif (sample_type == 2):
logger.info("Top/Bottom Sampling dataset...\n" +
"\tSample Split = {}\n".format(sample_split) +
"\tNum Samples = {}\n".format(n_samples))
self.top_bottom_sample(pids, ys, sample_split, n_samples, Content)
elif (sample_type == 3):
logger.info("Random Subset Sampling dataset...\n" +
"\tThreshold = {}\n".format(threshold) +
"\tPos Samples = {}\n".format(pos_samples) +
"\tNeg Samples = {}\n".format(neg_samples) +
"\tTotal Samples = {}\n".format(pos_samples + neg_samples))
self.random_subset_sampling(pids, ys, threshold, pos_samples, neg_samples, Content)
# append hard labels to create semi-supervised labels DataFrame
if semi:
semi_labels = pd.read_csv("{}/{}".format(self.semi_dir, self.semi_csv))
            self.labels = self.labels.append(semi_labels, ignore_index=True).loc[:,semi_labels.columns]
# resample to create balanced training set?
# HACK: assumes binary classes
if rebalance:
logger.info("Rebalancing dataset... {} b={}".format(self.rebalance_strategy, threshold))
pids, ys = zip(*[(x.ID,x.LABEL) for x in list(self.labels.itertuples())])
pids = np.array(pids)
ys = np.array(ys)
t = [i for i,v in enumerate(ys) if v > threshold]
f = [i for i,v in enumerate(ys) if v <= threshold]
logger.info("True:{:>4} False:{:>4}".format(len(t), len(f)))
# oversample minority class OR undersample majority class
minority_class, majority_class = (t,f) if len(t) < len(f) else (f,t)
if self.rebalance_strategy == "oversample":
minority_class = np.random.choice(minority_class, len(majority_class), replace=True)
elif self.rebalance_strategy == "undersample":
majority_class = np.random.choice(majority_class, len(minority_class), replace=True)
#logger.info("Minority:{:>4} Majority:{:>4}".format(len(minority_class), len(majority_class)))
df = []
for pid,label in zip(pids[minority_class], ys[minority_class]):
df.append(self.labels.loc[self.labels.ID==pid].to_dict('records', into=OrderedDict)[0])
#df.append({"ID":id, "LABEL":label})
for pid,label in zip(pids[majority_class], ys[majority_class]):
df.append(self.labels.loc[self.labels.ID==pid].to_dict('records', into=OrderedDict)[0])
#df.append({"ID":id, "LABEL":label})
self.labels = pd.DataFrame(df)
# sanity check
pids, ys = zip(*[(x.ID, x.LABEL) for x in list(self.labels.itertuples())])
pids = np.array(pids)
ys = np.array(ys)
t = [i for i, v in enumerate(ys) if v > threshold]
f = [i for i, v in enumerate(ys) if v <= threshold]
logger.info("True:{:>4} False:{:>4}".format(len(t), len(f)))
def random_sample(self, pids, ys, x, Content):
"""
Randomly sample x patients.
Params
------
pids : np.array
- patient id array
ys : np.array
- patient label array
x : int
- number of patients to sample
Return
------
None
- set self.labels = pd.DataFrame(something)
"""
indexes = np.arange(len(pids))
np.random.shuffle(indexes)
df = []
for idx, (id, label) in enumerate(zip(pids[indexes[:x]], ys[indexes[:x]])):
row = OrderedDict([("ID",id), ("LABEL",label)])
for key in Content:
row.update({key : Content[key][indexes[idx]]})
df.append(row)
self.labels = pd.DataFrame(df)
def threshold_random_sample(self, pids, ys, split, threshold, x, Content):
"""
Randomly sample x patients based on a given threshold.
Params
------
pids : np.array
- patient id array
        ys : np.array
            - patient label array
        split : float
            - fraction of the x samples drawn from the positive class
        threshold : float
            - label threshold separating positive from negative cases
        x : int
            - number of patients to sample
Return
------
None
- set self.labels = pd.DataFrame(something)
"""
# Determine the split of patients
pos_count = int(round(x*split))
neg_count = x - pos_count
# Separate patients based on threshold
p = [i for i,v in enumerate(ys) if v > threshold]
n = [i for i,v in enumerate(ys) if v <= threshold]
logger.info("Class Distribution :\n" +
"\tPossitive Class : {:>4}\n\tNegative Class : {:>4}\n".format(len(p), len(n)))
np.random.shuffle(p)
np.random.shuffle(n)
logger.info("Class Selection Count :\n" +
"\tPossitive Class : {:>4}\n\tNegative Class : {:>4}\n".format(pos_count, neg_count))
df = []
for idx, (id,label) in enumerate(zip(pids[p[:pos_count]], ys[p[:pos_count]])):
row = OrderedDict([("ID",id), ("LABEL",label)])
for key in Content:
row.update({key : Content[key][p[idx]]})
df.append(row)
for idx, (id,label) in enumerate(zip(pids[n[:neg_count]], ys[n[:neg_count]])):
row = OrderedDict([("ID",id), ("LABEL",label)])
for key in Content:
row.update({key : Content[key][n[idx]]})
df.append(row)
self.labels = pd.DataFrame(df)
def top_bottom_sample(self, pids, ys, split, x, Content):
"""
Sample x patients from top and bottom of labels.
Params
------
pids : np.array
- patient id array
        ys : np.array
            - patient label array
        split : float
            - fraction of the x samples taken from the top (highest labels)
        x : int
            - number of patients to sample
Return
------
None
- set self.labels = pd.DataFrame(something)
"""
# Determine the split of patients
pos_count = int(round(x*split))
neg_count = x - pos_count
index_sort = np.argsort(ys)
sorted_ys = ys[index_sort]
sorted_pids = pids[index_sort]
logger.info("Class Selection Count :\n" +
"\tPossitive Class : {:>4}\n\tNegative Class : {:>4}\n".format(pos_count, neg_count))
df = []
# Get highest probability (highest labels) 'positive' cases
for idx, (id, label) in enumerate(zip(sorted_pids[-pos_count:], sorted_ys[-pos_count:])):
row = OrderedDict([("ID",id), ("LABEL",label)])
for key in Content:
row.update({key : Content[key][index_sort[-pos_count+idx]]})
df.append(row)
# Get lowest probability (lowest labels) 'negative' cases
for idx, (id, label) in enumerate(zip(sorted_pids[:neg_count], sorted_ys[:neg_count])):
row = OrderedDict([("ID",id), ("LABEL",label)])
for key in Content:
row.update({key : Content[key][index_sort[idx]]})
df.append(row)
self.labels = pd.DataFrame(df)
def random_subset_sampling(self, pids, ys, threshold, pos_cases, neg_cases, Content):
"""
        Randomly sample subsets of cases and non-cases.
Return a set of total_cases = pos_cases + neg_cases
Params
------
pids : np.array
- patient id array
ys : np.array
- patient label array
        threshold : float
            - threshold separating cases from non-cases
pos_cases : int
- number of positive cases to select
neg_cases : int
- number of negative cases to select
Return
------
None
- set self.labels = pd.DataFrame(something)
"""
# Separate patients based on threshold
p = [i for i, v in enumerate(ys) if v > threshold]
n = [i for i, v in enumerate(ys) if v <= threshold]
logger.info("Class Distribution :\n" +
"\tPossitive Class : {:>4}\n\tNegative Class : {:>4}\n".format(len(p), len(n)))
np.random.shuffle(p)
np.random.shuffle(n)
logger.info("Class Selection Count :\n" +
"\tPossitive Class : {:>4}\n\tNegative Class : {:>4}\n".format(pos_cases, neg_cases))
df = []
for idx, (id, label) in enumerate(zip(pids[p[:pos_cases]], ys[p[:pos_cases]])):
row = OrderedDict([("ID", id), ("LABEL", label)])
for key in Content:
row.update({key : Content[key][p[idx]]})
df.append(row)
for idx, (id, label) in enumerate(zip(pids[n[:neg_cases]], ys[n[:neg_cases]])):
row = OrderedDict([("ID", id), ("LABEL", label)])
for key in Content:
row.update({key : Content[key][n[idx]]})
df.append(row)
self.labels = pd.DataFrame(df)
def summary(self):
"""
Generate message summarizing data (e.g., counts, class balance)
Assumes hard labels
:return:
"""
return "Instances: {}".format(len(self))
def load_label(self, idx):
        # most PyTorch operations are only defined over float or double (32- vs 64-bit) tensors
if self.frame_label:
label = np.array(self.labels.iloc[idx, 2:8]).astype(float)
else:
label = self.labels.iloc[idx, 1]
return label
def get_labels(self):
return [(str(self.labels.iloc[i, 0]), data[1]) for i, data in enumerate(self)]
def convert_image(self, images):
try:
images = np.array(images)
except Exception as err:
            raise ValueError("image channels have different shapes.\n ERR| {}".format(err))
if images.ndim == 4:
images = np.moveaxis(images, 0, 1)
elif self.image_type == 'grey':
images = np.expand_dims(images, axis=1)
elif self.image_type == 'rgb':
images = np.moveaxis(grey2rgb(images), -1, 1)
return images
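    # Shape walkthrough for convert_image (assumed typical inputs):
    #   - multi-series, ndim == 4: (series=3, frames, H, W) -> (frames, 3, H, W)
    #   - 'grey', ndim == 3: (frames, H, W) -> (frames, 1, H, W)
    #   - 'rgb',  ndim == 3: grey2rgb gives (frames, H, W, 3) -> (frames, 3, H, W)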
def flow_250_MAG(self, pid, rootDir):
"""
Load the flow_250_tp_AoV_bh_ePAT@c_MAG series for the given patient.
Params
------
pid : str
- patient id
Return
------
np.array
- numpy series
"""
fpath = os.path.join(rootDir, seriesMap[0] + "/" + pid + ".npy")
series = np.load(fpath).astype(np.float32)
if self.preprocess:
# Apply Preprocessing
series = self.preprocess(series)
if self.augment:
            # Apply augmentations
            # raise NotImplementedError()
series = self.augment(series)
if self.postprocess:
series = self.postprocess(series)
# Compute final 1 Ch or 3 ch
series = self.convert_image(series)
return series
def flow_250_other(self, pid, rootDir):
"""
Load the flow_250_tp_AoV_bh_ePAT@c* series for the given patient.
Params
------
pid : str
- patient id
series : int
- series map number
1 : flow_250_tp_AoV_bh_ePAT@c_P
2 : flow_250_tp_AoV_bh_ePAT@c
Return
------
np.array
- numpy series
"""
fpath = os.path.join(rootDir, seriesMap[self.series] + "/" + pid + ".npy")
series = np.load(fpath).astype(np.float32)
if self.postprocess:
series = self.postprocess(series)
# Compute final 1 Ch or 3 ch
series = self.convert_image(series)
return series
def flow_250_all(self, pid, rootDir):
"""
Load ALL flow_250_tp_AoV_bh_ePAT@c* series for the given patient.
Params
------
pid : str
- patient id
Return
------
np.array
- numpy series
"""
# flow_250_tp_AoV_bh_ePAT@c_MAG
fpath_MAG = os.path.join(rootDir, seriesMap[0] + "/" + pid + ".npy")
# flow_250_tp_AoV_bh_ePAT@c_P
fpath_P = os.path.join(rootDir, seriesMap[1] + "/" + pid + ".npy")
# flow_250_tp_AoV_bh_ePAT@c
fpath_c = os.path.join(rootDir, seriesMap[2] + "/" + pid + ".npy")
        # most PyTorch operations are only defined over float or double (32- vs 64-bit) tensors
series_MAG = np.load(fpath_MAG).astype(np.float32)
series_P = np.load(fpath_P).astype(np.float32)
series_c = np.load(fpath_c).astype(np.float32)
if self.preprocess:
# Apply Preprocessing
series_MAG, series_P, series_c = self.preprocess(series_MAG, series_P, series_c)
# if self.augment:
            # Apply augmentations
            # raise NotImplementedError()
if self.postprocess:
series_MAG, series_P, series_c = self.postprocess(series_MAG, series_P, series_c)
series = [series_MAG, series_P, series_c]
# Compute final 1 Ch per series type (series has 3 ch)
series = self.convert_image(series)
return series
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
pid = str(self.labels.iloc[idx, 0])
if 'DIR' in self.labels.columns and isinstance(self.labels.DIR[idx], str):
rootDir = str(self.labels.DIR[idx])
else:
rootDir = self.root_dir
if (self.series == 3):
series = self.flow_250_all(pid, rootDir)
elif (self.series == 0):
series = self.flow_250_MAG(pid, rootDir)
else:
series = self.flow_250_other(pid, rootDir)
label = self.load_label(idx)
return (series, label)
class UKBBCardiacMRIMeta(UKBBCardiacMRI):
"""
Class for MetaVGG16RNN class
"""
def _init_Meta(self):
MetaData = pd.read_csv("{}/MetaData.csv".format(self.root_dir))
if self.semi:
semi_MetaData = pd.read_csv("{}/MetaData.csv".format(self.semi_dir))
            MetaData = MetaData.append(semi_MetaData, ignore_index=True).loc[:,semi_MetaData.columns]
self.MetaData = self.encode_MetaData(MetaData)
def encode_MetaData(self, MetaData):
age = np.array(MetaData.Age)
gen = np.array(MetaData.Gender)
        ss = np.array(MetaData.SmokingStatus)
        # remap the UKB missing-data code -3 to its own category (3) before one-hot encoding
        ss[ss==-3] = 3
        # z-score age using precomputed population statistics
        age = (age - AGE_MEAN)/AGE_STD
        age = age.reshape(-1,1)
        # recode gender {1 -> -1, 0 -> +1}
        gen[gen==1] = -1
        gen[gen==0] = 1
        gen = gen.reshape(-1,1)
enc = OneHotEncoder(sparse=False)
enc.fit(np.array([0,1,2,3,4]).reshape(-1,1))
ss = enc.transform(ss.reshape(-1,1))
encoded_MetaData = pd.DataFrame()
encoded_MetaData['ID'] = list(MetaData.ID)
encoded_MetaData['SmokingStatus'] = list(ss)
encoded_MetaData['Age'] = list(age)
encoded_MetaData['Gender'] = list(gen)
return encoded_MetaData
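    # Example of the encoding (hedged sketch): a row with Age=63, Gender=0,
    # SmokingStatus=2 becomes
    #   Age           -> [(63 - AGE_MEAN) / AGE_STD]    # z-scored, shape (1,)
    #   Gender        -> [1]                            # 0 -> +1, 1 -> -1
    #   SmokingStatus -> [0, 0, 1, 0, 0]                # one-hot over {0..4}
    # and _get_meta concatenates these into a single float vector per patient.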
def _get_meta(self, idx):
if not hasattr(self, "MetaData"):
self._init_Meta()
pid = str(self.labels.iloc[idx, 0])
meta_idx = self.MetaData.loc[self.MetaData['ID']==pid].index[0]
return np.concatenate(self.MetaData.iloc[meta_idx,1:]).astype(float)
def __getitem__(self, idx):
series, label = super(UKBBCardiacMRIMeta, self).__getitem__(idx)
meta_data = self._get_meta(idx)
return ([series, meta_data], label)
class UKBBCardiacMRICache(UKBBCardiacMRI):
"""
UK Biobank cardiac MRI dataset
Load Numpy MRI sequence tensors with shape:
num_frames X width X height
TODO: Transformations are custom and are applied to each frame
"""
def __init__(self, csv_data, root_dir, series=0, N=30, image_type='grey',
preprocess=None, augmentation=None, postprocess=None,
rebalance=False, threshold=0.5, seed=4321,
sample=False, sample_type=0, sample_split=0.5, n_samples=100,
pos_samples=0, neg_samples=0, frame_label=False):
super(UKBBCardiacMRICache, self).__init__(csv_data=csv_data,
root_dir=root_dir,
series=series,
N=N,
image_type=image_type,
preprocess=preprocess,
augmentation=augmentation,
postprocess=postprocess,
rebalance=rebalance,
threshold=threshold,
seed=seed,
sample=sample,
sample_type=sample_type,
sample_split=sample_split,
n_samples=n_samples,
pos_samples=pos_samples,
neg_samples=neg_samples,
frame_label=frame_label)
self.cache_data()
def flow_250_MAG(self, pid):
"""
Load the flow_250_tp_AoV_bh_ePAT@c_MAG series for the given patient.
Params
------
pid : str
- patient id
Return
------
np.array
- numpy series
"""
fpath = os.path.join(self.root_dir, seriesMap[0] + "/" + pid + ".npy")
series = np.load(fpath).astype(np.float32)
if self.preprocess:
# Apply Preprocessing
series = self.preprocess(series)
return series
def cache_data(self):
self.cached_data = []
for idx in range(len(self)):
pid = str(self.labels.iloc[idx, 0])
label = self.labels.iloc[idx, 1]
            # most PyTorch operations are only defined over float or double (32- vs 64-bit) tensors
if (self.series == 3):
series = self.flow_250_all(pid)
elif (self.series == 0):
series = self.flow_250_MAG(pid)
else:
series = self.flow_250_other(pid)
self.cached_data.append((pid, series, label))
def __getitem__(self, idx):
pid, series, label = self.cached_data[idx]
if (self.series == 0):
if self.augment:
                # Apply augmentations
series = self.augment(series)
if self.postprocess:
series = self.postprocess(series)
# Compute final 1 Ch or 3 ch
series = self.convert_image(series)
return (series, label)
def stratified_sample_dataset(csv_data, seed=1234):
labels = pd.read_csv(csv_data) if type(csv_data) is str else csv_data
X = np.array(labels["ID"])
Y = np.array(labels["LABEL"])
dataframes = {}
skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=seed)
for split1, split2 in skf.split(X, Y):
data = np.vstack([X[split1], Y[split1]])
dataframes["dev"] = pd.DataFrame(data.T, columns=['ID', 'LABEL'])
data = np.vstack([X[split2], Y[split2]])
dataframes["test"] = pd.DataFrame(data.T, columns=['ID', 'LABEL'])
break
return dataframes
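# Usage sketch ("labels.csv" is a hypothetical file with ID and LABEL columns):
#
#   splits = stratified_sample_dataset("labels.csv", seed=1234)
#   dev_df, test_df = splits["dev"], splits["test"]
#   # each half preserves the overall class balance via StratifiedKFold(n_splits=2)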
# ==== ukb-cardiac-mri-master :: ukb/dataloaders/ukbb.py ====
from __future__ import print_function, division
import os
import logging
import numpy as np
import pandas as pd
import torchvision.datasets
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
logger = logging.getLogger(__name__)
class CIFAR10(Dataset):
"""
"""
def __init__(self, data_root, split="train", transform=None, num_samples=25000):
self.dataset = torchvision.datasets.CIFAR10(data_root, train=split in ['train','dev'], download=True)
self.transform = transform
if split == "train":
self.data = self._load_images(self.dataset.train_data[0:num_samples])
self.labels = [y for y in self.dataset.train_labels[0:num_samples]]
elif split == "dev":
self.data = self._load_images(self.dataset.train_data[50000 - num_samples:])
self.labels = [y for y in self.dataset.train_labels[50000 - num_samples:]]
else:
self.data = self._load_images(self.dataset.test_data)[0:num_samples]
self.labels = [y for y in self.dataset.test_labels][0:num_samples]
self.dataset = None
def _load_images(self, array):
imgs = []
for x in array:
x = np.array(x).astype(np.float32)
x = np.array([x[..., i] for i in range(x.shape[-1])])
imgs.append(x)
return imgs
def summary(self):
return "Instances: {}".format(len(self))
def get_labels(self):
return self.labels
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if self.transform:
            raise NotImplementedError()
x,y = self.data[idx], self.labels[idx]
return x, y
# ==== ukb-cardiac-mri-master :: ukb/dataloaders/cifar10.py ====
import sys
import json
import models
import logging
from collections import OrderedDict
logger = logging.getLogger(__name__)
def convert_param_string(s):
"""
    Convert a string of hyperparameters into a typed dictionary
    e.g., `lr=0.001,rebalance=False,attention=True`
    This is used to parse parameters specified on the command line
:param s:
:return:
"""
config = dict([p.split("=") for p in s.split(",")])
# force typecasting in this order
types = [int, float]
for param in config:
v = config[param]
for t in types:
try:
v = t(v)
except:
continue
config[param] = v
break
if config[param] in ['true','True']:
config[param] = True
elif config[param] in ['false','False']:
config[param] = False
return config
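# Example (hedged sketch): values are typecast in order int -> float, and
# true/false strings are mapped to booleans.
#
#   convert_param_string("lr=0.001,rebalance=False,attention=True,n_epochs=10")
#   # -> {'lr': 0.001, 'rebalance': False, 'attention': True, 'n_epochs': 10}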
def get_model_config(args, verbose=True):
"""
Given command line arguments and (optional) JSON configuration file,
    setup model and parameter grid
    :param args:
:param verbose:
:return:
"""
# load config JSON
if args.config:
args.config = json.load(open(args.config,"rU"))
model_class = getattr(models, args.config[u"model"])
model_class_params = args.config[u'model_class_params']
#model_hyperparams = args.config[u'model_hyperparams']
model_param_grid = args.config[u'model_param_grid']
logger.info("Loaded model config from JSON file...")
# convert list params into tuples
for param_name in model_param_grid:
values = []
for v in model_param_grid[param_name]:
values.append(v if type(v) is not list else tuple(v))
model_param_grid[param_name] = values
# use model defaults
elif args.model:
        # NOTE: resolve the model class by name, mirroring the config branch above;
        # the original assigned an empty dict here, which would fail downstream
        model_class = getattr(models, args.model)
        model_class_params, model_param_grid = {}, {}
logger.info("Loaded model defaults...")
else:
logger.error("Please specify model config or model class type")
sys.exit()
# override parameter grid
if args.param_grid:
manual_param_grid = json.load(open(args.param_grid, "rU"))
args.n_model_search = len(manual_param_grid[u'params'])
logger.info("Using manual parameter grid, setting n_model_search={}".format(args.n_model_search))
else:
manual_param_grid = {}
# # custom model parameters
# if args.params:
# params = convert_param_string(args.params)
# # override any grid search settings
# logger.info("Overriding some model hyperparameters")
# # override any model_hyperparameter defaults
# for name in params:
# model_hyperparams[name] = params[name]
# # also override in the param grid
# if name in model_param_grid:
# model_param_grid[name] = [params[name]]
# override model params from command line
model_class_params['seed'] = args.seed
model_class_params['n_threads'] = args.n_procs
#model_hyperparams['n_epochs'] = args.n_epochs
model_class_params['host_device'] = args.host_device
model_param_grid = OrderedDict(sorted(model_param_grid.items()))
return model_class, model_class_params, model_param_grid #, manual_param_grid
def get_data_config(args, verbose=True):
"""
Given command line arguments and (optional) JSON configuration file,
setup data preprocessing and augmentation.
    :param args:
:param verbose:
:return:
"""
# load config JSON
if args.dconfig:
args.dconfig = json.load(open(args.dconfig,"rU"))
if (args.series == 3):
preprocessing = args.dconfig.get(u'Preprocess',
{"FrameSelector" : {
"name" : "FrameSelectionVarMulti"
}
})
augmentation = None
postprocessing = args.dconfig.get(u'Postprocess',
{"Name" : "RescaleIntensityMulti"})
logger.info("Loaded data config from JSON MULTI file...")
else:
preprocessing = args.dconfig.get(u'Preprocess',
{"FrameSelector" : {
"name" : "FrameSelectionVar"
}
})
augmentation = args.dconfig.get(u'Augmentation', None)
postprocessing = args.dconfig.get(u'Postprocess',
{"Name" : "RescaleIntensity"})
logger.info("Loaded data config from JSON file...")
else:
if (args.series == 3):
preprocessing = {
"FrameSelector" : {
"name" : "FrameSelectionVarMulti"
}
}
augmentation = None
postprocessing = {"Name" : "RescaleIntensityMulti"}
logger.info("Loaded data defaults for MULTI series...")
else:
preprocessing = {
"FrameSelector" : {
"name" : "FrameSelectionVar"
}
}
augmentation = None
postprocessing = {"Name" : "RescaleIntensity"}
logger.info("Loaded data defaults for SINGLE series...")
return preprocessing, augmentation, postprocessing
# ==== ukb-cardiac-mri-master :: ukb/utils/config_parser.py ====
from .utils import *
from .config_parser import *
# ==== ukb-cardiac-mri-master :: ukb/utils/__init__.py ====
import time
import logging
import numpy as np
from skimage import draw
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
logger = logging.getLogger(__name__)
def print_key_pairs(v, title="Parameters"):
"""
Print python dictionary key/value pairs
:param v: python dictionary
:param title: table title
:return:
"""
items = v.items() if type(v) is dict else v
logger.info("-" * 40)
logger.info(title)
logger.info("-" * 40)
for key,value in items:
logger.info("{!s:<20}: {!s:<10}".format(key, value))
logger.info("-" * 40)
def print_dict_pairs(d, title="Parameters"):
"""
Print python dictionary key/value pairs
    :param d: python dictionary
:param title: table title
:return:
"""
logger.info("")
logger.info("=" * 90)
logger.info(title)
logger.info("=" * 90)
if (d is None):
logger.info("None")
else:
items = d.items() if type(d) is dict else d
for key,value in items:
if (type(value) is dict):
for sub_key, sub_value in value.items():
logger.info("{!s:<35}: {!s:<10}".format(key + " " + sub_key, sub_value))
else:
logger.info("{!s:<35}: {!s:<10}".format(key, value))
logger.info("")
def timeit(method):
"""
Decorator function for timing
:param method:
:return:
"""
def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        # NOTE: assumes the wrapped method returns a tuple; elapsed time is prepended
        result = (te-ts,) + result
logger.info('%r %2.2f sec' % (method.__name__, te-ts))
return result
return timed
def select_frames(seq, epsilon=0.05, plot=False):
"""
Select a contiguous subset of frames based on each frame's
overall standard deviation by pixel. Determine cut points based
on the first pair of inflection points.
:param seq:
:param epsilon:
:param plot:
:return:
"""
# z-score transform frames
z = (seq - np.mean(seq)) / np.std(seq)
# standard deviation per frame / threshold
std = [np.std(z[i]) for i in range(seq.shape[0])]
std = [v - min(std) for v in std]
std = [v if v > epsilon else 0 for v in std]
# find inflection points
signs = [np.sign(std[i + 1] - std[i]) for i in range(len(std) - 1)]
inf_pnts = [] if signs[0] <= 0 else [0]
for i in range(1, len(signs)):
if signs[i] != signs[i - 1]:
inf_pnts.append(i)
# pathological image sequence
if len(inf_pnts) < 3:
return (0, len(seq) - 1)
if plot:
plt.plot(std)
plt.show()
return (inf_pnts[0], inf_pnts[2])
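# Usage sketch (toy array; one plausible way to apply the returned indices):
#
#   seq = np.random.rand(30, 192, 192).astype(np.float32)
#   start, end = select_frames(seq, epsilon=0.05)
#   seq = seq[start:end + 1]   # keep the high-variance contiguous window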
def z_score_normalize(seq):
"""
Z-score normalization
:param seq:
:return:
"""
return (seq - np.mean(seq)) / np.std(seq)
def seq_as_float(seq):
"""
Convert 0-255 ubyte image to 0-1 float range
:param seq:
:return:
"""
seq = seq.astype(np.float32)
return (seq - np.min(seq)) / (np.max(seq) - np.min(seq))
def seq_as_ubyte(seq):
"""
Convert 0-1 float image to 0-255 range
:param seq:
:return:
"""
return (seq * 255.0).astype(np.uint8)
def get_bounding_box(pnts):
"""
Compute bounding box given a region of points.
:param pnts:
:return:
"""
    # initialize with infinities (comparing against None breaks in Python 3),
    # and fix the original max_y update, which used min_y by mistake
    min_x, max_x = float("inf"), float("-inf")
    min_y, max_y = float("inf"), float("-inf")
    for x,y in pnts:
        min_x, max_x = min(x, min_x), max(x, max_x)
        min_y, max_y = min(y, min_y), max(y, max_y)
    return [min_x, max_x, min_y, max_y]
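# Example:
#
#   get_bounding_box([(2, 5), (7, 1), (4, 9)])   # -> [2, 7, 1, 9]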
def seq_to_video(seq, outfpath, width=4, height=4, normalize=False):
"""
Export Numpy tensor images to .mp4 video.
see https://stackoverflow.com/questions/43445103/inline-animations-in-jupyter
:param seq:
:param outfpath:
:param width:
:param height:
:return:
"""
def getImageFromList(x):
return seq[x]
# z-score normalization
seq = z_score_normalize(seq.astype(np.float32)) if normalize else seq
fig = plt.figure(figsize=(width, height))
ims = []
for i in range(seq.shape[0]):
im = plt.imshow(getImageFromList(i), animated=True, cmap='gray', vmin=0, vmax=np.max(seq))
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=60, blit=True, repeat_delay=5)
plt.close()
ani.save('{}.mp4'.format(outfpath))
def generate_random_dataset(outdir, n_samples=100, dim=(30, 192, 192)):
"""
Create random numpy matrices in the same format as our MRI images.
Generate some simple circle shapes to test segmentation.
:param n_samples:
:param dim:
:return:
"""
for i in range(1000000, 1000000 + n_samples):
fpath = "{}/{}_random_{}x{}x{}".format(outdir, i, *dim)
X = np.zeros(dim)
# fix random center points
centers = [np.random.randint(50, 160, 2) for _ in range(6)]
# create some elliptical variation around these centers
for j in range(dim[0]):
for c in centers:
c_radius = np.random.randint(2, 10)
r_radius = np.random.randint(5, 10)
rr, cc = draw.ellipse(c[0], c[1], r_radius, c_radius, shape=X[j].shape)
X[j, rr, cc] = 10
np.save(fpath, X)
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
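# Examples: format_time reports at most the two largest nonzero units.
#
#   format_time(3661.5)   # -> '1h1m'
#   format_time(0.25)     # -> '250ms'
#   format_time(86401)    # -> '1D1s'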
# ==== ukb-cardiac-mri-master :: ukb/utils/utils.py ====
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import TSNE
from sklearn.metrics import auc, roc_curve, precision_recall_curve
import seaborn as sns
sns.set_style("darkgrid")
def set_seed(seed, use_cuda):
np.random.seed(seed=int(seed))
torch.manual_seed(seed)
if use_cuda:
torch.cuda.manual_seed_all(seed)
def embed(model, data_loader, seed, use_cuda):
"""
    Generate the learned representation
:param model:
:param data_loader:
:return:
"""
set_seed(seed,use_cuda)
embs = []
for i, data in enumerate(data_loader):
x,y = data
if isinstance(x, list):
X = [Variable(x_) for x_ in x]
hidden = model.init_hidden(X[0].size(0))
else:
X = Variable(x)
hidden = model.init_hidden(X.size(0))
embs.append(model.embedding(X, hidden).data.numpy())
return np.vstack(embs)
def tsne_plot(model, data_loader, outfpath, seed, use_cuda,
threshold=0.5, fmt="pdf", width=16, height=8,
topSelection=None, save_coords=False, pred_only=False,
save_embeds=False, classes=None):
"""
Generate TSNE (2D stochastic neighbor embedding) for visualization
:param model:
:param data_loader:
:param width:
:param height:
:return:
"""
# generate labels and embeddings for the provided data loader
embs = embed(model, data_loader, seed, use_cuda)
if save_embeds:
np.save("{}embeds.npy".format(outfpath), embs)
set_seed(seed, use_cuda)
y_proba, y_pred = model.predict(data_loader, threshold=threshold,
return_proba=True, topSelection=topSelection)
y_true = np.hstack([y.numpy() for x,y in data_loader])
X_emb = TSNE(n_components=2).fit_transform(embs)
classes = np.unique(y_true) if classes is None else np.unique(classes)
colors = cm.rainbow(np.linspace(0, 1, len(classes)))
def scatter_tsne(ax_, y_label, ax_title):
if save_coords:
# save the tsne coordinates
pnts_indexes = [[index for index, label in zip(range(y_label.shape[0]), y_label) if label == class_name] for class_name in classes]
pnts_indexes = [np.vstack(p) if p else np.array([]) for p in pnts_indexes]
pnts = [[pnt for pnt, label in zip(X_emb, y_label) if label == class_name] for class_name in classes]
pnts = [np.vstack(p) if p else np.array([]) for p in pnts]
for p, c in zip(pnts, colors):
if p.size == 0:
continue
xs, ys = zip(*p)
ax_.scatter(xs, ys, color=c)
ax_.set_title(ax_title)
if save_coords:
# save the tsne coordinates
pnt_table = []
for i,p in zip(pnts_indexes, pnts):
if p.size == 0:
continue
xs, ys = zip(*p)
pnt_table += [(index_[0],x_,y_) for index_, x_, y_ in zip(i, xs, ys)]
pnt_table = sorted(pnt_table, key=lambda x: x[0])
coords = np.array([(x_,y_) for i_,x_,y_ in pnt_table])
xs_,ys_ = zip(*coords)
pids = [data[0] for i,data in enumerate(data_loader.dataset.get_labels())]
tsne_csv = "PID,X,Y,PROBA,{}\n".format(ax_title.upper())
tsne_csv += "\n".join(["{},{},{},{},{}".format(pid_,x_,y_,proba_,label_) for pid_,x_,y_,proba_,label_ in zip(pids, xs_, ys_, y_proba, y_label)])
open("{}tsne_{}_coords.csv".format(outfpath, ax_title), "w").write(tsne_csv)
if pred_only:
width /= 2
# setup 1 subplots
f, (ax1) = plt.subplots(1, 1, sharey=True, figsize=(width, height))
# 1) Predicted Labels
scatter_tsne(ax1, y_pred, "y_pred")
# Save the fig
plt.savefig("{}tsne_pred.{}".format(outfpath, fmt))
else:
# setup 2 subplots
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(width, height))
# 1) True Labels
scatter_tsne(ax1, y_true, "y_true")
# 2) Predicted Labels
scatter_tsne(ax2, y_pred, "y_pred")
# Save the fig
plt.savefig("{}tsne.{}".format(outfpath, fmt))
def analysis_plot(**kwargs):
"""
Generate Histograms of probabilities.
Params
------
outfpath:
types:
fmt:
1). y_true:
1). y_proba:
2). model:
2). data_loader:
2). seed:
2). use_cuda:
Return
------
None
"""
outfpath = kwargs.get("outfpath")
types = kwargs.get("types")
fmt = kwargs.get("fmt", "pdf")
def _plt_hist_plot(y_true, y_proba, bins='auto'):
plt.figure(figsize=(5,5))
plt.hist(y_proba, bins=bins)
plt.xlim([-0.01, 1.01])
plt.title("Probabilities histogram")
plt.xlabel("Probabilities")
plt.ylabel("Sample count")
plt.savefig("{}plt_hist.{}".format(outfpath, fmt))
def _hist_plot(y_true, y_proba):
plt.figure(figsize=(5,5))
positive_index = np.flatnonzero(y_true)
negative_index = np.flatnonzero(1-y_true)
sns.distplot(y_proba[positive_index], label="positive_class")
sns.distplot(y_proba[negative_index], label="negative_class")
plt.xlim([-0.01, 1.01])
plt.title("Probabilities histogram")
plt.xlabel("Probabilities")
plt.ylabel("Sample count")
plt.legend(bbox_to_anchor=(0.95,0.05),loc=4, borderaxespad=0.)
plt.savefig("{}hist.{}".format(outfpath, fmt))
def _roc_curve(y_true, y_proba):
plt.figure(figsize=(5,5))
fpr, tpr, thresholds = roc_curve(y_true, y_proba, pos_label=1)
roc_score = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=4)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.title("ROC_Curve: {:4.2f}".format(roc_score))
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend(bbox_to_anchor=(0.95,0.05),loc=4, borderaxespad=0.)
plt.savefig("{}roc_curve.{}".format(outfpath, fmt))
def _prc_curve(y_true, y_proba):
plt.figure(figsize=(5,5))
precision, recall, thresholds = precision_recall_curve(y_true, y_proba, pos_label=1)
prc_score = auc(recall, precision)
plt.step(recall, precision, color="b", alpha=0.2, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.title("PRC_Curve: {:4.2f}".format(prc_score))
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.legend(bbox_to_anchor=(0.95,0.05),loc=4, borderaxespad=0.)
plt.savefig("{}prc_curve.{}".format(outfpath, fmt))
def generate_plots(y_true, y_proba, image_type):
function_dict = dict(hist_plot=_hist_plot,
plt_hist_plot=_plt_hist_plot,
roc_curve=_roc_curve,
prc_curve=_prc_curve)
function_dict[image_type](y_true, y_proba)
    if 'y_true' not in kwargs or 'y_proba' not in kwargs:
model = kwargs.get("model")
data_loader = kwargs.get("data_loader")
seed = kwargs.get("seed")
use_cuda = kwargs.get("use_cuda")
set_seed(seed, use_cuda)
y_proba = model.predict_proba(data_loader)
y_true = np.hstack([y.numpy() for x,y in data_loader])
else:
y_true = kwargs.get("y_true")
y_proba = kwargs.get("y_proba")
for image_type in types:
try:
generate_plots(y_true, y_proba, image_type)
        except Exception:
            # skip plot types that fail (e.g., ROC/PRC with a single-class y_true)
            pass
# ==== ukb-cardiac-mri-master :: ukb/utils/viz.py ====
from .mri import *
from .trainer import *
# ==== ukb-cardiac-mri-master :: ukb/models/__init__.py ====
import torch
import logging
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .frame import LeNetFrameEncoder, FNNFrameEncoder, DenseNet121, vgg16_bn, densenet121, densenet_40_12_bc
from .sequence import RNN, MetaRNN, SeqSumPoolingEncoder
logger = logging.getLogger(__name__)
################################################################################
# Image Container Models (each image is independently classified)
################################################################################
class MRINet(nn.Module):
"""
Simple container class for MRI net. This module consists of:
1) A frame encoder, e.g., a ConvNet/CNN
2) Linear output layer
"""
def __init__(self, frame_encoder, n_classes, output_size, layers, dropout,
vote_opt='mean', use_cuda=False):
super(MRINet, self).__init__()
self.n_classes = n_classes
self.fenc = frame_encoder
self.classifier = self._make_classifier(output_size, n_classes, layers, dropout)
self.vote_opt = vote_opt
self.use_cuda = use_cuda
    def _make_classifier(self, output_size, n_classes, layers=[64,32], dropout=0.2):
        layers = [output_size] + layers + [n_classes]
        classifier = []
        for i in range(len(layers) - 1):
            classifier.append(nn.Linear(layers[i], layers[i+1]))
            # no activation/dropout after the final (output) layer; the original
            # compared layer widths to n_classes, which misfires when they collide
            if i < len(layers) - 2:
                classifier.append(nn.ReLU(True))
                classifier.append(nn.Dropout(p=dropout))
        return nn.Sequential(*classifier)
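    # Example (hedged sketch): output_size=84, n_classes=2, layers=[64, 32] yields
    #   Linear(84, 64) -> ReLU -> Dropout
    #   Linear(64, 32) -> ReLU -> Dropout
    #   Linear(32, 2)          # raw logits; softmax is applied in predict_proba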
def init_hidden(self, batch_size):
return None
def embedding(self, x, hidden=None):
"""Get learned representation of MRI sequence"""
if self.use_cuda and not x.is_cuda:
x = x.cuda()
batch_size, num_frames, num_channels, width, height = x.size()
self.num_frames = num_frames
x = x.view(-1, num_channels, width, height)
x = self.fenc(x)
x = self.classifier(x)
if self.use_cuda:
return x.cpu()
else:
return x
def forward(self, x, hidden=None):
if self.use_cuda and not x.is_cuda:
x = x.cuda()
# collapse all frames into new batch = batch_size * num_frames
batch_size, num_frames, num_channels, width, height = x.size()
self.num_frames = num_frames
x = x.view(-1, num_channels, width, height)
# encode frames
x = self.fenc(x)
# feed-forward-classifier
x = self.classifier(x)
return x
def vote(self, y_pred, threshold=None):
if threshold is not None:
y_pred = (y_pred > threshold).astype(float)
num_frames = self.num_frames
num_samples = int(y_pred.shape[0]/num_frames)
ex_shape = y_pred.shape[1:]
y_pred = np.reshape(y_pred, (num_samples, num_frames,)+ex_shape)
y_pred = np.mean(y_pred, axis=1)
return y_pred
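    # vote() reshapes frame-level outputs back to sample level: with
    # num_frames=30 and softmax outputs of shape (batch*30, 2), it averages
    # over the frame axis to give (batch, 2) probabilities; passing a
    # threshold first turns this into majority voting over binarized frames.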
def predict_proba(self, data_loader, binary=True, pos_label=1, threshold=0.5):
""" Forward inference """
y_pred = []
for i, data in enumerate(data_loader):
x, y = data
x = Variable(x) if not self.use_cuda else Variable(x).cuda()
y = Variable(y) if not self.use_cuda else Variable(y).cuda()
h0 = self.init_hidden(x.size(0))
outputs = self(x, h0)
y_hat = F.softmax(outputs, dim=1)
y_hat = y_hat.data.numpy() if not self.use_cuda else y_hat.cpu().data.numpy()
y_pred.append(y_hat)
# empty cuda cache
if self.use_cuda:
torch.cuda.empty_cache()
y_pred = np.concatenate(y_pred)
if self.vote_opt=='mean':
y_pred = self.vote(y_pred)
elif self.vote_opt=='vote':
y_pred = self.vote(y_pred, threshold)
return y_pred[:, pos_label] if binary else y_pred
def predict(self, data_loader, binary=True, pos_label=1, threshold=0.5, return_proba=False):
"""
If binary classification, use threshold on positive class
If multinomial, just select the max probability as the predicted class
:param data_loader:
:param binary:
:param pos_label:
:param threshold:
:return:
"""
proba = self.predict_proba(data_loader, binary, pos_label, threshold)
if binary:
pred = np.array([1 if p > threshold else 0 for p in proba])
else:
pred = np.argmax(proba, 1)
if return_proba:
return (proba, pred)
else:
return pred
class DenseNet121Net(MRINet):
    def __init__(self, n_classes, output_size, use_cuda, **kwargs):
        # MRINet requires classifier layers/dropout; fall back to defaults if unspecified
        layers = kwargs.get("layers", [64, 32])
        dropout = kwargs.get("dropout", 0.2)
        super(DenseNet121Net, self).__init__(frame_encoder=None, n_classes=n_classes,
                                             output_size=output_size, layers=layers,
                                             dropout=dropout, use_cuda=use_cuda)
        self.name = "DenseNet121Net"
        self.fenc = DenseNet121()
class VGG16Net(MRINet):
def __init__(self, n_classes, use_cuda, **kwargs):
input_shape = kwargs.get("input_shape", (3, 32, 32))
layers = kwargs.get("layers", [64, 32])
dropout = kwargs.get("dropout", 0.2)
vote_opt = kwargs.get("vote_opt", "mean")
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
frm_output_size = self.get_frm_output_size(input_shape)
super(VGG16Net, self).__init__(frame_encoder=None, n_classes=n_classes,
output_size=frm_output_size,
layers=layers, dropout=dropout,
vote_opt=vote_opt, use_cuda=use_cuda)
self.name = "VGG16Net"
self.fenc = vgg16_bn(pretrained=pretrained, requires_grad=requires_grad)
def get_frm_output_size(self, input_shape):
feature_output = int(min(input_shape[-1], input_shape[-2])/32)
feature_output = 1 if feature_output == 0 else feature_output
frm_output_size = pow(feature_output, 2) * 512
return frm_output_size
class LeNet(MRINet):
    def __init__(self, n_classes, n_channels, output_size, use_cuda, **kwargs):
        # MRINet requires classifier layers/dropout; fall back to defaults if unspecified
        layers = kwargs.get("layers", [64, 32])
        dropout = kwargs.get("dropout", 0.2)
        super(LeNet, self).__init__(frame_encoder=None, n_classes=n_classes,
                                    output_size=output_size, layers=layers,
                                    dropout=dropout, use_cuda=use_cuda)
        self.name = "LeNet"
        self.fenc = LeNetFrameEncoder(n_channels=n_channels, output_size=output_size)
################################################################################
# Sequence Container Models
################################################################################
class MRISequenceNet(nn.Module):
"""
Simple container network for MRI sequence classification. This module consists of:
1) A frame encoder, e.g., a ConvNet/CNN
2) A sequence encoder for merging frame representations, e.g., an RNN
"""
def __init__(self, frame_encoder, seq_encoder, use_cuda=False):
super(MRISequenceNet, self).__init__()
self.fenc = frame_encoder
self.senc = seq_encoder
self.use_cuda = use_cuda
def init_hidden(self, batch_size):
return self.senc.init_hidden(batch_size)
def embedding(self, x, hidden):
"""Get learned representation of MRI sequence"""
if self.use_cuda and not x.is_cuda:
x = x.cuda()
batch_size, num_frames, num_channels, width, height = x.size()
x = x.view(-1, num_channels, width, height)
x = self.fenc(x)
x = x.view(batch_size, num_frames, -1)
x = self.senc.embedding(x, hidden)
if self.use_cuda:
return x.cpu()
else:
return x
def forward(self, x, hidden=None):
if self.use_cuda and not x.is_cuda:
x = x.cuda()
# collapse all frames into new batch = batch_size * num_frames
batch_size, num_frames, num_channels, width, height = x.size()
x = x.view(-1, num_channels, width, height)
# encode frames
x = self.fenc(x)
x = x.view(batch_size, num_frames, -1)
# encode sequence
x = self.senc(x, hidden)
return x
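    # Tensor shape walkthrough for forward() (B=batch, T=frames, C=channels):
    #   input x: (B, T, C, H, W) -> view (B*T, C, H, W) -> fenc -> (B*T, D)
    #   -> view (B, T, D) -> senc -> (B, n_classes) logits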
def predict_proba(self, data_loader, binary=True, pos_label=1):
""" Forward inference """
y_pred = []
for i, data in enumerate(data_loader):
x, y = data
x = Variable(x) if not self.use_cuda else Variable(x).cuda()
y = Variable(y) if not self.use_cuda else Variable(y).cuda()
h0 = self.init_hidden(x.size(0))
outputs = self(x, h0)
y_hat = F.softmax(outputs, dim=1)
y_hat = y_hat.data.numpy() if not self.use_cuda else y_hat.cpu().data.numpy()
y_pred.append(y_hat)
# empty cuda cache
if self.use_cuda:
torch.cuda.empty_cache()
y_pred = np.concatenate(y_pred)
return y_pred[:, pos_label] if binary else y_pred
def predict(self, data_loader, binary=True, pos_label=1, threshold=0.5, return_proba=False, topSelection=None):
"""
If binary classification, use threshold on positive class
If multinomial, just select the max probability as the predicted class
:param data_loader:
:param binary:
:param pos_label:
:param threshold:
:return:
"""
proba = self.predict_proba(data_loader, binary, pos_label)
if topSelection is not None and topSelection < proba.shape[0]:
threshold = proba[np.argsort(proba)[-topSelection-1]]
if binary:
pred = np.array([1 if p > threshold else 0 for p in proba])
else:
pred = np.argmax(proba, 1)
if return_proba:
return (proba, pred)
else:
return pred
################################################################################
# FNN Models
################################################################################
class FNNFrameSum(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(FNNFrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "FNNFrameSum"
self.n_classes = n_classes
frm_layers = kwargs.get("frm_layers", [64, 32])
input_shape = kwargs.get("input_shape", (1, 32, 32))
frm_input_size = input_shape[0]*input_shape[1]*input_shape[2]
self.fenc = FNNFrameEncoder(input_size=frm_input_size, layers=list(frm_layers))
self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=frm_layers[-1])
class FNNFrameRNN(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(FNNFrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "FNNFrameRNN"
self.n_classes = n_classes
frm_layers = kwargs.get("frm_layers", [64, 32])
input_shape = kwargs.get("input_shape", (1, 32, 32))
frm_input_size = input_shape[0]*input_shape[1]*input_shape[2]
frm_output_size = frm_layers[-1]
seq_output_size = kwargs.get("seq_output_size", 128)
seq_dropout = kwargs.get("seq_dropout", 0.1)
seq_attention = kwargs.get("seq_attention", True)
seq_bidirectional = kwargs.get("seq_bidirectional", True)
seq_max_seq_len = kwargs.get("seq_max_seq_len", 30)
seq_rnn_type = kwargs.get("rnn_type", "LSTM")
self.fenc = FNNFrameEncoder(input_size=frm_input_size, layers=frm_layers)
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
################################################################################
# LeNet Models
################################################################################
class LeNetFrameSum(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(LeNetFrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "LeNetFrameSum"
self.n_classes = n_classes
frm_output_size = kwargs.get("frm_output_size", 84)
input_shape = kwargs.get("input_shape", (1, 32, 32))
self.fenc = LeNetFrameEncoder(input_shape=input_shape, output_size=frm_output_size)
self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=frm_output_size)
class LeNetFrameRNN(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(LeNetFrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "LeNetFrameRNN"
self.n_classes = n_classes
frm_output_size = kwargs.get("frm_output_size", 84)
input_shape = kwargs.get("input_shape", (1, 32, 32))
seq_output_size = kwargs.get("seq_output_size", 128)
seq_dropout = kwargs.get("seq_dropout", 0.1)
seq_attention = kwargs.get("seq_attention", True)
seq_bidirectional = kwargs.get("seq_bidirectional", True)
seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
seq_rnn_type = kwargs.get("rnn_type", "LSTM")
self.fenc = LeNetFrameEncoder(input_shape=input_shape, output_size=frm_output_size)
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
################################################################################
# DenseNet 3-channel Models
################################################################################
class DenseNet121FrameSum(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(DenseNet121FrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "DenseNet121FrameSum"
self.n_classes = n_classes
input_shape = kwargs.get("input_shape", (3, 32, 32))
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
frm_output_size = pow(int(input_shape[-1]/32), 2) * 1024
#self.fenc = DenseNet121()
self.fenc = densenet121(pretrained=pretrained, requires_grad=requires_grad)
self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=frm_output_size)
class DenseNet121FrameRNN(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(DenseNet121FrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "DenseNet121FrameRNN"
self.n_classes = n_classes
input_shape = kwargs.get("input_shape", (3, 32, 32))
frm_output_size = pow(int(input_shape[-1]/32), 2) * 1024
seq_output_size = kwargs.get("seq_output_size", 128)
seq_dropout = kwargs.get("seq_dropout", 0.1)
seq_attention = kwargs.get("seq_attention", True)
seq_bidirectional = kwargs.get("seq_bidirectional", True)
seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
seq_rnn_type = kwargs.get("rnn_type", "LSTM")
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
#self.fenc = DenseNet121()
self.fenc = densenet121(pretrained=pretrained, requires_grad=requires_grad)
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
################################################################################
# VGG 3-channel Models
################################################################################
class VGG16FrameSum(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(VGG16FrameSum, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "VGG16FrameSum"
self.n_classes = n_classes
input_shape = kwargs.get("input_shape", (3, 32, 32))
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
self.fenc = vgg16_bn(pretrained=pretrained, requires_grad=requires_grad)
frm_output_size = self.get_frm_output_size(input_shape)
self.senc = SeqSumPoolingEncoder(n_classes=n_classes, input_size=frm_output_size)
def get_frm_output_size(self, input_shape):
feature_output = int(min(input_shape[-1], input_shape[-2])/32)
feature_output = 1 if feature_output == 0 else feature_output
frm_output_size = pow(feature_output, 2) * 512
return frm_output_size
class VGG16FrameRNN(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(VGG16FrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "VGG16FrameRNN"
self.n_classes = n_classes
input_shape = kwargs.get("input_shape", (3, 32, 32))
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
self.fenc = vgg16_bn(pretrained=pretrained, requires_grad=requires_grad)
frm_output_size = self.get_frm_output_size(input_shape)
seq_output_size = kwargs.get("seq_output_size", 128)
seq_dropout = kwargs.get("seq_dropout", 0.1)
seq_attention = kwargs.get("seq_attention", True)
seq_bidirectional = kwargs.get("seq_bidirectional", True)
seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
seq_rnn_type = kwargs.get("seq_rnn_type", "LSTM")
self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
def get_frm_output_size(self, input_shape):
input_shape = list(input_shape)
input_shape.insert(0,1)
dummy_batch_size = tuple(input_shape)
x = torch.autograd.Variable(torch.zeros(dummy_batch_size))
frm_output_size = self.fenc.forward(x).size()[1]
return frm_output_size
class Dense4012FrameRNN(MRISequenceNet):
def __init__(self, n_classes, use_cuda, **kwargs):
super(Dense4012FrameRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.name = "Dense4012FrameRNN"
        self.n_classes = n_classes
        input_shape = kwargs.get("input_shape", (3, 32, 32))
seq_output_size = kwargs.get("seq_output_size", 128)
seq_dropout = kwargs.get("seq_dropout", 0.1)
seq_attention = kwargs.get("seq_attention", True)
seq_bidirectional = kwargs.get("seq_bidirectional", True)
seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
        seq_rnn_type = kwargs.get("seq_rnn_type", kwargs.get("rnn_type", "LSTM"))
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
logger.info("============================")
logger.info("Dense4012FrameRNN parameters")
logger.info("============================")
logger.info("seq_output_size: {}".format(seq_output_size))
logger.info("seq_dropout: {}".format(seq_dropout))
logger.info("seq_attention: {}".format(seq_attention))
logger.info("seq_bidirectional: {}".format(seq_bidirectional))
logger.info("seq_max_seq_len: {}".format(seq_max_seq_len))
logger.info("seq_rnn_type: {}".format(seq_rnn_type))
logger.info("pretrained: {}".format(pretrained))
logger.info("requires_grad: {}\n".format(requires_grad))
self.fenc = densenet_40_12_bc(pretrained=pretrained, requires_grad=requires_grad)
frm_output_size = self.get_frm_output_size(input_shape)
        self.senc = RNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
                        dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
                        rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
def get_frm_output_size(self, input_shape):
input_shape = list(input_shape)
input_shape.insert(0,1)
dummy_batch_size = tuple(input_shape)
x = torch.autograd.Variable(torch.zeros(dummy_batch_size))
frm_output_size = self.fenc.forward(x).size()[1]
return frm_output_size
################################################################################
# Sequence Container Meta Models
################################################################################
class MRIMetaSequenceRNN(MRISequenceNet):
def __init__(self, frame_encoder, n_classes, use_cuda, **kwargs):
super(MRIMetaSequenceRNN, self).__init__(frame_encoder=None, seq_encoder=None, use_cuda=use_cuda)
self.n_classes = n_classes
input_shape = kwargs.get("input_shape", (3, 32, 32))
self.fenc = frame_encoder
frm_output_size = self.get_frm_output_size(input_shape)
seq_output_size = kwargs.get("seq_output_size", 128)
seq_dropout = kwargs.get("seq_dropout", 0.1)
seq_attention = kwargs.get("seq_attention", True)
seq_bidirectional = kwargs.get("seq_bidirectional", True)
seq_max_seq_len = kwargs.get("seq_max_seq_len", 15)
seq_rnn_type = kwargs.get("seq_rnn_type", "LSTM")
self.senc = MetaRNN(n_classes=n_classes, input_size=frm_output_size, hidden_size=seq_output_size,
dropout=seq_dropout, max_seq_len=seq_max_seq_len, attention=seq_attention,
rnn_type=seq_rnn_type, bidirectional=seq_bidirectional, use_cuda=self.use_cuda)
meta_input_shape = kwargs.get("meta_input_shape", 3)
self.classifier = self.get_classifier(seq_output_size, n_classes, seq_bidirectional, meta_input_shape)
def get_frm_output_size(self, input_shape):
input_shape = list(input_shape)
input_shape.insert(0,1)
dummy_batch_size = tuple(input_shape)
x = torch.autograd.Variable(torch.zeros(dummy_batch_size))
frm_output_size = self.fenc.forward(x).size()[1]
return frm_output_size
def get_classifier(self, seq_output_size, n_classes, seq_bidirectional,
meta_input_shape):
b = 2 if seq_bidirectional else 1
meta_input_shape = np.prod([meta_input_shape])
classifier = nn.Linear(int(b * seq_output_size + meta_input_shape), int(n_classes))
return classifier
def embedding(self, x, hidden):
"""Get learned representation of MRI sequence"""
x, meta = x
return super(MRIMetaSequenceRNN, self).embedding(x, hidden)
def forward(self, x, hidden=None):
x, meta = x
if self.use_cuda and not meta.is_cuda:
meta = meta.cuda()
if self.use_cuda and not x.is_cuda:
x = x.cuda()
x = super(MRIMetaSequenceRNN, self).forward(x, hidden)
concats = torch.cat((x.view(x.size(0), -1).float(),
meta.view(meta.size(0), -1).float()), 1)
outputs = self.classifier(concats)
return outputs
def predict_proba(self, data_loader, binary=True, pos_label=1):
""" Forward inference """
y_pred = []
for i, data in enumerate(data_loader):
x, y = data
x = [Variable(x_) if not self.use_cuda else Variable(x_).cuda() for x_ in x]
y = Variable(y) if not self.use_cuda else Variable(y).cuda()
h0 = self.init_hidden(x[0].size(0))
outputs = self(x, h0)
y_hat = F.softmax(outputs, dim=1)
y_hat = y_hat.data.numpy() if not self.use_cuda else y_hat.cpu().data.numpy()
y_pred.append(y_hat)
# empty cuda cache
if self.use_cuda:
torch.cuda.empty_cache()
y_pred = np.concatenate(y_pred)
return y_pred[:, pos_label] if binary else y_pred
class MetaVGG16FrameRNN(MRIMetaSequenceRNN):
def __init__(self, n_classes, use_cuda, **kwargs):
self.name = "MetaVGG16FrameRNN"
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
frame_encoder = vgg16_bn(pretrained=pretrained, requires_grad=requires_grad)
super(MetaVGG16FrameRNN, self).__init__(frame_encoder=frame_encoder,
n_classes=n_classes,
use_cuda=use_cuda,
**kwargs)
class MetaDense4012FrameRNN(MRIMetaSequenceRNN):
def __init__(self, n_classes, use_cuda, **kwargs):
self.name = "MetaDense4012FrameRNN"
pretrained = kwargs.get("pretrained", True)
requires_grad = kwargs.get("requires_grad", False)
frame_encoder = densenet_40_12_bc(pretrained=pretrained, requires_grad=requires_grad)
super(MetaDense4012FrameRNN, self).__init__(frame_encoder=frame_encoder,
n_classes=n_classes,
use_cuda=use_cuda,
**kwargs)
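################################################################################
# Minimal usage sketch (illustrative only, not part of the pipeline): build a
# frame-encoder/RNN model and run a forward pass over a dummy batch of 4
# sequences of 15 grayscale 32x32 frames. Assumes MRISequenceNet.forward
# encodes each frame with `fenc` and pools/classifies the sequence with `senc`.
################################################################################
if __name__ == "__main__":
    import torch
    from torch.autograd import Variable

    model = LeNetFrameRNN(n_classes=2, use_cuda=False,
                          input_shape=(1, 32, 32), seq_max_seq_len=15)
    x = Variable(torch.randn(4, 15, 1, 32, 32))  # (batch, seq, channels, H, W)
    h0 = model.init_hidden(4)
    logits = model(x, h0)
    print(logits.size())  # expected: (4, 2)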
| ukb-cardiac-mri-master | ukb/models/mri.py |
"""
Simple random grid search
"""
import os
import sys
import glob
import copy
import time
import torch
import logging
import numpy as np
import pandas as pd
from itertools import product
import metrics
from metrics import *
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from utils import timeit, format_time
logger = logging.getLogger(__name__)
def tune_threshold(y_true, y_prob, metric='roc_auc_score'):
"""
Function for tuning the threshold
"""
try:
logger.info("The tune_metric chosen is {}".format(metric))
metric = getattr(metrics, metric) if type(metric) is str else metric
thresholds = np.arange(0.01, 1, 0.01)
best_score = 0.0
best_threshold = 0.5
for threshold in thresholds:
y_pred = np.array([1 if p > threshold else 0 for p in y_prob])
auc_score = metric(y_true, y_pred)
if auc_score > best_score:
best_score = auc_score
best_threshold = threshold
dev_threshold = best_threshold
return dev_threshold
except:
try:
logger.info("The tune_metric chosen is disabled.\n"
"Fixed threshold chosen: {}".format(metric))
return float(metric)
except:
sys.exit("Invalid tune_metric input!\n"
"Valid option1: `str` eg. roc_auc_score\n"
"Valid option2: `float` eg. 0.7\n")
class Trainer(object):
def __init__(self, model_class, model_class_params, noise_aware=False, use_cuda=False, seed=1234):
"""
:param model_class:
:param model_class_params:
:param noise_aware:
:param use_cuda:
:param seed:
"""
self.model_class = model_class
self.model_class_params = model_class_params
self.noise_aware = noise_aware
self.use_cuda = use_cuda
self.seed = seed
self.model_class_params.update({"use_cuda": use_cuda})
@timeit
def fit(self, train, dev, test=None, update_freq=5, checkpoint_freq=10, checkpoint_dir=".", **kwargs):
"""
Fit target model
:param train:
:param dev:
:param test:
:param update_freq:
:param checkpoint_freq:
:param checkpoint_dir:
:param kwargs:
:return:
"""
self._set_seed()
lr = kwargs.get('lr', 0.001)
momentum = kwargs.get('momentum', 0.9)
n_epochs = kwargs.get('n_epochs', 10)
metric = kwargs.get('metric', 'roc_auc_score')
tune_metric = kwargs.get('tune_metric', 'roc_auc_score')
batch_size = kwargs.get('batch_size', 4)
num_workers = kwargs.get('num_workers', 1)
threshold = kwargs.get('threshold', 0.5)
use_class_weights = kwargs.get('class_weights', False)
use_scheduler = kwargs.get('scheduler', True)
l2_penalty = kwargs.get('l2_penalty', 0.001)
num_frames = kwargs.get('num_frames', None)
binary = self.model_class_params.get('n_classes', 2) == 2
verbose = kwargs.get('verbose', False)
checkpoint_burn = kwargs.get('checkpoint_burn', 1)
logger.info("============================")
logger.info("Trainer Config")
logger.info("============================")
logger.info("lr: {}".format(lr))
logger.info("momentum: {}".format(momentum))
logger.info("tune_metric: {}".format(tune_metric))
logger.info("batch_size: {}".format(batch_size))
logger.info("l2_penalty: {}".format(l2_penalty))
logger.info("num_frames: {}".format(num_frames))
logger.info("use_scheduler: {}".format(use_scheduler))
logger.info("checkpoint_burn: {}".format(checkpoint_burn))
# get metric function
metric = getattr(metrics, metric) if type(metric) is str else metric
# build model params dictionary
params = {name:v if name not in kwargs else kwargs[name] for name,v in self.model_class_params.items()}
model = self.model_class(**params) if not self.use_cuda else self.model_class(**params).cuda()
# setup checkpointing / model state name
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
filelist = glob.glob("{}/{}*".format(checkpoint_dir, model.name))
checkpoint_name = "{}{}".format(model.name, len(filelist))
train_loader, dev_loader, test_loader = self._get_data_loaders(train, dev, test, batch_size, num_workers)
criterion = self._get_criterion(train_loader, use_weights=use_class_weights)
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=l2_penalty)
if use_scheduler:
scheduler = ReduceLROnPlateau(optimizer, 'min')
best_score = -0.01
best_threshold = threshold
best_model = None
start_time = time.time()
for epoch in range(n_epochs):
train_loss, correct, total = 0.0, 0.0, 0.0
for i, batch in enumerate(train_loader):
x,y = batch
if num_frames is not None:
if self.noise_aware:
y = y.view(-1)
else:
y = np.repeat(y, num_frames)
if isinstance(x, list):
x = [Variable(x_) if not self.use_cuda else Variable(x_).cuda() for x_ in x]
h0 = model.init_hidden(x[0].size(0))
else:
x = Variable(x) if not self.use_cuda else Variable(x).cuda()
h0 = model.init_hidden(x.size(0))
y = Variable(y) if not self.use_cuda else Variable(y).cuda()
optimizer.zero_grad()
outputs = model(x, h0)
# BCELoss assumes binary classification and relies on probability of second class
                if self.noise_aware:
                    loss = criterion(outputs[:, 1], y.float())
                    # binarize the soft labels at the current threshold so the
                    # running accuracy below is computed over hard labels
                    y = (y - best_threshold + 0.5).round().long()
else:
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
total += y.size(0)
_, predicted = torch.max(outputs.data, 1)
correct += predicted.eq(y.data).cpu().sum()
# progress update
if (epoch + 1) % update_freq == 0:
elapsed = time.time() - start_time
msg = 'Epoch {:>8} | {:>10} | Loss: {:2.3f} | Acc. {:>5}% ({}/{})'
avg_loss = train_loss / (i+1)
acc = 100.0 * correct / total
logger.info(msg.format("[{}/{}]".format(epoch + 1, n_epochs), format_time(elapsed),
avg_loss, "{:2.1f}".format(acc), int(correct), int(total)))
# dev set checkpointing
if epoch + 1 >= checkpoint_burn and ((epoch + 1) % checkpoint_freq == 0 or epoch + 1 == n_epochs):
dev_true, dev_pred, dev_prob, dev_threshold = self._evaluate(model, dev_loader, "DEV",
binary=binary,
threshold=best_threshold,
tune_metric=tune_metric)
try:
score = metric(dev_true, dev_prob)
except:
score = metric(dev_true, dev_pred)
if test:
test_true, test_pred, test_prob, _ = self._evaluate(model, test_loader, "TEST",
binary=binary, threshold=dev_threshold,
tune_metric=tune_metric)
                    if verbose:
                        classification_summary(test_true, test_pred, [], test_prob)
if (score > 0.0 and score > best_score) or best_score == -0.01:
best_score = score
best_threshold = dev_threshold
best_model = {
'epoch': epoch,
'model': copy.deepcopy(model),
'state_dict': copy.deepcopy(model.state_dict()),
'best_score': best_score,
'optimizer': optimizer.state_dict()
}
torch.cuda.empty_cache()
if use_scheduler:
score = train_loss / (i+1)
scheduler.step(score)
        # return the model snapshot from the best dev-set checkpoint
        return best_model['model'], best_score, best_threshold
def save(self, state, checkpoint_root_dir, checkpoint_name):
"""
Dump model & optimizer state_dict to file
:param state:
:param checkpoint_root_dir:
:param checkpoint_name:
:return:
"""
filename = "{}/{}".format(checkpoint_root_dir, checkpoint_name)
torch.save(state, filename)
logger.info("Saved model to {}".format(filename))
def load(self, model, checkpoint_root_dir, checkpoint_name):
"""
Load saved model. Assumes only state_dict is saved to file.
:param model:
:param checkpoint_root_dir:
:param checkpoint_name:
:return:
"""
filename = "{}/{}".format(checkpoint_root_dir, checkpoint_name)
if os.path.exists(filename):
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
else:
logger.info("No checkpoint found at '{}'".format(filename))
return model
################################################################################
# INTERNAL
################################################################################
def _set_seed(self):
"""
Set seed for deterministic random behavior for PyTorch on CPU and GPU
:return:
"""
torch.cuda.random.manual_seed_all(self.seed)
torch.manual_seed(self.seed)
np.random.seed(seed=int(self.seed))
def _get_criterion(self, train_loader, use_weights=False):
"""
NOTE: Noise aware loss assumes binary classes
:param train_loader:
:param use_weights:
:return:
"""
        if use_weights and not self.noise_aware:
            # simple inverse-frequency class weights estimated from the
            # training labels (one plausible weighting scheme)
            labels = np.hstack([y.numpy() for _, y in train_loader])
            counts = np.bincount(labels, minlength=self.model_class_params.get("n_classes", 2))
            class_weights = torch.FloatTensor(1.0 / np.maximum(counts, 1))
            class_weights = class_weights.cuda() if self.use_cuda else class_weights
            return nn.CrossEntropyLoss(weight=class_weights)
        elif not self.noise_aware:
            return nn.CrossEntropyLoss()
        return nn.BCEWithLogitsLoss(size_average=False)
def _get_data_loaders(self, train, dev, test=None, batch_size=4, num_workers=1):
"""
Initialize dataloaders here so we can tune over batch_size
:param train:
:param dev:
:param test:
:param batch_size:
:param num_workers:
:return:
"""
train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
dev_loader = DataLoader(dev, batch_size=batch_size, shuffle=False, num_workers=num_workers)
test_loader = None if not test else DataLoader(test, batch_size=batch_size,
shuffle=False, num_workers=num_workers)
return train_loader, dev_loader, test_loader
def _scorer(self, y_true, y_pred_prob, y_pred, name, binary=False, pos_label=1):
"""
Print performance metrics
:param y_true:
:param y_pred:
:param name:
:return:
"""
fpr, tpr, thresholds = roc_curve(y_true, y_pred_prob, pos_label=pos_label)
precision, recall, _ = precision_recall_curve(y_true, y_pred_prob)
roc_score = auc(fpr, tpr) * 100.0
prc_score = auc(recall, precision) * 100.0
logloss = log_loss(y_true, y_pred_prob)
average = "binary" if binary else "micro"
precision = precision_score(y_true, y_pred, average=average) * 100.0
recall = recall_score(y_true, y_pred, average=average) * 100.0
f1 = f1_score(y_true, y_pred, average=average) * 100.0
acc = accuracy_score(y_true, y_pred) * 100.0
msg = "{:<6} log loss: {:2.3f} | ROC: {:>5} | PRC: {:>5} | accuracy: {:>5} | P/R/F1: {:>5}"
prf1 = "{:>5} / {:>5} / {:>5}".format("%2.1f" % precision, "%2.1f" % recall, "%2.1f" % f1)
return msg.format(" [%s]" % name, logloss,
"%2.1f" % roc_score,
"%2.1f" % prc_score,
"%2.1f" % acc, prf1)
def _evaluate(self, model, data_loader, name, binary=False, threshold=0.5, tune_metric='roc_auc_score'):
"""
Generate label predictions
:param model:
:param data_loader:
:param name:
:return:
"""
# save rng state, seed for deterministic evaluation
rng_gpu = torch.cuda.random.get_rng_state_all()
rng_cpu = torch.random.get_rng_state()
torch.cuda.random.manual_seed_all(self.seed)
torch.random.manual_seed(self.seed)
y_true = np.hstack([y.numpy() for x,y in data_loader])
y_prob, y_pred = model.predict(data_loader, binary=binary, pos_label=1,
threshold=threshold, return_proba=True)
if name == "DEV":
threshold = tune_threshold(y_true, y_prob, tune_metric)
logger.info("Tuned threshold: {:.2f}".format(threshold))
if binary:
y_pred = np.array([1 if p > threshold else 0 for p in y_prob])
else:
y_pred = np.argmax(y_prob, 1)
msg = self._scorer(y_true, y_prob, y_pred, name, binary)
logger.info(msg)
# restore rng state to all devices
torch.cuda.set_rng_state_all(rng_gpu)
torch.random.set_rng_state(rng_cpu)
return y_true, y_pred, y_prob, threshold
class GridSearchTrainer(Trainer):
def __init__(self, model_class, model_class_params, param_grid, n_model_search,
noise_aware=False, use_cuda=False, seed=1234):
"""
Single-threaded random grid search
:param model_class:
:param model_class_params:
:param param_grid:
:param n_model_search:
:param seed:
"""
super(GridSearchTrainer, self).__init__(model_class, model_class_params,
noise_aware=noise_aware, use_cuda=use_cuda,
seed=seed)
# use fixed random seed for consistent parameter grid
self.rng = np.random.RandomState(1234)
self.param_grid = param_grid
self.param_names = [name for name in self.param_grid]
self.n_model_search = n_model_search
@timeit
def fit(self, train, dev, test=None, update_freq=5, checkpoint_freq=10, checkpoint_dir=".",
metric='roc_auc_score', **kwargs):
"""
Random grid search
:param train:
:param dev:
:param test:
:param update_freq:
:param checkpoint_freq:
:param checkpoint_dir:
:param metric: scikit-learn metric (function or str) or custom score function
:param kwargs:
:return:
"""
hyperparams = self.get_hyperparams(self.n_model_search)
self._print_parameter_space()
metric = getattr(metrics, metric) if type(metric) is str else metric
scores = {}
curr_best = -0.01
tuned_threshold = 0.5
best_model = None
for i, params in enumerate(hyperparams):
params = dict(zip(self.param_names, params))
model_name = "MODEL [{}]".format(i)
logger.info(model_name)
# override any grid search params
params.update(kwargs)
logger.info(params)
fit_time, model, score, dev_threshold = super(GridSearchTrainer, self).fit(
train=train, dev=dev, test=test, update_freq=update_freq, metric=metric,
checkpoint_freq=checkpoint_freq, checkpoint_dir=checkpoint_dir, **params
)
scores[model_name] = [score, model_name, params]
if score > curr_best:
curr_best = score
tuned_threshold = dev_threshold
best_model = {
'model': copy.deepcopy(model),
'state_dict': copy.deepcopy(model.state_dict()),
'best_score': curr_best,
'params': copy.deepcopy(params),
'threshold': copy.deepcopy(tuned_threshold)
}
checkpoint_name = "{}_{}".format(model.name, i)
logger.info("NEW BEST: {} {}".format(metric.__name__, curr_best))
self.save(best_model, checkpoint_dir, checkpoint_name)
self.save(best_model, checkpoint_dir, "{}_BEST".format(model.name))
logger.info(("#" * 90) + "\n")
del model
torch.cuda.empty_cache()
# print performance summary
logger.info("=" * 90)
self._print_grid_search_summary(scores, metric)
logger.info("Best [DEV] score: {} {}\n".format(metric.__name__, curr_best))
model = best_model['model']
logger.info(model)
logger.info("=" * 90)
return model, curr_best, tuned_threshold
def search_space(self):
"""
Get full parameter grid
:return:
"""
return product(*[self.param_grid[name] for name in self.param_grid])
def get_hyperparams(self, n_model_search=5):
"""
Fetch n_model_search parameter sets
:param n_model_search:
:return:
"""
ss = sorted(list(set(self.search_space())))
n = min(len(ss), n_model_search)
self.rng.seed(1234)
self.rng.shuffle(ss)
return ss[0:n]
################################################################################
# INTERNAL
################################################################################
def _print_grid_search_summary(self, scores, metric):
""" Print sorted grid search results """
df = []
        for score, name, params in sorted(scores.values(), reverse=True):
params.update({metric.__name__: score, "model": name})
df.append(params)
print(pd.DataFrame(df))
def _print_parameter_space(self):
"""
Show full hyperparameter search space
:param name:
:return:
"""
name = "{}".format(self.model_class.__name__)
ss = self.get_hyperparams(self.n_model_search)
logger.info("=" * 90)
n,N = len(ss), len(list(self.search_space()))
logger.info("Model Parameter Space {}/{} {:2.2f}% (grid search seed={}):".format(n, N, float(n)/N * 100, 1234))
logger.info("=" * 90)
padding = 10
for i, params in enumerate(ss):
param_set = [": ".join(x) for x in zip(self.param_names, map(str, params))]
tmpl = ("{:<" + str(padding) + "} ") * len(param_set)
logger.info("{}_{} | {}".format(name, i, tmpl.format(*param_set)))
logger.info("")
| ukb-cardiac-mri-master | ukb/models/trainer.py |
#!/usr/bin/env python
"""
vgg.py
- VGG11/13/16/19 and VGG11_bn/VGG13_bn/VGG16_bn/VGG19_bn in torchvision
- load the features layers weights only (without the classifier)
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from torchvision.models.vgg import model_urls, make_layers, cfg
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
def __init__(self, features, init_weights=True):
super(VGG, self).__init__()
self.features = features
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def extract_feature_state_dict(pretrained_state_dict, model):
model_state_dict = model.state_dict()
pretrained_state_dict = {k: v for k, v in pretrained_state_dict.items() if k in model_state_dict}
return pretrained_state_dict
def vgg11(pretrained=False, requires_grad=False, **kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A']), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg11'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg11_bn(pretrained=False, requires_grad=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg11_bn'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg13(pretrained=False, requires_grad=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg13'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg13_bn(pretrained=False, requires_grad=False, **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg13_bn'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg16(pretrained=False, requires_grad=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg16'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg16_bn(pretrained=False, requires_grad=False, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg16_bn'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg19(pretrained=False, requires_grad=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg19'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def vgg19_bn(pretrained=False, requires_grad=False, **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['vgg19_bn'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
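def test_vgg(size=32):
    """Illustrative smoke test: the encoder returns flattened conv features,
    e.g. (1, 512) for a 3x32x32 input (five 2x poolings -> 1x1x512 maps).
    Downloads ImageNet weights on first use."""
    import torch
    from torch.autograd import Variable
    net = vgg16_bn(pretrained=True)
    y = net(Variable(torch.randn(1, 3, size, size)))
    print(y.size())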
| ukb-cardiac-mri-master | ukb/models/frame/vgg.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from os.path import dirname, join
#Implementation based on https://github.com/andreasveit/densenet-pytorch
densenet_40_12_bc_weights_path = join(dirname(__file__), "pretrained_densenet_4012BC.pth.tar")
def densenet_40_12_bc(pretrained=False, requires_grad=False, **kwargs):
    # DenseNet-BC with depth 40 and growth rate 12; the 10-way classifier
    # head is stripped below so the module returns convolutional features
    depth = 40
    num_classes = 10
    growth_rate = 12
    reduce_rate = 0.5
    drop_rate = 0.0
    bottleneck = True
    model = DenseNet3(depth, num_classes, growth_rate, reduction=reduce_rate,
                      bottleneck=bottleneck,
                      dropRate=drop_rate)
if pretrained:
checkpoint = torch.load(densenet_40_12_bc_weights_path)
model.load_state_dict(checkpoint['state_dict'])
model.requires_grad = requires_grad
# Removing linear layer
removed = list(model.children())[:-1]
model= torch.nn.Sequential(*removed)
return model
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
super(DenseBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)
def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(in_planes+i*growth_rate, growth_rate, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class DenseNet3(nn.Module):
def __init__(self, depth, num_classes, growth_rate=12,
reduction=0.5, bottleneck=True, dropRate=0.0):
super(DenseNet3, self).__init__()
in_planes = 2 * growth_rate
n = (depth - 4) / 3
if bottleneck == True:
n = n/2
block = BottleneckBlock
else:
block = BasicBlock
n = int(n)
# 1st conv before any dense block
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes+n*growth_rate)
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes*reduction))
# 2nd block
self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes+n*growth_rate)
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes*reduction))
# 3rd block
self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes+n*growth_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
# changed this line so avgpool is a layer in the net!!
self.avpl = nn.AvgPool2d(8)
self.fc = nn.Linear(in_planes, num_classes)
self.in_planes = in_planes
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = self.avpl(out)
out = out.view(-1, self.in_planes)
return self.fc(out)
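def test_densenet_40_12_bc():
    """Illustrative smoke test: with the classifier head stripped, the encoder
    returns the pooled feature map, e.g. (1, 132, 1, 1) for a 3x32x32 input."""
    from torch.autograd import Variable
    net = densenet_40_12_bc(pretrained=False)
    y = net(Variable(torch.randn(1, 3, 32, 32)))
    print(y.size())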
| ukb-cardiac-mri-master | ukb/models/frame/densenet_av.py |
'''
DenseNet in PyTorch.
From: https://github.com/kuangliu/pytorch-cifar
https://arxiv.org/pdf/1608.06993.pdf
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4*growth_rate)
self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out,x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNetFrameEncoder(nn.Module):
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
super(DenseNetFrameEncoder, self).__init__()
self.growth_rate = growth_rate
num_planes = 2*growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3]*growth_rate
self.bn = nn.BatchNorm2d(num_planes)
#self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
out = out.view(out.size(0), -1)
#out = self.linear(out)
return out
def DenseNet121():
return DenseNetFrameEncoder(Bottleneck, [6,12,24,16], growth_rate=32)
def DenseNet169():
return DenseNetFrameEncoder(Bottleneck, [6,12,32,32], growth_rate=32)
def DenseNet201():
return DenseNetFrameEncoder(Bottleneck, [6,12,48,32], growth_rate=32)
def DenseNet161():
return DenseNetFrameEncoder(Bottleneck, [6,12,36,24], growth_rate=48)
def densenet_cifar():
return DenseNetFrameEncoder(Bottleneck, [6,12,24,16], growth_rate=12)
def test_densenet():
net = densenet_cifar()
x = torch.randn(1,3,32,32)
y = net(Variable(x))
print(y)
| ukb-cardiac-mri-master | ukb/models/frame/densenet.py |
from .fnn import *
from .vgg import *
from .lenet import *
from .densenet import *
from .densenet_pretrained import *
from .densenet_av import *
| ukb-cardiac-mri-master | ukb/models/frame/__init__.py |
import torch.nn as nn
import torch.nn.functional as F
################################################################################
# Simple CNN Models
################################################################################
class LeNetFrameEncoder(nn.Module):
def __init__(self, input_shape=(1, 32, 32), kernel_size=5, output_size=84):
super(LeNetFrameEncoder, self).__init__()
n_channels, width, height = input_shape
conv_output = self.calculate_conv_output(input_shape, kernel_size)
self.conv1 = nn.Conv2d(n_channels, 6, kernel_size)
self.conv2 = nn.Conv2d(6, 16, kernel_size)
self.fc1 = nn.Linear(conv_output, 120)
self.fc2 = nn.Linear(120, output_size)
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.max_pool2d(out, 2)
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
return out
def calculate_conv_output(self, input_shape, kernel_size):
output1 = (6, input_shape[1]-kernel_size+1, input_shape[2]-kernel_size+1)
output2 = (6, int(output1[1]/2), int(output1[2]/2))
output3 = (16, output2[1]-kernel_size+1, output2[2]-kernel_size+1)
output4 = (16, int(output3[1]/2), int(output3[2]/2))
return output4[0]*output4[1]*output4[2]
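# Illustrative sketch: the encoder maps 1x32x32 frames to `output_size`-dim
# embeddings; calculate_conv_output gives the flattened size feeding fc1
# (16 * 5 * 5 = 400 for a 32x32 input with kernel_size=5).
#
#   import torch
#   from torch.autograd import Variable
#   enc = LeNetFrameEncoder(input_shape=(1, 32, 32), output_size=84)
#   feats = enc(Variable(torch.randn(8, 1, 32, 32)))  # -> (8, 84)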
| ukb-cardiac-mri-master | ukb/models/frame/lenet.py |
import torch.nn as nn
import torch.nn.functional as F
################################################################################
# FNN Models
################################################################################
class FNNFrameEncoder(nn.Module):
    def __init__(self, input_size=1024, layers=[64, 32]):
        super(FNNFrameEncoder, self).__init__()
        layers = [input_size] + layers
        self.n_layers = len(layers) - 1
        for i in range(self.n_layers):
            self.add_module('fc{}'.format(i+1), nn.Linear(layers[i], layers[i+1]))
    def forward(self, x):
        x = x.view(-1, self.num_flat_features(x))
        # apply every registered fully-connected layer with a ReLU, so the
        # network depth follows the `layers` argument
        for i in range(self.n_layers):
            x = F.relu(getattr(self, 'fc{}'.format(i+1))(x))
        return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
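# Illustrative sketch: with the defaults, a flattened 32x32 frame (1024
# features) is mapped to a 32-dim embedding.
#
#   import torch
#   from torch.autograd import Variable
#   enc = FNNFrameEncoder(input_size=1024, layers=[64, 32])
#   feats = enc(Variable(torch.randn(8, 1, 32, 32)))  # -> (8, 32)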
| ukb-cardiac-mri-master | ukb/models/frame/fnn.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
from torchvision.models.densenet import model_urls, _DenseLayer, _DenseBlock, _Transition
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
# Linear layer
# self.classifier = nn.Linear(num_features, num_classes)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
# default: kernel_size=7 | changed to 1 for smaller images | >=61
out = F.avg_pool2d(out, kernel_size=1, stride=1).view(features.size(0), -1)
#out = self.classifier(out)
return out
def extract_feature_state_dict(pretrained_state_dict, model):
model_state_dict = model.state_dict()
pretrained_state_dict = {k: v for k, v in pretrained_state_dict.items() if k in model_state_dict}
return pretrained_state_dict
def densenet121(pretrained=False, requires_grad=False, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
**kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['densenet121'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def densenet169(pretrained=False, requires_grad=False, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32),
**kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['densenet169'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def densenet201(pretrained=False, requires_grad=False, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32),
**kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['densenet201'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def densenet161(pretrained=False, requires_grad=False, **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24),
**kwargs)
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['densenet161'])
pretrained_state_dict = extract_feature_state_dict(pretrained_state_dict, model)
model.load_state_dict(pretrained_state_dict)
model.requires_grad = requires_grad
return model
def test_densenet(size, dense):
from torch.autograd import Variable
dense = int(dense)
if dense==121:
net = densenet121(pretrained=True)
elif dense==161:
net = densenet161(pretrained=True)
elif dense==169:
net = densenet169(pretrained=True)
elif dense==201:
net = densenet201(pretrained=True)
x = torch.randn(1,3,size,size)
y = net(Variable(x))
print(y.shape)
| ukb-cardiac-mri-master | ukb/models/frame/densenet_pretrained.py |
from .pooled import *
from .rnn import * | ukb-cardiac-mri-master | ukb/models/sequence/__init__.py |
"""
Simple models for encoding dense representations of sequences
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
################################################################################
# Summing Models
################################################################################
class SeqSumPoolingEncoder(nn.Module):
"""Sum all frame representations into a single feature vector"""
def __init__(self, n_classes, input_size):
super(SeqSumPoolingEncoder, self).__init__()
self.linear = nn.Linear(input_size, n_classes, bias=False)
def init_hidden(self, batch_size):
return None
def embedding(self, x, hidden=None):
"""Get learned (summed) representation"""
x = torch.sum(x, 1)
return x
def forward(self, x, hidden=None):
x = torch.sum(x, 1)
x = self.linear(x)
return x | ukb-cardiac-mri-master | ukb/models/sequence/pooled.py |
"""
Simple models for encoding dense representations of sequences
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
################################################################################
# Recurrent Neural Network Models
################################################################################
class RNN(nn.Module):
def __init__(self, n_classes, input_size, hidden_size, rnn_type="LSTM", dropout=0.0,
max_seq_len=15, attention=True, bidirectional=True, use_cuda=False):
"""
Initalize RNN module
:param n_classes:
:param input_size:
:param hidden_size:
:param rnn_type: GRU or LSTM
:param dropout:
:param max_seq_len:
:param attention:
:param bidirectional:
:param use_cuda:
"""
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.n_classes = n_classes
self.attention = attention
self.max_seq_len = max_seq_len
self.use_cuda = use_cuda
self.rnn_type = rnn_type
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(input_size, hidden_size, batch_first=True,
dropout=dropout, bidirectional=self.bidirectional)
self.dropout = nn.Dropout(dropout)
b = 2 if self.bidirectional else 1
if attention:
self.attn_linear_w_1 = nn.Linear(b * hidden_size, b * hidden_size, bias=True)
self.attn_linear_w_2 = nn.Linear(b * hidden_size, 1, bias=False)
self.linear = nn.Linear(b * hidden_size, n_classes)
def embedding(self, x, hidden, x_mask=None):
"""
Get learned representation
"""
        x_mask = self._get_mask(x) if x_mask is None else x_mask
output, hidden = self.rnn(x, hidden)
output = self.dropout(output)
if self.attention:
output = self._attn_mean_pooling(output, x_mask)
else:
output = self._mean_pooling(output, x_mask)
return output
def _mean_pooling(self, x, x_mask):
"""
Mean pooling of RNN hidden states
TODO: add variable sequence lengths back in
"""
return torch.mean(x, 1)
# x_lens = x_mask.data.eq(0).long().sum(dim=1)
# if self.use_cuda:
# weights = Variable(torch.ones(x.size()).cuda() / x_lens.unsqueeze(1).float())
# else:
# weights = Variable(torch.ones(x.size()) / x_lens.unsqueeze(1).float())
# weights.data.masked_fill_(x_mask.data, 0.0)
# output = torch.bmm(x.transpose(1, 2), weights.unsqueeze(2)).squeeze(2)
# return output
def _attn_mean_pooling(self, x, x_mask):
"""
Weighted mean pooling of RNN hidden states, where weights are
calculated via an attention layer where the attention weight is
a = T' . tanh(Wx + b)
where x is the input, b is the bias.
"""
emb_squish = F.tanh(self.attn_linear_w_1(x))
emb_attn = self.attn_linear_w_2(emb_squish)
emb_attn.data.masked_fill_(x_mask.unsqueeze(2).data, float("-inf"))
        # normalize the attention weights over the sequence (frame) dimension
        emb_attn_norm = F.softmax(emb_attn.squeeze(2), dim=1)
emb_attn_vectors = torch.bmm(x.transpose(1, 2), emb_attn_norm.unsqueeze(2)).squeeze(2)
return emb_attn_vectors
def _get_mask(self, x):
"""
Return an empty mask
:param x:
:return:
"""
x_mask = Variable(torch.zeros(x.size(0), self.max_seq_len).byte())
return x_mask.cuda() if self.use_cuda else x_mask
def forward(self, x, hidden, x_mask=None):
"""
Forward pass of the network
:param x:
:param hidden:
:param x_mask: 0-1 byte mask for variable length sequences
:return:
"""
        x_mask = self._get_mask(x) if x_mask is None else x_mask
output, hidden = self.rnn(x, hidden)
output = self.dropout(output)
if self.attention:
output = self._attn_mean_pooling(output, x_mask)
else:
output = self._mean_pooling(output, x_mask)
output = self.linear(output)
return output
def init_hidden(self, batch_size):
"""
Initialize hidden state params
:param batch_size:
:return:
"""
b = 2 if self.bidirectional else 1
if self.rnn_type == "LSTM":
h0 = (Variable(torch.zeros(b, batch_size, self.hidden_size)),
Variable(torch.zeros(b, batch_size, self.hidden_size)))
h0 = [h0[0].cuda(), h0[1].cuda()] if self.use_cuda else h0
else:
h0 = Variable(torch.zeros(b, batch_size, self.hidden_size))
h0 = h0.cuda() if self.use_cuda else h0
return h0
class MetaRNN(RNN):
"""
RNN class for Meta data concatenating into seq_output before classifier
"""
def forward(self, x, hidden, x_mask=None):
"""
Forward pass of the network
:param x:
:param hidden:
:param x_mask: 0-1 byte mask for variable length sequences
:return:
"""
        x_mask = self._get_mask(x) if x_mask is None else x_mask
output, hidden = self.rnn(x, hidden)
output = self.dropout(output)
if self.attention:
output = self._attn_mean_pooling(output, x_mask)
else:
output = self._mean_pooling(output, x_mask)
return output
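# Illustrative sketch: encode a dummy batch of 4 sequences of 15 84-dim frame
# embeddings with the default bidirectional attention LSTM.
#
#   rnn = RNN(n_classes=2, input_size=84, hidden_size=128, max_seq_len=15)
#   h0 = rnn.init_hidden(4)
#   logits = rnn(Variable(torch.randn(4, 15, 84)), h0)  # -> (4, 2)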
| ukb-cardiac-mri-master | ukb/models/sequence/rnn.py |
import random

from .ukbb import *
from .augmentations import *
from .multi_series import *
from torchvision.transforms import Compose
class RandomTransforms(object):
"""Base class for a list of transformations with randomness
Args:
transforms (list or tuple): list of transformations
"""
def __init__(self, transforms, out_range=(0.0, 1.0)):
assert isinstance(transforms, (list, tuple))
self.transforms = transforms
self.out_range = out_range
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomOrder(RandomTransforms):
"""Apply a list of transformations in a random order
"""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
rescale = RescaleIntensity(out_range=self.out_range)
img = rescale(img)
return img
class ComposeMultiChannel(object):
    """Composes several transforms together for multi channel operations.
    Args:
        transforms (list of ``Transform`` objects): list of transforms to
            compose; each transform must accept and return three images.
    Example:
        >>> ComposeMultiChannel([
        >>>     FrameSelectionVarMulti(n_frames=15),
        >>>     RescaleIntensityMulti(out_range=(0, 255)),
        >>> ])
    """
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img1, img2, img3):
for t in self.transforms:
img1, img2, img3 = t(img1, img2, img3)
return img1, img2, img3
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
##############################################################################
# SINGLE Series Transforms (to be used on flow_250_*_MAG)
##############################################################################
############################
# Preprocessing Transforms
############################
def compose_preprocessing(preprocessing):
"""
Compose a preprocessing transform to be performed.
Params
------
preprocessing : dict
- dictionary defining all preprocessing steps to be taken with their
values
e.g. {"FrameSelector" : "var",
"Rescale_Intensity" : [0, 255],
"Gamma_Correction" : 2.0}
Return
------
torchvision.transforms.Compose
"""
# Frame Selector
if (preprocessing["FrameSelector"]["name"] == "FrameSelectionVar"):
frame_selector = FrameSelectionVar(n_frames=preprocessing["n_frames"])
else:
frame_selector = FrameSelectionStd(n_frames=preprocessing["n_frames"],
channel=preprocessing["FrameSelector"]["channel"],
epsilon=preprocessing["FrameSelector"]["epsilon"])
# Rescale Intensity
if ("Rescale_Intensity" in preprocessing):
intensity_rescale = RescaleIntensity(out_range=tuple(preprocessing["Rescale_Intensity"]))
else:
intensity_rescale = NullTransform()
# Gamma Correction
if ("Gamma_Correction" in preprocessing):
gamma_correct = GammaCorrection(gamma=preprocessing["Gamma_Correction"]["gamma"],
intensity=preprocessing["Gamma_Correction"]["intensity"])
else:
gamma_correct = NullTransform()
return Compose([frame_selector, intensity_rescale, gamma_correct])
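# Illustrative usage sketch (assumption: `series` is a (frames, H, W) numpy
# array; with only the mandatory FrameSelector key supplied, the intensity
# and gamma steps fall back to NullTransform):
#
#   transform = compose_preprocessing({"n_frames": 15,
#                                      "FrameSelector": {"name": "FrameSelectionVar"}})
#   frames = transform(series)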
###########################
# Augmentation Transforms
###########################
def compose_augmentation(augmentations, seed=1234):
"""
Compose an augmentation transform to be performed.
Params
------
augmentations : dict
- dictionary defining all augmentation steps to be taken with their
values
e.g.
{
"RandomCrop" : {
"size" : 28,
"padding" : 12
},
"RandomRotation" : {
"degrees" : 25
},
"RandomTranslation" : {
"translate" : (0.2, 0.8)
},
"RandomShear" : {
"shear" : 12.5
},
"RandomAffine" : {
"degrees" : 5,
"translate" : (0.5, 0.5),
"scale" : 0.8,
"shear" : 15.0
},
"Randomize" : 0
}
Return
------
torchvision.transforms.Compose (ordered transforms)
OR
torchvision.transforms.RandomOrder (randomly ordered transforms)
"""
    # Padding
    if ("Pad" in augmentations):
        pad = Pad(
            padding=augmentations["Pad"].get("padding", 0),
            fill=augmentations["Pad"].get("fill", 0),
            padding_mode=augmentations["Pad"].get("padding_mode", 'constant'))
    else:
        pad = NullAugmentation()
    # Random Horizontal Flip
    if ("RandomHorizontalFlip" in augmentations):
        random_horizontal = RandomHorizontalFlip(
            p=augmentations["RandomHorizontalFlip"].get("probability", 0.5),
            seed=seed)
    else:
        random_horizontal = NullAugmentation()
    # Random Vertical Flip
    if ("RandomVerticalFlip" in augmentations):
        random_vertical = RandomVerticalFlip(
            p=augmentations["RandomVerticalFlip"].get("probability", 0.5),
            seed=seed)
    else:
        random_vertical = NullAugmentation()
    # Random Cropping
    if ("RandomCrop" in augmentations):
        random_crop = RandomCrop(
            augmentations["RandomCrop"]["size"],
            padding=augmentations["RandomCrop"].get("padding", 0),
            seed=seed)
    else:
        random_crop = NullAugmentation()
    # Random Rotation
    if ("RandomRotation" in augmentations):
        random_rotation = RandomRotation(
            augmentations["RandomRotation"]["degrees"],
            resample=augmentations["RandomRotation"].get("resample", False),
            center=augmentations["RandomRotation"].get("center", None),
            seed=seed)
    else:
        random_rotation = NullAugmentation()
    # Random Translation
    if ("RandomTranslation" in augmentations):
        random_translation = RandomTranslation(
            augmentations["RandomTranslation"]["translate"],
            resample=augmentations["RandomTranslation"].get("resample", False),
            seed=seed)
    else:
        random_translation = NullAugmentation()
    # Random Shear
    if ("RandomShear" in augmentations):
        random_shear = RandomShear(
            augmentations["RandomShear"]["shear"],
            resample=augmentations["RandomShear"].get("resample", False),
            seed=seed)
    else:
        random_shear = NullAugmentation()
    # Random Affine
    if ("RandomAffine" in augmentations):
        random_affine = RandomAffine(
            augmentations["RandomAffine"]["degrees"],
            translate=augmentations["RandomAffine"].get("translate", None),
            scale=augmentations["RandomAffine"].get("scale", None),
            shear=augmentations["RandomAffine"].get("shear", None),
            resample=augmentations["RandomAffine"].get("resample", False),
            fillcolor=augmentations["RandomAffine"].get("fillcolor", 0),
            seed=seed)
    else:
        random_affine = NullAugmentation()
    # Apply the augmentations in a random order when "Randomize" is set.
    # NOTE: both branches of the original "PixelRange" check returned the
    # same transform list, so the check has been collapsed; dict.get()
    # replaces the bare try/except used for the missing-key case.
    if augmentations.get("Randomize"):
        return RandomOrder(
            [random_crop, random_rotation, random_translation,
             random_shear, random_affine])
return Compose([pad, random_horizontal, random_vertical, random_crop,
random_rotation, random_translation, random_shear,
random_affine])
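# Usage sketch (added for illustration; the config values are hypothetical).
# Any documented key may be omitted, in which case that step becomes a
# no-op NullAugmentation.
def _demo_compose_augmentation():
    import numpy as np
    augmentations = {
        "RandomCrop": {"size": 28, "padding": 12},
        "RandomRotation": {"degrees": 25},
    }
    transform = compose_augmentation(augmentations, seed=1234)
    series = (np.random.rand(6, 48, 48) * 255).astype(np.uint8)  # [frames, H, W]
    return transform(series)  # -> np.array of shape (6, 28, 28)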
##############################################################################
# Postprocessing Transforms
##############################################################################
def compose_postprocessing(postprocessing):
"""
Compose a postprocessing transform to be performed.
Params
------
postprocessing : dict
        - dictionary defining all postprocessing steps to be taken with their
values
e.g. {"Name" : "RescaleIntensity"}
OR
{"Name" : "StdNormalize"}
Return
------
torchvision.transforms.Compose
"""
if (postprocessing["Name"] == "StdNormalize"):
postprocess = StdNormalize()
else:
postprocess = RescaleIntensity(out_range=(0.0, 1.0))
return Compose([postprocess])
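# Usage sketch (added for illustration): "Name" selects the postprocessing
# op; any other value falls back to rescaling into [0, 1].
def _demo_compose_postprocessing():
    import numpy as np
    transform = compose_postprocessing({"Name": "StdNormalize"})
    series = np.random.rand(6, 48, 48)
    return transform(series)  # each frame has mean ~0 and std ~1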
##############################################################################
# MULTIPLE Series Transforms (to be used on ALL flow_250_* series)
##############################################################################
############################
# Preprocessing Transforms
############################
def compose_preprocessing_multi(preprocessing):
"""
Compose a preprocessing transform to be performed on MULTI series.
Params
------
preprocessing : dict
- dictionary defining all preprocessing steps to be taken with their
values
e.g. {"FrameSelector" : "var",
"Rescale_Intensity" : [0, 255],
"Gamma_Correction" : 2.0}
Return
------
    ComposeMultiChannel (ordered transforms applied to all three series)
"""
    # Frame Selector
    if (preprocessing["FrameSelector"]["name"] == "FrameSelectionVarMulti"):
        frame_selector = FrameSelectionVarMulti(n_frames=preprocessing["n_frames"])
    else:
        # Guard against unknown selector names; without this branch,
        # frame_selector would be undefined in the return below.
        frame_selector = NullTransformMulti()
# Rescale Intensity
if ("RescaleIntensityMulti" in preprocessing):
intensity_rescale = RescaleIntensityMulti(out_range=tuple(preprocessing["RescaleIntensityMulti"]))
else:
intensity_rescale = NullTransformMulti()
return ComposeMultiChannel([frame_selector, intensity_rescale])
#############################
# Postprocessing Transforms
#############################
def compose_postprocessing_multi(postprocessing):
"""
Compose a postprocessing transform to be performed on MULTI series.
Params
------
postprocessing : dict
        - dictionary defining all postprocessing steps to be taken with their
values
e.g. {"Name" : "RescaleIntensity"}
OR
{"Name" : "StdNormalize"}
Return
------
    ComposeMultiChannel (ordered transforms applied to all three series)
"""
if (postprocessing["Name"] == "StdNormalizeMulti"):
postprocess = StdNormalizeMulti()
else:
postprocess = RescaleIntensityMulti(out_range=(0.0, 1.0))
return ComposeMultiChannel([postprocess])
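# Usage sketch (added for illustration) for the MULTI-series pipeline. It
# assumes ComposeMultiChannel threads the three series through each
# transform together; the config values are hypothetical.
def _demo_multi_series_pipeline():
    import numpy as np
    pre = compose_preprocessing_multi({
        "FrameSelector": {"name": "FrameSelectionVarMulti"},
        "n_frames": 6,
        "RescaleIntensityMulti": [0, 255],
    })
    post = compose_postprocessing_multi({"Name": "StdNormalizeMulti"})
    s1, s2, s3 = (np.random.rand(30, 48, 48) for _ in range(3))
    s1, s2, s3 = pre(s1, s2, s3)
    return post(s1, s2, s3)  # three (6, 48, 48) standardized arrays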
| ukb-cardiac-mri-master | ukb/transforms/__init__.py |
"""
Custom preprocessing transformation functions for video/sequential frame
MRI data from the UK Biobank
"""
import numpy as np
from skimage.exposure import rescale_intensity
from torchvision.transforms import Lambda
class NullTransform(Lambda):
"""
Create a null transformation.
This is to be used when a given augmentation is not selected in the config
so that it simply returns the input.
"""
def __init__(self):
"""
Instantiate lambda x: x.
Params
------
None
"""
super(NullTransform, self).__init__(lambda x: x)
class FrameSelectionStd(object):
"""
Select subset of MRI frames based on pixel variance
Assumes NUM_FRAMES X WIDTH X HEIGHT tensors
TODO: Setup for 3 channel images
"""
def __init__(self, n_frames=15, channel=1, epsilon=0.05):
"""
:param n_frames:
:param channel:
:param epsilon:
"""
self.n_frames = n_frames
self.channel = channel
self.epsilon = epsilon
def std_by_frame(self, seq, normalize=True):
"""
Compute standard deviation by frame
:param seq:
:param normalize:
:return:
"""
# z-score transform frames
z = (seq - np.mean(seq)) / np.std(seq) if normalize else seq
# standard deviation per frame
std = [np.std(z[i]) for i in range(seq.shape[0])]
return [v - min(std) for v in std]
def select_frames(self, seq, epsilon=0.05):
"""
Select a contiguous subset of frames based on each frame's
overall pixel standard deviation. Determine cut points based
on the first set of inflection points.
P2
/\
/ \
__/ \__/\____
P1 P3
:param seq:
:param epsilon:
:return:
"""
std = self.std_by_frame(seq)
# threshold SD
std = [v if v > epsilon else 0 for v in std]
# find inflection points
signs = [np.sign(std[i + 1] - std[i]) for i in range(len(std) - 1)]
# skip if first point is no slope or negative
inf_pnts = [] if signs[0] <= 0 else [0]
for i in range(1, len(signs)):
if signs[i] != signs[i - 1]:
inf_pnts.append(i)
if len(inf_pnts) < 3:
raise ValueError("No inflection points found")
return (inf_pnts[0], inf_pnts[2])
    def __call__(self, sample):
        if (self.n_frames == 30):
            return sample
        # Only the window start is used; the end is re-derived so that the
        # returned clip is always exactly n_frames long.
        i, _ = self.select_frames(sample, self.epsilon)
        j = i + self.n_frames
        return sample[i:j,...]
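# Quick sanity check (added for illustration) for the inflection-point
# selector above: per-frame variance rises then falls, so a 3-frame window
# starting at the first inflection point is returned.
def _demo_frame_selection_std():
    np.random.seed(0)
    seq = np.array([np.random.rand(32, 32) * s
                    for s in [0.1, 0.5, 1.0, 0.5, 0.1, 0.1, 0.1]])
    return FrameSelectionStd(n_frames=3)(seq)  # shape (3, 32, 32)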
class FrameSelectionVar():
"""
Frame selector class.
Select the N best frames from a series to use for classification.
    In this case, the N best frames are the "brightest" sequential frames:
    those with the most variance between dark and light pixels, centered
    around the brightest frame (the frame with the most variance). Knowing
    how the frames are structured (with a lot of noise), we know that the
    best frames are where the noise dies down and the concentration is on
    the aortic valve. With pixel intensities around the valve going up and
    intensities away from the valve going down, the largest pixel variance
    occurs in these frames.
"""
def __init__(self, n_frames=6):
"""
Class initialization function.
Params
------
        n_frames : int
            - number of frames to select
"""
self.N = n_frames
def __call__(self, seq):
"""
Select the BEST frames from the given series.
Params
------
        seq : np.array
            - numpy array of the series of DICOM images
        Return
        ------
        np.array
            - array of the best (sequential) frames
"""
if (self.N == seq.shape[0]):
return seq
# Otherwise find correct frames to output
var = [fr.var() for fr in seq]
varDict = dict((i, fr.var()) for i, fr in enumerate(seq))
frameIndx = [np.argmax(var)]
low_, high_ = frameIndx[-1]-1, frameIndx[-1]+1
if (self.N > 1):
for i in range(self.N-1):
if (low_ >= 0 and high_ <= len(seq)-1):
if (varDict[low_] > varDict[high_]):
frameIndx.append(low_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
else:
frameIndx.append(high_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
elif (low_ == -1):
frameIndx.append(high_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
else:
frameIndx.append(low_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
return seq[sorted(frameIndx)]
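# Usage sketch (added for illustration): the window grows outward from the
# single highest-variance frame, preferring the higher-variance neighbor.
def _demo_frame_selection_var():
    np.random.seed(0)
    seq = np.stack([np.random.rand(32, 32) * s for s in (1, 2, 5, 3, 1)])
    return FrameSelectionVar(n_frames=3)(seq)  # frames 1, 2 and 3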
class RescaleIntensity():
"""Rescale pixel values of a DICOM Series so that they span low-high."""
def __init__(self, out_range=(0.0,255.0)):
"""
Class initialization function.
Params
------
        out_range : tuple
            - (low, high) range the rescaled pixel values will span
"""
self.out_range = out_range
def __call__(self, series):
"""
Execute normalization for the given series.
Params
------
        series : np.array
            - DICOM Series as an np.array
Return
------
np.array
- new normalized series
"""
return np.array(
[rescale_intensity(1.0*frame, out_range=self.out_range) for frame in series])
class GammaCorrection():
"""Enhance Gray Scale Levels of a DICOM Series frame."""
def __init__(self, gamma=2.0, intensity=255.0):
"""
Class initialization function.
Params
------
gamma : float
            - gamma correction amount
        intensity : float
            - maximum output intensity after correction
"""
assert isinstance(gamma, (int, float))
self.gamma = gamma
self.intensity = intensity
def __call__(self, series):
"""
Execute gamma correction for the entire series.
Params
------
series : np.array
- DICOM Series of images as an np.array
Return
------
np.array
- new gamma corrected series
"""
return np.array([self.intensity*(1.0*frame/frame.max())**self.gamma for frame in series])
class StdNormalize(object):
"""Standard Deviation Normalization mu = 0, std = 1.0."""
def __call__(self, series):
"""
Execute std normalization for each individual image in the series.
Params
------
series : np.array
- series of images
Return
------
stdNorm series
- standard normalized series
"""
stdSeries = []
for img in series:
stdSeries.append((img - img.mean())/img.std())
return np.array(stdSeries)
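# Usage sketch (added for illustration) chaining the transforms above by
# hand on a synthetic uint8 series:
def _demo_ukbb_transforms():
    series = (np.random.rand(6, 32, 32) * 255).astype(np.uint8)
    series = RescaleIntensity(out_range=(0.0, 255.0))(series)
    series = GammaCorrection(gamma=2.0, intensity=255.0)(series)
    return StdNormalize()(series)  # per-frame zero mean, unit variance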
| ukb-cardiac-mri-master | ukb/transforms/ukbb.py |
"""
Custom preprocessing transformation functions for video/sequential frame
MRI data from the UK Biobank
TO BE USED ON MULTI SERIES INPUTS
"""
import numpy as np
from skimage.exposure import rescale_intensity
class NullTransformMulti():
"""
Create a null transformation (for multiple series inputs).
This is to be used when a given augmentation is not selected in the config
so that it simply returns the input.
"""
def __call__(self, series1, series2, series3):
"""
Do nothing and return the same series passed in.
Params
------
        series1 : np.array
            - numpy array of the MAIN series of DICOM images
        series2 : np.array
            - numpy array of the SUB series of DICOM images
        series3 : np.array
            - numpy array of the SUB series of DICOM images
        Return
        ------
        series1, series2, series3
            - same as input
"""
return series1, series2, series3
class FrameSelectionVarMulti():
"""
Frame selector class (for multiple series inputs).
Select the N best frames from a series to use for classification.
    In this case, the N best frames are the "brightest" sequential frames:
    those with the most variance between dark and light pixels, centered
    around the brightest frame (the frame with the most variance). Knowing
    how the frames are structured (with a lot of noise), we know that the
    best frames are where the noise dies down and the concentration is on
    the aortic valve. With pixel intensities around the valve going up and
    intensities away from the valve going down, the largest pixel variance
    occurs in these frames.
"""
def __init__(self, n_frames=6):
"""
Class initialization function.
Params
------
        n_frames : int
            - number of frames to select
"""
self.N = n_frames
def __call__(self, seq1, seq2, seq3):
"""
Select the BEST frames from seq1 and get those frames only for all seq.
Params
------
seq1 : np.array
- numpy array of the MAIN series of DICOM images
seq2 : np.array
- numpy array of the SUB series of DICOM images
seq3 : np.array
- numpy array of the SUB series of DICOM images
Return
------
seq1, seq2, seq3
- series of essential (selected) frames (drop the rest)
"""
if (self.N == seq1.shape[0]):
return seq1, seq2, seq3
# Otherwise find correct frames to output
var = [fr.var() for fr in seq1]
varDict = dict((i, fr.var()) for i, fr in enumerate(seq1))
frameIndx = [np.argmax(var)]
low_, high_ = frameIndx[-1]-1, frameIndx[-1]+1
if (self.N > 1):
for i in range(self.N-1):
if (low_ >= 0 and high_ <= len(seq1)-1):
if (varDict[low_] > varDict[high_]):
frameIndx.append(low_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
else:
frameIndx.append(high_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
elif (low_ == -1):
frameIndx.append(high_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
else:
frameIndx.append(low_)
low_ = sorted(frameIndx)[0] - 1
high_ = sorted(frameIndx)[-1] + 1
return seq1[sorted(frameIndx)], seq2[sorted(frameIndx)], seq3[sorted(frameIndx)]
class RescaleIntensityMulti():
"""Rescale pixel values of multiple DICOM Series so that they span low-high."""
def __init__(self, out_range=(0.0,255.0)):
"""
Class initialization function.
Params
------
        out_range : tuple
            - (low, high) range the rescaled pixel values will span
"""
self.out_range = out_range
def __call__(self, series1, series2, series3):
"""
Execute normalization for all of the given series.
NOTE :
series1 is the MAIN series
Params
------
        series1 : np.array
            - MAIN DICOM Series as an np.array
        series2 : np.array
            - SUB DICOM Series as an np.array
        series3 : np.array
            - SUB DICOM Series as an np.array
Return
------
np.array
- new normalized series
"""
return (np.array([rescale_intensity(1.0*frame, out_range=self.out_range) for frame in series1]),
np.array([rescale_intensity(1.0*frame, out_range=self.out_range) for frame in series2]),
np.array([rescale_intensity(1.0*frame, out_range=self.out_range) for frame in series3]))
class StdNormalizeMulti(object):
"""Standard Deviation Normalization for multiple series mu = 0, std = 1.0."""
def __call__(self, series1, series2, series3):
"""
Execute std normalization for each individual image in all of the series.
Params
------
        series1 : np.array
            - MAIN DICOM Series as an np.array
        series2 : np.array
            - SUB DICOM Series as an np.array
        series3 : np.array
            - SUB DICOM Series as an np.array
Return
------
stdNorm series1 series2 and series3
- standard normalized series
"""
stdSeries1, stdSeries2, stdSeries3 = [], [], []
for i, img in enumerate(series1):
stdSeries1.append((img - img.mean())/img.std())
stdSeries2.append((series2[i] - series2[i].mean())/series2[i].std())
stdSeries3.append((series3[i] - series3[i].mean())/series3[i].std())
return np.array(stdSeries1), np.array(stdSeries2), np.array(stdSeries3)
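# Usage sketch (added for illustration): frames are chosen from the
# variance of series1 (the MAIN series) and the SAME indices are applied
# to series2 and series3.
def _demo_multi_series_transforms():
    np.random.seed(0)
    s1, s2, s3 = (np.random.rand(30, 32, 32) for _ in range(3))
    s1, s2, s3 = FrameSelectionVarMulti(n_frames=6)(s1, s2, s3)
    s1, s2, s3 = RescaleIntensityMulti(out_range=(0.0, 255.0))(s1, s2, s3)
    return StdNormalizeMulti()(s1, s2, s3)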
| ukb-cardiac-mri-master | ukb/transforms/multi_series.py |
ukb-cardiac-mri-master | ukb/transforms/debug.py |
|
"""
Custom augmentation transform functions for video/sequential frame
MRI data from the UK Biobank
"""
import math
import random
import numbers
import collections
import numpy as np
from PIL import Image
from torchvision.transforms import functional as F
from torchvision.transforms import Lambda
# Maps PIL resampling filters to printable names. NOTE: several __repr__
# methods below reference this dict, but it was never defined in this
# module (torchvision.transforms defines an equivalent one internally).
_pil_interpolation_to_str = {
    Image.NEAREST: 'PIL.Image.NEAREST',
    Image.BILINEAR: 'PIL.Image.BILINEAR',
    Image.BICUBIC: 'PIL.Image.BICUBIC',
}
##############################################################################
# Helper Functions
##############################################################################
def _numpy_to_PIL(img):
"""
Convert a given numpy array image into a PIL Image.
Params
------
img : np.array
- image to be converted
Return
------
PIL.Image
- Image type of numpy array
"""
return Image.fromarray(img)
def _PIL_to_numpy(img):
"""
Convert a given PIL.Image into a numpy array.
Params
------
img : PIL.Image
- image to be converted
Return
------
np.array
- numpy array of PIL.Image
"""
return np.array(img)
def _get_inverse_affine_matrix(center, angle, translate, scale, shear):
# Helper method to compute inverse matrix for affine transformation
# As it is explained in PIL.Image.rotate
# We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RSS is rotation with scale and shear matrix
# RSS(a, scale, shear) = [ cos(a)*scale -sin(a + shear)*scale 0]
# [ sin(a)*scale cos(a + shear)*scale 0]
# [ 0 0 1]
# Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1
angle = math.radians(angle)
shear = math.radians(shear)
scale = 1.0 / scale
# Inverted rotation matrix with scale and shear
d = math.cos(angle + shear) * math.cos(angle) + math.sin(angle + shear) * math.sin(angle)
matrix = [
math.cos(angle + shear), math.sin(angle + shear), 0,
-math.sin(angle), math.cos(angle), 0
]
matrix = [scale / d * m for m in matrix]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1])
matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1])
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += center[0]
matrix[5] += center[1]
return matrix
def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
"""Apply affine transformation on the image keeping image center invariant
Args:
img (PIL Image): PIL Image to be rotated.
angle ({float, int}): rotation angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
fillcolor (int): Optional fill color for the area outside the transform in the output image.
"""
if not F._is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"Argument translate should be a list or tuple of length 2"
assert scale > 0.0, "Argument scale should be positive"
output_size = img.size
center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
return img.transform(output_size, Image.AFFINE, matrix, resample, fillcolor=fillcolor)
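# Sanity check (added for illustration) for the helpers above: a pure
# translation should shift pixel content without rotating or scaling it.
def _demo_affine_translation():
    img = Image.new('L', (8, 8))
    img.putpixel((4, 4), 255)
    out = affine(img, angle=0, translate=(2, 0), scale=1.0, shear=0)
    return _PIL_to_numpy(out)  # the bright pixel moves from x=4 to x=6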
##############################################################################
# Augmentation Functions
##############################################################################
class NullAugmentation(Lambda):
"""
Create a null augmentation transformation.
This is to be used when a given augmentation is not selected in the config
so that it simply returns the input.
"""
def __init__(self):
"""
Instantiate lambda x: x.
Params
------
None
"""
super(NullAugmentation, self).__init__(lambda x: x)
class RandomCrop(object):
"""Crop a given MRI Image Series at a random location."""
def __init__(self, size, padding=0, seed=1234):
"""
Class initialization function.
Params
------
size : (sequence or int)
- Desired output size of the crop. If size is an int instead
of sequence like (h, w), a square crop (size, size) is made
padding : (int or sequence)
- (Optional) padding on each border of the image
Default is 0, i.e no padding. If a sequence of length 4 is
provided, it is used to pad left, top, right, bottom borders
respectively
"""
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
np.random.seed(seed)
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
        # randint's upper bound is exclusive; +1 matches the inclusive range
        # used by torchvision and avoids a ValueError when h == th or w == tw.
        i = np.random.randint(0, h - th + 1)
        j = np.random.randint(0, w - tw + 1)
return i, j, th, tw
def __call__(self, series):
"""
Execute random cropping of series.
Params
------
series : np.array
MRI Series to be cropped
Return
------
np.array
- Cropped series
"""
# Convert all images in the series to PIL.Image types
PIL_series = [_numpy_to_PIL(img) for img in series]
# Pad all images in the series
if (self.padding > 0):
PIL_series = [F.pad(img, self.padding) for img in PIL_series]
# Find crop params
i, j, h, w = self.get_params(PIL_series[0], self.size)
# Crop the entire series
PIL_series = [F.crop(img, i, j, h, w) for img in PIL_series]
# Convert all images back to numpy array
return np.array([_PIL_to_numpy(img) for img in PIL_series])
class RandomRotation(object):
"""Rotate a MRI Image Series by an angle."""
def __init__(self, degrees, resample=False, center=None, seed=1234):
"""
Class initialization function.
Params
------
degrees : (sequence or float or int):
- Range of degrees to select from. If degrees is a number instead
of sequence like (min, max), the range of degrees will be
(-degrees, +degrees)
resample: (PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
center : (2-tuple, optional)
- Optional center of rotation. Origin is the upper left corner.
Default is the center of the image.
"""
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
if (resample):
if (resample == 'NEAREST'):
self.resample = Image.NEAREST
elif (resample == 'BILINEAR'):
self.resample = Image.BILINEAR
else:
self.resample = Image.BICUBIC
else:
self.resample = Image.BILINEAR
self.center = center
np.random.seed(seed)
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, series):
"""
Execute random cropping of series.
Params
------
series : np.array
MRI Series to be rotated
Return
------
np.array
- Rotated series
"""
# Randomly select angle to rotate the series
angle = self.get_params(self.degrees)
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Rotate all images in the series
        # NOTE: center must be passed by keyword; passed positionally it
        # would land in the `expand` slot of F.rotate.
        PIL_series = [F.rotate(img, angle, self.resample, center=self.center) for img in PIL_series]
# Return numpy array
return np.array([_PIL_to_numpy(img) for img in PIL_series])
    def __repr__(self):
        # NOTE: the original also appended self.expand, which is never set
        # on this class and raised an AttributeError.
        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
        format_string += ', resample={0}'.format(self.resample)
        if self.center is not None:
            format_string += ', center={0}'.format(self.center)
        format_string += ')'
        return format_string
class RandomTranslation(object):
"""Random translation (shifting) of a MRI Image Series."""
def __init__(self, translate=None, resample=False, seed=1234):
"""
Class initialization function.
Params
------
translate : (tuple, optional)
- tuple of maximum absolute fraction for horizontal and vertical
translations. For example translate=(a, b), then horizontal
shift is randomly sampled in the range
-img_width * a < dx < img_width * a
and vertical shift is randomly sampled in the range
-img_height * b < dy < img_height * b
Will not translate by default.
resample: (PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC)
- An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to
PIL.Image.NEAREST.
"""
if translate is not None:
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"translate should be a list or tuple and it must be of length 2."
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if (resample):
if (resample == 'NEAREST'):
self.resample = Image.NEAREST
elif (resample == 'BILINEAR'):
self.resample = Image.BILINEAR
else:
self.resample = Image.BICUBIC
else:
self.resample = Image.BILINEAR
np.random.seed(seed)
@staticmethod
def get_params(translate, img_size):
"""Get parameters for translation
Returns:
sequence: params to be passed to the translation
"""
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(np.random.uniform(-max_dx, max_dx)),
np.round(np.random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
angle = 0.0
scale = 1.0
shear = 0.0
return angle, translations, scale, shear
def __call__(self, series):
"""
Execute random translation of series.
Params
------
series : np.array
MRI Series to be translated
Return
------
np.array
- Translated series
"""
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Get random params for Affine Transform
ret = self.get_params(self.translate, PIL_series[0].size)
# Compute Affine Transform for all images in the series
PIL_series = [affine(img, *ret, resample=self.resample, fillcolor=0) for img in PIL_series]
# Return numpy array
return np.array([_PIL_to_numpy(img) for img in PIL_series])
    def __repr__(self):
        # NOTE: rewritten so `s` is always defined (the original left it
        # unbound when translate was None) and the class name is included.
        s = '{name}(translate={translate}'
        if self.resample > 0:
            s += ', resample={resample}'
        s += ')'
        d = dict(self.__dict__)
        d['resample'] = _pil_interpolation_to_str[d['resample']]
        return s.format(name=self.__class__.__name__, **d)
class RandomShear(object):
"""Random shear (stretching) of a MRI Image Series."""
def __init__(self, shear=None, resample=False, seed=1234):
"""
Class initialization function.
Params
------
shear : (sequence or float or int, optional)
- Range of degrees to select from. If degrees is a number instead
of sequence like (min, max), the range of degrees will be
(-degrees, +degrees). Will not apply shear by default
resample: (PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC)
- An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to
PIL.Image.NEAREST.
"""
if shear is not None:
if isinstance(shear, numbers.Number):
if shear < 0:
raise ValueError("If shear is a single number, it must be positive.")
self.shear = (-shear, shear)
else:
assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
"shear should be a list or tuple and it must be of length 2."
self.shear = shear
else:
self.shear = shear
if (resample):
if (resample == 'NEAREST'):
self.resample = Image.NEAREST
elif (resample == 'BILINEAR'):
self.resample = Image.BILINEAR
else:
self.resample = Image.BICUBIC
else:
self.resample = Image.BILINEAR
np.random.seed(seed)
@staticmethod
def get_params(shears):
"""Get parameters for shearing
Returns:
sequence: params to be passed to the shearing transformation
"""
angle = 0.0
translations = (0, 0)
scale = 1.0
if shears is not None:
shear = np.random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear
def __call__(self, series):
"""
Execute random shear transform of series.
Params
------
series : np.array
MRI Series to be transformed
Return
------
np.array
- Shear transformed series
"""
# Get random params for Affine Transform
ret = self.get_params(self.shear)
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Compute Affine Transform for all images in the series
PIL_series = [affine(img, *ret, resample=self.resample, fillcolor=0) for img in PIL_series]
# Return numpy array
return np.array([_PIL_to_numpy(img) for img in PIL_series])
    def __repr__(self):
        # NOTE: rewritten so `s` is always defined (the original left it
        # unbound when shear was None) and the class name is included.
        s = '{name}(shear={shear}'
        if self.resample > 0:
            s += ', resample={resample}'
        s += ')'
        d = dict(self.__dict__)
        d['resample'] = _pil_interpolation_to_str[d['resample']]
        return s.format(name=self.__class__.__name__, **d)
class RandomAffine(object):
"""Random affine transformation of a MRI Image Series."""
def __init__(self, degrees, translate=None, scale=None, shear=None,
resample=False, fillcolor=0, seed=1234):
"""
Class initialization function.
Args:
degrees : (sequence or float or int)
- Range of degrees to select from. If degrees is a number instead
of sequence like (min, max), the range of degrees will be
              (-degrees, +degrees). Set to 0 to deactivate rotations.
translate : (tuple, optional)
- tuple of maximum absolute fraction for horizontal and vertical
translations. For example translate=(a, b), then horizontal
shift is randomly sampled in the range
-img_width * a < dx < img_width * a
and vertical shift is randomly sampled in the range
-img_height * b < dy < img_height * b
Will not translate by default.
scale : (tuple, optional)
- scaling factor interval, e.g (a, b), then scale is randomly
sampled from the range
a <= scale <= b
Will keep original scale by default.
shear : (sequence or float or int, optional)
- Range of degrees to select from. If degrees is a number instead
of sequence like (min, max), the range of degrees will be
(-degrees, +degrees). Will not apply shear by default
resample: (PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC)
- An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to
PIL.Image.NEAREST.
fillcolor : (int)
- Optional fill color for the area outside the transform in the
output image.
"""
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
"degrees should be a list or tuple and it must be of length 2."
self.degrees = degrees
if translate is not None:
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"translate should be a list or tuple and it must be of length 2."
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
"scale should be a list or tuple and it must be of length 2."
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
if shear < 0:
raise ValueError("If shear is a single number, it must be positive.")
self.shear = (-shear, shear)
else:
assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
"shear should be a list or tuple and it must be of length 2."
self.shear = shear
else:
self.shear = shear
if (resample):
if (resample == 'NEAREST'):
self.resample = Image.NEAREST
elif (resample == 'BILINEAR'):
self.resample = Image.BILINEAR
else:
self.resample = Image.BICUBIC
else:
self.resample = Image.BILINEAR
self.fillcolor = fillcolor
np.random.seed(seed)
@staticmethod
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = np.random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(np.random.uniform(-max_dx, max_dx)),
np.round(np.random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = np.random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
shear = np.random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear
def __call__(self, series):
"""
Execute random affine transform of series.
Params
------
series : np.array
MRI Series to be transformed
Return
------
np.array
- Affine transformed series
"""
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Get random params for Affine Transform
ret = self.get_params(self.degrees, self.translate, self.scale,
self.shear, PIL_series[0].size)
# Compute Affine Transform for all images in the series
PIL_series = [affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor) for img in PIL_series]
# Return numpy array
return np.array([_PIL_to_numpy(img) for img in PIL_series])
def __repr__(self):
s = '{name}(degrees={degrees}'
if self.translate is not None:
s += ', translate={translate}'
if self.scale is not None:
s += ', scale={scale}'
if self.shear is not None:
s += ', shear={shear}'
if self.resample > 0:
s += ', resample={resample}'
if self.fillcolor != 0:
s += ', fillcolor={fillcolor}'
s += ')'
d = dict(self.__dict__)
d['resample'] = _pil_interpolation_to_str[d['resample']]
return s.format(name=self.__class__.__name__, **d)
class Pad(object):
"""Pad the given Series on all sides with the given "pad" value.
Args:
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
constant: pads with a constant value, this value is specified with fill
edge: pads with the last value at the edge of the image
reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, padding, fill=0, padding_mode='constant'):
assert isinstance(padding, (numbers.Number, tuple))
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
        # NOTE: collections.Sequence was removed in Python 3.10; a plain
        # tuple/list check is equivalent given the asserts above.
        if isinstance(padding, (tuple, list)) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, series):
"""
Execute padding for series.
Params
------
series : np.array
MRI Series to be padded
Return
------
np.array
- Padded series
"""
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Return numpy array
        # NOTE: padding_mode is now forwarded; the original silently dropped it.
        return np.array([_PIL_to_numpy(F.pad(img, self.padding, self.fill, self.padding_mode))
                         for img in PIL_series])
def __repr__(self):
return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
format(self.padding, self.fill, self.padding_mode)
class RandomHorizontalFlip(object):
"""Horizontally flip the given Series randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5, seed=1234):
self.p = p
np.random.seed(seed)
def __call__(self, series):
"""
Execute random horizontal flip of series.
Params
------
series : np.array
MRI Series to be padded
Return
------
np.array
- Horizontally flipped series (or not)
"""
if np.random.random() < self.p:
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Compute Flip
PIL_series = [F.hflip(img) for img in PIL_series]
# Convert PIL series to numpy series
series = np.array([_PIL_to_numpy(img) for img in PIL_series])
return series
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(object):
"""Vertically flip the given Series randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5, seed=1234):
self.p = p
np.random.seed(seed)
def __call__(self, series):
"""
Execute random vertical flip of series.
Params
------
series : np.array
MRI Series to be padded
Return
------
np.array
- Vertically flipped series (or not)
"""
if np.random.random() < self.p:
# Convert numpy series to PIL series
PIL_series = [_numpy_to_PIL(img) for img in series]
# Compute Flip
PIL_series = [F.vflip(img) for img in PIL_series]
# Convert PIL series to numpy series
series = np.array([_PIL_to_numpy(img) for img in PIL_series])
return series
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
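# Usage sketch (added for illustration) applying a few of the augmentations
# above to a synthetic uint8 series; each maps np.array -> np.array.
def _demo_augmentations():
    series = (np.random.rand(6, 48, 48) * 255).astype(np.uint8)
    series = Pad(padding=4)(series)                        # 48x48 -> 56x56
    series = RandomHorizontalFlip(p=1.0)(series)           # always flips
    series = RandomCrop(32, seed=1234)(series)             # -> 32x32
    series = RandomRotation(degrees=10, seed=1234)(series)
    return RandomAffine(degrees=5, translate=(0.1, 0.1), seed=1234)(series)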
| ukb-cardiac-mri-master | ukb/transforms/augmentations.py |
ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/__init__.py |
|
import coral_sa as csa
def is_square(width, height):
if width == height:
return True
else:
return False
def is_large(width, height):
if width * height > 100:
return True
else:
return False
def is_small(width, height):
if width * height < 10:
return True
# Just for testing.
elif width * height <= 30:
return True
elif width * height + height * height >= 30:
return True
else:
return False
tests = [is_square, is_large, is_small]
coral_ast = csa.convert_to_coral_ast(is_small)
vocabs_used = csa.extract_vocabularies(coral_ast)
print "AST:", coral_ast
print "Vocabs used:", vocabs_used
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/old_sa/tests.py |
import coral_types as ct
# A basic node in an AST.
# This is meant to be subclassed.
class Expression(object):
def __repr__(self):
return str(self)
def children(self):
return NotImplementedError
def walk(self, f):
"""
        Walks the AST, applying the function f to each node in post-order
        (children are visited before their parent).
"""
for child in self.children():
child.walk(f)
f(self)
def __str__(self):
return "???"
class LabelingFunction(Expression):
def __init__(self, body, vocabs):
# A list of subexpressions.
self.body = body
# A list of vocabularies, identified via the vocab name.
# Each latent vocabulary is derived from these vocabularies.
self.vocabs = vocabs
        self.ty = ct.LabelingFunctionType()
def children(self):
return self.body
    def __str__(self):
        # Both branches of the original isinstance check returned the same
        # expression, so the check has been collapsed.
        return str(self.body)
## Literals
class VocabLiteral(Expression):
def __init__(self, name):
self.name = name
# The type is just the name.
self.ty = ct.VocabType([self.name])
def children(self):
return []
def __str__(self):
return self.name
class TrueLabelLiteral(Expression):
def __init__(self):
self.ty = ct.LabelType()
def children(self):
return []
def __str__(self):
return "true"
class FalseLabelLiteral(Expression):
def __init__(self):
self.ty = ct.LabelType()
def children(self):
return []
def __str__(self):
return "false"
class AbstainLabelLiteral(Expression):
def __init__(self):
self.ty = ct.LabelType()
def children(self):
return []
def __str__(self):
return "abstain"
class PythonLiteral(Expression):
"""
A catch-all for sub-expressions we don't care about.
"""
def __init__(self, value):
self.value = value
self.ty = ct.PythonBasicExpressionType()
def children(self):
return []
def __str__(self):
return str(self.value)
## Etc Expressions important to Coral.
class IfThen(Expression):
def __init__(self, cond, true_branch):
self.cond = cond
self.true_branch = true_branch
self.ty = ct.PythonBasicExpressionType()
def children(self):
return [self.cond, self.true_branch]
def __str__(self):
return "if ({}) then ({})".format(self.cond, self.true_branch)
class Return(Expression):
def __init__(self, value):
self.value = value
# Has no sensible return type.
self.ty = ct.PythonBasicExpressionType()
def children(self):
return [self.value]
def __str__(self):
return "return {}".format(self.value)
## Basic Binary Operations
class BinOp(Expression):
def __init__(self, left, right):
self.left = left
self.right = right
left_isvocab = isinstance(self.left.ty, ct.VocabType)
right_isvocab = isinstance(self.right.ty, ct.VocabType)
if left_isvocab and right_isvocab:
# This is a new latent vocabulary composed of existing vocabularies.
self.ty = ct.VocabType(self.left.ty.deps + self.right.ty.deps)
elif left_isvocab:
self.ty = ct.VocabType(self.left.ty.deps)
        elif right_isvocab:
self.ty = ct.VocabType(self.right.ty.deps)
else:
self.ty = ct.PythonBasicExpressionType()
def children(self):
return [self.left, self.right]
def printer(self, operator, prefix=False):
if prefix:
return "{}({}, {})".format(operator, self.left, self.right)
else:
return "({} {} {})".format(self.left, operator, self.right)
class Add(BinOp):
def __str__(self):
return self.printer("+")
class Subtract(BinOp):
def __str__(self):
return self.printer("-")
class Multiply(BinOp):
def __str__(self):
return self.printer("*")
class Divide(BinOp):
def __str__(self):
return self.printer("/")
class Equal(BinOp):
def __str__(self):
return self.printer("==")
class GreaterThan(BinOp):
def __str__(self):
return self.printer(">")
class GreaterThanOrEqual(BinOp):
def __str__(self):
return self.printer(">=")
class LessThan(BinOp):
def __str__(self):
return self.printer("<")
class LessThanOrEqual(BinOp):
def __str__(self):
return self.printer("<=")
## Meta doesn't handle dictionaries, but they're probably important for labeling functions...
# TODO
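# Usage sketch (added for illustration): build the AST for
# "if (area > 10): return True" by hand and walk it. Note that walk() is
# post-order (children are visited before their parents).
def _demo_coral_ast():
    cond = GreaterThan(VocabLiteral("area"), PythonLiteral(10))
    lf = LabelingFunction([IfThen(cond, Return(TrueLabelLiteral()))], ["area"])
    visited = []
    lf.walk(lambda node: visited.append(type(node).__name__))
    return lf, visited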
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/old_sa/coral_sa/coral_ast.py |
import ast
import meta
import coral_ast as cast
import coral_types as ct
# Public API
# Turn this on for printing.
verbose = False
def vprint(a):
global verbose
if verbose:
        print(a)
def convert_to_coral_ast(func):
"""
Converts a Python function to a Coral AST.
"""
# A Python AST.
tree = meta.decompiler.decompile_func(func)
vprint(ast.dump(tree))
coral_tree = _generate_coral_ast(tree)
vprint(coral_tree)
return coral_tree
# Module-level accumulator used by the walk callback below; not thread-safe.
vocabs = []
def extract_vocabularies(coral_tree):
global vocabs
assert isinstance(coral_tree, cast.LabelingFunction)
vocabs = [str(v) for v in coral_tree.vocabs]
def _extract_vocabs(node):
global vocabs
if isinstance(node.ty, ct.VocabType):
vocabs.append(str(node.ty))
coral_tree.walk(_extract_vocabs)
# Uniquify
return list(set(list(vocabs)))
### Private Stuff.
def _generate_coral_ast(node, names={}):
"""
Generates a Coral AST given a Python AST.
"""
if isinstance(node, ast.FunctionDef):
args = [name.id for name in node.args.args]
for arg in args:
names[arg] = cast.VocabLiteral(arg)
body = [_generate_coral_ast(b, names) for b in node.body]
expr = cast.LabelingFunction(body, args)
return expr
if isinstance(node, ast.Return):
return cast.Return(_generate_coral_ast(node.value, names))
if isinstance(node, ast.If):
cond = _generate_coral_ast(node.test, names)
true_branch = _generate_coral_ast(node.body[0], names)
expr = cast.IfThen(cond, true_branch)
vprint(expr)
return expr
if isinstance(node, ast.Compare):
left = _generate_coral_ast(node.left, names)
right = _generate_coral_ast(node.comparators[0], names)
op = node.ops[0]
if isinstance(op, ast.Eq):
expr = cast.Equal(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.Gt):
expr = cast.GreaterThan(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.Lt):
expr = cast.LessThan(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.LtE):
expr = cast.LessThanOrEqual(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.GtE):
expr = cast.GreaterThanOrEqual(left, right)
vprint(expr)
return expr
    if isinstance(node, ast.BinOp):
        if isinstance(node.op, ast.Add):
            expr = cast.Add(_generate_coral_ast(node.left, names),
                            _generate_coral_ast(node.right, names))
        elif isinstance(node.op, ast.Mult):
            expr = cast.Multiply(_generate_coral_ast(node.left, names),
                                 _generate_coral_ast(node.right, names))
        elif isinstance(node.op, ast.Sub):
            expr = cast.Subtract(_generate_coral_ast(node.left, names),
                                 _generate_coral_ast(node.right, names))
        else:
            # The original non-elif chain left `expr` unbound for any other
            # operator, raising a NameError below.
            raise NotImplementedError("Unsupported binary op: %s" % node.op)
        vprint(expr)
        return expr
if isinstance(node, ast.Name):
if node.id == "True":
expr = cast.TrueLabelLiteral()
elif node.id == "False":
expr = cast.FalseLabelLiteral()
elif node.id == "None":
expr = cast.AbstainLabelLiteral()
else:
expr = names[node.id]
vprint(expr)
return expr
if isinstance(node, ast.Num):
return cast.PythonLiteral(node.n)
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/old_sa/coral_sa/coral_funcs.py |
# These are directly accessible.
from coral_funcs import *
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/old_sa/coral_sa/__init__.py |
class Type(object):
pass
class LabelingFunctionType(Type):
def __str__(self):
return "LF"
class VocabType(Type):
def __init__(self, deps):
# This vocabulary is dependent on the given list of vocabulary types.
self.deps = list(set(deps))
def __str__(self):
return "|".join(self.deps)
class LabelType(Type):
def __str__(self):
return "label"
# TODO: the semantics of threshold types are not yet defined.
class Threshold(Type):
pass
class PythonBasicExpressionType(Type):
# Encapsulates all the Python types we don't care about, e.g.,
# booleans, integers, lists, classes, and so forth....
pass
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/old_sa/coral_sa/coral_types.py |
"""
Subpackage for learning the structures of models.
"""
from .gen_learning import CoralModel
from .structure_learning import CoralDependencySelector | ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/learning/__init__.py |
from numbskull import NumbSkull
from numbskull.inference import *
from numbskull.numbskulltypes import Weight, Variable, Factor, FactorToVar
from numbskull.udf import *
import numpy as np
import random
class CoralModel(object):
def __init__(self, class_prior=False, lf_prior=False, lf_propensity=False, lf_class_propensity=False, seed=271828):
self.class_prior = class_prior
self.lf_prior = lf_prior
self.lf_propensity = lf_propensity
self.lf_class_propensity = lf_class_propensity
self.weights = None
self.rng = random.Random()
self.rng.seed(seed)
def train(self, V, cardinality, L, L_offset, y=None, deps=(), init_acc = 1.0, init_deps=0.0, init_class_prior=-1.0, epochs=100, step_size=None, decay=0.99, reg_param=0.1, reg_type=2, verbose=False,
truncation=10, burn_in=50, timer=None):
n_data = V.shape[0]
step_size = step_size or 1.0 / n_data
reg_param_scaled = reg_param / n_data
# self._process_dependency_graph(L, deps)
weight, variable, factor, ftv, domain_mask, n_edges = self._compile(V, cardinality, L, L_offset, y, deps, init_acc, init_deps) # , init_deps, init_class_prior)
fg = NumbSkull(n_inference_epoch=0, n_learning_epoch=epochs, stepsize=step_size, decay=decay,
reg_param=reg_param_scaled, regularization=reg_type, truncation=truncation,
quiet=(not verbose), verbose=verbose, learn_non_evidence=True, burn_in=burn_in)
fg.loadFactorGraph(weight, variable, factor, ftv, domain_mask, n_edges)
if timer is not None:
timer.start()
fg.learning(out=False)
if timer is not None:
timer.end()
self.weights = fg.factorGraphs[0].weight_value[0][:len(L)]
self.dep_weights = fg.factorGraphs[0].weight_value[0][len(L):]
self.lf_accuracy = 1. / (1. + np.exp(-self.weights[:len(L)]))
# self._process_learned_weights(L, fg)
def marginals(self, V, cardinality, L, L_offset, deps=(), init_acc = 1.0, init_deps=1.0, init_class_prior=-1.0, epochs=100, step_size=None, decay=0.99, verbose=False,
burn_in=50, timer=None):
if self.weights is None:
raise ValueError("Must fit model with train() before computing marginal probabilities.")
y = None
weight, variable, factor, ftv, domain_mask, n_edges = self._compile(V, cardinality, L, L_offset, y, deps, self.weights, self.dep_weights)
fg = NumbSkull(n_inference_epoch=epochs, n_learning_epoch=0, stepsize=step_size, decay=decay,
quiet=(not verbose), verbose=verbose, learn_non_evidence=True, burn_in=burn_in,
sample_evidence=False)
fg.loadFactorGraph(weight, variable, factor, ftv, domain_mask, n_edges)
fg.inference(out=False)
marginals = fg.factorGraphs[0].marginals[:V.shape[0]]
return marginals
def _compile(self, V, cardinality, L, L_offset, y, deps, init_acc, init_deps):
"""
Compiles a generative model based on L and the current labeling function dependencies.
"""
### Error Checking ###
# Check L_offset is valid
index = np.flatnonzero(UdfStart == L_offset)
if len(index) == 0:
raise ValueError("L_offset " + str(L_offset) + " does not correspond to a known application")
if len(index) > 1:
raise ValueError("L_offset " + str(L_offset) + " found multiple times")
index = index[0]
# Check L is the right size
if len(L) != LfCount[index]:
raise ValueError("Wrong number of LFs passed: (" + str(len(L)) + " given and " + str(LfCount[index]) + " in udf.py)")
# Check cardinality of each LF is right
for i in range(len(L)):
if len(L[i]) != UdfCardinality[UdfCardinalityStart[index] + i]:
raise ValueError("LF " + str(i) + " has the wrong cardinality: (" + str(len(L[i])) + " given and " + str(UdfCardinality[UdfCardinalityStart[index] + i]) + " in udf.py)")
# Check that there are enough vocab terms
for i in range(len(L)):
for j in range(len(L[i])):
if L[i][j] >= V.shape[1]:
raise ValueError("LF " + str(i) + " uses vocab " + str(L[i][j]) + " when there are only " + str(V.shape[1]) + " terms")
### Set up factor graph ###
n_data = V.shape[0]
n_vocab = V.shape[1]
n_lf = len(L)
n_weights = n_lf + len(deps)
n_vars = n_data * (n_vocab + 1)
n_factors = n_data * n_weights
n_edges = n_data * (sum([len(l) + 1 for l in L]) + 2 * len(deps))
weight = np.zeros(n_weights, Weight)
variable = np.zeros(n_vars, Variable)
factor = np.zeros(n_factors, Factor)
ftv = np.zeros(n_edges, FactorToVar)
        domain_mask = np.zeros(n_vars, np.bool_)  # np.bool was removed in newer NumPy
#
# Compiles weight matrix
#
for i in range(n_weights):
weight[i]['isFixed'] = False
if i < n_lf:
if type(init_acc) == int or type(init_acc) == float:
weight[i]['initialValue'] = np.float64(init_acc)
else:
weight[i]['initialValue'] = init_acc[i]
else:
if type(init_deps) == int or type(init_deps) == float:
weight[i]['initialValue'] = np.float64(init_deps)
else:
weight[i]['initialValue'] = init_deps[i - n_lf]
#
# Compiles variable matrix
#
# True Label y
for i in range(n_data):
variable[i]['isEvidence'] = False if (y is None) else True
variable[i]['initialValue'] = self.rng.randrange(0, 2) if (y is None) else (1 if y[i] == 1 else 0)
variable[i]["dataType"] = 0
variable[i]["cardinality"] = 2
# Vocabulary
for i in range(n_data):
for j in range(n_vocab):
variable[n_data + i * n_vocab + j]["isEvidence"] = True
variable[n_data + i * n_vocab + j]["initialValue"] = V[i, j]
variable[n_data + i * n_vocab + j]["dataType"] = 0
variable[n_data + i * n_vocab + j]["cardinality"] = cardinality[j]
if V[i, j] >= cardinality[j]:
raise ValueError("Vocab " + str(j) + " contains " + str(V[i, j]) + " even though it has a cardinality of " + str(cardinality[j]))
#
# Compiles factor and ftv matrices
#
index = 0
# Accuracy
for i in range(n_data):
for j in range(n_lf):
factor[i * n_lf + j]["factorFunction"] = L_offset + j
factor[i * n_lf + j]["weightId"] = j
factor[i * n_lf + j]["featureValue"] = 1.0
factor[i * n_lf + j]["arity"] = len(L[j]) + 1
factor[i * n_lf + j]["ftv_offset"] = index
for k in range(len(L[j])):
ftv[index]["vid"] = n_data + i * n_vocab + L[j][k]
ftv[index]["dense_equal_to"] = 0 # not actually used
index += 1
ftv[index]["vid"] = i
ftv[index]["dense_equal_to"] = 0 # not actually used
index += 1
# Dependencies
for i in range(n_data):
for j in range(len(deps)):
factor[n_lf * n_data + i * len(deps) + j]["factorFunction"] = FUNC_CORAL_GEN_DEP_SIMILAR
factor[n_lf * n_data + i * len(deps) + j]["weightId"] = n_lf + j
factor[n_lf * n_data + i * len(deps) + j]["featureValue"] = 1.0
factor[n_lf * n_data + i * len(deps) + j]["arity"] = 2
factor[n_lf * n_data + i * len(deps) + j]["ftv_offset"] = index
ftv[index + 0]["vid"] = n_data + i * n_vocab + deps[j][0]
ftv[index + 0]["dense_equal_to"] = 0 # not actually used
ftv[index + 1]["vid"] = n_data + i * n_vocab + deps[j][1]
ftv[index + 1]["dense_equal_to"] = 0 # not actually used
index += 2
return weight, variable, factor, ftv, domain_mask, n_edges | ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/learning/gen_learning.py |
from numba import jit
import numpy as np
import random
from numbskull.udf import *
from numbskull.numbskulltypes import *
class CoralDependencySelector(object):
"""
Fast method for identifying dependencies among labeling functions.
:param seed: seed for initializing state of Numbskull variables
"""
def __init__(self, seed=271828):
self.rng = random.Random()
self.rng.seed(seed)
def select(self, V, cardinality, L, UDF_SET, threshold=0.05, truncation=10):
"""
Identifies a dependency structure among labeling functions for a given data set.
By default searches for correlations, i.e., the DEP_SIMILAR dependency type.
        :param V: data matrix of vocabulary term values (n_data x n_vocab)
        :param cardinality: cardinality (number of discrete values) of each vocabulary term
        :param L: per-LF lists of the vocabulary indices each labeling function depends on
        :param UDF_SET: key into numbskull's UDF_USAGE table identifying the UDFs to evaluate
        :param threshold: minimum magnitude weight a dependency must have to be returned
            (in log scale); also the regularization strength
        :param truncation: number of iterations between truncation steps for regularization
        :return: set of (vocab index, vocab index) tuples, one per detected
            dependency between vocabulary terms
"""
n_data = V.shape[0]
n_vocab = V.shape[1]
n_lf = len(L)
# Initializes data structures
deps = set()
n_weights = n_lf + n_vocab
weights = np.zeros((n_weights,))
joint = np.zeros((6,))
# joint[0] = P(Y = -1, V_j = 0)
# joint[1] = P(Y = -1, V_j = 1)
# joint[2] = P(Y = -1, V_j = 2)
# joint[3] = P(Y = 1, V_j = 0)
# joint[4] = P(Y = 1, V_j = 1)
# joint[5] = P(Y = 1, V_j = 2)
Lstart = np.cumsum(np.array([0] + [len(l) for l in L]))
L = [item for sublist in L for item in sublist]
for j in range(n_vocab):
## Initializes weights
# Accuracy
for k in range(n_lf):
weights[k] = 1.1 - .2 * self.rng.random()
# Similarity
for k in range(n_lf, len(weights)):
weights[k] = 0.0
_fit_deps(n_data, n_lf, n_vocab, j, V, cardinality, L, Lstart, UDF_USAGE[UDF_SET], weights, joint, threshold, truncation)
for k in range(n_vocab):
if abs(weights[n_lf + k]) > threshold:
deps.add((j, k) if j < k else (k, j))
return deps
@jit(nopython=True, cache=True, nogil=True)
def eval_udf(i, udf_index, V, L, var_samp, value):
var_copy = 0
var_value = V[i:(i + 1), :]
fmap = np.empty(len(L), FactorToVar)
for i in range(len(L)):
fmap[i]["vid"] = L[i]
fmap[i]["dense_equal_to"] = 0 # not used
ftv_start = 0
return udf(udf_index, var_samp, value, var_copy, var_value, fmap, ftv_start)
@jit(nopython=True, cache=True, nogil=True)
def _fit_deps(n_data, n_lf, n_vocab, j, V, cardinality, L, Lstart, udf, weights, joint, regularization, truncation):
step_size = 1.0 / n_data
epochs = 100
l1delta = regularization * step_size * truncation
for t in range(epochs):
for i in range(n_data):
# Processes a training example
# First, computes joint and conditional distributions
joint[:] = 0, 0, 0, 0, 0, 0
for k in range(n_lf):
# Accuracy
for value in range(cardinality[j]):
u = eval_udf(i, udf[k], V, L[Lstart[k]:Lstart[k + 1]], j, value)
joint[0 + value] += weights[k] * -1 * u
joint[3 + value] += weights[k] * +1 * u
for k in range(n_vocab):
# Similarity
if j != k:
if cardinality[j] == cardinality[k]:
joint[0 + V[i, k]] += weights[n_lf + k]
joint[3 + V[i, k]] += weights[n_lf + k]
elif cardinality[j] == 2 and cardinality[k] == 3:
if V[i, k] == 0:
joint[0] += weights[n_lf + k]
joint[3] += weights[n_lf + k]
elif V[i, k] == 1:
pass
elif V[i, k] == 2:
joint[1] += weights[n_lf + k]
joint[4] += weights[n_lf + k]
elif cardinality[j] == 3 and cardinality[k] == 2:
if V[i, k] == 0:
joint[0] += weights[n_lf + k]
joint[3] += weights[n_lf + k]
elif V[i, k] == 1:
joint[2] += weights[n_lf + k]
joint[5] += weights[n_lf + k]
else:
raise ValueError("cardinality not valid")
if cardinality[j] == 2:
joint[2] = -np.inf
joint[5] = -np.inf
joint = np.exp(joint)
joint /= np.sum(joint)
marginal_pos = np.sum(joint[3:6])
marginal_neg = np.sum(joint[0:3])
conditional_pos = joint[3 + V[i, j]] / (joint[0 + V[i, j]] + joint[3 + V[i, j]])
conditional_neg = joint[0 + V[i, j]] / (joint[0 + V[i, j]] + joint[3 + V[i, j]])
# Second, takes likelihood gradient step
for k in range(n_lf):
for value in range(cardinality[j]):
# decrease marginal
u = eval_udf(i, udf[k], V, L[Lstart[k]:Lstart[k + 1]], j, value)
weights[k] -= step_size * joint[0 + value] * -1 * u
weights[k] -= step_size * joint[3 + value] * +1 * u
# increase conditional
value = V[i, j]
u = eval_udf(i, udf[k], V, L[Lstart[k]:Lstart[k + 1]], j, value)
weights[k] += step_size * -1 * u * conditional_neg
weights[k] += step_size * +1 * u * conditional_pos
for k in range(n_vocab):
# Similarity
if j != k:
if cardinality[j] == cardinality[k]:
weights[n_lf + k] -= step_size * (joint[0 + V[i, k]] + joint[3 + V[i, k]])
if V[i, j] == V[i, k]:
weights[n_lf + k] += step_size
elif cardinality[j] == 2 and cardinality[k] == 3:
if V[i, k] == 0:
weights[n_lf + k] -= step_size * (joint[0] + joint[3])
if V[i, k] == 2:
weights[n_lf + k] -= step_size * (joint[1] + joint[4])
if (V[i, j] == 0 and V[i, k] == 0) or (V[i, j] == 1 and V[i, k] == 2):
weights[n_lf + k] += step_size
elif cardinality[j] == 3 and cardinality[k] == 2:
if V[i, k] == 0:
weights[n_lf + k] -= step_size * (joint[0] + joint[3])
if V[i, k] == 1:
weights[n_lf + k] -= step_size * (joint[2] + joint[5])
if (V[i, j] == 0 and V[i, k] == 0) or (V[i, j] == 2 and V[i, k] == 1):
weights[n_lf + k] += step_size
else:
raise ValueError("cardinality not valid")
# Third, takes regularization gradient step
if (t * n_data + i) % truncation == 0:
# do not regularize accuracy
# only regularize dependencies
for k in range(n_lf, len(weights)):
weights[k] = max(0, weights[k] - l1delta) if weights[k] > 0 else min(0, weights[k] + l1delta) | ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/learning/structure_learning.py |
from __future__ import print_function
import numpy as np
from scipy import sparse
def log_odds(p):
"""This is the logit function"""
return np.log(p / (1.0 - p))
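# Worked example (comment added for clarity; not in the original module):
#     log_odds(0.5) == 0.0
#     log_odds(0.9) == np.log(9.0)   # ~= 2.197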
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
return np.exp(l) / (1.0 + np.exp(l))
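# Sanity check (comment added): odds_to_prob inverts log_odds, so for any
# p in (0, 1), odds_to_prob(log_odds(p)) == p, and odds_to_prob(0.0) == 0.5.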
def sample_data(X, w, n_samples):
"""
Here we do Gibbs sampling over the decision variables (representing our objects), o_j
corresponding to the columns of X
The model is just logistic regression, e.g.
P(o_j=1 | X_{*,j}; w) = logit^{-1}(w \dot X_{*,j})
This can be calculated exactly, so this is essentially a noisy version of the exact calc...
"""
N, R = X.shape
t = np.zeros(N)
f = np.zeros(N)
# Take samples of random variables
idxs = np.round(np.random.rand(n_samples) * (N-1)).astype(int)
ct = np.bincount(idxs)
# Estimate probability of correct assignment
increment = np.random.rand(n_samples) < odds_to_prob(X[idxs, :].dot(w))
increment_f = -1. * (increment - 1)
t[idxs] = increment * ct[idxs]
f[idxs] = increment_f * ct[idxs]
return t, f
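# Note (comment added): for each sampled index, t/f receive that index's full
# sample count on the "true" or "false" side of a single Bernoulli draw;
# duplicate draws of the same index simply overwrite earlier ones.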
def exact_data(X, w, evidence=None):
"""
We calculate the exact conditional probability of the decision variables in
logistic regression; see sample_data
"""
t = odds_to_prob(X.dot(w))
if evidence is not None:
t[evidence > 0.0] = 1.0
t[evidence < 0.0] = 0.0
return t, 1-t
def transform_sample_stats(Xt, t, f, Xt_abs=None):
"""
Here we calculate the expected accuracy of each LF/feature
(corresponding to the rows of X) wrt to the distribution of samples S:
E_S[ accuracy_i ] = E_(t,f)[ \frac{TP + TN}{TP + FP + TN + FN} ]
= \frac{X_{i|x_{ij}>0}*t - X_{i|x_{ij}<0}*f}{t+f}
= \frac12\left(\frac{X*(t-f)}{t+f} + 1\right)
"""
if Xt_abs is None:
Xt_abs = sparse_abs(Xt) if sparse.issparse(Xt) else abs(Xt)
n_pred = Xt_abs.dot(t+f)
m = (1. / (n_pred + 1e-8)) * (Xt.dot(t) - Xt.dot(f))
p_correct = (m + 1) / 2
return p_correct, n_pred
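# Worked micro-example (comment added): one LF voting on two objects with
# Xt = [[1, -1]], t = [1, 0], f = [0, 1] gives n_pred = [2] and
# m ~= ((1 - 0) - (0 - 1)) / 2 = 1, so p_correct ~= [1.0]: the LF agreed
# with both (soft) labels.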
class IndepModel(object):
"""IndepModel Object that learns the accuracies for the heuristics.
Copied from Snorkel v0.4 NaiveBayes Model with minor changes for simplicity"""
def __init__(self, bias_term=False):
self.w = None
self.bias_term = bias_term
def train(self, X, n_iter=1000, w0=None, rate=0.01, alpha=0.5, mu=1e-6, \
sample=False, n_samples=100, evidence=None, warm_starts=False, tol=1e-6, verbose=True):
"""
Perform SGD wrt the weights w
* n_iter: Number of steps of SGD
* w0: Initial value for weights w
* rate: I.e. the SGD step size
* alpha: Elastic net penalty mixing parameter (0=ridge, 1=lasso)
* mu: Elastic net penalty
* sample: Whether to sample or not
* n_samples: Number of samples per SGD step
* evidence: Ground truth to condition on
* warm_starts:
* tol: For testing for SGD convergence, i.e. stopping threshold
"""
self.X_train = X
# Set up stuff
N, M = X.shape
        if verbose:
            print("=" * 80)
            print("Training marginals (!= 0.5):\t%s" % N)
            print("Features:\t\t\t%s" % M)
            print("=" * 80)
Xt = X.transpose()
Xt_abs = np.abs(Xt)
w0 = w0 if w0 is not None else np.ones(M)
# Initialize training
w = w0.copy()
g = np.zeros(M)
l = np.zeros(M)
g_size = 0
# Gradient descent
        if verbose:
            print("Begin training for rate={}, mu={}".format(rate, mu))
for step in range(n_iter):
# Get the expected LF accuracy
t,f = sample_data(X, w, n_samples=n_samples) if sample else exact_data(X, w, evidence)
p_correct, n_pred = transform_sample_stats(Xt, t, f, Xt_abs)
# Get the "empirical log odds"; NB: this assumes one is correct, clamp is for sampling...
l = np.clip(log_odds(p_correct), -10, 10)
# SGD step with normalization by the number of samples
g0 = (n_pred*(w - l)) / np.sum(n_pred)
# Momentum term for faster training
g = 0.95*g0 + 0.05*g
# Check for convergence
wn = np.linalg.norm(w, ord=2)
g_size = np.linalg.norm(g, ord=2)
            if step % 250 == 0 and verbose:
                print("\tLearning epoch = {}\tGradient mag. = {:.6f}".format(step, g_size))
if (wn < 1e-12 or g_size / wn < tol) and step >= 10:
                if verbose:
                    print("SGD converged for mu={} after {} steps".format(mu, step))
break
# Update weights
w -= rate * g
# Apply elastic net penalty
w_bias = w[-1]
soft = np.abs(w) - mu
ridge_pen = (1 + (1-alpha) * mu)
# \ell_1 penalty by soft thresholding | \ell_2 penalty
w = (np.sign(w)*np.select([soft>0], [soft], default=0)) / ridge_pen
# Don't regularize the bias term
if self.bias_term:
w[-1] = w_bias
# SGD did not converge
else:
            if verbose:
                print("Final gradient magnitude for rate={}, mu={}: {:.3f}".format(rate, mu, g_size))
# Return learned weights
self.w = w
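    # Usage sketch (comment added; `X_lf` is a hypothetical scipy CSR matrix
    # of LF votes in {-1, 0, 1}):
    #     model = IndepModel()
    #     model.train(X_lf, n_iter=500, rate=0.01, mu=1e-6)
    #     probs = model.marginals(X_lf)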
def marginals(self, X):
X = X.todense()
marginals = odds_to_prob(X.dot(self.w))
return np.array(marginals)[0] | ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/learning/indep_learning.py |
from inspect import getsourcelines
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/static_analysis/__init__.py |
from inspect import getsourcelines
import numpy as np
def find_dependencies(L_names, primitive_names):
LFs = []
for lf in L_names:
LFs.append(getsourcelines(lf)[0])
L_deps = []
for lf_idx, lf in enumerate(LFs):
L_dep = []
for line in lf:
if len(line.strip()) > 0:
if line.lstrip()[:3] == "def":
parameters = line[line.find("(")+1:line.rfind(")")].split(",")
for param in parameters:
L_dep.append(primitive_names.index(param.strip()))
L_deps.append(L_dep)
return L_deps
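# Illustrative example (comment added; the LFs below are hypothetical):
# with primitive_names = ['num_bikes', 'distance', 'area_diff'] and
#     def lf0(area_diff): ...
#     def lf1(num_bikes, distance): ...
# find_dependencies([lf0, lf1], primitive_names) returns [[2], [0, 1]].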
def discretize_primitives(L_names):
LFs = []
for lf in L_names:
LFs.append(getsourcelines(lf)[0])
# extract all conditions from labeling functions
lfs_primitives = {}
primitives = {}
primitives_ordered = []
for lf_idx, lf in enumerate(LFs):
for line in lf:
if len(line.strip()) > 0:
if (line.lstrip()[:2] == "if" or line.lstrip()[:4] == "elif") and line.rstrip()[-1] == ":":
p_cond = line[line.find("if")+2:-1].lstrip() #TODO(pabajaj): handle multiple and or conditions
p_name, p_cond, p_thresh = p_cond.split()
if p_name not in primitives:
primitives_ordered.append(p_name)
primitives[p_name] = []
if (p_name, p_cond, p_thresh) not in primitives[p_name]:
primitives[p_name].append((p_name, p_cond, p_thresh))
code = []
for p_idx, p_name in enumerate(primitives_ordered):
p_idx_str = str(p_idx)
p_max = len(primitives[p_name])
p_data = primitives[p_name][0]
code.append("P.discrete_primitive_mtx[i,"+p_idx_str+"] = "+str(p_max)+" if P.primitive_mtx[i,"+p_idx_str+"] "+p_data[1]+" "+p_data[2].strip(':')+" else 0")
for p_data in primitives[p_name][1:]:
p_max -= 1
code.append("P.discrete_primitive_mtx[i,"+p_idx_str+"] = "+str(p_max)+" if P.primitive_mtx[i,"+p_idx_str+"] "+p_data[1]+" "+p_data[2].strip(':')+" else P.discrete_primitive_mtx[i,"+p_idx_str+"]")
return code | ukb-cardiac-mri-master | ukb/weak_supervision/coral/coral/static_analysis/dependency_learning.py |
import json
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import skimage.io as io
class DataLoader(object):
def __init__(self, data_path='/tutorial_data/'):
current_dir = os.getcwd()
self.data_path = current_dir + data_path
def load_train_attr(self):
self.train_mscoco = np.load(self.data_path + 'train_mscoco.npy')
self.train_vg = np.load(self.data_path + 'train_vg.npy')
self.train_vg_idx = np.load(self.data_path + 'train_vg_idx.npy')
self.train_ground = np.load(self.data_path + 'train_ground.npy')
self.train_object_names = np.load(self.data_path + 'train_object_names.npy')
self.train_object_x = np.load(self.data_path + 'train_object_x.npy')
self.train_object_y = np.load(self.data_path + 'train_object_y.npy')
self.train_object_height = np.load(self.data_path + 'train_object_height.npy')
self.train_object_width = np.load(self.data_path + 'train_object_width.npy')
def load_val_attr(self):
self.val_mscoco = np.load(self.data_path + 'val_mscoco.npy')
self.val_vg = np.load(self.data_path + 'val_vg.npy')
self.val_vg_idx = np.load(self.data_path + 'val_vg_idx.npy')
self.val_ground = np.load(self.data_path + 'val_ground.npy')
self.val_object_names = np.load(self.data_path + 'val_object_names.npy')
self.val_object_x = np.load(self.data_path + 'val_object_x.npy')
self.val_object_y = np.load(self.data_path + 'val_object_y.npy')
self.val_object_height = np.load(self.data_path + 'val_object_height.npy')
self.val_object_width = np.load(self.data_path + 'val_object_width.npy')
load_train_attr(self)
self.train_num = np.shape(self.train_object_names)[0]
load_val_attr(self)
self.val_num = np.shape(self.val_object_names)[0]
with open(self.data_path + 'image_data.json') as json_data:
self.data = json.load(json_data)
def show_examples(self, annotated=False, label=-1):
def show_image(vg_idx):
image_url = self.data[vg_idx]['url']
I = io.imread(image_url)
plt.axis('off')
plt.imshow(I)
def show_image_annotated(vg_idx, idx):
image_url = self.data[vg_idx]['url']
I = io.imread(image_url)
plt.axis('off')
plt.imshow(I)
ax = plt.gca()
for i in range(np.shape(self.val_object_y[idx])[0]):
ax.add_patch(Rectangle((self.val_object_x[idx][i], self.val_object_y[idx][i]),
self.val_object_width[idx][i],
self.val_object_height[idx][i],
fill=False,
edgecolor='cyan',
linewidth=1))
split_idx = np.where(self.val_ground == label)[0]
idx_list = np.random.choice(split_idx,3)
plt.figure(figsize=(15,3))
for j,i in enumerate(idx_list):
plt.subplot(1,3,j+1)
if annotated:
show_image_annotated(int(self.val_vg_idx[i]), i)
else:
show_image(int(self.val_vg_idx[i]))
plt.suptitle('Query Examples')
| ukb-cardiac-mri-master | ukb/weak_supervision/coral/tutorials/data_loader.py |
import numpy as np
class PrimitiveObject(object):
def save_primitive_matrix(self,primitive_mtx):
self.primitive_mtx = primitive_mtx
self.discrete_primitive_mtx = primitive_mtx
self.num_primitives = np.shape(self.primitive_mtx)[1]
def save_primitive_names(self,names):
self.primitive_names = names
if len(self.primitive_names) != self.num_primitives:
Exception('Incorrect number of Primitive Names')
def bike_human_nums(object_names):
names = object_names.split(' , ')[1:]
num_person = 0
num_bicycles = 0
for i in range(np.shape(names)[0]):
name = names[i]
if ('person' in name) or ('man' in name) or ('woman' in name) or ('girl' in name) or ('boy' in name) or ('people' in name):
num_person = num_person+1
if ('cycle' in name) or ('bike' in name) or ('bicycle' in name):
num_bicycles = num_bicycles+1
if (num_bicycles == 0) or (num_person == 0):
return 0
if num_person == num_bicycles:
return 2
elif num_person <= num_bicycles:
return 0
else:
return 1
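# Reading the rule above (comment added): returns 0 if either class is absent
# or bicycles strictly outnumber people, 2 if the counts match exactly, and
# 1 if people strictly outnumber bicycles.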
def bike_human_distance(object_names, object_x, object_y):
names = object_names.split(' , ')[1:]
person_position = np.array([[0,0],[0,0]])
bicycle_position = np.array([[0,0],[0,0]])
for i in range(np.shape(names)[0]):
name = names[i]
if ('person' in name) or ('man' in name) or ('woman' in name) or ('girl' in name) or ('boy' in name) or ('people' in name):
person_position = np.concatenate((person_position, np.array([[object_x[i],object_y[i]]])))
if ('cycle' in name) or ('bike' in name) or ('bicycle' in name):
bicycle_position = np.concatenate((bicycle_position, np.array([[object_x[i],object_y[i]]])))
person_position = person_position[2:,:]
bicycle_position = bicycle_position[2:,:]
if (np.shape(bicycle_position)[0] == 0) or (np.shape(person_position)[0] == 0):
return -1
import itertools
if len(bicycle_position) >= len(person_position):
list1 = [list(coord) for coord in bicycle_position]
list2 = [list(coord) for coord in person_position]
else:
list2 = [list(coord) for coord in bicycle_position]
list1 = [list(coord) for coord in person_position]
coord_comb = [list1, list2]
person_bike_pairs = itertools.product(*coord_comb)
dists = []
    for coord1, coord2 in person_bike_pairs:
        # Euclidean distance between one person position and one bicycle position
        dists.append(np.linalg.norm(np.array(coord1) - np.array(coord2)))
return np.min(dists)
def bike_human_size(object_names, object_area):
names = object_names.split(' , ')[1:]
person_area = np.array([0])
bicycle_area = np.array([0])
for i in range(np.shape(names)[0]):
name = names[i]
if ('person' in name) or ('man' in name) or ('woman' in name) or ('girl' in name) or ('boy' in name) or ('people' in name):
person_area = np.concatenate((person_area, [object_area[i]]))
if ('cycle' in name) or ('bike' in name) or ('bicycle' in name):
bicycle_area = np.concatenate((bicycle_area, [object_area[i]]))
person_area = person_area[1:]
bicycle_area = bicycle_area[1:]
    if (np.shape(bicycle_area)[0] == 0) or (np.shape(person_area)[0] == 0):
        return -1
    area_diff = -1
    for i in range(np.shape(bicycle_area)[0]):
        try:
            area_diff_temp = np.max(np.abs(bicycle_area[i] - person_area[:]))
            # max of two scalars (np.max(a, b) would treat b as the axis argument)
            area_diff = max(area_diff_temp, area_diff)
        except (ValueError, IndexError):
            continue
return area_diff | ukb-cardiac-mri-master | ukb/weak_supervision/coral/tutorials/primitive_helpers.py |
#!/usr/bin/env python
"""TODO."""
from __future__ import print_function
import numbskull
from numbskull.numbskulltypes import *
import numpy as np
def factor(f, args):
"""THIS IS A DOCSTRING."""
if f == FUNC_IMPLY_NATURAL:
# TODO
pass
elif f == FUNC_OR:
return 1 if any(args) else -1
elif f == FUNC_EQUAL:
# TODO
pass
    elif f == FUNC_AND or f == FUNC_ISTRUE:
return 1 if all(args) else -1
elif f == FUNC_LINEAR:
# TODO
pass
elif f == FUNC_RATIO:
# TODO
pass
elif f == FUNC_LOGICAL:
# TODO
pass
elif f == FUNC_IMPLY_MLN:
# TODO
pass
else:
        raise NotImplementedError("FACTOR " + str(f) + " not implemented.")
for (key, value) in numbskull.inference.FACTORS.items():
print(key)
variables = 2
if key == "DP_GEN_DEP_FIXING" or key == "DP_GEN_DEP_REINFORCING":
# These factor functions requires three vars to work
variables = 3
edges = variables
weight = np.zeros(1, Weight)
variable = np.zeros(variables, Variable)
factor = np.zeros(1, Factor)
fmap = np.zeros(edges, FactorToVar)
domain_mask = np.zeros(variables, np.bool)
weight[0]["isFixed"] = True
weight[0]["initialValue"] = 1
for i in range(variables):
variable[i]["isEvidence"] = 0
variable[i]["initialValue"] = 0
variable[i]["dataType"] = 0
variable[i]["cardinality"] = 2
factor[0]["factorFunction"] = value
factor[0]["weightId"] = 0
factor[0]["featureValue"] = 1
factor[0]["arity"] = variables
factor[0]["ftv_offset"] = 0
for i in range(variables):
fmap[i]["vid"] = i
ns = numbskull.NumbSkull(n_inference_epoch=100,
n_learning_epoch=100,
quiet=True)
ns.loadFactorGraph(weight, variable, factor, fmap, domain_mask, edges)
ns.learning()
ns.inference()
print(ns.factorGraphs[0].count)
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/loadfg.py |
#!/usr/bin/env python
"""TODO."""
from numbskull import numbskull
args = ['test',
'-l', '100',
'-i', '100',
'-t', '10',
'-s', '0.01',
'--regularization', '2',
'-r', '0.1',
'--quiet']
ns = numbskull.load(args)
ns.learning()
ns.inference()
print(ns.factorGraphs[0].count)
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/test.py |
#!/usr/bin/env python
"""This tests learning for labelling functions."""
from __future__ import print_function, absolute_import
import numpy as np
import numbskull
from numbskull.numbskulltypes import *
import math
def index_to_values(index, num_lf):
value = [0] * (1 + num_lf)
value[0] = index % 2
index = index // 2
for i in range(num_lf):
value[i + 1] = index % 3
index = index // 3
return value
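# Worked example (comment added): with num_lf = 2, index 5 decodes as
#     value[0] = 5 % 2 = 1, then 2 % 3 = 2, then 0 % 3 = 0,
# giving [1, 2, 0]: y = 1, LF_1 votes 2, LF_2 votes 0.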
def create_fg(prior, accuracy, abstain, copies):
"""
This creates copies of the following factor graph.
istrue (weight = prior)
|
y_i
/|\
/ | \
/ | \
/ | \
/ | \
LF_{i1} ... LF_{in}
( weight = ) ( weight = )
accuracy[1] accuracy[n]
Arguments:
prior: one floating-point value
        accuracy: list of floating-point values (one per labelling function)
        abstain: list of floating-point values (same length as accuracy)
copies: integer
Returns:
list of arguments that can be passed to numbskull.loadFactorGraph
"""
n = len(accuracy) # number of labelling functions
weights = 1 + n
variables = copies * (1 + n)
factors = copies * (1 + n)
edges = copies * (1 + 2 * n)
weight = np.zeros(weights, Weight)
variable = np.zeros(variables, Variable)
factor = np.zeros(factors, Factor)
fmap = np.zeros(edges, FactorToVar)
domain_mask = np.zeros(variables, np.bool)
states = 2 * 3 ** n
Z = np.zeros(states, np.float64)
for i in range(states):
value = index_to_values(i, n)
y = value[0]
lfs = value[1:]
Z[i] = prior * (2 * y - 1)
for (j, lf) in enumerate(lfs):
lf = lf - 1 # remap to standard -1, 0, 1
if lf != 0:
Z[i] += accuracy[j] * lf * (2 * y - 1)
# TODO: abstain not handled yet
Z[i] = math.exp(Z[i])
Z = np.cumsum(Z)
Z = Z / Z[-1]
print(Z)
for w in weight:
w["isFixed"] = False
w["initialValue"] = 1.0
weight[0]["initialValue"] = 0
for copy in range(copies):
r = np.random.rand()
index = np.argmax(Z >= r)
value = index_to_values(index, n)
y = value[0]
lf = value[1:]
# y variable
variable[copy * (1 + n)]["isEvidence"] = 0 # query
variable[copy * (1 + n)]["initialValue"] = 0 # Do not actually show y
variable[copy * (1 + n)]["dataType"] = 0 # binary
variable[copy * (1 + n)]["cardinality"] = 2
# labelling function variable
for i in range(n):
variable[copy * (1 + n) + 1 + i]["isEvidence"] = 1 # evidence
variable[copy * (1 + n) + 1 + i]["initialValue"] = lf[i]
variable[copy * (1 + n) + 1 + i]["dataType"] = 1 # categorical
variable[copy * (1 + n) + 1 + i]["cardinality"] = 3
# Class prior
factor[copy * (1 + n)]["factorFunction"] = 18 # DP_GEN_CLASS_PRIOR
factor[copy * (1 + n)]["weightId"] = 0
factor[copy * (1 + n)]["featureValue"] = 1
factor[copy * (1 + n)]["arity"] = 1
factor[copy * (1 + n)]["ftv_offset"] = copy * (1 + 2 * n)
fmap[copy * (1 + 2 * n)]["vid"] = copy * (1 + n)
# Labelling function accuracy
for i in range(n):
factor_index = copy * (1 + n) + 1 + i
factor[factor_index]["factorFunction"] = 21 # DP_GEN_LF_ACCURACY
factor[factor_index]["weightId"] = i + 1
factor[factor_index]["featureValue"] = 1
factor[factor_index]["arity"] = 2
factor[factor_index]["ftv_offset"] = copy * (1 + 2 * n) + 1 + 2 * i
fmap_index = copy * (1 + 2 * n) + 1 + 2 * i
fmap[fmap_index]["vid"] = copy * (1 + n) # y
fmap[fmap_index]["vid"] = copy * (1 + n) + i + 1 # labeling func i
return weight, variable, factor, fmap, domain_mask, edges
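# Sampling note (comment added): Z is built above as a cumulative distribution
# over all 2 * 3**n joint states, so inside the per-copy loop
#     index = np.argmax(Z >= np.random.rand())
# performs inverse-CDF sampling of one joint assignment (y, LF_1, ..., LF_n).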
learn = 100
ns = numbskull.NumbSkull(n_inference_epoch=100,
n_learning_epoch=learn,
quiet=True,
learn_non_evidence=True,
stepsize=0.01,
burn_in=100,
decay=0.001 ** (1.0 / learn),
reg_param=0.15)
prior = 0
accuracy = [1, 0.5]
abstain = [0, 0, 0]
copies = 10
fg = create_fg(prior, accuracy, abstain, copies)
print("weight")
print(fg[0])
print()
print("variable")
print(fg[1])
print()
print("factor")
print(fg[2])
print()
print("fmap")
print(fg[3])
print()
print("domain_mask")
print(fg[4])
print()
print("edges")
print(fg[5])
print()
ns.loadFactorGraph(*fg)
print(ns.factorGraphs[0].weight_value)
ns.learning()
print(ns.factorGraphs[0].weight_value)
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/test_lf_learning.py |
"""For pip."""
from setuptools import setup, find_packages
setup(
name='numbskull',
version='0.0',
description='sample away',
packages=find_packages(),
install_requires=[],
entry_points={
'console_scripts': [
'numbskull = numbskull.numbskull:main',
],
},
)
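# Note (comment added): after `pip install -e .`, the console_scripts entry
# point above exposes the sampler as a `numbskull` command on the PATH.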
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/setup.py |
# -*- coding: utf-8 -*-
#
# Numbskull documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 26 17:55:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
"""TODO."""
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Numbskull'
copyright = u'2016, Bryan He, Theodoros Rekatsinas'
author = u'Bryan He, Theodoros Rekatsinas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0'
# The full version, including alpha/beta/rc tags.
release = u'0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Numbskull v0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '../../fig/numbskull.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
html_favicon = "../../fig/mario.gif"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Numbskulldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Numbskull.tex', u'Numbskull Documentation',
u'Bryan He, Theodoros Rekatsinas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'numbskull', u'Numbskull Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Numbskull', u'Numbskull Documentation',
author, 'Numbskull', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Allow __init__ to be autodoc'ed
def skip(app, what, name, obj, skip, options):
"""TODO."""
if name == "__init__":
return False
return skip
def setup(app):
"""TODO."""
app.connect("autodoc-skip-member", skip)
numpydoc_show_class_members = False
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/docs/source/conf.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
# HELPER METHODS #
def dataType(i):
"""TODO."""
return {0: "Boolean",
1: "Categorical"}.get(i, "Unknown")
@jit(nopython=True, cache=True)
def compute_var_map(variables, factors, fmap, vmap, factor_index, domain_mask):
"""TODO."""
# Fill in domain values (for mapping when dumping marginals)
for i, v in enumerate(variables):
# skip boolean (value is 0)
if v["dataType"] == 0:
continue # default to 0
# categorical with explicit domain
if domain_mask[i]:
continue # filled already
else: # categorical with implicit domain [0...cardinality)
for index in range(v["cardinality"]):
vmap[v["vtf_offset"] + index]["value"] = index
# Fill in factor_index and indexes into factor_index
# Step 1: populate VTF.length
for ftv in fmap:
vid = ftv["vid"]
val = ftv["dense_equal_to"] if variables[vid]["dataType"] == 1 else 0
vtf = vmap[variables[vid]["vtf_offset"] + val]
vtf["factor_index_length"] += 1
# Step 2: populate VTF.offset
last_len = 0
last_off = 0
for i, vtf in enumerate(vmap):
vtf["factor_index_offset"] = last_off + last_len
last_len = vtf["factor_index_length"]
last_off = vtf["factor_index_offset"]
# Step 3: populate factor_index
offsets = vmap["factor_index_offset"].copy()
for i, fac in enumerate(factors):
for j in range(fac["ftv_offset"], fac["ftv_offset"] + fac["arity"]):
ftv = fmap[j]
vid = ftv["vid"]
val = ftv["dense_equal_to"] if variables[
vid]["dataType"] == 1 else 0
vtf_idx = variables[vid]["vtf_offset"] + val
fidx = offsets[vtf_idx]
factor_index[fidx] = i
offsets[vtf_idx] += 1
# Step 4: remove dupes from factor_index
for vtf in vmap:
offset = vtf["factor_index_offset"]
length = vtf["factor_index_length"]
new_list = factor_index[offset: offset + length]
new_list.sort()
i = 0
last_fid = -1
for fid in new_list:
if last_fid == fid:
continue
last_fid = fid
factor_index[offset + i] = fid
i += 1
vtf["factor_index_length"] = i
@jit(nopython=True, cache=True)
def reverse(data, start, end):
"""TODO."""
end -= 1
while (start < end):
data[start], data[end] = data[end], data[start]
start += 1
end -= 1
@jit(nopython=True, cache=True)
def reverse_array(data):
"""TODO."""
# TODO: why does this fail?
# data = np.flipud(data)
reverse(data, 0, data.size)
# DEFINE NUMBA-BASED DATA LOADING METHODS #
@jit(nopython=True, cache=True)
def load_weights(data, nweights, weights):
"""TODO."""
for i in range(nweights):
# TODO: read types from struct?
# TODO: byteswap only if system is little-endian
buf = data[(17 * i):(17 * i + 8)]
reverse_array(buf)
weightId = np.frombuffer(buf, dtype=np.int64)[0]
isFixed = data[17 * i + 8]
buf = data[(17 * i + 9):(17 * i + 17)]
reverse_array(buf)
initialValue = np.frombuffer(buf, dtype=np.float64)[0]
weights[weightId]["isFixed"] = isFixed
weights[weightId]["initialValue"] = initialValue
print("LOADED WEIGHTS")
@jit(nopython=True, cache=True)
def load_variables(data, nvariables, variables):
"""TODO."""
for i in range(nvariables):
# TODO: read types from struct?
# TODO: byteswap only if system is little-endian
buf = data[(27 * i):(27 * i + 8)]
reverse_array(buf)
variableId = np.frombuffer(buf, dtype=np.int64)[0]
isEvidence = data[27 * i + 8]
buf = data[(27 * i + 9):(27 * i + 17)]
reverse_array(buf)
initialValue = np.frombuffer(buf, dtype=np.int64)[0]
buf = data[(27 * i + 17):(27 * i + 19)]
reverse_array(buf)
dataType = np.frombuffer(buf, dtype=np.int16)[0]
buf = data[(27 * i + 19):(27 * i + 27)]
reverse_array(buf)
cardinality = np.frombuffer(buf, dtype=np.int64)[0]
variables[variableId]["isEvidence"] = isEvidence
variables[variableId]["initialValue"] = initialValue
variables[variableId]["dataType"] = dataType
variables[variableId]["cardinality"] = cardinality
print("LOADED VARS")
@jit(nopython=True, cache=True)
def load_domains(data, domain_mask, vmap, variables):
"""TODO."""
index = 0
while index < data.size:
buf = data[index: index + 8]
reverse_array(buf)
variableId = np.frombuffer(buf, dtype=np.int64)[0]
index += 8
buf = data[index: index + 8]
reverse_array(buf)
cardinality = np.frombuffer(buf, dtype=np.int64)[0]
index += 8
domain_mask[variableId] = True
# NOTE: values are sorted already by DD
for j in range(cardinality):
buf = data[index: index + 8]
reverse_array(buf)
val = np.frombuffer(buf, dtype=np.int64)[0]
index += 8
vmap[variables[variableId]["vtf_offset"] + j]["value"] = val
# translate initial value into dense index
if val == variables[variableId]["initialValue"]:
variables[variableId]["initialValue"] = j
print("LOADED DOMAINS")
@jit(nopython=True, cache=True)
def load_factors(data, nfactors, factors, fmap, domain_mask, variable, vmap):
"""TODO."""
index = 0
fmap_idx = 0
k = 0 # somehow numba 0.28 would raise LowerError without this line
for i in range(nfactors):
buf = data[index:(index + 2)]
reverse_array(buf)
factors[i]["factorFunction"] = np.frombuffer(buf, dtype=np.int16)[0]
buf = data[(index + 2):(index + 10)]
reverse_array(buf)
arity = np.frombuffer(buf, dtype=np.int64)[0]
factors[i]["arity"] = arity
factors[i]["ftv_offset"] = fmap_idx
index += 10 # TODO: update index once per loop?
for k in range(arity):
buf = data[index:(index + 8)]
reverse_array(buf)
vid = np.frombuffer(buf, dtype=np.int64)[0]
fmap[fmap_idx + k]["vid"] = vid
buf = data[(index + 8):(index + 16)]
reverse_array(buf)
val = np.frombuffer(buf, dtype=np.int64)[0]
# translate initial value into dense index using bisect
if domain_mask[vid]:
start = variable[vid]["vtf_offset"]
end = start + variable[vid]["cardinality"]
val = np.searchsorted(vmap["value"][start:end], val)
fmap[fmap_idx + k]["dense_equal_to"] = val
index += 16
fmap_idx += arity
buf = data[index:(index + 8)]
reverse_array(buf)
factors[i]["weightId"] = np.frombuffer(buf, dtype=np.int64)[0]
buf = data[(index + 8):(index + 16)]
reverse_array(buf)
factors[i]["featureValue"] = np.frombuffer(buf, dtype=np.float64)[0]
index += 16
print("LOADED FACTORS")
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/dataloading.py |
"""TODO."""
from __future__ import print_function, absolute_import
import sys
import numpy as np
from numbskull.inference import *
from numbskull.learning import *
from numbskull.timer import Timer
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
def run_pool(threadpool, threads, func, args):
"""TODO."""
if threads == 1:
func(0, *args)
else:
future_to_samples = \
[threadpool.submit(func, threadID, *args)
for threadID in range(threads)]
concurrent.futures.wait(future_to_samples)
for fts in future_to_samples:
if fts.exception() is not None:
raise fts.exception()
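# Note (comment added): with threads == 1 the function runs inline on the
# caller's thread; otherwise one future per threadID is submitted and any
# worker exception is re-raised once all futures complete.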
class FactorGraph(object):
"""TODO."""
def __init__(self, weight, variable, factor, fmap, vmap,
factor_index, var_copies, weight_copies, fid, workers):
"""TODO."""
self.weight = weight
self.variable = variable
self.factor = factor
self.fmap = fmap
self.vmap = vmap
self.factor_index = factor_index
# This is just cumsum shifted by 1
self.cstart = np.zeros(self.variable.shape[0] + 1, np.int64)
for i in range(self.variable.shape[0]):
c = self.variable[i]["cardinality"]
if self.variable[i]["dataType"] == 0:
c = 1
self.cstart[i + 1] = self.cstart[i] + c
self.count = np.zeros(self.cstart[self.variable.shape[0]], np.int64)
self.var_value_evid = \
np.tile(self.variable[:]['initialValue'], (var_copies, 1))
self.var_value = \
np.tile(self.variable[:]['initialValue'], (var_copies, 1))
self.weight_value = \
np.tile(self.weight[:]['initialValue'], (weight_copies, 1))
if self.variable.size == 0:
self.Z = np.zeros((workers, 0))
else:
self.Z = np.zeros((workers, max(self.variable[:]['cardinality'])))
if self.vmap.size == 0:
size = (workers, 0)
else:
size = (workers, 2 * max(self.vmap['factor_index_length']))
self.fids = np.zeros(size, factor_index.dtype)
self.fid = fid
assert(workers > 0)
self.threads = workers
self.threadpool = ThreadPoolExecutor(self.threads)
self.marginals = np.zeros(self.cstart[self.variable.shape[0]])
self.inference_epoch_time = 0.0
self.inference_total_time = 0.0
self.learning_epoch_time = 0.0
self.learning_total_time = 0.0
def clear(self):
"""TODO."""
self.count[:] = 0
self.threadpool.shutdown()
#################
# GETTERS #
#################
def getWeights(self, weight_copy=0):
"""TODO."""
return self.weight_value[weight_copy][:]
def getMarginals(self, varIds=None):
"""TODO."""
if not varIds:
return self.marginals
else:
return self.marginals[varIds]
#####################
# DIAGNOSTICS #
#####################
def diagnostics(self, epochs):
"""TODO."""
print('Inference took %.03f sec.' % self.inference_total_time)
epochs = epochs or 1
bins = 10
hist = np.zeros(bins, dtype=np.int64)
for i in range(len(self.count)):
assert(self.count[i] >= 0)
assert(self.count[i] <= epochs)
hist[min(self.count[i] * bins // epochs, bins - 1)] += 1
for i in range(bins):
start = i / 10.0
end = (i + 1) / 10.0
print("Prob. " + str(start) + ".." + str(end) + ": \
" + str(hist[i]) + " variables")
def diagnosticsLearning(self, weight_copy=0):
"""TODO."""
print('Learning epoch took %.03f sec.' % self.learning_epoch_time)
print("Weights:")
for (i, w) in enumerate(self.weight):
print(" weightId:", i)
print(" isFixed:", w["isFixed"])
print(" weight: ", self.weight_value[weight_copy][i])
print()
################################
# INFERENCE AND LEARNING #
################################
def burnIn(self, epochs, sample_evidence, diagnostics=False,
var_copy=0, weight_copy=0):
"""TODO."""
if diagnostics:
print("FACTOR " + str(self.fid) + ": STARTED BURN-IN...")
# NUMBA-based method. Implemented in inference.py
for ep in range(epochs):
args = (self.threads, var_copy, weight_copy,
self.weight, self.variable, self.factor,
self.fmap, self.vmap,
self.factor_index, self.Z, self.cstart, self.count,
self.var_value, self.weight_value, sample_evidence, True)
run_pool(self.threadpool, self.threads, gibbsthread, args)
if diagnostics:
print("FACTOR " + str(self.fid) + ": DONE WITH BURN-IN")
def inference(self, burnin_epochs, epochs, sample_evidence=False,
diagnostics=False, var_copy=0, weight_copy=0):
"""TODO."""
# Burn-in
if burnin_epochs > 0:
self.burnIn(burnin_epochs, sample_evidence,
diagnostics=diagnostics)
# Run inference
if diagnostics:
print("FACTOR " + str(self.fid) + ": STARTED INFERENCE")
for ep in range(epochs):
with Timer() as timer:
args = (self.threads, var_copy, weight_copy, self.weight,
self.variable, self.factor, self.fmap,
self.vmap, self.factor_index, self.Z,
self.cstart, self.count, self.var_value,
self.weight_value, sample_evidence, False)
run_pool(self.threadpool, self.threads, gibbsthread, args)
self.inference_epoch_time = timer.interval
self.inference_total_time += timer.interval
if diagnostics:
print('Inference epoch #%d took %.03f sec.' %
(ep, self.inference_epoch_time))
if diagnostics:
print("FACTOR " + str(self.fid) + ": DONE WITH INFERENCE")
# compute marginals
if epochs != 0:
self.marginals = self.count / float(epochs)
if diagnostics:
self.diagnostics(epochs)
def learn(self, burnin_epochs, epochs, stepsize, decay, regularization,
reg_param, truncation, diagnostics=False, verbose=False,
learn_non_evidence=False, var_copy=0, weight_copy=0):
"""TODO."""
# Burn-in
if burnin_epochs > 0:
self.burnIn(burnin_epochs, True, diagnostics=diagnostics)
# Run learning
if diagnostics:
print("FACTOR " + str(self.fid) + ": STARTED LEARNING")
for ep in range(epochs):
if diagnostics:
print("FACTOR " + str(self.fid) + ": EPOCH #" + str(ep))
print("Current stepsize = " + str(stepsize))
if verbose:
self.diagnosticsLearning(weight_copy)
sys.stdout.flush() # otherwise output refuses to show in DD
with Timer() as timer:
args = (self.threads, stepsize, regularization, reg_param,
truncation, var_copy, weight_copy, self.weight,
self.variable, self.factor, self.fmap,
self.vmap, self.factor_index, self.Z, self.fids,
self.var_value, self.var_value_evid,
self.weight_value, learn_non_evidence)
run_pool(self.threadpool, self.threads, learnthread, args)
self.learning_epoch_time = timer.interval
self.learning_total_time += timer.interval
# Decay stepsize
stepsize *= decay
if diagnostics:
print("FACTOR " + str(self.fid) + ": DONE WITH LEARNING")
def dump_weights(self, fout, weight_copy=0):
"""Dump <wid, weight> text file in DW format."""
with open(fout, 'w') as out:
for i, w in enumerate(self.weight):
out.write('%d %f\n' % (i, self.weight_value[weight_copy][i]))
def dump_probabilities(self, fout, epochs):
"""Dump <vid, value, prob> text file in DW format."""
epochs = epochs or 1
with open(fout, 'w') as out:
for i, v in enumerate(self.variable):
if v["dataType"] == 0:
prob = float(self.count[self.cstart[i]]) / epochs
out.write('%d %d %.3f\n' % (i, 1, prob))
else:
for k in range(v["cardinality"]):
prob = float(self.count[self.cstart[i] + k]) / epochs
original_value = self.vmap[
v["vtf_offset"] + k]["value"]
out.write('%d %d %.3f\n' % (i, original_value, prob))
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/factorgraph.py |
"""TODO."""
from __future__ import print_function, absolute_import
import time
class Timer:
"""TODO."""
def __enter__(self):
"""TODO."""
self.start = time.time()
return self
def __exit__(self, *args):
"""TODO."""
self.end = time.time()
self.interval = self.end - self.start
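    # Usage sketch (comment added; `run_workload` is hypothetical):
    #     with Timer() as t:
    #         run_workload()
    #     print("took %.3f sec" % t.interval)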
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/timer.py |
"""TODO."""
from .numbskull import NumbSkull
from .numbskull import main
__all__ = ('numbskull', 'factorgraph', 'timer')
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/__init__.py |
#!/usr/bin/env python
"""TODO: This is a docstring."""
from __future__ import print_function, absolute_import
import os
import sys
import argparse
import numbskull.factorgraph
from numbskull.factorgraph import FactorGraph
from numbskull.dataloading import *
from numbskull.numbskulltypes import *
import numpy as np
# Define arguments for both parser in main and NumbSkull
arguments = [
(tuple(['directory']),
{'metavar': 'DIRECTORY',
'nargs': '?',
'default': '.',
'type': str,
'help': 'specify the directory of factor graph files'}),
# TODO: print default for meta, weight, variable, factor in help
(('-o', '--output_dir'),
{'metavar': 'OUTPUT_DIR',
'dest': 'output_dir',
'default': '.',
'type': str,
'help': 'Output dir to contain inference_result.out.text ' +
'and inference_result.out.weights.text'}),
(('-m', '--meta', '--fg_meta'),
{'metavar': 'META_FILE',
'dest': 'metafile',
'default': 'graph.meta',
'type': str,
'help': 'factor graph metadata file'}),
(('-w', '--weight', '--weights'),
{'metavar': 'WEIGHTS_FILE',
'dest': 'weightfile',
'default': 'graph.weights',
'type': str,
'help': 'factor weight file'}),
(('-v', '--variable', '--variables'),
{'metavar': 'VARIABLES_FILE',
'dest': 'variablefile',
'default': 'graph.variables',
'type': str,
'help': 'factor graph variables file'}),
(('-f', '--factor', '--factors'),
{'metavar': 'FACTORS_FILE',
'dest': 'factorfile',
'default': 'graph.factors',
'type': str,
'help': 'factor file'}),
(('--domain', '--domains'),
{'metavar': 'DOMAINS_FILE',
'dest': 'domainfile',
'default': 'graph.domains',
'type': str,
'help': 'domain file'}),
(('-l', '--n_learning_epoch'),
{'metavar': 'NUM_LEARNING_EPOCHS',
'dest': 'n_learning_epoch',
'default': 0,
'type': int,
'help': 'number of learning epochs'}),
(('-i', '--n_inference_epoch'),
{'metavar': 'NUM_INFERENCE_EPOCHS',
'dest': 'n_inference_epoch',
'default': 0,
'type': int,
'help': 'number of inference epochs'}),
(('-s', '--stepsize', '-a', '--alpha'),
{'metavar': 'LEARNING_STEPSIZE',
'dest': 'stepsize',
'default': 0.01,
'type': float,
'help': 'stepsize for learning'}),
(('-d', '--decay', '--diminish'),
{'metavar': 'LEARNING_DECAY',
'dest': 'decay',
'default': 0.95,
'type': float,
'help': 'decay for updating stepsize during learning'}),
(('-r', '--reg_param'),
{'metavar': 'LEARNING_REGULARIZATION_PARAM',
'dest': 'reg_param',
'default': 0.01,
'type': float,
'help': 'regularization penalty'}),
(tuple(['--regularization']),
{'metavar': 'REGULARIZATION',
'dest': 'regularization',
'default': 2,
'type': int,
'help': 'regularization (l1 or l2) [Enter as "1" or "2"]'}),
(('-k', '--truncation'),
{'metavar': 'TRUNCATION',
'dest': 'truncation',
'default': 1,
'type': int,
'help': 'If using l1 regularization, truncation is applied with '
'probability 1/k and with magnitude '
'step_size * reg_param * k. If not using l1 regularization, '
'this parameter has no effect.'}),
(('-b', '--burn_in'),
{'metavar': 'BURN_IN',
'dest': 'burn_in',
'default': 0,
'type': int,
'help': 'number of burn-in epochs'}),
(('-t', '--threads', '--n_threads'),
{'metavar': 'NUM_THREADS',
'dest': 'nthreads',
'default': 1,
'type': int,
'help': 'number of threads to be used'}),
(('-u', '--dburl'),
{'metavar': 'DATABASE_URL',
'dest': 'dburl',
'default': '',
'type': str,
'help': 'url to database holding factor graph information'})
]
flags = [
(tuple(['--sample_evidence']),
{'default': True,
'dest': 'sample_evidence',
'action': 'store_true',
'help': 'sample evidence variables'}),
(tuple(['--learn_non_evidence']),
{'default': False,
'dest': 'learn_non_evidence',
'action': 'store_true',
'help': 'learn from non-evidence variables'}),
(('-q', '--quiet'),
{'default': False,
'dest': 'quiet',
'action': 'store_true',
'help': 'quiet'}),
(tuple(['--verbose']),
{'default': False,
'dest': 'verbose',
'action': 'store_true',
'help': 'verbose'})
]
class NumbSkull(object):
"""TODO: Main class for numbskull."""
def __init__(self, **kwargs):
"""TODO.
Parameters
----------
paramater : type
This is a parameter
Returns
-------
describe : type
Expanation
"""
# Set version
self.version = "0.0"
# Initialize default execution arguments
arg_defaults = {}
for arg, opts in arguments:
if 'directory' in arg[0]:
arg_defaults['directory'] = opts['default']
else:
arg_defaults[opts['dest']] = opts['default']
# Initialize default execution flags
for arg, opts in flags:
arg_defaults[opts['dest']] = opts['default']
for (arg, default) in arg_defaults.items():
setattr(self, arg, kwargs.get(arg, default))
self.factorGraphs = []
def loadFactorGraphRaw(self, weight, variable, factor, fmap,
vmap, factor_index,
var_copies=1, weight_copies=1):
"""TODO."""
fg = FactorGraph(weight, variable, factor, fmap, vmap, factor_index,
var_copies, weight_copies,
len(self.factorGraphs), self.nthreads)
self.factorGraphs.append(fg)
def loadFactorGraph(self, weight, variable, factor, fmap, domain_mask,
edges, var_copies=1, weight_copies=1):
"""TODO."""
# Assert input arguments correspond to NUMPY arrays
assert(type(weight) == np.ndarray and weight.dtype == Weight)
assert(type(variable) == np.ndarray and variable.dtype == Variable)
assert(type(factor) == np.ndarray and factor.dtype == Factor)
assert(type(fmap) == np.ndarray and fmap.dtype == FactorToVar)
assert(type(domain_mask) == np.ndarray and
domain_mask.dtype == np.bool)
assert(type(edges) == int or type(edges) == np.int64)
# Initialize metadata
meta = {}
meta['weights'] = weight.shape[0]
meta['variables'] = variable.shape[0]
meta['factors'] = factor.shape[0]
meta['edges'] = edges
# count total number of VTF records needed
num_vtfs = 0
for var in variable:
var["vtf_offset"] = num_vtfs
if var["dataType"] == 0: # boolean
num_vtfs += 1
else:
num_vtfs += var["cardinality"]
vmap = np.zeros(num_vtfs, VarToFactor)
factor_index = np.zeros(meta["edges"], np.int64)
# Numba-based method. Defined in dataloading.py
compute_var_map(variable, factor, fmap, vmap,
factor_index, domain_mask)
fg = FactorGraph(weight, variable, factor, fmap, vmap, factor_index,
var_copies, weight_copies,
len(self.factorGraphs), self.nthreads)
self.factorGraphs.append(fg)
def loadFGFromFile(self, directory=None, metafile=None, weightfile=None,
variablefile=None, factorfile=None, domainfile=None,
var_copies=1, weight_copies=1):
"""TODO."""
# init necessary input arguments
if not self.directory:
print("No factor graph specified")
return
else:
directory = self.directory
metafile = self.metafile if not metafile else metafile
weightfile = self.weightfile if not weightfile else weightfile
variablefile = self.variablefile if not variablefile else variablefile
factorfile = self.factorfile if not factorfile else factorfile
domainfile = self.domainfile if not domainfile else domainfile
print_info = not self.quiet
print_only_meta = not self.verbose
# load metadata
meta = np.loadtxt(directory + "/" + metafile,
delimiter=',',
dtype=Meta)
meta = meta[()]
if print_info:
print("Meta:")
print(" weights: ", meta["weights"])
print(" variables:", meta["variables"])
print(" factors: ", meta["factors"])
print(" edges: ", meta["edges"])
print()
# load weights
weight_data = np.memmap(directory + "/" + weightfile, mode="c")
weight = np.zeros(meta["weights"], Weight)
# NUMBA-based function. Defined in dataloading.py
load_weights(weight_data, meta["weights"], weight)
if print_info and not print_only_meta:
print("Weights:")
for (i, w) in enumerate(weight):
print(" weightId:", i)
print(" isFixed:", w["isFixed"])
print(" weight: ", w["initialValue"])
print()
# load variables
variable_data = np.memmap(directory + "/" + variablefile, mode="c")
variable = np.zeros(meta["variables"], Variable)
# NUMBA-based method. Defined in dataloading.py
load_variables(variable_data, meta["variables"], variable)
sys.stdout.flush()
if print_info and not print_only_meta:
print("Variables:")
for (i, v) in enumerate(variable):
print(" variableId:", i)
print(" isEvidence: ", v["isEvidence"])
print(" initialValue:", v["initialValue"])
print(" dataType: ", v["dataType"],
"(", dataType(v["dataType"]), ")")
print(" cardinality: ", v["cardinality"])
print()
# count total number of VTF records needed
num_vtfs = 0
for var in variable:
var["vtf_offset"] = num_vtfs
if var["dataType"] == 0: # boolean
num_vtfs += 1
else:
num_vtfs += var["cardinality"]
print("#VTF = %s" % num_vtfs)
sys.stdout.flush()
# generate variable-to-factor map
vmap = np.zeros(num_vtfs, VarToFactor)
factor_index = np.zeros(meta["edges"], np.int64)
# load domains
# whether a var has domain spec
domain_mask = np.zeros(meta["variables"], np.bool)
domain_file = directory + "/" + domainfile
if os.path.isfile(domain_file) and os.stat(domain_file).st_size > 0:
domain_data = np.memmap(directory + "/" + domainfile, mode="c")
load_domains(domain_data, domain_mask, vmap, variable)
sys.stdout.flush()
# load factors
factor_data = np.memmap(directory + "/" + factorfile, mode="c")
factor = np.zeros(meta["factors"], Factor)
fmap = np.zeros(meta["edges"], FactorToVar)
# Numba-based method. Defined in dataloading.py
load_factors(factor_data, meta["factors"],
factor, fmap, domain_mask, variable, vmap)
sys.stdout.flush()
# Numba-based method. Defined in dataloading.py
compute_var_map(variable, factor, fmap, vmap,
factor_index, domain_mask)
print("COMPLETED VMAP INDEXING")
sys.stdout.flush()
fg = FactorGraph(weight, variable, factor, fmap, vmap, factor_index,
var_copies, weight_copies,
len(self.factorGraphs), self.nthreads)
self.factorGraphs.append(fg)
def getFactorGraph(self, fgID=0):
"""TODO."""
return self.factorGraphs[fgID]
def inference(self, fgID=0, out=True):
"""TODO."""
burn_in = self.burn_in
n_inference_epoch = self.n_inference_epoch
self.factorGraphs[fgID].inference(burn_in, n_inference_epoch,
sample_evidence=self.sample_evidence,
diagnostics=not self.quiet)
if out:
output_file = os.path.join(
self.output_dir, "inference_result.out.text")
self.factorGraphs[fgID].dump_probabilities(output_file,
n_inference_epoch)
def learning(self, fgID=0, out=True):
"""TODO."""
burn_in = self.burn_in
n_learning_epoch = self.n_learning_epoch
stepsize = self.stepsize
decay = self.decay
regularization = self.regularization
reg_param = self.reg_param
truncation = self.truncation
fg = self.factorGraphs[fgID]
fg.learn(burn_in, n_learning_epoch,
stepsize, decay, regularization, reg_param, truncation,
diagnostics=not self.quiet,
verbose=self.verbose,
learn_non_evidence=self.learn_non_evidence)
if out:
output_file = os.path.join(
self.output_dir, "inference_result.out.weights.text")
self.factorGraphs[fgID].dump_weights(output_file)
def load(argv=None):
"""TODO."""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description="Runs a Gibbs sampler",
epilog="")
# Add version to parser
parser.add_argument("--version",
action='version',
version="%(prog)s 0.0",
help="print version number")
# Add execution arguments to parser
for arg, opts in arguments:
parser.add_argument(*arg, **opts)
# Add flags to parser
for arg, opts in flags:
parser.add_argument(*arg, **opts)
# Initialize NumbSkull #
args = parser.parse_args(argv)
ns = NumbSkull(**vars(args))
ns.loadFGFromFile()
return ns
def main(argv=None):
"""Duh."""
ns = load(argv)
ns.learning()
ns.inference()
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/numbskull.py |
#!/usr/bin/env python
"""TODO."""
from __future__ import print_function
import zmq
import sys
import time
import argparse
import gibbs
import numpy as np
def send_array(socket, A, flags=0, copy=True, track=False):
"""TODO: send a numpy array with metadata."""
md = dict(
dtype=str(A.dtype),
shape=A.shape,
)
socket.send_json(md, flags | zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array(socket, flags=0, copy=True, track=False):
"""TODO: recv a numpy array."""
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
    buf = memoryview(msg)  # buffer() existed only in Python 2
    try:
        A = np.frombuffer(buf, dtype=md['dtype'])
    except (TypeError, ValueError):
        # structured dtypes arrive as a repr string and need eval to rebuild
        A = np.frombuffer(buf, dtype=eval(md['dtype']))
return A.reshape(md['shape'])
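# Usage sketch (comment added; the paired REQ/REP sockets are hypothetical):
#     send_array(req_socket, np.arange(4, dtype=np.float64))  # sender side
#     A = recv_array(rep_socket)                               # receiver side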
def server(argv=None):
"""TODO."""
parser = argparse.ArgumentParser(
description="Run Gibbs worker",
epilog="")
parser.add_argument("directory",
metavar="DIRECTORY",
nargs="?",
help="specify directory of factor graph files",
default="",
type=str)
parser.add_argument("-p", "--port",
metavar="PORT",
help="port",
default=5556,
type=int)
parser.add_argument("-m", "--meta",
metavar="META_FILE",
dest="meta",
default="graph.meta",
type=str,
help="meta file")
# TODO: print default for meta, weight, variable, factor in help
parser.add_argument("-w", "--weight",
metavar="WEIGHTS_FILE",
dest="weight",
default="graph.weights",
type=str,
help="weight file")
parser.add_argument("-v", "--variable",
metavar="VARIABLES_FILE",
dest="variable",
default="graph.variables",
type=str,
help="variable file")
parser.add_argument("-f", "--factor",
metavar="FACTORS_FILE",
dest="factor",
default="graph.factors",
type=str,
help="factor file")
parser.add_argument("-b", "--burn",
metavar="NUM_BURN_STEPS",
dest="burn",
default=0,
type=int,
help="number of learning sweeps")
parser.add_argument("-l", "--learn",
metavar="NUM_LEARN_STEPS",
dest="learn",
default=0,
type=int,
help="number of learning sweeps")
parser.add_argument("-e", "--epoch",
metavar="NUM_LEARNING_EPOCHS",
dest="epoch",
default=0,
type=int,
help="number of learning epochs")
parser.add_argument("-i", "--inference",
metavar="NUM_INFERENCE_STEPS",
dest="inference",
default=0,
type=int,
help="number of inference sweeps")
# TODO: sample observed variable option
parser.add_argument("-q", "--quiet",
# metavar="QUIET",
dest="quiet",
default=False,
action="store_true",
# type=bool,
help="quiet")
# TODO: verbose option (print all info)
parser.add_argument("--verbose",
# metavar="VERBOSE",
dest="verbose",
default=False,
action="store_true",
# type=bool,
help="verbose")
print("Running server...")
arg = parser.parse_args(argv[1:])
if arg.directory == "":
fg = None
else:
var_copies = 1
weight_copies = 1
(meta, weight, variable, factor,
fstart, fmap, vstart, vmap, equalPredicate) = \
gibbs.load(arg.directory, arg.meta, arg.weight, arg.variable,
arg.factor, not arg.quiet, not arg.verbose)
fg_args = (weight, variable, factor, fstart, fmap, vstart,
vmap, equalPredicate, var_copies, weight_copies)
fg = gibbs.FactorGraph(*fg_args)
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:%s" % arg.port)
num_clients = 0
# TODO: barrier between burn, learn, and inference
# Probably need to send client id back
while True:
# Wait for next request from client
message = socket.recv()
if message == "HELLO": # Initial message from client
print("Received HELLO.")
socket.send("CLIENT_ID", zmq.SNDMORE)
socket.send_json("%d" % num_clients)
num_clients += 1
elif message == 'R_FACTOR_GRAPH': # Request for factor graph
client_id = socket.recv_json()
print("Received factor graph request from client #%d." % client_id)
# TODO: check that fg != None
# TODO
socket.send("FACTOR_GRAPH", zmq.SNDMORE)
socket.send_json(len(fg_args), zmq.SNDMORE)
for a in fg_args:
is_array = (type(a) == np.ndarray)
socket.send_json(is_array, zmq.SNDMORE)
if is_array:
send_array(socket, a, zmq.SNDMORE)
else:
socket.send_json(a, zmq.SNDMORE)
# TODO: could just not send SNDMORE for last arg
socket.send("DONE")
elif message == "READY": # Client ready
print("Received ready.")
# could skip this if arg.burn == 0
socket.send("BURN", zmq.SNDMORE)
socket.send_json(arg.burn)
elif message == 'DONE_BURN' or message == 'DONE_LEARN':
# Client done with burn/learning
if message == 'DONE_BURN': # Done burning
epochs = 0
else: # Done learning
epochs = socket.recv_json()
                fg.wv += recv_array(socket)
if epochs < arg.epoch:
socket.send("LEARN", zmq.SNDMORE)
socket.send_json(arg.learn, zmq.SNDMORE)
socket.send_json(0.001, zmq.SNDMORE) # TODO
send_array(socket, fg.wv)
else:
socket.send("INFERENCE", zmq.SNDMORE)
socket.send_json(arg.inference, zmq.SNDMORE)
send_array(socket, fg.wv)
elif message == 'DONE_INFERENCE': # Client done with inference
data = recv_array(socket)
# TODO: handle count
socket.send("EXIT")
else:
print("Message (%s) cannot be interpreted." % message,
file=sys.stderr)
socket.send("EXIT")
return
def client(argv=None):
"""TODO."""
parser = argparse.ArgumentParser(
description="Run Gibbs worker",
epilog="")
parser.add_argument("directory",
metavar="DIRECTORY",
nargs="?",
help="specify directory of factor graph files",
default="",
type=str)
parser.add_argument("-p", "--port",
metavar="PORT",
help="port",
default=5556,
type=int)
parser.add_argument("-m", "--meta",
metavar="META_FILE",
dest="meta",
default="graph.meta",
type=str,
help="meta file")
# TODO: print default for meta, weight, variable, factor in help
parser.add_argument("-w", "--weight",
metavar="WEIGHTS_FILE",
dest="weight",
default="graph.weights",
type=str,
help="weight file")
parser.add_argument("-v", "--variable",
metavar="VARIABLES_FILE",
dest="variable",
default="graph.variables",
type=str,
help="variable file")
parser.add_argument("-f", "--factor",
metavar="FACTORS_FILE",
dest="factor",
default="graph.factors",
type=str,
help="factor file")
parser.add_argument("-q", "--quiet",
# metavar="QUIET",
dest="quiet",
default=False,
action="store_true",
# type=bool,
help="quiet")
parser.add_argument("--verbose",
# metavar="VERBOSE",
dest="verbose",
default=False,
action="store_true",
# type=bool,
help="verbose")
print(argv)
arg = parser.parse_args(argv[1:])
print("Running client...")
print(arg.directory)
if arg.directory == "":
fg = None
else:
var_copies = 1
weight_copies = 1
(meta, weight, variable, factor,
fstart, fmap, vstart, vmap, equalPredicate) = \
gibbs.load(arg.directory, arg.meta, arg.weight, arg.variable,
arg.factor, not arg.quiet, not arg.verbose)
fg = gibbs.FactorGraph(weight, variable, factor, fstart, fmap, vstart,
vmap, equalPredicate, var_copies, weight_copies)
context = zmq.Context()
print("Connecting to server...")
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:%s" % arg.port)
# hello message
print("Sent HELLO.")
socket.send("HELLO")
message = socket.recv()
assert(message == "CLIENT_ID")
message = socket.recv_json()
client_id = int(message)
print("Received id #%d.\n" % client_id)
# request factor graph if not loaded
if fg is None:
socket.send("R_FACTOR_GRAPH", zmq.SNDMORE)
socket.send_json(client_id)
message = socket.recv()
assert(message == "FACTOR_GRAPH")
length = socket.recv_json()
fg_args = [None, ] * length
for i in range(length):
is_array = socket.recv_json()
if is_array:
fg_args[i] = recv_array(socket)
else:
fg_args[i] = socket.recv_json()
assert(socket.recv() == "DONE")
fg = gibbs.FactorGraph(*fg_args)
# Send "ready"
socket.send("READY")
learning_epochs = 0
while True:
message = socket.recv()
if message == 'BURN': # request for burn-in
print("Received request for burn-in.")
burn = socket.recv_json()
print("Burning", burn, "sweeps.")
fg.gibbs(burn, 0, 0)
socket.send("DONE_BURN")
elif message == 'LEARN': # Request for learning
print("Received request for learning.")
sweeps = socket.recv_json()
step = socket.recv_json()
fg.wv = recv_array(socket)
wv = fg.wv
fg.learn(sweeps, step, 0, 0)
dw = fg.wv - wv
socket.send("DONE_LEARNING", zmq.SNDMORE)
learning_epochs += 1
socket.send_json(learning_epochs, zmq.SNDMORE)
send_array(socket, dw)
elif message == 'INFERENCE': # Request for inference
print("Received request for inference.")
inference = socket.recv_json()
fg.wv = recv_array(socket)
print("Inference:", inference, "sweeps.")
fg.clear()
fg.gibbs(inference, 0, 0)
socket.send("DONE_INFERENCE", zmq.SNDMORE)
send_array(socket, fg.count)
elif message == 'EXIT': # Exit
print("Exit")
break
else:
print("Message cannot be interpreted.", file=sys.stderr)
break
def main(argv=None):
"""TODO."""
if argv is None:
argv = sys.argv[1:]
if len(argv) < 1:
print("Usage: ./distributed.py [server/client]", file=sys.stderr)
elif argv[0].lower() == "server" or argv[0].lower() == "s":
server(argv)
elif argv[0].lower() == "client" or argv[0].lower() == "c":
client(argv)
else:
print("Error:", argv[0], "is not a valid choice.", file=sys.stderr)
if __name__ == "__main__":
main()
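# Typical invocation (port and directory are examples; a client started
# without a directory fetches the factor graph from the server):
#
#     python distributed.py server graph_dir -p 5556 -b 10 -l 100 -e 5 -i 100
#     python distributed.py client -p 5556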
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/distributed.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numpy as np
# TODO (shared with DW): space optimization:
# 1. use smaller ints for some fields
# 2. replace a[x].length with a[x+1].offset - a[x].offset
Meta = np.dtype([('weights', np.int64),
('variables', np.int64),
('factors', np.int64),
('edges', np.int64)])
Weight = np.dtype([("isFixed", np.bool),
("initialValue", np.float64)])
Variable = np.dtype([("isEvidence", np.int8),
("initialValue", np.int64),
("dataType", np.int16),
("cardinality", np.int64),
("vtf_offset", np.int64)])
Factor = np.dtype([("factorFunction", np.int16),
("weightId", np.int64),
("featureValue", np.float64),
("arity", np.int64),
("ftv_offset", np.int64)])
FactorToVar = np.dtype([("vid", np.int64),
("dense_equal_to", np.int64)])
VarToFactor = np.dtype([("value", np.int64),
("factor_index_offset", np.int64),
("factor_index_length", np.int64)])
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/numbskulltypes.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
import math
from numbskull.udf import *
@jit(nopython=True, cache=True, nogil=True)
def gibbsthread(shardID, nshards, var_copy, weight_copy, weight, variable,
factor, fmap, vmap, factor_index, Z, cstart,
count, var_value, weight_value, sample_evidence, burnin):
"""TODO."""
# Indentify start and end variable
nvar = variable.shape[0]
start = (shardID * nvar) // nshards
end = ((shardID + 1) * nvar) // nshards
# TODO: give option do not store result, or just store tally
for var_samp in range(start, end):
if variable[var_samp]["isEvidence"] == 0 or sample_evidence:
v = draw_sample(var_samp, var_copy, weight_copy, weight, variable,
factor, fmap, vmap, factor_index, Z[shardID],
var_value, weight_value)
var_value[var_copy][var_samp] = v
if not burnin:
if variable[var_samp]["dataType"] == 0:
count[cstart[var_samp]] += v
else:
count[cstart[var_samp] + v] += 1
@jit(nopython=True, cache=True, nogil=True)
def draw_sample(var_samp, var_copy, weight_copy, weight, variable, factor,
fmap, vmap, factor_index, Z, var_value, weight_value):
"""TODO."""
cardinality = variable[var_samp]["cardinality"]
for value in range(cardinality):
Z[value] = np.exp(potential(var_samp, value, var_copy, weight_copy,
weight, variable, factor, fmap,
vmap, factor_index, var_value,
weight_value))
for j in range(1, cardinality):
Z[j] += Z[j - 1]
z = np.random.rand() * Z[cardinality - 1]
return np.argmax(Z[0:cardinality] >= z)
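# Sampling sketch: for a variable of cardinality 2 with potentials p0 and p1,
# Z becomes [exp(p0), exp(p0) + exp(p1)] after the prefix sum above; a uniform
# draw z in [0, Z[1]) then selects value 0 iff z < exp(p0).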
@jit(nopython=True, cache=True, nogil=True)
def potential(var_samp, value, var_copy, weight_copy, weight, variable, factor,
fmap, vmap, factor_index, var_value, weight_value):
"""TODO."""
p = 0.0
varval_off = value
if variable[var_samp]["dataType"] == 0:
varval_off = 0
vtf = vmap[variable[var_samp]["vtf_offset"] + varval_off]
start = vtf["factor_index_offset"]
end = start + vtf["factor_index_length"]
for k in range(start, end):
factor_id = factor_index[k]
p += weight_value[weight_copy][factor[factor_id]["weightId"]] * \
eval_factor(factor_id, var_samp, value, var_copy, variable,
factor, fmap, var_value)
return p
FACTORS = {
# Factor functions for boolean variables
"IMPLY_NATURAL": 0,
"OR": 1,
"EQUAL": 3,
"AND": 2,
"ISTRUE": 4,
"LINEAR": 7,
"RATIO": 8,
"LOGICAL": 9,
"IMPLY_MLN": 13,
# Factor functions for categorical variables
"AND_CAT": 12,
"OR_CAT": 14,
"EQUAL_CAT_CONST": 15,
"IMPLY_NATURAL_CAT": 16,
"IMPLY_MLN_CAT": 17,
# Factor functions for generative models for data programming.
#
# These functions accept two types of categorical variables:
#
# y \in {1, -1} corresponding to latent labels, and
# l \in {1, 0, -1} corresponding to labeling function outputs.
#
# The values of y are mapped to Numbskull variables y_index
# via {-1: 0, 1: 1}, and
# the values of l are mapped to Numbskull variables l_index
# via {-1: 0, 0: 1, 1: 2}.
# h(y) := y
"DP_GEN_CLASS_PRIOR": 18,
# h(l) := l
"DP_GEN_LF_PRIOR": 19,
# h(l) := l * l
"DP_GEN_LF_PROPENSITY": 20,
# h(y, l) := y * l
"DP_GEN_LF_ACCURACY": 21,
    # h(y, l) := y * l * l
"DP_GEN_LF_CLASS_PROPENSITY": 22,
# l_2 fixes errors made by l_1
#
# h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,
# elif l_1 == -1 * y and l_2 == y: 1,
# else: 0
"DP_GEN_DEP_FIXING": 23,
# l_2 reinforces the output of l_1
#
# h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,
# elif l_1 == y and l_2 == y: 1,
# else: 0
"DP_GEN_DEP_REINFORCING": 24,
# h(l_1, l_2) := if l_1 != 0 and l_2 != 0: -1, else: 0
"DP_GEN_DEP_EXCLUSIVE": 25,
    # h(l_1, l_2) := if l_1 == l_2: 1, else: 0
"DP_GEN_DEP_SIMILAR": 26,
"CORAL_GEN_DEP_SIMILAR": 27,
}
for (key, value) in FACTORS.items():
exec("FUNC_" + key + " = " + str(value))
@jit(nopython=True, cache=True, nogil=True)
def eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap,
var_value):
"""TODO."""
####################
# BINARY VARIABLES #
####################
fac = factor[factor_id]
ftv_start = fac["ftv_offset"]
ftv_end = ftv_start + fac["arity"]
if fac["factorFunction"] == FUNC_IMPLY_NATURAL:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if v == 0:
# Early return if body is not satisfied
return 0
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if head:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_OR:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if v == 1:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_EQUAL:
v = value if (fmap[ftv_start]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start]["vid"]]
for l in range(ftv_start + 1, ftv_end):
w = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != w:
return -1
return 1
elif factor[factor_id]["factorFunction"] == FUNC_AND \
or factor[factor_id]["factorFunction"] == FUNC_ISTRUE:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == 0:
return -1
return 1
elif factor[factor_id]["factorFunction"] == FUNC_LINEAR:
res = 0
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
res += 1
# This does not match Dimmwitted, but matches the eq in the paper
return res
elif factor[factor_id]["factorFunction"] == FUNC_RATIO:
res = 1
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
res += 1
# This does not match Dimmwitted, but matches the eq in the paper
return math.log(res) # TODO: use log2?
elif factor[factor_id]["factorFunction"] == FUNC_LOGICAL:
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
return 1
return 0
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_MLN:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == 0:
# Early return if body is not satisfied
return 1
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head:
return 1
return 0
#########################
# CATEGORICAL VARIABLES #
#########################
elif factor[factor_id]["factorFunction"] == FUNC_AND_CAT \
or factor[factor_id]["factorFunction"] == FUNC_EQUAL_CAT_CONST:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
return 0
return 1
elif factor[factor_id]["factorFunction"] == FUNC_OR_CAT:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == fmap[l]["dense_equal_to"]:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_NATURAL_CAT:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
# Early return if body is not satisfied
return 0
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head == fmap[l]["dense_equal_to"]:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_MLN_CAT:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
# Early return if body is not satisfied
return 1
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head == fmap[l]["dense_equal_to"]:
return 1
return 0
#####################
# DATA PROGRAMMING #
# GENERATIVE MODELS #
#####################
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_CLASS_PRIOR:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
return 1 if y_index == 1 else -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_PRIOR:
l_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
if l_index == 0:
return -1
elif l_index == 1:
return 0
else:
return 1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_PROPENSITY:
l_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
return 0 if l_index == 1 else 1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_ACCURACY:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if l_index == 1:
return 0
# First part of below condition is simpler because
# the index for value -1 is 0 for both variables
elif y_index == l_index or (y_index == 1 and l_index == 2):
return 1
else:
return -1
elif factor[factor_id]["factorFunction"] == \
FUNC_DP_GEN_LF_CLASS_PROPENSITY:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if l_index == 1:
return 0
elif y_index == 1:
return 1
else:
return -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_FIXING:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l1_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
l2_index = value if fmap[ftv_start + 2]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if l1_index == 1:
return -1 if l2_index != 1 else 0
elif l1_index == 0 and l2_index == 2 and y_index == 1:
return 1
elif l1_index == 2 and l2_index == 0 and y_index == 0:
return 1
else:
return 0
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_REINFORCING:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l1_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
l2_index = value if fmap[ftv_start + 2]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if l1_index == 1:
return -1 if l2_index != 1 else 0
elif l1_index == 0 and l2_index == 0 and y_index == 0:
return 1
elif l1_index == 2 and l2_index == 2 and y_index == 1:
return 1
else:
return 0
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_EXCLUSIVE:
l1_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l2_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
return 0 if l1_index == 1 or l2_index == 1 else -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_SIMILAR:
l1_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l2_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
return 1 if l1_index == l2_index else 0
elif factor[factor_id]["factorFunction"] == FUNC_CORAL_GEN_DEP_SIMILAR:
v1 = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
v2 = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
card1 = variable[fmap[ftv_start]["vid"]]["cardinality"]
card2 = variable[fmap[ftv_start + 1]["vid"]]["cardinality"]
assert(card1 == 2 or card1 == 3)
assert(card2 == 2 or card2 == 3)
if (card1 == card2):
return 1 if v1 == v2 else 0
if card2 == 2:
v1, v2 = v2, v1
return 1 if ((v1 == 0) and (v2 == 0)) or ((v1 == 1) and (v2 == 2)) else 0
else:
for i in range(UdfStart.shape[0] - 1):
if (factor[factor_id]["factorFunction"] >= UdfStart[i]) and (factor[factor_id]["factorFunction"] < UdfStart[i + 1]):
# This is a valid UDF
fid = factor[factor_id]["factorFunction"] - UdfStart[i]
if fid < LfCount[i]:
# LF Accuracy
u = udf(UdfMap[UdfCardinalityStart[i] + fid], var_samp, value, var_copy, var_value, fmap, ftv_start)
y = value if fmap[ftv_start + UdfCardinality[UdfCardinalityStart[i] + fid]]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + UdfCardinality[UdfCardinalityStart[i] + fid]]["vid"]]
y = 2 * y - 1
return u * y
else:
# Correlation
pass
# FUNC_UNDEFINED
print("Error: Factor Function", factor[factor_id]["factorFunction"],
"( used in factor", factor_id, ") is not implemented.")
raise NotImplementedError("Factor function is not implemented.")
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/inference.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
import math
import random
from numbskull.inference import draw_sample, eval_factor
@jit(nopython=True, cache=True, nogil=True)
def learnthread(shardID, nshards, step, regularization, reg_param, truncation,
var_copy, weight_copy, weight,
variable, factor, fmap,
vmap, factor_index, Z, fids, var_value, var_value_evid,
weight_value, learn_non_evidence):
"""TODO."""
# Identify start and end variable
nvar = variable.shape[0]
start = (shardID * nvar) // nshards
end = ((shardID + 1) * nvar) // nshards
for var_samp in range(start, end):
sample_and_sgd(var_samp, step, regularization, reg_param, truncation,
var_copy, weight_copy, weight, variable,
factor, fmap, vmap,
factor_index, Z[shardID], fids[shardID], var_value,
var_value_evid, weight_value, learn_non_evidence)
@jit(nopython=True, cache=True, nogil=True)
def get_factor_id_range(variable, vmap, var_samp, val):
"""TODO."""
varval_off = val
if variable[var_samp]["dataType"] == 0:
varval_off = 0
vtf = vmap[variable[var_samp]["vtf_offset"] + varval_off]
start = vtf["factor_index_offset"]
end = start + vtf["factor_index_length"]
return (start, end)
@jit(nopython=True, cache=True, nogil=True)
def sample_and_sgd(var_samp, step, regularization, reg_param, truncation,
var_copy, weight_copy, weight, variable, factor, fmap,
vmap, factor_index, Z, fids, var_value, var_value_evid,
weight_value, learn_non_evidence):
"""TODO."""
# If learn_non_evidence sample twice.
# The method corresponds to expectation-conjugate descent.
if variable[var_samp]["isEvidence"] != 1:
evidence = draw_sample(var_samp, var_copy, weight_copy,
weight, variable, factor,
fmap, vmap, factor_index, Z,
var_value_evid, weight_value)
# If evidence then store the initial value in a tmp variable
# then sample and compute the gradient.
else:
evidence = variable[var_samp]["initialValue"]
var_value_evid[var_copy][var_samp] = evidence
# Sample the variable
proposal = draw_sample(var_samp, var_copy, weight_copy, weight,
variable, factor, fmap, vmap,
factor_index, Z, var_value, weight_value)
var_value[var_copy][var_samp] = proposal
if not learn_non_evidence and variable[var_samp]["isEvidence"] != 1:
return
# Compute the gradient and update the weights
# Iterate over corresponding factors
range_fids = get_factor_id_range(variable, vmap, var_samp, evidence)
# TODO: is it possible to avoid copying around fids
if evidence != proposal:
range_prop = get_factor_id_range(variable, vmap, var_samp, proposal)
s1 = range_fids[1] - range_fids[0]
s2 = range_prop[1] - range_prop[0]
s = s1 + s2
fids[:s1] = factor_index[range_fids[0]:range_fids[1]]
fids[s1:s] = factor_index[range_prop[0]:range_prop[1]]
fids[:s].sort()
else:
s = range_fids[1] - range_fids[0]
fids[:s] = factor_index[range_fids[0]:range_fids[1]]
truncate = random.random() < 1.0 / truncation if regularization == 1 else False
# go over all factor ids, ignoring dupes
last_fid = -1 # numba 0.28 would complain if this were None
for factor_id in fids[:s]:
if factor_id == last_fid:
continue
last_fid = factor_id
weight_id = factor[factor_id]["weightId"]
if weight[weight_id]["isFixed"]:
continue
# Compute Gradient
p0 = eval_factor(factor_id, var_samp,
evidence, var_copy,
variable, factor, fmap,
var_value_evid)
p1 = eval_factor(factor_id, var_samp,
proposal, var_copy,
variable, factor, fmap,
var_value)
gradient = (p1 - p0) * factor[factor_id]["featureValue"]
# Update weight
w = weight_value[weight_copy][weight_id]
if regularization == 2:
w *= (1.0 / (1.0 + reg_param * step))
w -= step * gradient
elif regularization == 1:
# Truncated Gradient
# "Sparse Online Learning via Truncated Gradient"
# Langford et al. 2009
w -= step * gradient
if truncate:
l1delta = reg_param * step * truncation
w = max(0, w - l1delta) if w > 0 else min(0, w + l1delta)
else:
w -= step * gradient
weight_value[weight_copy][weight_id] = w
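# Truncated-gradient sketch for the regularization == 1 branch above: with
# step=0.01, reg_param=0.1, truncation=10, every sweep applies the plain SGD
# update, and roughly every 10th sweep additionally shrinks the weight toward
# zero by l1delta = 0.1 * 0.01 * 10 = 0.01, clamping at zero.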
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/learning.py |
"""TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
import math
# Search "USER" to find sections that need to be implemented.
# USER: insert name of UDF and cardinality here
UDF_CARDINALITY = {
# UDFs for toy example
"TOY_OR": 2,
"TOY_AND": 2,
"TOY_DIRECT": 1,
"TOY_DIRECT_ABSTAIN": 1,
"TOY_DIRECT_SINGLE": 1,
"VG_12": 2,
"VG_POS_SIZE_NUM": 3,
"BT_DAUBE": 3,
"BT_EDGE": 3,
"BT_LESION": 2,
"BT_SHAPE": 3,
"BT_SOBEL": 2,
"BT_GLCM": 2,
"BT_FIRST": 2,
"AN_PB": 2,
"AN_DIST": 3,
"AN_COLOR_TEMP": 2,
"HEART_POSITIVE": 1,
"HEART_NEGATIVE": 1,
"HEART_SINGLE_POSITIVE": 1,
"HEART_SINGLE_NEGATIVE": 1
}
# Automatically select a unique index for each UDF (no modification needed)
UDF_INDEX = {}
UDF_NAME = {}
udf_index = 0
for udf in UDF_CARDINALITY:
UDF_INDEX[udf] = udf_index
UDF_NAME[udf_index] = udf
udf_index += 1
# Create a constant for the index of each UDF
for (key, value) in UDF_INDEX.items():
exec(key + " = " + str(value))
# USER: Specify the list of UDFs that are used in a single model.
UDF_USAGE = {
#"TOY": [TOY_AND, TOY_DIRECT, TOY_DIRECT],
"TOY": [TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC2_5": [TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC3_5": [TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC4_5": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT],
"SYNTHETIC5_5": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_OR],
"SYNTHETIC2_6": [TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC3_6": [TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC4_6": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC5_6": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT],
"SYNTHETIC2_7": [TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC3_7": [TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC4_7": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC5_7": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC2_8": [TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC3_8": [TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC4_8": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC5_8": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC2_10": [TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC3_10": [TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC4_10": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC5_10": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC2_15": [TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC3_15": [TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC4_15": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"SYNTHETIC5_15": [TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_OR, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"DDSM": [TOY_DIRECT_ABSTAIN, TOY_DIRECT_ABSTAIN, TOY_DIRECT_ABSTAIN, TOY_DIRECT_ABSTAIN, TOY_DIRECT_SINGLE, TOY_DIRECT_ABSTAIN],
"SYNTH": [TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"VG": [VG_12, VG_12, VG_POS_SIZE_NUM, VG_POS_SIZE_NUM, VG_POS_SIZE_NUM],
"VGSYNTH": [TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"BT": [BT_DAUBE, BT_EDGE, BT_LESION, BT_SHAPE, BT_SOBEL, BT_GLCM, BT_FIRST],
"BTSYNTH": [TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT, TOY_DIRECT],
"AN": [AN_PB, AN_DIST, AN_COLOR_TEMP, AN_COLOR_TEMP],
#"MRI": [HEART_POSITIVE, HEART_POSITIVE, HEART_POSITIVE, HEART_POSITIVE, HEART_NEGATIVE]
"MRI": [HEART_NEGATIVE, HEART_POSITIVE, HEART_SINGLE_NEGATIVE, HEART_POSITIVE, HEART_NEGATIVE]
}
# USER: There are not modifications necessary here. However, the value
# generated in UDF_OFFSET needs to be given to CoralModel as L_offset.
UDF_OFFSET = {}
UDF_OFFSET_END = {}
UdfStart = np.empty(len(UDF_USAGE) + 1, np.int64) # UdfStart[i] = first index corresponding to application [i]
LfCount = np.empty(len(UDF_USAGE), np.int64) # LfCount[i] = number of LFs in application [i]
UdfCardinalityStart = np.empty(len(UDF_USAGE) + 1, np.int64)
UdfMap = np.empty(sum(len(value) for (key, value) in UDF_USAGE.items()), np.int64)
UdfCardinality = np.empty(sum(len(value) for (key, value) in UDF_USAGE.items()), np.int64)
index = 0
ci = 0
udf_offset = 1000
for (key, value) in UDF_USAGE.items():
UDF_OFFSET[key] = udf_offset
UdfStart[index] = udf_offset
LfCount[index] = len(UDF_USAGE[key])
UdfCardinalityStart[index] = ci
for i in range(LfCount[index]):
UdfMap[ci] = UDF_USAGE[key][i]
UdfCardinality[ci] = UDF_CARDINALITY[UDF_NAME[UDF_USAGE[key][i]]]
ci += 1
    udf_offset += len(UDF_USAGE[key])  # LF accuracy
    udf_offset += len(UDF_USAGE[key]) * (len(UDF_USAGE[key]) - 1) // 2  # correlations; // keeps offsets integral on Python 3
index += 1
UdfStart[index] = udf_offset
UDF_OFFSET_END[key] = udf_offset
UdfCardinalityStart[index] = ci
exec(key + "_UDF_OFFSET = " + str(UDF_OFFSET[key]))
# USER: Implement the UDF here
# The following code can be used to obtain the correct value of a variable:
# vi = value if (fmap[ftv_start + i]["vid"] == var_samp) \
# else var_value[var_copy][fmap[ftv_start + i]["vid"]]
@jit(nopython=True, cache=True, nogil=True)
def udf(udf_index, var_samp, value, var_copy, var_value, fmap, ftv_start):
if udf_index == TOY_OR:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 1 or v2 == 1:
return 1
return -1
elif udf_index == TOY_AND:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 1 and v2 == 1:
return 1
return -1
elif udf_index == TOY_DIRECT:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
return 2 * v1 - 1
#DDSM Labeling Functions
elif udf_index == TOY_DIRECT_ABSTAIN:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
return v1-1
elif udf_index == TOY_DIRECT_SINGLE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
return v1-1
#Visual Genome Labeling Functions
elif udf_index == VG_12:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 1:
if v2 == 1:
return 1
else:
return -1
return 0
elif udf_index == VG_POS_SIZE_NUM:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
v3 = value if (fmap[ftv_start + 2]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if v1 == 1:
if v2 == 1:
if v3 == 2:
return 1
else:
if v3 == 0:
return -1
return 0
#Bone Tumor Labeling Functions
elif udf_index == BT_DAUBE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
v3 = value if (fmap[ftv_start + 2]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if v1 == 1:
if v2 == 1:
return 1
else:
if v3 == 1:
return -1
else:
return 1
else:
return -1
return 0
elif udf_index == BT_EDGE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
v3 = value if (fmap[ftv_start + 2]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if v1 == 1:
return -1
else:
if v2 == 1:
return -1
else:
if v3 == 1:
return -1
else:
return 1
return 0
elif udf_index == BT_LESION:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
return -1
if v1 == 1:
if v2 == 2:
return 1
return 0
elif udf_index == BT_SHAPE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
v3 = value if (fmap[ftv_start + 2]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if v1 == 2:
return -1
if v1 == 1:
if v2 == 2:
if v3 == 2:
return -1
if v3 == 1:
return 1
if v2 == 1:
return 1
return 0
elif udf_index == BT_SOBEL:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
return -1
if v1 == 1:
if v2 == 2:
return 1
if v2 == 1:
return -1
return 0
elif udf_index == BT_GLCM:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
if v2 == 2:
return -1
if v2 == 1:
return 1
if v1 == 1:
return -1
return 0
elif udf_index == BT_FIRST:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
return -1
if v1 == 1:
if v2 == 2:
return 1
if v2 == 1:
return -1
return 0
#ActivityNet Labeling Functions
elif udf_index == AN_PB:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 1:
if v2 == 1:
return 1
else:
return -1
return 0
elif udf_index == AN_DIST:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
v3 = value if (fmap[ftv_start + 2]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if v1 == 1:
if v2 == 1:
if v3 == 1:
return 1
else:
return -1
return 0
elif udf_index == AN_COLOR_TEMP:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 1:
if v2 == 1:
return 1
else:
return -1
return 0
#Heart MRI Functions
elif udf_index == HEART_POSITIVE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
return 1
if v1 == 1:
return -1
return 0
elif udf_index == HEART_NEGATIVE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
return -1
if v1 == 1:
return 1
return 0
elif udf_index == HEART_SINGLE_POSITIVE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 2:
return 1
return 0
elif udf_index == HEART_SINGLE_NEGATIVE:
v1 = value if (fmap[ftv_start + 0]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 0]["vid"]]
v2 = value if (fmap[ftv_start + 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if v1 == 1:
return 1
return 0
else:
print("Error: UDF", udf_index,
"is not implemented.")
raise NotImplementedError("UDF is not implemented.")
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/udf.py |
"""TODO."""
from .numbskull import main
main()
| ukb-cardiac-mri-master | ukb/weak_supervision/numbskull/numbskull/__main__.py |
import logging
import re
from builtins import chr, range, str
from difflib import SequenceMatcher
from fonduer.candidates import MentionNgrams
from fonduer.candidates.models.implicit_span_mention import TemporaryImplicitSpanMention
logger = logging.getLogger(__name__)
def expand_part_range(text):
"""
Given a string, generates strings that are potentially implied by
the original text. Two main operations are performed:
1. Expanding ranges (X to Y; X ~ Y; X -- Y)
2. Expanding suffixes (123X/Y/Z; 123X, Y, Z)
Also yields the original input string.
To get the correct output from complex strings, this function should be fed
many Ngrams from a particular sentence.
"""
# Regex Patterns compile only once per function call.
# This range pattern will find text that "looks like" a range.
    range_pattern = re.compile(
        r"^(?P<start>[\w\/]+)(?:\s*(\.{3,}|\~|\-+|to|thru|through"
        r"|\u2011+|\u2012+|\u2013+|\u2014+|\u2212+)\s*)(?P<end>[\w\/]+)$",
        re.IGNORECASE | re.UNICODE,
    )
suffix_pattern = re.compile(r"(?P<spacer>(?:,|\/)\s*)(?P<suffix>[\w\-]+)")
base_pattern = re.compile(
r"(?P<base>[\w\-]+)(?P<spacer>(?:,|\/)\s*)(?P<suffix>[\w\-]+)?"
)
logger.debug("Text: " + text)
expanded_parts = set()
final_set = set()
# Step 1: Search and expand ranges
m = re.search(range_pattern, text)
if m:
start = m.group("start")
end = m.group("end")
start_diff = ""
end_diff = ""
logger.debug(f" Start: {start} \t End: {end}")
# Use difflib to find difference. We are interested in 'replace' only
seqm = SequenceMatcher(None, start, end).get_opcodes()
for opcode, a0, a1, b0, b1 in seqm:
if opcode == "equal":
continue
elif opcode == "insert":
break
elif opcode == "delete":
break
elif opcode == "replace":
# NOTE: Potential bug if there is more than 1 replace
start_diff = start[a0:a1]
end_diff = end[b0:b1]
else:
logger.error("Unexpected opcode")
raise RuntimeError("[ERROR] unexpected opcode")
logger.debug(f" start_diff: {start_diff} \t end_diff: {end_diff}")
# First, check for number range
if atoi(start_diff) and atoi(end_diff):
logger.debug(f" Enumerate {atoi(start_diff)} to {atoi(end_diff)}")
# generate a list of the numbers plugged in
for number in range(atoi(start_diff), atoi(end_diff) + 1):
new_part = start.replace(start_diff, str(number))
# Produce the strings with the enumerated ranges
expanded_parts.add(new_part)
# Second, check for single-letter enumeration
if len(start_diff) == 1 and len(end_diff) == 1:
if start_diff.isalpha() and end_diff.isalpha():
logger.debug(f" Enumerate {start_diff} to {end_diff}")
letter_range = char_range(start_diff, end_diff)
for letter in letter_range:
new_part = start.replace(start_diff, letter)
# Produce the strings with the enumerated ranges
expanded_parts.add(new_part)
# If we cannot identify a clear number or letter range, or if there are
# multiple ranges being expressed, just ignore it.
if len(expanded_parts) == 0:
expanded_parts.add(text)
else:
expanded_parts.add(text)
        # Special case is when there is a single slash (e.g. BC337-16/BC338-16)
# and we want to output both halves of the slash, assuming that both
# halves are the same length
if text.count("/") == 1:
split = text.split("/")
if len(split[0]) == len(split[1]):
expanded_parts.add(split[0])
expanded_parts.add(split[1])
logger.debug(" Inferred Text: \n " + str(sorted(expanded_parts)))
# Step 2: Expand suffixes for each of the inferred sentences
# NOTE: this only does the simple case of replacing same-length suffixes.
# we do not handle cases like "BC546A/B/XYZ/QR"
for part in expanded_parts:
first_match = re.search(base_pattern, part)
        if first_match:
            base = first_match.group("base")
final_set.add(base) # add the base (multiple times, but set handles that)
if first_match.group("suffix"):
all_suffix_lengths = set()
# This is a bit inefficient but this first pass just is here
# to make sure that the suffixes are the same length
# first_suffix = first_match.group("suffix")
# if part.startswith('BC547'):
# import pdb; pdb.set_trace()
for m in re.finditer(suffix_pattern, part):
suffix = m.group("suffix")
suffix_len = len(suffix)
all_suffix_lengths.add(suffix_len)
if len(all_suffix_lengths) == 1:
for m in re.finditer(suffix_pattern, part):
suffix = m.group("suffix")
suffix_len = len(suffix)
old_suffix = base[-suffix_len:]
if (suffix.isalpha() and old_suffix.isalpha()) or (
suffix.isdigit() and old_suffix.isdigit()
):
trimmed = base[:-suffix_len]
final_set.add(trimmed + suffix)
else:
if part and (not part.isspace()):
final_set.add(part) # no base was found with suffixes to expand
logger.debug(" Final Set: " + str(sorted(final_set)))
# Also return the original input string
final_set.add(text)
for part in final_set:
yield part
# NOTE: We make a few assumptions (e.g. suffixes must be same length), but
# one important unstated assumption is that if there is a single suffix,
# (e.g. BC546A/B), the single suffix will be swapped in no matter what.
# In this example, it works. But if we had "ABCD/EFG" we would get "ABCD,AEFG"
# Check out UtilsTests.py to see more of our assumptions capture as test
# cases.
def atoi(num_str):
"""
Helper function which converts a string to an integer, or returns None.
"""
try:
return int(num_str)
except Exception:
pass
return None
def char_range(a, b):
"""
Generates the characters from a to b inclusive.
"""
for c in range(ord(a), ord(b) + 1):
yield chr(c)
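# Expansion sketch for the suffix case handled above:
#
#     >>> sorted(expand_part_range("BC546A/B"))
#     ['BC546A', 'BC546A/B', 'BC546B']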
class MentionNgramsPart(MentionNgrams):
def __init__(
self, parts_by_doc=None, n_max=3, expand=True, split_tokens=["-", "/"]
):
"""MentionNgrams specifically for transistor parts.
:param parts_by_doc: a dictionary d where d[document_name.upper()] =
[partA, partB, ...]
"""
super(MentionNgrams, self).__init__(n_max=n_max, split_tokens=split_tokens)
self.parts_by_doc = parts_by_doc
self.expander = expand_part_range if expand else (lambda x: [x])
def apply(self, doc):
for ts in MentionNgrams.apply(self, doc):
enumerated_parts = [
part.upper() for part in expand_part_range(ts.get_span())
]
parts = set(enumerated_parts)
if self.parts_by_doc:
                possible_parts = self.parts_by_doc[ts.sentence.document.name.upper()]
for base_part in enumerated_parts:
for part in possible_parts:
if part.startswith(base_part) and len(base_part) >= 4:
parts.add(part)
for i, part in enumerate(parts):
if " " in part:
continue # it won't pass the part_matcher
if part == ts.get_span():
yield ts
else:
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="part_expander",
position=i,
text=part,
words=[part],
lemmas=[part],
pos_tags=[ts.get_attrib_tokens("pos_tags")[0]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[0]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[0]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[0]],
page=[min(ts.get_attrib_tokens("page"))]
if ts.sentence.is_visual()
else [None],
top=[min(ts.get_attrib_tokens("top"))]
if ts.sentence.is_visual()
else [None],
left=[max(ts.get_attrib_tokens("left"))]
if ts.sentence.is_visual()
else [None],
bottom=[min(ts.get_attrib_tokens("bottom"))]
if ts.sentence.is_visual()
else [None],
right=[max(ts.get_attrib_tokens("right"))]
if ts.sentence.is_visual()
else [None],
meta=None,
)
class MentionNgramsTemp(MentionNgrams):
def __init__(self, n_max=2, split_tokens=["-", "/"]):
super(MentionNgrams, self).__init__(n_max=n_max, split_tokens=split_tokens)
def apply(self, doc):
for ts in MentionNgrams.apply(self, doc):
m = re.match(
r"^([\+\-\u2010\u2011\u2012\u2013\u2014\u2212\uf02d])?(\s*)(\d+)$",
ts.get_span(),
re.U,
)
if m:
if m.group(1) is None:
temp = ""
elif m.group(1) == "+":
if m.group(2) != "":
# If bigram '+ 150' is seen, accept the unigram '150',
# not both
continue
temp = ""
else: # m.group(1) is a type of negative sign
# A bigram '- 150' is different from unigram '150', so we
# keep the implicit '-150'
temp = "-"
temp += m.group(3)
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="temp_expander",
position=0,
text=temp,
words=[temp],
lemmas=[temp],
pos_tags=[ts.get_attrib_tokens("pos_tags")[-1]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[-1]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[-1]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[-1]],
page=[ts.get_attrib_tokens("page")[-1]]
if ts.sentence.is_visual()
else [None],
top=[ts.get_attrib_tokens("top")[-1]]
if ts.sentence.is_visual()
else [None],
left=[ts.get_attrib_tokens("left")[-1]]
if ts.sentence.is_visual()
else [None],
bottom=[ts.get_attrib_tokens("bottom")[-1]]
if ts.sentence.is_visual()
else [None],
right=[ts.get_attrib_tokens("right")[-1]]
if ts.sentence.is_visual()
else [None],
meta=None,
)
else:
yield ts
class MentionNgramsVolt(MentionNgrams):
def __init__(self, n_max=1, split_tokens=["-", "/"]):
super(MentionNgrams, self).__init__(n_max=n_max, split_tokens=split_tokens)
def apply(self, doc):
for ts in MentionNgrams.apply(self, doc):
if ts.get_span().endswith(".0"):
value = ts.get_span()[:-2]
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="volt_expander",
position=0,
text=value,
words=[value],
lemmas=[value],
pos_tags=[ts.get_attrib_tokens("pos_tags")[-1]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[-1]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[-1]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[-1]],
page=[ts.get_attrib_tokens("page")[-1]]
if ts.sentence.is_visual()
else [None],
top=[ts.get_attrib_tokens("top")[-1]]
if ts.sentence.is_visual()
else [None],
left=[ts.get_attrib_tokens("left")[-1]]
if ts.sentence.is_visual()
else [None],
bottom=[ts.get_attrib_tokens("bottom")[-1]]
if ts.sentence.is_visual()
else [None],
right=[ts.get_attrib_tokens("right")[-1]]
if ts.sentence.is_visual()
else [None],
meta=None,
)
else:
yield ts
| fonduer-tutorials-master | hardware/hardware_spaces.py |
import codecs
import csv
from builtins import range
from fonduer.candidates.models import Candidate
from fonduer.learning.utils import confusion_matrix
from fonduer.supervision.models import GoldLabel, GoldLabelKey
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
from tqdm import tqdm_notebook as tqdm
# Define labels
ABSTAIN = -1
FALSE = 0
TRUE = 1
def get_gold_dict(
filename, doc_on=True, part_on=True, val_on=True, attribute=None, docs=None
):
with codecs.open(filename, encoding="utf-8") as csvfile:
gold_reader = csv.reader(csvfile)
gold_dict = set()
for row in gold_reader:
(doc, part, attr, val) = row
if docs is None or doc.upper() in docs:
if attribute and attr != attribute:
continue
                # Skip rows with an empty value; the original comparison of
                # the CSV string against the int constant TRUE never matched.
                if not val:
                    continue
                else:
key = []
if doc_on:
key.append(doc.upper())
if part_on:
key.append(part.upper())
if val_on:
key.append(val.upper())
gold_dict.add(tuple(key))
return gold_dict
gold_dict = get_gold_dict(
"data/hardware_tutorial_gold.csv", attribute="stg_temp_max"
)
def gold(c: Candidate) -> int:
doc = (c[0].context.sentence.document.name).upper()
part = (c[0].context.get_span()).upper()
val = ("".join(c[1].context.get_span().split())).upper()
if (doc, part, val) in gold_dict:
return TRUE
else:
return FALSE
def entity_level_f1(
candidates, gold_file, attribute=None, corpus=None, parts_by_doc=None
):
"""Checks entity-level recall of candidates compared to gold.
Turns a CandidateSet into a normal set of entity-level tuples
(doc, part, [attribute_value])
then compares this to the entity-level tuples found in the gold.
Example Usage:
from hardware_utils import entity_level_total_recall
candidates = # CandidateSet of all candidates you want to consider
gold_file = 'tutorials/tables/data/hardware/hardware_gold.csv'
entity_level_total_recall(candidates, gold_file, 'stg_temp_min')
"""
docs = [(doc.name).upper() for doc in corpus] if corpus else None
val_on = attribute is not None
gold_set = get_gold_dict(
gold_file,
docs=docs,
doc_on=True,
part_on=True,
val_on=val_on,
attribute=attribute,
)
if len(gold_set) == 0:
print(f"Gold File: {gold_file}\n Attribute: {attribute}")
print("Gold set is empty.")
return
# Turn CandidateSet into set of tuples
print("Preparing candidates...")
entities = set()
for i, c in enumerate(tqdm(candidates)):
part = c[0].context.get_span()
doc = c[0].context.sentence.document.name.upper()
if attribute:
val = c[1].context.get_span()
for p in get_implied_parts(part, doc, parts_by_doc):
if attribute:
entities.add((doc, p, val))
else:
entities.add((doc, p))
(TP_set, FP_set, FN_set) = confusion_matrix(entities, gold_set)
TP = len(TP_set)
FP = len(FP_set)
FN = len(FN_set)
prec = TP / (TP + FP) if TP + FP > 0 else float("nan")
rec = TP / (TP + FN) if TP + FN > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
print("========================================")
print("Scoring on Entity-Level Gold Data")
print("========================================")
print(f"Corpus Precision {prec:.3}")
print(f"Corpus Recall {rec:.3}")
print(f"Corpus F1 {f1:.3}")
print("----------------------------------------")
print(f"TP: {TP} | FP: {FP} | FN: {FN}")
print("========================================\n")
return [sorted(list(x)) for x in [TP_set, FP_set, FN_set]]
def get_implied_parts(part, doc, parts_by_doc):
yield part
if parts_by_doc:
for p in parts_by_doc[doc]:
if p.startswith(part) and len(part) >= 4:
yield p
def entity_to_candidates(entity, candidate_subset):
matches = []
for c in candidate_subset:
c_entity = tuple(
[c[0].context.sentence.document.name.upper()]
+ [c[i].context.get_span().upper() for i in range(len(c))]
)
c_entity = tuple([str(x) for x in c_entity])
if c_entity == entity:
matches.append(c)
return matches
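# Error-analysis sketch (test_cands and docs are assumptions taken from the
# accompanying notebook; assumes at least one false negative):
#
#     (TP, FP, FN) = entity_level_f1(
#         test_cands, "data/hardware_tutorial_gold.csv", "stg_temp_max", docs)
#     misses = entity_to_candidates(FN[0], test_cands)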
| fonduer-tutorials-master | hardware/hardware_utils.py |
import codecs
import csv
from builtins import range
from fonduer.candidates.models import Candidate
from fonduer.learning.utils import confusion_matrix
from fonduer.supervision.models import GoldLabel, GoldLabelKey
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
from tqdm import tqdm_notebook as tqdm
# Define labels
ABSTAIN = -1
FALSE = 0
TRUE = 1
def get_gold_dict(
filename, doc_on=True, part_on=True, val_on=True, attribute=None, docs=None
):
with codecs.open(filename, encoding="utf-8") as csvfile:
gold_reader = csv.reader(csvfile)
gold_dict = set()
for row in gold_reader:
(doc, part, attr, val) = row
if docs is None or doc.upper() in docs:
if attribute and attr != attribute:
continue
                # Skip rows with an empty value; the original comparison of
                # the CSV string against the int constant TRUE never matched.
                if not val:
                    continue
                else:
key = []
if doc_on:
key.append(doc.upper())
if part_on:
key.append(part.upper())
if val_on:
key.append(val.upper())
gold_dict.add(tuple(key))
return gold_dict
gold_dict = get_gold_dict(
"data/hardware_tutorial_gold.csv", attribute="stg_temp_max"
)
def gold(c: Candidate) -> int:
doc = (c[0].context.sentence.document.name).upper()
part = (c[0].context.get_span()).upper()
val = ("".join(c[1].context.get_span().split())).upper()
if (doc, part, val) in gold_dict:
return TRUE
else:
return FALSE
def entity_level_f1(
candidates, gold_file, attribute=None, corpus=None, parts_by_doc=None
):
"""Checks entity-level recall of candidates compared to gold.
Turns a CandidateSet into a normal set of entity-level tuples
(doc, part, [attribute_value])
then compares this to the entity-level tuples found in the gold.
Example Usage:
from hardware_utils import entity_level_total_recall
candidates = # CandidateSet of all candidates you want to consider
gold_file = 'tutorials/tables/data/hardware/hardware_gold.csv'
entity_level_total_recall(candidates, gold_file, 'stg_temp_min')
"""
docs = [(doc.name).upper() for doc in corpus] if corpus else None
val_on = attribute is not None
gold_set = get_gold_dict(
gold_file,
docs=docs,
doc_on=True,
part_on=True,
val_on=val_on,
attribute=attribute,
)
if len(gold_set) == 0:
print(f"Gold File: {gold_file}\n Attribute: {attribute}")
print("Gold set is empty.")
return
# Turn CandidateSet into set of tuples
print("Preparing candidates...")
entities = set()
for i, c in enumerate(tqdm(candidates)):
part = c[0].context.get_span()
doc = c[0].context.sentence.document.name.upper()
if attribute:
val = c[1].context.get_span()
for p in get_implied_parts(part, doc, parts_by_doc):
if attribute:
entities.add((doc, p, val))
else:
entities.add((doc, p))
(TP_set, FP_set, FN_set) = confusion_matrix(entities, gold_set)
TP = len(TP_set)
FP = len(FP_set)
FN = len(FN_set)
prec = TP / (TP + FP) if TP + FP > 0 else float("nan")
rec = TP / (TP + FN) if TP + FN > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
print("========================================")
print("Scoring on Entity-Level Gold Data")
print("========================================")
print(f"Corpus Precision {prec:.3}")
print(f"Corpus Recall {rec:.3}")
print(f"Corpus F1 {f1:.3}")
print("----------------------------------------")
print(f"TP: {TP} | FP: {FP} | FN: {FN}")
print("========================================\n")
return [sorted(list(x)) for x in [TP_set, FP_set, FN_set]]
def get_implied_parts(part, doc, parts_by_doc):
yield part
if parts_by_doc:
for p in parts_by_doc[doc]:
if p.startswith(part) and len(part) >= 4:
yield p
def entity_to_candidates(entity, candidate_subset):
matches = []
for c in candidate_subset:
c_entity = tuple(
[c[0].context.sentence.document.name.upper()]
+ [c[i].context.get_span().upper() for i in range(len(c))]
)
c_entity = tuple([str(x) for x in c_entity])
if c_entity == entity:
matches.append(c)
return matches
| fonduer-tutorials-master | intro/hardware_utils.py |
import codecs
import csv
from builtins import range
from fonduer.candidates.models import Candidate
from fonduer.parser.models import Document, Sentence
from fonduer.learning.utils import confusion_matrix
from fonduer.supervision.models import GoldLabel, GoldLabelKey
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from tqdm import tqdm
else:
from tqdm import tqdm_notebook as tqdm
# Define labels
ABSTAIN = -1
FALSE = 0
TRUE = 1
def get_gold_dict(
filename, doc_on=True, presidentname_on=True, placeofbirth_on=True, docs=None
):
with codecs.open(filename) as csvfile:
gold_reader = csv.reader(csvfile, delimiter=";")
# skip header row
next(gold_reader)
gold_dict = set()
for row in gold_reader:
(doc, presidentname, placeofbirth) = row
docname_without_spaces = doc.replace(" ", "_")
if docs is None or docname_without_spaces.upper() in docs:
if not (doc and placeofbirth and presidentname):
continue
else:
key = []
if doc_on:
key.append(docname_without_spaces.upper())
if presidentname_on:
key.append(presidentname.upper())
if placeofbirth_on:
key.append(placeofbirth.upper())
gold_dict.add(tuple(key))
return gold_dict
gold_dict = get_gold_dict("data/president_tutorial_gold.csv")
def gold(c: Candidate) -> int:
doc = (c[0].context.sentence.document.name).upper()
president_name = (c[0].context.get_span()).upper()
birthplace = (c[1].context.get_span()).upper()
cand_tuple = (doc, president_name, birthplace)
# gold_matches = [x for x in gold_dict if x[0] == doc]
if cand_tuple in gold_dict:
return TRUE
else:
return FALSE
def entity_level_f1(candidates, gold_file, corpus=None):
"""Checks entity-level recall of candidates compared to gold.
Turns a CandidateSet into a normal set of entity-level tuples
(doc, president_name, birthplace)
then compares this to the entity-level tuples found in the gold.
Example Usage:
from hardware_utils import entity_level_total_recall
candidates = # CandidateSet of all candidates you want to consider
gold_file = 'tutorials/tables/data/hardware/hardware_gold.csv'
entity_level_total_recall(candidates, gold_file, 'stg_temp_min')
"""
docs = [(doc.name).upper() for doc in corpus] if corpus else None
gold_set = get_gold_dict(gold_file, docs=docs)
if len(gold_set) == 0:
print("Gold File: {gold_file}")
print("Gold set is empty.")
return
# Turn CandidateSet into set of tuples
print("Preparing candidates...")
entities = set()
for i, c in enumerate(tqdm(candidates)):
doc = c[0].context.sentence.document.name.upper()
president_name = c[0].context.get_span().upper()
birthplace = c[1].context.get_span().upper()
entities.add((doc, president_name, birthplace))
(TP_set, FP_set, FN_set) = confusion_matrix(entities, gold_set)
TP = len(TP_set)
FP = len(FP_set)
FN = len(FN_set)
prec = TP / (TP + FP) if TP + FP > 0 else float("nan")
rec = TP / (TP + FN) if TP + FN > 0 else float("nan")
f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")
print("========================================")
print("Scoring on Entity-Level Gold Data")
print("========================================")
print(f"Corpus Precision {prec:.3}")
print(f"Corpus Recall {rec:.3}")
print(f"Corpus F1 {f1:.3}")
print("----------------------------------------")
print(f"TP: {TP} | FP: {FP} | FN: {FN}")
print("========================================\n")
return [sorted(list(x)) for x in [TP_set, FP_set, FN_set]]
| fonduer-tutorials-master | wiki/wiki_table_utils.py |
import datasets
import tensorflow as tf
import pandas as pd
from pathlib import Path
import json
from tqdm.auto import tqdm
import os
DATA_DIR = os.environ.get("AMA_DATA", "/home/data")
# Download P3 github data from HF website
'''
git lfs install
git clone https://huggingface.co/datasets/bigscience/P3
'''
import sys
sys.path.append(f"{DATA_DIR}/P3")
# From P3 github
from tasks_splits_and_features import DATA_SPLITS_SIZES
SPLITS = ["train", "test", "validation"]
_FEAT_MAPPING_FUNCTIONS = {
"answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
"inputs": lambda x: x.tolist(),
"inputs_pretokenized": lambda x: x.decode("utf-8"),
"targets": lambda x: x.tolist(),
"targets_pretokenized": lambda x: x.decode("utf-8"),
"idx": lambda x: x.tolist(),
"weight": lambda x: float(x),
"is_correct": lambda x: x,
}
def _feature_config(shape, dtype):
if dtype in ("int32", "bool"):
# int32 and bool are stored as int64 in the tf.train.Example protobuf.
dtype = "int64"
if shape and shape[0] is None:
return tf.io.FixedLenSequenceFeature(
shape[1:], dtype, allow_missing=True
)
return tf.io.FixedLenFeature(shape, dtype)
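# Illustrative mapping performed by _feature_config (comment added for
# clarity, not original code). int32/bool features are stored as int64 in
# the protobuf, and a leading None dimension marks a variable-length
# sequence:
#   _feature_config(shape=[None], dtype="int32")
#       -> tf.io.FixedLenSequenceFeature([], "int64", allow_missing=True)
#   _feature_config(shape=[], dtype="string")
#       -> tf.io.FixedLenFeature([], "string")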
@tf.autograph.experimental.do_not_convert
def extract_dataset(data_path, subfolder, sizes, processes=10, splits=SPLITS):
datasets = {}
for split in splits:
if not (data_path / subfolder / f"info.{split}.json").exists():
continue
features_dict = json.load(open(data_path / subfolder / f"info.{split}.json"))
if "features" not in features_dict:
features_dict = json.load(open(data_path / subfolder / f"info.train.json"))
if "features" not in features_dict:
continue
features_dict = features_dict["features"]
tfrecord = str(data_path / subfolder / f"{split}.tfrecord-00000-of-00001")
feature_description = {
feat: _feature_config(**desc) for feat, desc in features_dict.items()
}
ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord])) # TODO -> handle multiple shards
ds = ds.map(
lambda pb: tf.io.parse_single_example(pb, feature_description),
num_parallel_calls=processes
)
# Cast features back to the types from the info JSON since some features
# must be cast for storage (e.g., int32 is stored as int64).
ds = ds.map(
lambda x: {k: tf.cast(v, features_dict[k]["dtype"]) for k, v in x.items()},
num_parallel_calls=processes
)
res = []
for ex in tqdm(ds.as_numpy_iterator(), total=sizes.get(split, 10000)):
ex_dict = {}
for feat_name, feat_value in ex.items():
ex_dict[feat_name] = _FEAT_MAPPING_FUNCTIONS[feat_name](feat_value)
res.append(ex_dict)
if len(res) > 0:
df = pd.DataFrame.from_records(res)
datasets[split] = df
return datasets
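# Usage sketch (hypothetical task name; not part of the original script):
#
#   sizes = DATA_SPLITS_SIZES["super_glue_cb_GPT_3_style"]
#   dfs = extract_dataset(Path(f"{DATA_DIR}/P3/data"),
#                         "super_glue_cb_GPT_3_style", sizes)
#   # dfs maps split name -> pandas DataFrame with one row per example
#   # and the decoded feature columns (inputs, targets, ...).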
data_path = Path(f"{DATA_DIR}/P3/data")
output_data_path = Path(f"{DATA_DIR}/P3/data_feather")
for subfolder in data_path.iterdir():
print(subfolder)
subfolder = subfolder.name
out_path = output_data_path / subfolder
out_path.mkdir(parents=True, exist_ok=True)
splits_to_pass = []
for split in SPLITS:
if not (out_path / f"{split}.feather").exists():
splits_to_pass.append(split)
datasets = extract_dataset(data_path, subfolder, sizes=DATA_SPLITS_SIZES[str(subfolder)], splits=splits_to_pass)
for split, df in datasets.items():
df.to_feather(out_path / f"{split}.feather") | ama_prompting-main | download_p3.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
import pandas as pd
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
synonym = InputOutputPrompt(
input_formatter=lambda x: f"{x['passage']}",
output_formatter=lambda x: f"- {x['answer']}",
required_keys=["passage", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Give synonyms of the word in the sentence.\n\n"
)
synonym_examples = [
pd.DataFrame([
{
"passage": "In \"She heard the sound of voices in the hall.\", synonyms for the word \"sound\" are:",
"answer": "noise",
},
{
"passage": "In \"Enter the secret code.\", synonyms for the word \"code\" are:",
"answer": "password",
},
{
"passage": "In \"She acted in a play on Broadway\", synonyms for the word \"play\" are:",
"answer": "show",
},
]),
pd.DataFrame([
{
"passage": "In \"She rode around the park on her cycle.\", synonyms for the word \"cycle\" are:",
"answer": "bicycle",
},
{
"passage": "In \"Don't keep everything bottled up.\", synonyms for the word \"bottled\" are:",
"answer": "trapped inside",
},
{
"passage": "In \"The present is like no other time.\", synonyms for the word \"present\" are:",
"answer": "current moment",
},
]),
pd.DataFrame([
{
"passage": "In \"The movie was awful.\", synonyms for the word \"aweful\" are:",
"answer": "bad and terrible",
},
{
"passage": "In \"She is so beautiful.\", synonyms for the word \"beautiful\" are:",
"answer": "pretty and gorgeous",
},
{
"passage": "In \"It was quite cool out so she wore a jacket\", synonyms for the word \"cool\" are:",
"answer": "cold and chilly",
},
]),
pd.DataFrame([
{
"passage": "In \"There are so many flies near the food.\", synonyms for the word \"flies\" are:",
"answer": "bugs",
},
{
"passage": "In \"Eat your noodles with a fork.\", synonyms for the word \"fork\" are:",
"answer": "utensils",
},
{
"passage": "In \"She and her husband went on a trip.\", synonyms for the word \"trip\" are:",
"answer": "vacation",
},
]),
pd.DataFrame([
{
"passage": "In \"It was definitely a cry for help.\", synonyms for the word \"cry\" are:",
"answer": "call",
},
{
"passage": "In \"I watch all my students as they take their exams.\", synonyms for the word \"watch\" are:",
"answer": "look at",
},
{
"passage": "In \"The beginning of the book was fine, but the end was terrible.\", synonyms for the word \"beginning\" are:",
"answer": "start",
},
])
]
description = InputOutputPrompt(
input_formatter=lambda x: f"Choices\n{x['choices']}\nFill the [MASK] with the correct \"Choice\": {x['sentence']}",
output_formatter=lambda x: f"[MASK] is \"Choice\": {x['answer']}\n",
required_keys=["choices", "sentence", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Select the correct choice for the blank.\n\n"
)
description_examples = [
pd.DataFrame([
{
"choices": "1: noise\n2. in good condition",
"sentence": "She heard the [MASK] of voices in the hall.",
"answer": "noise",
},
{
"choices": "1. not heavy\n2. sun rays",
"sentence": "The [MASK] shined through the window.",
"answer": "sun rays",
},
{
"choices": "1. widespread\n2. commander of an army",
"sentence": "The book is of [MASK] interest.",
"answer": "widespread",
},
])
]
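# Rendering sketch (illustrative; the exact layout is defined by
# InputOutputPrompt in utils). Calling synonym(synonym_examples[0]) is
# expected to emit the instruction followed by the formatted examples,
# roughly:
#
#   Give synonyms of the word in the sentence.
#
#   In "She heard the sound of voices in the hall.", synonyms for the word "sound" are:
#   - noise
#
#   In "Enter the secret code.", synonyms for the word "code" are:
#   - password
#   ...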
class WICDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
synonym_examples[boost_id],
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n"
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
                if answer:
                    answer = answer[0]
                else:
                    answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "yes" in answer.split()
is_no = "no" in answer.split()
pred = ""
if is_yes and (not is_no):
pred = "yes"
if is_no and (not is_yes):
pred = "no"
gold = gold.strip().lower()
pred = pred.strip().lower()
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_parts(self, text):
parts = text.split("\n")
sent1 = parts[0]
sent2 = parts[1]
word = parts[2].split("Question: Is the word ")[-1].split()[0]
return word, sent1, sent2
def clean_sentence(self, sentence):
sentence = sentence.replace("2. ", "")
sentence = sentence.replace("3. ", "")
sentence = sentence.replace("\n\n", "")
sentence = sentence.replace("A:", "")
sentence = sentence.strip()
sentence = sentence.split(".")[0]
sentence = sentence.split("\n")[0]
return sentence
def get_sentences(
self, all_constructors, all_boost_exs, sent, word, manifest, overwrite_manifest
):
synonym = all_constructors[0]
all_prompts = []
# synonyms
prompt_suffix = synonym(all_boost_exs[0])
prompt_combined = f'{prompt_suffix}\n\nIn "{{sent:}}", synonyms for the word \"{{word:}}\" are: '
all_prompts.append(prompt_combined.format(sent=sent, word=word))
synonyms = get_response(
prompt_combined.format(sent=sent, word=word),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
)
synonyms = synonyms.replace("- ", "").split("\n")
synonyms = ", ".join([a for a in synonyms if a][0:1])
# generate sentences
quoted_sent = sent.split()
sent = []
for tok in quoted_sent:
if tok.lower() == word.strip('"').lower():
sent.append(f'"{tok}"')
else:
sent.append(tok)
if sent:
sent = " ".join(sent)
else:
sent = " ".join(quoted_sent)
combined_definition = f"{synonyms}"
sentences = []
return combined_definition, sentences, all_prompts
def pairwise_comparisons(
self,
description_constructor,
boost_exs,
def1,
sentences_lst1,
def2,
sentences_lst2,
word,
manifest,
overwrite_manifest,
):
all_prompts = []
# reconcile the result
answer = ""
if def1.strip() != def2.strip():
answer = "No"
else:
answer = "Yes"
return answer, all_prompts
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
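    # Note on the aggregation above (explanatory comment, not original
    # code): each boost set contributes one prediction per example, and
    # merge_boosted_preds combines those per-boost votes into a single
    # final prediction per test example, fitting its aggregation model on
    # the boost_data_train predictions; individual_accuracies reports how
    # each boost set performs on its own for comparison.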
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
word, sent1, sent2 = self.get_parts(text)
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
def1, sentences_lst1, lst1_prompts = self.get_sentences(
[synonym], [boost_examples[0]], sent1, word, manifest, overwrite_manifest
)
def2, sentences_lst2, lst2_prompts = self.get_sentences(
[synonym], [boost_examples[0]], sent2, word, manifest, overwrite_manifest
)
pred, pred_prompts = self.pairwise_comparisons(
description,
boost_examples[-1],
def1,
sentences_lst1,
def2,
sentences_lst2,
word,
manifest,
overwrite_manifest,
)
all_prompts = lst1_prompts + lst2_prompts + pred_prompts
if i == 0:
print("\n".join(all_prompts))
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"word": word,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"sent1": sent1,
"sent2": sent2,
"def1": def1,
"def2": def2,
"gold": gold,
"sentences_lst1": sentences_lst1,
"sentences_lst2": sentences_lst2,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 5
task_name = "super_glue_wic"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_wic_GPT_3_prompt/"
decomp = WICDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/WIC_final.py |
#!/usr/bin/env python
# coding: utf-8
from pathlib import Path
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from datasets import load_dataset
from decomposition import Decomposition, get_args
from utils import get_response, text_f1, InputOutputPrompt, load_hf_data
extract = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question. If there is no evidence in the context, return \"Unknown\".\n\n"
)
extract_examples = [
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Europe in every year between 1346 and 1671",
"question": "Where was the plague present?",
"answer": "somewhere in Europe"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "What's one factor in increasing self-esteem?",
"answer": "Unknown"
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "What is another name for anti-matter?",
"answer": "Unknown"
}
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Europe in every year between 1346 and 1671",
"question": "Where was the plague present?",
"answer": "somewhere in Europe"
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "What is another name for anti-matter?",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "What's one factor in increasing self-esteem?",
"answer": "Unknown"
},
]),
pd.DataFrame([
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "What's one factor in increasing self-esteem?",
"answer": "Unknown"
},
{
"context": "According to Biraben, the plague was present somewhere in Europe in every year between 1346 and 1671",
"question": "Where was the plague present?",
"answer": "somewhere in Europe"
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "What is another name for anti-matter?",
"answer": "Unknown"
}
]),
]
prefix_select_zeroshot = """Answer the question. If there is no evidence in the context, return "Unknown".\n\n"""
class DropDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def read_data(self, save_dir, overwrite_data):
return load_hf_data(save_dir, self.task_name, self.val_split, "drop", overwrite_data)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
extract_examples[boost_id],
]
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = []
for x in train_data.answers_spans:
if len(x['spans']) == 0:
labels.append("unknown")
else:
labels.append(x['spans'][0])
train_data['expanded_labels'] = labels
labels = ["unknown"] + list(sorted(set(labels)))
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["expanded_labels"] == label].sample(num_per_class)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
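    # Sampling sketch (hypothetical numbers, added for clarity): with
    # k_shot=4 and 3 distinct labels, num_per_class starts at
    # ceil(4/3)=2; the while-loop shrinks it whenever adding another full
    # class would overshoot k_shot, so the selection proceeds 2 + 2 and
    # stops as soon as total_in_context == 4.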
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
entry = expt_log[ind]
pred = entry["pred"]
gold = entry["gold"]
else:
text = row.passage
question = row.question
if len(row.answers_spans["spans"]) == 0:
label = "unknown"
else:
label = row.answers_spans["spans"][0]
gold = label.lower()
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
input = s_row.passage
s_question = s_row.question
if len(s_row.answers_spans["spans"]) == 0:
label = "unknown"
else:
label = s_row.answers_spans["spans"][0]
icl_str += f"Passage: {input}\nQuestion: {s_question}\nAnswer: {label}\n\n"
prompt = (
icl_str
+ "Passage: {text:}\nQuestion: {question:}"
+ "\nAnswer:"
)
pmp = prompt.format(text=text, question=question)
if i == 0:
print(prompt.format(text=text, question=question))
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n\n",
)
pred = raw_answer.strip("\n").strip().lower()
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
metric = text_f1(preds=preds, golds=labels)
return expt_log, metric
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(text_f1(preds=[p[i] for p in all_boost_preds], golds=labels))
metric = text_f1(preds=preds, golds=labels)
return expt_log, expt_log_train, metric, individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if i == run_limit:
break
text = row.passage
question = row.question
if len(row.answers_spans["spans"]) == 0:
label = "unknown"
else:
label = row.answers_spans["spans"][0]
gold = label.lower()
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
prompt_suffix = extract(boost_examples[0])
prompt = (
prompt_suffix + "\n\nContext: {text:}\nQuestion: {question:}\nAnswer:"
)
pmp = prompt.format(text=text, question=question)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
stop_token="\n\n",
)
pred = raw_answer.split("\n")[0].replace('"', "").strip().lower()
# Single list pmp for one step decomp
prompts_across_boost.append([pmp])
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "drop"
data_dir = "drop"
wic = DropDecomp(task_name, data_dir)
wic.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/drop_final.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
import pandas as pd
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
questioner = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["statement", "question"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the statement as a yes/no question.\n\n"
)
questioner_examples = [
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test hard?"
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?"
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?"
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?"
}
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test hard?"
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?"
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?"
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?"
}
]),
pd.DataFrame([
{
"statement": "she prefers kittens over puppies.",
"question": "Does she prefer kittens over puppies?",
},
{
"statement": "Max and his wife went on a trip to Europe",
"question": "Did Max and his wife go on a trip to Europe?",
},
{
"statement": "jared was born during the war in 1942.",
"question": "Was Jared born during a war in 1942?",
},
{
"statement": "it took jenna 7 attempts to solve the problem",
"question": "Did it take Jenna 7 attempts to solve the problem?",
},
]),
]
openended_qa = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passage", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Provide the answer to the question from the passage.\n\n"
)
openended_qa_examples = [
pd.DataFrame([
{
"passage": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Did she think it was fair?",
"answer": "No"
},
{
"passage": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Is inflation good for society?",
"answer": "Maybe"
},
{
"passage": "Put yourself out there. The more time you spend dating and socializing, the more likely you will find a boyfriend you like.",
"question": "Does socializing help you find a boyfriend?",
"answer": "Yes"
},
]),
pd.DataFrame([
{
"passage": "Jack recommends his least favorite books of the year to his followers. The least favorite book this year was Harry Potter and the 7 Rings.",
"question": "What book does Jack dislike?",
"answer": "Jack does not like Harry Potter and the 7 Rings."
},
{
"passage": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Did she think it was fair?",
"answer": "No, she didn't think it was very fair."
},
{
"passage": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Is inflation good for society?",
"answer": "Hmm. Do you think so?"
},
{
"passage": "Put yourself out there. The more time you spend dating and socializing, the more likely you will find a boyfriend you like.",
"question": "Does socializing help you find a boyfriend?",
"answer": "Yes, it helps you find a boyfriend."
}
]),
pd.DataFrame([
{
"passage": "Anna's mother always told her to be confident even if she feels nervous on the inside",
"question": "Does Anna always feel nervous on the inside?",
"answer": "Unknown"
},
{
"passage": "Max and Jeff were extremely competitive at soccer, but Max was a lot better.",
"question": "Was Jeff better than Max at soccer?",
"answer": "No, Max was a lot better"
},
{
"passage": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Did she think it was fair?",
"answer": "No, she didn't think it was very fair."
},
{
"passage": "The FSP conference took place last week in Spain and representatives from 21 countries attended.",
"question": "Did representatives from more than 20 countries attend FSP?",
"answer": "Yes"
},
]),
]
class CBDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, data_train, boost_id):
return [
questioner_examples[boost_id],
openended_qa_examples[boost_id],
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n\n"
text = row["inputs_pretokenized"]
text = text.replace("True, False, or Neither?", "")
text = text + ". True, False, or Neither?"
gold = row["targets_pretokenized"]
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
is_maybe = "neither" in answer.split() or "unknown" in answer.split() or "maybe" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_question(self, statement, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
quesiton_prompt = f"\n{prompt_suffix}\n\nStatement: {{statement:}}\nQuestion:"
question_pmp = quesiton_prompt.format(statement=statement)
answer = get_response(
question_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.split("\n")[0]
return answer, question_pmp
def open_qa(self, question, passage, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
qa_prompt = f"\n{prompt_suffix}\n\nPassage: {{passage:}}\nQuestion: {question}\nAnswer:"
qa_pmp = qa_prompt.format(passage=passage)
answer = get_response(
qa_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.split("\n")[0]
answer = (
answer.replace("A: ", "")
.replace("B: ", "")
.replace("Answer: ", "")
.replace(", ", " ")
)
return answer, qa_pmp
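    # Pipeline sketch for the two steps above (illustrative, hypothetical
    # strings): the CB decomposition first rewrites the hypothesis into a
    # yes/no question with get_question, then answers it against the
    # premise with open_qa, e.g.
    #   "the test was not hard" -> "Was the test hard?" -> "No, ..."
    # The yes/no surface form of the answer is later mapped to
    # true/false/neither in _run_decomp_single_data.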
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
suffix = "True, False, or Neither?"
input = row["inputs_pretokenized"]
passage = input.split("\nQuestion: ")[0]
statement = (
input.split("\nQuestion: ")[-1]
.replace(suffix, "")
.replace('"', "")
.strip()
)
if i == run_limit:
break
gold = row["targets_pretokenized"]
gold = gold.lower()
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
question, question_final_prompt = self.get_question(
statement, questioner, boost_examples[0], manifest, overwrite_manifest
)
open_answer, answer_final_prompt = self.open_qa(
question, passage, openended_qa, boost_examples[1], manifest, overwrite_manifest
)
all_prompts.append(question_final_prompt)
all_prompts.append(answer_final_prompt)
if i == 0:
print("\n".join(all_prompts))
if "Yes" in open_answer.split():
answer = "True"
elif "No" in open_answer.split():
answer = "False"
else:
answer = "Neither"
answer = answer.lower()
is_yes = "yes" in answer.split() or "true" in answer.split()
is_no = "no" in answer.split() or "false" in answer.split()
is_maybe = "neither" in answer.split() or "maybe" in answer.split() or "unknown" in answer.split()
pred = "neither"
if is_yes and (not is_maybe and not is_no):
pred = "true"
if is_no and (not is_maybe and not is_yes):
pred = "false"
if is_maybe and (not is_no and not is_yes):
pred = "neither"
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"example": input,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 3
task_name = "super_glue_cb"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_cb_GPT_3_style/"
decomp = CBDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/CB_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import ast
from tqdm.auto import tqdm
from pathlib import Path
from nltk.corpus import stopwords
stops = set(stopwords.words("english"))
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt, accuracy_span_overlap
extract = InputOutputPrompt(
input_formatter=lambda x: f"Question: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Produce distinct questions.\n\n"
)
more_info_examples = [
pd.DataFrame([
{
"question": "who plays Carrie Bradshaw in sex and the city?",
"answer": "Caroline \"Carrie\" Bradshaw is a fictional character from the HBO franchise Sex and the City, portrayed by Sarah Jessica Parker."
},
{
"question": "what are the elements in air?",
"answer": "By mole fraction (i.e., by number of molecules), dry air contains 78.08% nitrogen, 20.95% oxygen, 0.93% argon, 0.04% carbon dioxide, and small amounts of other gases"
},
{
"question": "what is HP company?",
"answer": "HP Inc. is an American multinational information technology company headquartered in Palo Alto, California, that develops personal computers (PCs)"
},
{
"question": "when was the last season of FRIENDS released?",
"answer": "The series finale aired on May 6, 2004, and was watched by around 52.5 million American viewers, making it the fifth-most-watched series finale in television history"
}
]),
]
answer = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question.\n\n"
)
answer_question = [
pd.DataFrame([
{
'context': 'The nearest airport to Palm Springs is Indio/Palm Springs (PSP) Airport which is 2.1 miles away. ',
'question': 'what airport is closest to palm springs?',
'answer': 'Palm Springs International Airport'
},
{
'context': 'Martin Luther King earned his Bachelor of Divinity degree from Crozer Theological Seminary, followed by a doctorate in Systematic Theology from Boston University.',
'question': 'what degree did martin luther king get?',
'answer': 'Bachelor of Divinity'
},
{
'context': 'The Niger river runs in a crescent through Libya, Mali, Niger, on the border with Benin and then through Nigeria.',
'question': 'what countries does the niger river flow through?',
'answer': 'Libya'
},
{
'context': 'Puerto Rico is a territory of the United States and uses the U.S. dollar. ',
'question': 'what type of currency is used in puerto rico?',
'answer': 'United States dollar'
},
{
'context': 'kitt was voice most often by William daniels.',
'question': 'who played kitt in knight rider?',
'answer': 'William Daniels'
}
]),
pd.DataFrame([
{
'context': 'leonardo da vinci invented the parachute, the helicopter, double hull, an armored fighting vehicle,',
'question': 'what inventions did leonardo da vinci made?',
'answer': 'Double hull'
},
{
'context': "The French franc (F) was the national currency of France prior to France's adoption of the euro (EUR) in January 2002.",
'question': 'what currency is used in france before euro?',
'answer': 'French franc'
},
{
'context': 'The Isthmus of Panama, contains the country of Panama and the panama canal.',
'question': 'where is isthmus of panama located?',
'answer': 'Costa Rica'
},
{
'context': 'Hurricane Irene was a large and destructive tropical cyclone which affected much of the Caribbean and East Coast',
'question': 'where did hurricane irene?',
'answer': 'Eastern United States'
},
{
'context': 'Rihanna acted in This is the End and Battleship.',
'question': 'what movie did rihanna play in?',
'answer': 'This Is the End'
}
]),
pd.DataFrame([
{
'context': 'st vincent de paul is buried in the 6th arrondisment of Paris.',
'question': 'where is st vincent de paul buried?',
'answer': 'Paris'
},
{
'context': 'Thomas Luther "Luke" Bryan (born July 17, 1976) is an American country singer and songwriter from Leesburg.',
'question': 'where is luke bryan from?',
'answer': 'Leesburg'
},
{
'context': "Klum and Seal got married on 10 May 2005 on a beach in Mexico near Seal's home on Costa Careyes. ",
'question': 'where did heidi klum and seal get married?',
'answer': 'Mexico'},
{
'context': 'Tarantino starred in pulp fiction, grindhouse and others.',
'question': 'what movies did quentin tarantino star in?',
'answer': 'Grindhouse'
},
{
'context': 'Countries that are sometimes considered to be entirely or partially part of the Balkans are Croatia, Serbia, Lake Prespa.',
'question': 'what country is located in the balkan peninsula?',
'answer': 'Lake Prespa'
}
])
]
prefix_select_zeroshot = """Answer the question.\n\n"""
class NQDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
more_info_examples[0],
answer_question[boost_id],
]
def read_data(self, save_dir, overwrite_data):
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
test_data = pd.read_csv(f"{self.data_dir}/{self.task_name}/nq-test.qa.csv", sep="\t", header=None)
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(save_data)
save_data = Path(f"{self.data_dir}/{self.task_name}/train_data.feather")
if not save_data.exists() or overwrite_data:
train_data = pd.read_json(f"{self.data_dir}/{self.task_name}/biencoder-nq-dev.json")
train_data.to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = pd.read_feather(save_data)
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = train_data.question.tolist()
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["question"] == label].sample(num_per_class)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
prompt_suffix="",
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
question = row.question
if isinstance(row.answers, str):
label = ast.literal_eval(row.answers)
else:
label = row.answers.tolist()
gold = label
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
                    s_question = s_row.question
if isinstance(s_row.answers, str):
label = ast.literal_eval(s_row.answers)
else:
label = s_row.answers.tolist()
icl_str += f"Question: {s_question}\nAnswer: {label[0]}\n\n"
prompt = (
icl_str
+ "Question: {question:}"
+ prompt_suffix
+ "\nAnswer:"
)
if i == 0:
print(prompt.format(question=question))
prompt = prompt.format(question=question)
raw_answer = get_response(
prompt, #prompt.format(question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
stop_token="\n\n",
)
pred = raw_answer.strip("\n").strip().lower()
entry = {
"ind": ind,
"example": question,
"base_prompt": prompt,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append([pred])
labels.append(gold)
metric = accuracy_span_overlap(preds=preds, golds=labels)
return expt_log, metric
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
boost_test, boost_train = [], []
for p in all_boost_preds:
samples = [lf[1] for lf in p]
boost_test.append(samples)
for p in all_boost_train_preds:
samples = [lf[1] for lf in p]
boost_train.append(samples)
preds = self.merge_boosted_preds(boost_test, boost_train, train_labels, expt_log, expt_log_train)
preds = [(x,y) for x,y in zip([p[0][0] for p in all_boost_preds], preds)]
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(accuracy_span_overlap(preds=[p[i] for p in all_boost_preds], golds=labels))
metric = accuracy_span_overlap(preds=preds, golds=labels)
return expt_log, expt_log_train, metric, individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if i == run_limit:
break
question = row.question
if isinstance(row.answers, str):
label = ast.literal_eval(row.answers)
else:
label = row.answers.tolist()
gold = label
prompts_across_boost = []
preds_across_boost = []
# extract context
prompt_suffix = extract(boost_dfs[0][0])
prompt = (
prompt_suffix + "\n\Question: {question:}\nAnswer:"
)
more_info_answer = get_response(
prompt.format(question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
stop_token="\n\n",
)
for boost_examples in boost_dfs:
all_prompts = []
prompt_suffix = answer(boost_examples[1])
prompt = (
prompt_suffix + "\n\nContext: {text:}\nQuestion: {question:}\nAnswer:"
)
all_prompts.append(prompt)
if i == 0:
print(prompt.format(text=more_info_answer, question=question))
raw_answer = get_response(
prompt.format(text=more_info_answer, question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
stop_token="\n\n",
)
pred = raw_answer.split("\n")[0].strip().lower()
prompts_across_boost.append(all_prompts)
preds_across_boost.append((more_info_answer, pred))
entry = {
"ind": ind,
"example": question,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
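# Note on the two-stage flow above (explanatory comment, not original
# code): the first prompt asks the model to generate background context
# for the question (more_info_answer), and each boosted prompt then
# answers the question conditioned on that generated context -- a
# closed-book retrieve-then-read pattern in which the "retrieved" passage
# is itself model-generated.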
def main():
args = get_args()
task_name = "NQ"
data_dir = f"{DATA_DIR}/NQ"
webq = NQDecomp(task_name, data_dir)
webq.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/NQ_final.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
from pathlib import Path
import pandas as pd
import numpy as np
import random
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response
def format_data(lines):
"""from lines in dataset to two lists of sentences and labels respectively"""
def process_raw_data_sst(lines):
labels = []
sentences = []
for line in lines:
labels.append(int(line[0]))
sentences.append(line[2:].strip())
return sentences, labels
train_sentences, train_labels = process_raw_data_sst(lines)
return train_sentences, train_labels
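# Input format sketch (assumed SST-2 file layout, mirroring the parser
# above): each line starts with the integer label, a space, then the
# sentence, e.g.
#   1 a gorgeous , witty , seductive movie .
# parses to label=1 and sentence="a gorgeous , witty , seductive movie ."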
class SST2Decomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = [0, 1]
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["label"] == label].sample(num_per_class)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def read_data(self, save_dir, overwrite_data):
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
with open(f"{self.data_dir}/stsa.binary.test", "r") as f:
test_lines = f.readlines()
test_sentences, test_labels = format_data(test_lines)
test_data = pd.DataFrame({
'sentence': test_sentences,
'label': test_labels,
})
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(save_data)
save_data = Path(f"{save_dir}/{self.task_name}/train_data.feather")
if not save_data.exists() or overwrite_data:
with open(f"{self.data_dir}/stsa.binary.train", "r") as f:
train_lines = f.readlines()
train_sentences, train_labels = format_data(train_lines)
train_data = pd.DataFrame({
'sentence': train_sentences,
'label': train_labels,
})
train_data.to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = pd.read_feather(save_data)
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_boost_decomp_examples(self, train_data, boost_id):
seed = [1, 2, 3][boost_id]
k_shot = 16
random.seed(seed)
np.random.seed(seed)
data_train = pd.DataFrame(train_data)
labels = set(data_train["label"])
num_per_class = int(np.ceil(k_shot / len(labels)))
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
if seed % 2 == 1:
sub_df = data_train[data_train["label"] == label].sample(num_per_class, random_state=seed)
elif seed % 2 == 0:
sub_df = data_train[data_train["label"] != label].sample(num_per_class, random_state=seed)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
booster_df = pd.concat(dfs).sample(frac=1, random_state=0)
print(f"Selected: {len(booster_df)} in context examples.")
return [
booster_df
]
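    # Note on the seed-dependent sampling above (explanatory comment, not
    # original code): each boost set is a fresh 16-shot, class-balanced
    # draw keyed on the seed; odd seeds sample each class from its own
    # label bucket, even seeds sample it from the complement (which, for
    # binary labels, still yields a balanced set, just a different random
    # draw). The resulting prompt variants disagree on some examples, and
    # those disagreements are what the weak-supervision merge exploits.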
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold_str = entry["gold"]
else:
sentence = row["sentence"]
label = row["label"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
if s_row["label"] == 0:
demo_label = "negative"
else:
demo_label = "positive"
icl = f"Text: {s_row['sentence']}\nSentiment: {demo_label}"
icl_str += f"{icl}\n\n"
description = "For each snippet of text, label the sentiment of the text as positive or negative."
prompt = f"{description}\n\n{icl_str}Text: {{sentence:}}\nSentiment:"
pmp = prompt.format(sentence=sentence)
if i == 0:
print(pmp)
pred = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
pred = (
pred.replace(".", "")
.replace(",", "")
.replace("Label: ", "")
.replace("Sentiment: ", "")
)
pred = [p for p in pred.split("\n") if p]
is_pos = "positive" in pred
is_neg = "negative" in pred
if is_pos and not is_neg:
pred = "positive"
elif is_neg and not is_pos:
pred = "negative"
else:
pred = ""
if label == 1:
gold_str = "positive"
else:
gold_str = "negative"
entry = {
"gold": gold_str,
"pred": pred,
"base_prompt": pmp,
"ind": ind,
"example": sentence,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold_str)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
sentence = row["sentence"]
label = row["label"]
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
icl_str = ""
for s_ind, s_row in boost_examples[0].iterrows():
if s_row["label"] == 0:
demo_label = "negative"
else:
demo_label = "positive"
icl = f"Text: {s_row['sentence']}\nSentiment: {demo_label}"
icl_str += f"{icl}\n\n"
description = "For each snippet of text, label the sentiment of the text as positive or negative."
prompt = f"{description}\n\n{icl_str}Text: {{sentence:}}\nSentiment:"
pmp = prompt.format(sentence=sentence)
all_prompts.append(pmp)
if i == 0:
print(pmp)
pred = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=5,
)
pred = pred.replace(".", "").replace(",", "").replace("Label: ", "")
pred = [p for p in pred.split("\n") if p]
if pred:
pred = pred[0]
else:
pred = ""
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
if label == 1:
gold_str = "positive"
else:
gold_str = "negative"
entry = {
"gold": gold_str,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"example": sentence,
"ind": i,
}
expt_log[i] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold_str)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 3
task_name = "sst2"
data_dir = f"{DATA_DIR}/sst2/"
if not Path(data_dir).exists():
raise ValueError(
f"Data directory {data_dir} does not exist. Download AGNews from https://github.com/tonyzhaozh/few-shot-learning.")
decomp = SST2Decomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/SST2_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
summarize = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}",
output_formatter=lambda x: f"Summarize: the passage \"Passage\": {x['summary']}",
required_keys=["passage", "summary"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Summarize the passage.\n\n\"Categories\":\n- company\n- educational institution\n- artist\n- athlete\n- office holder\n- mean of transportation\n- building\n- natural place\n- village\n- animal\n- plant\n- album\n- film \n- written work\n\n"
)
summarize_examples = [
pd.DataFrame([
{
"passage": "Personality and Mental Health - Personality and Mental Health: Multidisciplinary Studies from Personality Dysfunction to Criminal Behaviour is a quarterly peer-reviewed academic journal published by Wiley-Blackwell on behalf of the Centre for Health and Justice.",
"summary": "The passage is about a journal."
},
{
"passage": "RNLB Mona (ON 775) - RNLB Mona (ON 775) was a Watson Class lifeboat based at Broughty Ferry in Scotland that capsized during a rescue attempt with the loss of her entire crew of eight men. The Mona was built in 1935 and in her time saved 118 lives.",
"summary": "The passage is about a lifeboat."
},
{
"passage": "Sayonara mo Ienakatta Natsu - Sayonara mo Ienakatta Natsu (さよならも言えなかった夏) is an album by Mikuni Shimokawa released on July 4 2007 by Pony Canyon.This album consists of eleven songs; several new songs and some songs which were previously released as singles.",
"summary": "The passage is about a album."
}
]),
pd.DataFrame([
{
"passage": "Personality and Mental Health - Personality and Mental Health: Multidisciplinary Studies from Personality Dysfunction to Criminal Behaviour is a quarterly peer-reviewed academic journal published by Wiley-Blackwell on behalf of the Centre for Health and Justice.",
"summary": "The passage is about a journal."
},
{
"passage": "Sayonara mo Ienakatta Natsu - Sayonara mo Ienakatta Natsu (さよならも言えなかった夏) is an album by Mikuni Shimokawa released on July 4 2007 by Pony Canyon.This album consists of eleven songs; several new songs and some songs which were previously released as singles.",
"summary": "The passage is about a album."
},
{
"passage": "RNLB Mona (ON 775) - RNLB Mona (ON 775) was a Watson Class lifeboat based at Broughty Ferry in Scotland that capsized during a rescue attempt with the loss of her entire crew of eight men. The Mona was built in 1935 and in her time saved 118 lives.",
"summary": "The passage is about a lifeboat."
},
]),
pd.DataFrame([
{
"passage": "Sayonara mo Ienakatta Natsu - Sayonara mo Ienakatta Natsu (さよならも言えなかった夏) is an album by Mikuni Shimokawa released on July 4 2007 by Pony Canyon.This album consists of eleven songs; several new songs and some songs which were previously released as singles.",
"summary": "The passage is about a album."
},
{
"passage": "Personality and Mental Health - Personality and Mental Health: Multidisciplinary Studies from Personality Dysfunction to Criminal Behaviour is a quarterly peer-reviewed academic journal published by Wiley-Blackwell on behalf of the Centre for Health and Justice.",
"summary": "The passage is about a journal."
},
{
"passage": "RNLB Mona (ON 775) - RNLB Mona (ON 775) was a Watson Class lifeboat based at Broughty Ferry in Scotland that capsized during a rescue attempt with the loss of her entire crew of eight men. The Mona was built in 1935 and in her time saved 118 lives.",
"summary": "The passage is about a lifeboat."
},
])
]
categorize = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}\nSummary: {x['summary']}",
output_formatter=lambda x: f"The summary \"Summary\" fits \"Category\": {x['category']}",
required_keys=["passage", "summary", "category"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Pick one category for the following text.\n\n\"Categories\":\n- company\n- educational institution\n- artist\n- athlete\n- office holder\n- mean of transportation\n- building\n- natural place\n- village\n- animal\n- plant\n- album\n- film\n- written work\n\n"
)
categorize_examples = [
pd.DataFrame([
{
"passage": "Personality and Mental Health - Personality and Mental Health: Multidisciplinary Studies from Personality Dysfunction to Criminal Behaviour is a quarterly peer-reviewed academic journal published by Wiley-Blackwell on behalf of the Centre for Health and Justice.",
"summary": "The passage is about a journal.",
"category": "written work"
},
{
"passage": "RNLB Mona (ON 775) - RNLB Mona (ON 775) was a Watson Class lifeboat based at Broughty Ferry in Scotland that capsized during a rescue attempt with the loss of her entire crew of eight men. The Mona was built in 1935 and in her time saved 118 lives.",
"summary": "The passage is about a lifeboat.",
"category": "mean of transportation"
},
{
"passage": "Sayonara mo Ienakatta Natsu - Sayonara mo Ienakatta Natsu (さよならも言えなかった夏) is an album by Mikuni Shimokawa released on July 4 2007 by Pony Canyon.This album consists of eleven songs; several new songs and some songs which were previously released as singles.",
"summary": "The passage is about a album.",
"category": "album"
}
]),
pd.DataFrame([
{
"passage": "Personality and Mental Health - Personality and Mental Health: Multidisciplinary Studies from Personality Dysfunction to Criminal Behaviour is a quarterly peer-reviewed academic journal published by Wiley-Blackwell on behalf of the Centre for Health and Justice.",
"summary": "The passage is about a journal.",
"category": "written work"
},
{
"passage": "Sayonara mo Ienakatta Natsu - Sayonara mo Ienakatta Natsu (さよならも言えなかった夏) is an album by Mikuni Shimokawa released on July 4 2007 by Pony Canyon.This album consists of eleven songs; several new songs and some songs which were previously released as singles.",
"summary": "The passage is about a album.",
"category": "album"
},
{
"passage": "RNLB Mona (ON 775) - RNLB Mona (ON 775) was a Watson Class lifeboat based at Broughty Ferry in Scotland that capsized during a rescue attempt with the loss of her entire crew of eight men. The Mona was built in 1935 and in her time saved 118 lives.",
"summary": "The passage is about a lifeboat.",
"category": "mean of transportation"
},
]),
pd.DataFrame([
{
"passage": "Sayonara mo Ienakatta Natsu - Sayonara mo Ienakatta Natsu (さよならも言えなかった夏) is an album by Mikuni Shimokawa released on July 4 2007 by Pony Canyon.This album consists of eleven songs; several new songs and some songs which were previously released as singles.",
"summary": "The passage is about a album.",
"category": "album"
},
{
"passage": "Personality and Mental Health - Personality and Mental Health: Multidisciplinary Studies from Personality Dysfunction to Criminal Behaviour is a quarterly peer-reviewed academic journal published by Wiley-Blackwell on behalf of the Centre for Health and Justice.",
"summary": "The passage is about a journal.",
"category": "written work"
},
{
"passage": "RNLB Mona (ON 775) - RNLB Mona (ON 775) was a Watson Class lifeboat based at Broughty Ferry in Scotland that capsized during a rescue attempt with the loss of her entire crew of eight men. The Mona was built in 1935 and in her time saved 118 lives.",
"summary": "The passage is about a lifeboat.",
"category": "mean of transportation"
},
])
]
description_zeroshot="""
Pick the correct category for the passage.
Categories:
- company
- educational institution
- artist
- athlete
- office holder
- mean of transportation
- building
- natural place
- village
- animal
- plant
- album
- film
- written work"""
class DBPediaDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
summarize_examples[boost_id],
categorize_examples[boost_id],
]
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = sorted(set(train_data["targets_pretokenized"]))
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
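            # Shrink the per-class count whenever it would overshoot the remaining k-shot budget.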
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["targets_pretokenized"] == label].sample(
num_per_class
)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
input = row["inputs_pretokenized"]
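            # The P3 template's category list ends with "written work. ", so everything after it is the passage itself.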
body = input.split("written work. ")[-1]
gold = row["targets_pretokenized"].strip().lower()
icl_str = ""
title = description_zeroshot
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_input = s_row.inputs_pretokenized
s_body = s_input.split("written work. ")[-1]
s_title = s_input.split("written work. ")[0] + "written work."
s_output = s_row.targets_pretokenized.strip()
icl_str += f"Passage: {s_body}\nCategory: {s_output}\n\n"
icl_str = f"{title}\n\n{icl_str}"
prompt = f"{icl_str}Passage: {{body:}}\nCategory:"
pmp = prompt.format(body=body)
if i == 0:
print(pmp)
output = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n\n",
)
pred = output.strip().lower()
entry = {
"ind": ind,
"example": input,
"base_prompt": pmp,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1)
        # Do WS (weak supervision): aggregate the per-prompt votes into a single prediction
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if i == run_limit:
break
text = (
row.inputs_pretokenized.strip("\n").split("written work.")[-1].strip()
)
gold = row.targets_pretokenized.strip().lower()
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
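                # Two-stage decomposition: (1) summarize the passage, (2) map the summary onto a category.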
prompt_suffix = summarize(boost_examples[0])
summarize_prompt = f"{prompt_suffix}\n\nPassage: {{text:}}\nSummarize: the passage \"Passage\":"
summarize_pmp = summarize_prompt.format(text=text)
output = get_response(
summarize_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
summary = output.split("\n")[0].split(":")[-1].strip("\n")
prompt_suffix = categorize(boost_examples[1])
category_prompt = f"{prompt_suffix}\n\nPassage: {{text:}}\nSummary: {{summary:}}\nThe summary \"Summary\" fits \"Category\":"
category_pmp = category_prompt.format(text=text, summary=summary)
output = get_response(
category_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=15,
)
pred = output.split("\n")[0].strip().lower()
all_prompts.append(summarize_pmp)
all_prompts.append(category_pmp)
if i == 0:
print(summarize_pmp)
print(category_pmp)
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "dbpedia"
data_dir = (
f"{DATA_DIR}/P3/data_feather/dbpedia_14_pick_one_category_for_the_following_text"
)
decomp = DBPediaDecomp(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/DBPedia_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import random
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
##############################################################################################################################
# All prompts
questioner_prompt = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["question", "statement"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the statement as a yes/no question.\n\n"
)
questioner_prompt_examples = [
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
]
extraction_qa = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question. If there is no evidence in the context, return \"Unknown\".\n\n"
)
extraction_qa_examples = [
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect people in Europe?",
"answer": "yes, people in Italy, Europe",
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown",
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown",
},
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy only between 1346 and 1671, and not after that.",
"question": "Based on the context, Was the plague present in Italy during the 2000s?",
"answer": "No, it was present between 1346 and 1671"
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown"
}
]),
pd.DataFrame([
{
"context": "Jenna's 10th birthday was yesterday evening and at least 10 of her friends attended the party.",
"question": "Based on the context, Did 10 friends attend Jenna's party?",
"answer": "Unknown"
},
{
"context": "The bullies attacked John when he was walking through the elementary school parking lot and then got sent to the teacher's office.",
"question": "Based on the context, Did the bullies attack John in the teacher's office?",
"answer": "No, parking lot"
},
{
"context": "WISS discovered a new monkey disease occurring in a remote tribe in the Amazon rainforrest.",
"question": "Based on the context, Did WISS discover a new monkey species?",
"answer": "No, a new monkey disease"
}
]),
pd.DataFrame([
{
"context": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Based on the context, Did she think it was fair?",
"answer": "No"
},
{
"context": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Based on the context, Is inflation good for society?",
"answer": "Unknown"
},
{
"context": "Put yourself out there. The more time you spend dating and socializing, the more likely you will find a boyfriend you like.",
"question": "Based on the context, Does socializing help you find a boyfriend?",
"answer": "Yes"
},
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect people in Europe?",
"answer": "yes, people in Italy, Europe",
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown",
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown",
},
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect over 1,000 people?",
"answer": "yes, 1,200 people",
},
{
"context": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Based on the context, Is inflation good for society?",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown"
}
]),
]
##############################################################################################################################
class ANLIR1Decomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = [' True', ' False', ' Neither']
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["targets_pretokenized"] == label].sample(
num_per_class
)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def _get_boost_decomp_examples(self, train_data, boost_id):
seed = [69, 987][boost_id]
k_shot = 64
random.seed(seed)
np.random.seed(seed)
data_train = pd.DataFrame(train_data)
labels = [' Neither', ' False', ' True']
num_per_class = int(np.ceil(k_shot / len(labels)))
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
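            # Odd seeds sample demonstrations from the matching class; even seeds sample from the
            # other classes, so each boosted prompt sees a different slice of the training data.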
if seed % 2 == 1:
sub_df = data_train[data_train["targets_pretokenized"] == label].sample(num_per_class, random_state = seed)
elif seed % 2 == 0:
sub_df = data_train[data_train["targets_pretokenized"] != label].sample(num_per_class, random_state = seed)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
booster_df = pd.concat(dfs).sample(frac=1, random_state=0)
print(f"Selected: {len(booster_df)} in context examples.")
return [
booster_df
]
def get_boost_decomp_examples(self, train_data, boost_id):
if boost_id < 3:
return [
questioner_prompt_examples[boost_id],
extraction_qa_examples[boost_id],
]
else:
icl_examples = self._get_boost_decomp_examples(train_data, boost_id-3)[0]
return [
icl_examples
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
golds = []
preds = []
labels = set(test_data["targets_pretokenized"])
labels = [l.lower().strip() for l in labels]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
pred = expt_log[ind]["pred"]
gold = expt_log[ind]["gold"]
else:
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']}{s_row['targets_pretokenized']}\n\n"
text = row["inputs_pretokenized"]
text = text.replace("True, False, or Neither?", "").strip().strip("\n")
text = text + " True, False, or Neither? "
gold = row["targets_pretokenized"]
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a for a in answer if any(l.lower() in a.lower() for l in labels)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
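                # Resolve the free-form completion to one of the three NLI labels; anything ambiguous stays "Neither".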
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
is_maybe = "neither" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
gold = gold.strip().lower()
pred = pred.strip().lower()
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
golds.append(gold)
preds.append(pred)
report = classification_report(golds, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_extraction(self, question, passage, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
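        # Mirror the demonstrations' phrasing: prepend "Based on the context," only when the boost examples use it.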
if "Based on the context," in prompt_suffix:
question_prefix = " Based on the context,"
else:
question_prefix = ""
extract_prompt = f"{prompt_suffix}\n\nContext: {{passage:}}\nQuestion:{question_prefix} {question}\nAnswer:"
extract_pmp = extract_prompt.format(passage=passage)
answer = get_response(
extract_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.replace(",", "").replace(".", "").replace("?", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0]
else:
answer = passage
return answer, extract_pmp
def get_question(self, statement, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
question_prompt = f"{prompt_suffix}\n\nStatement: {{statement:}}\nQuestion:"
question_pmp = question_prompt.format(statement=statement)
answer = get_response(
question_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.replace("Question: ", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0].strip()
statement = statement.strip().strip(".")
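        # Fall back to a templated question when the model returns nothing, merely echoes the statement, or fails to end with "?".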
if (
not answer
or statement.lower() == answer.lower()
or not answer.strip().endswith("?")
):
answer = f"{statement}. Yes, no, or unknown?"
answer = answer.split("\n")[0]
return answer, question_pmp
def resolve_pred(self, answer):
is_yes = "yes" in answer.split() or "true" in answer.split()
is_no = "no" in answer.split() or "false" in answer.split()
is_maybe = "maybe" in answer.split() or "maybe" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
return pred
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
        # Do WS (weak supervision): aggregate the per-prompt votes into a single prediction
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train, indecisive_ans="neither")
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
report = classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)
individual_accuracies.append(report["accuracy"])
print(report)
print("\n\n")
report = classification_report(labels, preds, output_dict=True)
print(report)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(
self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit = -1
):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
prompts_across_boost = []
preds_across_boost = []
if i == run_limit:
break
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"].strip()
passage = text.split("\n")[0]
statement = (
text.split("\n")[-1]
.replace("True, False, or Neither?", "")
.strip()
.strip("\n")
.replace("Question: ", "")
)
for boost_num, boost_examples in enumerate(boost_dfs):
all_prompts = []
# question / extract prompt
if boost_num < 3:
question, question_final_prompt = self.get_question(
statement, questioner_prompt, boost_examples[0], manifest, overwrite_manifest
)
all_prompts.append(question_final_prompt)
open_answer_f, extraction_final_prompt = self.get_extraction(
question,
passage,
extraction_qa,
boost_examples[1],
manifest,
overwrite_manifest,
)
all_prompts.append(extraction_final_prompt)
if i == 0:
print("\n".join(all_prompts))
answer_f = open_answer_f.lower()
pred = self.resolve_pred(answer_f)
pred = pred.strip().lower()
preds_across_boost.append(pred)
# just ICL
else:
icl_str = ""
for s_ind, s_row in boost_examples[0].iterrows():
if s_row["targets_pretokenized"].strip() == "True":
demo_label = "yes"
elif s_row["targets_pretokenized"].strip() == "False":
demo_label = "no"
else:
demo_label = "unknown"
s_text = s_row["inputs_pretokenized"]
s_passage = s_text.split("\n")[0]
s_statement = (
s_text.split("\n")[-1]
.replace("True, False, or Neither?", "")
.strip()
.strip("\n")
.replace("Question: ", "")
)
icl = f"Statement: {s_statement}\nAnswer: {demo_label}"
icl_str += f"{icl}\n\n"
description = "Is the statement Yes, No, or Unknown?"
prompt = f"{description}\n\n{icl_str}Statement: {{statement:}}\nAnswer:"
pmp = prompt.format(statement=statement)
if i == 0:
print("PMP ICL")
print(pmp)
pred = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n",
)
pred = pred.lower().strip()
pred = pred.replace(".", "").replace(",", "").replace("Label: ", "").replace("Sentiment:", "")
pred = [p for p in pred.split("\n") if p]
if pred:
pred = pred[0]
else:
pred = ""
all_prompts.append(pmp)
prompts_across_boost.append(all_prompts)
pred = self.resolve_pred(pred).lower()
preds_across_boost.append(pred)
gold = gold.strip().lower()
expt_log[ind] = {
"ind": ind,
"preds_boost": preds_across_boost,
"prompts": prompts_across_boost,
"example": text,
"pred": pred,
"gold": gold,
}
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 5
task_name = "anli_r1"
data_dir = f"{DATA_DIR}/P3/data_feather/anli_GPT_3_style_r1"
decomp = ANLIR1Decomp(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
    main()
| ama_prompting-main | tasks/ANLIR1_final.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
import pandas as pd
import random
import numpy as np
from pathlib import Path
from datasets import load_dataset
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
questioner = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["statement", "question"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the statement as a yes/no question.\n\n"
)
questioner_examples = [
pd.DataFrame([
{
"statement": "Jonathan Samuels was born in the 70's.",
"question": "Was Jonathan Samuels born in the 70's?"
},
{
"statement": "Jerry bullied him and called him names",
"question": "Did Jerry bully him and call him names?",
},
{
"statement": "Sam and jade were going to go to the movies",
"question": "Did did Sam and jade go to the movies?",
},
{
"statement": "Chocolate is tasty, when I am feeling hungry.",
"question": "Does chocolate taste good when you are hungry?",
},
{
"statement": "Mark ran fast.",
"question": "Did mark run fast?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test hard?"
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?"
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?"
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?"
}
])
]
openended_qa = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passage", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction=""
)
openended_qa_examples = [
pd.DataFrame([
{
"context": "My aunt is a nurse and she often talks about long hours at work. Last week was especially bad and she was constantly working many hours.",
"question": "Was her work easy?",
"answer": "No, it was hard work."
},
{
"context": "My roommate was sick. She stayed home from work and school. She slept all day long and by the end of the day, she was feeling better.",
"question": "Did the rest help her?",
"answer": "Yes, she slept and felt better."
},
{
"context": "Andy had always wanted a big kids bike. When he turned six Year's old he asked for a bike for his birthday. He did not know how to ride a bike. On Andy's birthday his mother gave him a bike.",
"question": "Did he cry all night?",
"answer": "No, Andy was happy because he got a bike."
},
]),
pd.DataFrame([
{
"context": "Anna's mother always told her to be confident even if she feels nervous on the inside",
"question": "Does Anna always feel nervous on the inside?",
"answer": "Unknown"
},
{
"context": "Max and Jeff were extremely competitive at soccer, but Max was a lot better.",
"question": "Was Jeff better than Max at soccer?",
"answer": "No, Max was a lot better"
},
{
"context": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Did she think it was fair?",
"answer": "No, she didn't think it was very fair."
},
{
"context": "The FSP conference took place last week in Spain and representatives from 21 countries attended.",
"question": "Did representatives from more than 20 countries attend FSP?",
"answer": "Yes"
},
]),
pd.DataFrame([
{
"context": "My roommate was sick. She stayed home from work and school. She slept all day long and by the end of the day, she was feeling better.",
"question": "Did the rest help her?",
"answer": "Yes, she slept and felt better."
},
{
"context": "It was a beautiful day outside. Bob decided to go for a walkk. Bob walked along the path and admired the scenery. He found a twenty dollar bill on the ground.",
"question": "Was he disappointed?",
"answer": "No, he was happy he got money."
},
{
"context": "My aunt is a nurse and she often talks about long hours at work. Last week was especially bad and she was constantly working many hours.",
"question": "Was her work easy?",
"answer": "No, it was hard work."
},
]),
]
sentiment = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['statement']}",
output_formatter=lambda x: f"Sentiment: {x['sentiment']}",
required_keys=["statement", "question"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Is the sentiment of the passage positive, negative, or neutral?\n\n"
)
sentiment_examples = [
pd.DataFrame([
{
"statement": "Mary saw the animal",
"sentiment": "neutral",
},
{
"statement": "the town is quaint , but ultimately too boring and ugly",
"sentiment": "negative",
},
{
"statement": "he's a strong athlete, people come from miles away to watch him compete",
"sentiment": "positive",
},
]),
pd.DataFrame([
{
"statement": "the movie was not very good, they could have picked a better lead actor",
"sentiment": "negative",
},
{
"statement": "she loves her mother so much that she gives her a hug everyday",
"sentiment": "positive",
},
{
"statement": "the computer sat on the table",
"sentiment": "neutral",
},
]),
pd.DataFrame([
{
"statement": "Mary saw the animal",
"sentiment": "neutral",
},
{
"statement": "he's a strong athlete, people come from miles away to watch him compete",
"sentiment": "positive",
},
{
"statement": "the dress is boring and ugly, it loooks like a towel",
"sentiment": "negative",
},
{
"statement": "the exam went well since i studied a lot",
"sentiment": "positive",
},
{
"statement": "the table was made of wood",
"sentiment": "neutral",
},
{
"statement": "grocery stores sell food",
"sentiment": "neutral",
},
])
]
sentiment_more_positive = InputOutputPrompt(
input_formatter=lambda x: f"Sentence 1: {x['sentence1']}\nSentence 2: {x['sentence2']}",
output_formatter=lambda x: f"More positive: {x['answer']}",
required_keys=["sentence1", "sentence2", 'answer'],
input_output_sep="\n",
example_sep="\n\n",
instruction="Which sentence is more positive?\n\n"
)
sentiment_more_positive_examples = [
pd.DataFrame([
{
"sentence1": "i think she's fine",
"sentence2": "she's my favorite person in the world",
"answer": "she's my favorite person in the world"
},
{
"sentence1": "i have never been to a better restaurant in my life",
"sentence2": "the restaurant was decent, I may go back",
"answer": "i have never been to a better restaurant in my life"
},
{
"sentence1": "I went on the best vacation with my family last week.",
"sentence2": "I just got back from a vacation, which was expensive, but fun",
"answer": "I went on the best vacation with my family last week."
}
])
]
what_next = InputOutputPrompt(
input_formatter=lambda x: f"Choices:\n- {x['choice_a']}\n- {x['choice_b']}\n\nPassage: {x['passage']} Then?",
output_formatter=lambda x: f"{x['answer']}",
required_keys=["choice_a", "choice_b", "passage", "answer"],
input_output_sep=" ",
example_sep="\n\n----\n\n",
instruction="Pick the best choice for the passage.\n\n"
)
what_next_examples = [
pd.DataFrame([
{
"passage": "The girl went to college and graduated with honors",
"choice_a": "She was qualified to get a job",
"choice_b": "She was qualified to eat pizza",
"answer": "she was qualified to get a job"
},
{
"passage": "Max bought all his friends cupcakes for the party.",
"choice_a": "They never spoke to him again",
"choice_b": "They all thanked him",
"answer": "They all thanked him"
},
{
"passage": "Sam felt so hungry so he bought himself some cheese!",
"choice_a": "he was starving",
"choice_b": "he felt full",
"answer": "he felt full"
}
]),
pd.DataFrame([
{
"passage": "The girl went to college and graduated with honors",
"choice_a": "She was qualified to get a job",
"choice_b": "She was qualified to eat pizza",
"answer": "she was qualified to get a job"
},
{
"passage": "Max bought all his friends cupcakes for the party.",
"choice_a": "They never spoke to him again",
"choice_b": "They all thanked him",
"answer": "They all thanked him"
},
{
"passage": "Sam felt so hungry so he bought himself some cheese!",
"choice_a": "he was starving",
"choice_b": "he felt full",
"answer": "he felt full"
}
]),
pd.DataFrame([
{
"passage": "Sam and Jade were excited to see the new movie.",
"choice_a": "They went to the theater",
"choice_b": "They went swimming",
"answer": "They went to the theater"
},
{
"passage": "Andy's parents got him a new toy",
"choice_a": "he played",
"choice_b": "he cried",
"answer": "he played"
},
{
"passage": "She can read the entire book in a single day.",
"choice_a": "She is a slow reader",
"choice_b": "She is a fast reader",
"answer": "She is a fast reader"
}
])
]
class StoryCloze(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def read_data(self, save_dir, overwrite_data):
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
dataset = load_dataset("story_cloze", "2016", data_dir=self.data_dir)
test_data = dataset[self.val_split].to_pandas()
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(f"{save_data}")
save_data_train = Path(f"{save_dir}/{self.task_name}/train_data.feather")
if not save_data_train.exists() or overwrite_data:
dataset = load_dataset("story_cloze", "2016", data_dir=self.data_dir)
train_data = dataset["validation"].to_pandas()
train_data.to_feather(f"{save_data_train}")
else:
print(f"Reading train data from {save_data_train}")
train_data = pd.read_feather(f"{save_data_train}")
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_boost_decomp_examples(self, data_train, boost_id):
if boost_id < 3:
return [
questioner_examples[boost_id],
openended_qa_examples[boost_id],
sentiment_examples[boost_id],
what_next_examples[boost_id],
sentiment_more_positive_examples[0]
]
else:
seed = [1, 2, 3][boost_id-3]
            k_shot = 8  # also tried 32 and 4
random.seed(seed)
np.random.seed(seed)
sub_df = data_train.sample(k_shot)
booster_df = sub_df.sample(frac=1, random_state=0)
print(f"Selected: {len(booster_df)} in context examples.")
return [
booster_df
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
prompt_suffix="",
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
                pred = expt_log[ind]["pred"]
                gold = expt_log[ind]["gold"]
else:
instruction = "Given two possible next sentences A) and B), choose the best next sentence to complete the story. Answer with A or B."
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_text = f"{s_row['input_sentence_1']} {s_row['input_sentence_2']} {s_row['input_sentence_3']} {s_row['input_sentence_4']}\n\n"
if s_row['answer_right_ending'] == 1:
answer = s_row['sentence_quiz1']
elif s_row['answer_right_ending'] == 2:
answer = s_row['sentence_quiz2']
choices = f"A) {s_row['sentence_quiz1']}\nB) {s_row['sentence_quiz2']}\n\n"
icl_str += f"{s_text}{choices}Answer: {answer}\n\n\n"
text = f"{row['input_sentence_1']} {row['input_sentence_2']} {row['input_sentence_3']} {row['input_sentence_4']}\n\n"
choices = f"A) {row['sentence_quiz1']}\nB) {row['sentence_quiz2']}\n\n"
gold = ''
if row['answer_right_ending'] == 1:
gold = row['sentence_quiz1']
elif row['answer_right_ending'] == 2:
gold = row['sentence_quiz2']
prompt = f"{instruction}\n\n\n{icl_str}{text}{choices}Answer: "
raw_answer = get_response(prompt, manifest, max_toks=50)
answer = raw_answer.split("\n")
answer = [a for a in answer if a]
if answer:
answer = answer[0].replace("Answer: ", "").strip()
else:
answer = ''
if i == 0:
print(prompt)
answer = answer.replace(")", "").replace("(", "").replace(":", "")
is_A = answer.lower() in row['sentence_quiz1'].lower() or row['sentence_quiz1'].lower() in answer.lower() or "A" in answer.split()
is_B = answer.lower() in row['sentence_quiz2'].lower() or row['sentence_quiz2'].lower() in answer.lower() or "B" in answer.split()
pred = ''
if is_A and (not is_B):
pred = '1'
if is_B and (not is_A):
pred = '2'
entry = {
"ind": ind,
"example": text,
"base_prompt": prompt,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
labels.append(str(row['answer_right_ending']))
preds.append(pred)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_question(self, statement, all_prompts, boost_examples, manifest, overwrite_manifest):
questioner = all_prompts[0](boost_examples[0])
question_prompt = f"{questioner}\n\nStatement: {{statement:}}\n"
question = get_response(
question_prompt.format(statement=statement),
manifest,
            max_toks=4 * len(statement.split()))
question = question.replace("Question: ", "")
question = [q for q in question.split("\n") if q]
if not question:
question = f"{statement} Yes or no?"
else:
question = question[0]
return question, question_prompt
def answer_question(self, question, passage, all_prompts, boost_examples, manifest, overwrite_manifest, option=1):
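        # `option` encodes which ending the question was built from: with option=1 a "yes" supports
        # ending 1; with option=2 the question came from ending 2, so a "yes" supports ending 2 instead.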
one_at_a_time = all_prompts[1](boost_examples[1])
answer_prompt = f"{one_at_a_time}\n\nPassage: {{passage:}}\nQuestion: {{question:}}\n"
answer = get_response(
answer_prompt.format(passage=passage, question=question),
manifest,
max_toks=50)
answer = answer.replace("Answer: ", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0].replace(",", "").replace(".", "").lower()
else:
answer = ''
pred = ''
if option == 1:
if 'yes' in answer.split():
pred = "1"
elif 'no' in answer.split():
pred = "2"
elif option == 2:
if 'no' in answer.split():
pred = "1"
elif 'yes' in answer.split():
pred = "2"
return pred, answer_prompt
def get_one_by_one(self, example, choice_a, choice_b, all_prompts, boost_examples, manifest, overwrite_manifest):
# construct questions
question_a, questioner_prompt = self.get_question(choice_a, all_prompts, boost_examples, manifest, overwrite_manifest)
question_b, questioner_prompt = self.get_question(choice_b, all_prompts, boost_examples, manifest, overwrite_manifest)
# ask questions
pred_a, answerer_prompt = self.answer_question(question_a, example, all_prompts, boost_examples, manifest, overwrite_manifest, option=1)
pred_b, answerer_prompt = self.answer_question(question_b, example, all_prompts, boost_examples, manifest, overwrite_manifest, option=2)
        # reconcile: if both questions agree, take that ending; if only one produced an answer, trust it; otherwise abstain
if pred_a == "1" and pred_b == "1":
pred = "1"
elif pred_a == "2" and pred_b == "2":
pred = "2"
elif pred_a and not pred_b:
pred = pred_a
        elif pred_b and not pred_a:
pred = pred_b
else:
pred = ''
return pred, questioner_prompt, answerer_prompt
def get_sentiment(self, statement, all_prompts, boost_examples, manifest, overwrite_manifest):
sentiment_prompt = all_prompts[0](boost_examples[2])
prompt = f"{sentiment_prompt}\n\nPassage: {{statement:}}\nSentiment: "
raw_answer = get_response(
prompt.format(statement=statement),
manifest,
max_toks=5)
sent = raw_answer.split("\n")[0]
if "positive" in sent:
sent = 1
elif "negative" in sent:
sent = -1
elif "neutral" in sent:
            sent = 0
        else:
            # unrecognized output: default to neutral so callers can still compare scores
            sent = 0
return sent, sentiment_prompt
def get_sentiment_more_pos(self, choice_a, choice_b, all_prompts, boost_examples, manifest, overwrite_manifest):
sentiment_prompt = all_prompts[1](boost_examples[4])
prompt = f"{sentiment_prompt}\n\nSentence 1: {{choice_a:}}\nSentence 2: {{choice_b:}}\nMore positive:"
raw_answer = get_response(
prompt.format(choice_a=choice_a, choice_b=choice_b),
manifest,
max_toks=20)
raw_answer = raw_answer.split("\n")[0].lower()
if choice_a.lower() in raw_answer and not choice_b.lower() in raw_answer:
return 1
elif choice_b.lower() in raw_answer and not choice_a.lower() in raw_answer:
return 2
else:
return 0
def combine_sentiments(self, example, choice_a, choice_b, all_prompts, boost_examples, manifest, boost_id, overwrite_manifest):
        # score sentiments for each candidate ending and for the story so far
sentiment_a, sentiment_prompt = self.get_sentiment(choice_a, all_prompts, boost_examples, manifest, overwrite_manifest)
sentiment_b, sentiment_prompt = self.get_sentiment(choice_b, all_prompts, boost_examples, manifest, overwrite_manifest)
sentiment_ex, sentiment_prompt = self.get_sentiment(example, all_prompts, boost_examples, manifest, overwrite_manifest)
# reconcile answer
pred = ''
if abs(sentiment_a - sentiment_ex) < abs(sentiment_b - sentiment_ex):
pred = "1"
elif abs(sentiment_a - sentiment_ex) > abs(sentiment_b - sentiment_ex):
pred = "2"
return pred, sentiment_prompt
def get_what_next(self, example, choice_a, choice_b, all_prompts, boost_examples, manifest, overwrite_manifest):
what_next_prompt = all_prompts[0](boost_examples[3])
prompt = f"{what_next_prompt}\n\n----\n\nChoices:\n- {{choice_a:}}\n- {{choice_b:}}\n\nPassage: {{example:}} Then?"
raw_answer = get_response(
prompt.format(choice_a=choice_a, choice_b=choice_b, example=example),
manifest,
max_toks=50)
answer = raw_answer.split("\n")[0].lower()
choice_a = choice_a.lower()
choice_b = choice_b.lower()
pred = ''
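        # Scan character n-grams of the answer (lengths 5-49); the longest n-gram contained in exactly one choice decides the prediction.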
for n in range(5,50):
for idx_offset in range(len(answer) - n + 1):
ngram = "".join(answer[idx_offset:idx_offset+n])
if ngram in choice_a and ngram not in choice_b:
pred = '1'
elif ngram not in choice_a and ngram in choice_b:
pred = '2'
return pred, what_next_prompt
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
        # Do WS (weak supervision): aggregate the per-prompt votes into a single prediction
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
no_preds = 0
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
example = f"{row['input_sentence_1']} {row['input_sentence_2']} {row['input_sentence_3']} {row['input_sentence_4']}"
choice_a = row['sentence_quiz1']
choice_b = row['sentence_quiz2']
gold = str(row['answer_right_ending'])
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_num, boost_examples in enumerate(boost_dfs):
all_prompts = []
if boost_num < 3:
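                    # Cascade of strategies: yes/no question answering first, then sentiment matching, then a direct "what next?" choice.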
pred, questioner_prompt, answerer_prompt = self.get_one_by_one(
example, choice_a, choice_b, [questioner, openended_qa], boost_examples, manifest, overwrite_manifest
)
if i == 0:
print(questioner_prompt)
print("\n\n")
print(answerer_prompt)
print("\n\n")
all_prompts.append(questioner_prompt)
all_prompts.append(answerer_prompt)
if not pred:
pred, sentiment_prompt = self.combine_sentiments(
example, choice_a, choice_b, [sentiment, sentiment_more_positive], boost_examples, manifest, boost_num, overwrite_manifest
)
all_prompts.append(sentiment_prompt)
if not pred:
pred, what_next_prompt = self.get_what_next(
example, choice_a, choice_b, [what_next], boost_examples, manifest, overwrite_manifest
)
pred2, what_next_prompt = self.get_what_next(
example, choice_b, choice_a, [what_next], boost_examples, manifest, overwrite_manifest
)
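                        # Ask with the endings in both orders and keep the answer only if it is order-invariant.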
if pred != pred2:
pred = ""
all_prompts.append(what_next_prompt)
if not pred:
pred = ''
no_preds += 1
else:
icl_str = ""
for s_ind, s_row in boost_examples[0].iterrows():
s_text = f"{s_row['input_sentence_1']} {s_row['input_sentence_2']} {s_row['input_sentence_3']} {s_row['input_sentence_4']}"
if s_row['answer_right_ending'] == 1:
answer = s_row['sentence_quiz1']
elif s_row['answer_right_ending'] == 2:
answer = s_row['sentence_quiz2']
icl_str += f"Context: {s_text} {answer}\n\n"
text = f"{row['input_sentence_1']} {row['input_sentence_2']} {row['input_sentence_3']} {row['input_sentence_4']}"
options = [row['sentence_quiz1'], row['sentence_quiz2']]
if row['answer_right_ending'] == 1:
gold = row['sentence_quiz1']
elif row['answer_right_ending'] == 2:
gold = row['sentence_quiz2']
prompt = f"Complete the paragraph.\n\n\n{icl_str}Context: {text}"
if i == 0:
                        print(prompt)
all_prompts.append(prompt)
raw_answer, _ = get_response(
                        prompt,
manifest,
gold_choices=[options[0].replace("- ", "").strip(), options[1].replace("- ", "").strip()],
overwrite=bool(overwrite_manifest),
max_toks=max(len(opt) for opt in options)*4,
)
answer = raw_answer
is_A = answer.lower() in row['sentence_quiz1'].lower() or row['sentence_quiz1'].lower() in answer.lower() or "A" in answer.split()
is_B = answer.lower() in row['sentence_quiz2'].lower() or row['sentence_quiz2'].lower() in answer.lower() or "B" in answer.split()
pred = ''
if is_A and (not is_B):
pred = '1'
if is_B and (not is_A):
pred = '2'
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"example": example,
"choice_a": choice_a,
"choice_b": choice_b,
"gold": str(row['answer_right_ending']),
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(str(row['answer_right_ending']))
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 6
task_name = "story_cloze"
data_dir = f"{DATA_DIR}/story_cloze"
decomp = StoryCloze(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/StoryCloze_final.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
from pathlib import Path
from collections import Counter
import re
import pandas as pd
import json
import unicodedata
import string
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
cloze_completion = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}",
output_formatter=lambda x: f"{x['answer']}",
input_output_sep=" ",
example_sep="\n\n----\n\n",
required_keys=["context"],
instruction="Complete the paragraph.\n\n"
)
cloze_completion_examples = [
pd.DataFrame([
{
"context": "Barack Hussein Obama is an American politician who served as the 44th president of the United States from 2009 to 2017. A member of the Democratic Party, he was the first African-American president of the United States. Obama previously served as a U.S. senator from Illinois from 2005 to 2008 and as an Illinois state senator from 1997 to 2004.",
"answer": "Obama was senator of the state of Illinois prior to becoming a US president."
},
{
"context": "(CNN) -- Saif al-Islam Gadhafi, 38, has never lived a day in which his father Moammar didn't rule Libya -- as its undisputed leader inside the country and an enigmatic, controversial voice for the world. And yet, as the Libyan government faced a stiff popular uprising, it was Moammar Gadhafi's second eldest son -- and not the Leader of the Revolution himself -- who was first to talk to the nation about the unrest and detail a plan to address it. The speech, made early Monday on Libyan state television, does not mean that Saif Gadhafi has usurped power from his father: Senior U.S. officials said there's no indication the elder Gadhafi is losing his grip.Saif al-Islam Gadhafi, 38, gives Libya's first public speech acknowledging unrest. There's been no public indication why he, and not his father Moammar, talked.",
"answer": "Even while some may see the son as more open to change, there's little question that his loyalty remains first with Moammar and that his father has given little indication publicly that he's ready to let go and calls the shots."
},
{
"context": "The Beatles were an English rock band, formed in Liverpool in 1960, that comprised John Lennon, Paul McCartney, George Harrison and Ringo Starr. They are regarded as the most influential band of all time and were integral to the development of 1960s counterculture and popular music's recognition as an art form. They were led by primary songwriters Lennon and McCartney.",
"answer": "It is without a doubt that the Beatles were influential in rock and roll."
}
]),
pd.DataFrame([
{
"context": "One of the Internet's great promises is that it's the ultimate democratizer. It's open to everyone and allows all people to communicate. Facebook and Google have added new translation tools, but they take different approaches.",
"answer": "Pros and cons: Google's computerized approach means it can translate tons of content -- and fast."
},
{
"context": "Los Angeles, often referred to by its initials L.A., is the largest city in the U.S. state of California. With a population of roughly 3.9 million as of 2020, it is the second largest city in the United States after New York City and one of the world's most populous megacities. ",
"answer": "Los Angeles is known for its Mediterranean climate, ethnic and cultural diversity, Hollywood film industry, and sprawling metropolitan area."
},
{
"context": "The United States is cutting funding to the U.N. education and science agency UNESCO after the agency voted to accept a Palestinian bid for full membership, the U.S. State Department said Monday. \"Today's vote by the member states of UNESCO to admit Palestine as member is regrettable, premature and undermines our shared goal of a comprehensive just and lasting peace in the Middle East,\" said State Department spokeswoman Victoria Nuland.",
"answer": "Israel believes that the correct and only way to make progress in the diplomatic process with the Palestinian is through direct negotiations without preconditions."
},
]),
pd.DataFrame([
{
"context": "(CNN) -- Martin Luther King Jr. fought and died so blacks would no longer be viewed as inferior but rather enjoy the same inherent rights given to whites in America. Yet in 2014, 50 years since the passage of the Civil Rights Act, the West View News thinks it's appropriate to publish a story about our first black president, Barack Obama, with the headline, \"The Nigger in the White House.\" Oh, the times we are living in. ",
"answer": "The entire incident shows how far America has to yet to go in race relations."
},
{
"context": "Martin Skrtel has warned Liverpool’s top-four rivals they should be ready to fight as they look to take momentum from his ‘most important’ goal. The Slovakian defender had eight staples put into a head wound following a clash with Olivier Giroud but carried on with a bandage to score the equaliser deep into injury time in Sunday’s dramatic 2-2 draw with Arsenal at Anfield. Liverpool have a chance to push on over Christmas with a fixture away to Burnley followed by home games against Swansea and Leicester.",
"answer": "The Liverpool defender celebrates his last-minute goal as the crowd go wild at Anfield"
},
{
"context": "Tripoli, Libya (CNN) -- It has been almost two weeks since Eman al-Obeidy burst into our hotel in Tripoli, desperate for the world to hear her story of rape and torture. We were finally able to speak to her Wednesday, against the explicit wishes of the Libyan government. The interview with al-Obeidy was facilitated by Gadhafi's son Saadi. We asked al-Obeidy if she would be willing to come to Saadi Gadhafi's office. She agreed and Gadhafi sent a car to pick her up. She says she wants to clear her name, smeared on state TV. Story of rape and torture became known after she burst into a Tripoli hotel",
"answer": "Later Saadi Gadhafi told me: \"The people responsible for raping her should face charges.\""
},
])
]
# TODO: how to fit in more ICL-demos
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
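# SQuAD-style scoring: token-overlap F1 and exact match, each taking the max over all acceptable gold answers.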
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(labels, predictions):
f1 = exact_match = total = 0
for label_set, pred in zip(labels, predictions):
total += 1
exact_match += metric_max_over_ground_truths(
exact_match_score, pred, label_set)
f1 += metric_max_over_ground_truths(f1_score, pred, label_set)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
class ReCoRDDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
# Download from https://sheng-z.github.io/ReCoRD-explorer/
if not Path(f"{DATA_DIR}/record").exists():
raise ValueError(f"{DATA_DIR}/record doesn't exist. Please download.")
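        # Rebuild (passage, query) -> all acceptable answers from the raw ReCoRD JSON; the P3 rows keep
        # only a single gold answer per example, which would otherwise under-credit EM/F1.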
data = json.load(open(f"{DATA_DIR}/record/record_dev.json"))["data"]
self.test_query2labels = {}
t = 0
for ex in data:
passage = ex["passage"]["text"]
passage = unicodedata.normalize("NFKD", passage)
for q in ex["qas"]:
t += 1
answers = list(set([e["text"] for e in q["answers"]]))
query = unicodedata.normalize("NFKD", q["query"].strip())
key = (passage, query)
if key in self.test_query2labels:
assert set(self.test_query2labels[key]) == set(answers)
self.test_query2labels[key] = answers
data = json.load(open(f"{DATA_DIR}/record/record_train.json"))["data"]
self.train_query2labels = {}
t2 = 0
for ex in data:
passage = ex["passage"]["text"]
passage = unicodedata.normalize("NFKD", passage)
for q in ex["qas"]:
t2 += 1
answers = list(set([e["text"] for e in q["answers"]]))
query = unicodedata.normalize("NFKD", q["query"].strip())
key = (passage, query)
if key in self.train_query2labels:
assert set(self.train_query2labels[key]) == set(answers)
self.train_query2labels[key] = answers
print(f"Loaded {t} test examples and {t2} train examples")
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_text = " ".join(s_row['inputs_pretokenized'].split("\n\n")[1:]).strip()
s_query = s_text.split("\n")[-1]
s_text = s_text.replace(s_query, "").strip().replace("@highlight", "").replace("\n\n", ". ")
s_answer_choices = s_row['answer_choices']
s_clean_choices = []
for choice in s_answer_choices:
other_choices = [c for c in s_answer_choices if c != choice]
if not any(c for c in other_choices if choice.lower() in c.lower()):
s_clean_choices.append(s_query.replace("@placeholder", choice))
s_answer = s_row['targets_pretokenized']
s_choices = "\n- ".join(list(s_clean_choices))
s_answer = s_query.replace("@placeholder", f'{s_answer}')
if s_ind + 1 == len(few_shot_df):
icl_str += f"Context: {s_text}\n\nAnswer: {s_answer}"
else:
icl_str += f"Context: {s_text}\n\nAnswer: {s_answer}\n\n----\n\n"
text = " ".join(row['inputs_pretokenized'].split("\n\n")[1:]).strip()
query = text.split("\n")[-1]
passage = text.rsplit("\n", 1)[0]
key = (passage, query.strip())
if key in self.test_query2labels:
golds = self.test_query2labels[key]
else:
golds = [row['targets_pretokenized']]
text = text.replace(query, "").strip().replace("@highlight", "").replace("\n\n", ". ")
# gold = row['targets_pretokenized']
answer_choices = row['answer_choices']
answer, prompt = self.get_final_answer_full_sentence(answer_choices, None, None, text, query, manifest, icl_str=icl_str)
pred = answer
entry = {
"ind": ind,
"example": text,
"base_prompt": prompt,
"raw_answer": answer,
"pred": pred,
"gold": golds,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(golds)
metrics = evaluate(labels, preds)
print(metrics)
return expt_log, metrics["exact_match"]
def get_boost_decomp_examples(self, train_data, boost_id):
return [
cloze_completion_examples[boost_id]
]
def get_final_answer_full_sentence(self, answer_choices, prompt, boost_ex, text, query, manifest, icl_str='', size=2):
if boost_ex is None:
answer_prefix = "\n\nAnswer: "
prompt_suffix = icl_str
else:
answer_prefix = " "
prompt_suffix = prompt(boost_ex)
left, right = query.split("@placeholder")
clean_choices = []
for choice in answer_choices:
clean_choices.append(f"{choice}{right}")
# other_choices = [c for c in answer_choices if c != choice]
# if not any(c for c in other_choices if choice.lower() in c.lower()):
# clean_choices.append(query.replace("@placeholder", choice))
prompt = f"{prompt_suffix}\n\n----\n\nContext: {{text:}}{answer_prefix}{{left:}}"
pmp = prompt.format(text=text, left=left)
answers = []
for choice_group in range(0, len(clean_choices), size):
try:
raw_answer, score = get_response(
pmp,
manifest,
max_toks=20,
gold_choices=list(clean_choices[choice_group:choice_group+size])
)
raw_answer = raw_answer.replace(right, "").strip()
except Exception as e:
print(e)
raw_answer = ""
score = -1000
answers.append((raw_answer, score))
answers = sorted(answers, key=lambda x: x[1], reverse=True)
final_answer = answers[0][0].strip()
return final_answer, pmp
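# A note on the scoring loop above: candidate completions are scored in
# groups of `size` via gold_choices, and the completion with the highest
# log-probability across all groups is returned. For example (hypothetical
# choices), ["won the cup.", "lost the cup."] would be scored pairwise and
# the higher-scoring completion kept.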
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000, is_train=True)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(evaluate(labels, [p[i] for p in all_boost_preds])["exact_match"])
report = evaluate(labels, preds)
return expt_log, expt_log_train, report["exact_match"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1, is_train=False):
expt_log = {}
all_boost_preds = []
all_boost_answers = []
labels = []
label_data = self.test_query2labels if not is_train else self.train_query2labels
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = " ".join(row['inputs_pretokenized'].split("\n\n")[1:]).strip()
query = text.split("\n")[-1]
passage = text.rsplit("\n", 1)[0]
key = (passage, query.strip())
if key in label_data:
golds = label_data[key]
else:
golds = [row['targets_pretokenized']]
answer_choices = row['answer_choices']
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
answers_across_boost= []
for boost_num, boost_examples in enumerate(boost_dfs):
all_prompts = []
if "@highlight" not in boost_examples[0]:
text = text.replace(query, "").strip().replace("@highlight", "").replace("\n\n", ". ")
final_answer, prompt = self.get_final_answer_full_sentence(answer_choices, cloze_completion, boost_examples[0], text, query, manifest)
if i == 0:
print(prompt)
all_prompts.append(prompt)
pred = final_answer
answers_across_boost.append(final_answer)
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
all_boost_preds.append(preds_across_boost)
all_boost_answers.append(answers_across_boost)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": golds,
}
expt_log[ind] = entry
labels.append(golds)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "super_glue_record"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_record_exercise/"
decomp = ReCoRDDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/ReCoRD_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import ast
from tqdm.auto import tqdm
from decomposition import Decomposition, get_args
from utils import get_response, InputOutputPrompt, accuracy_span_overlap, load_hf_data
extract = InputOutputPrompt(
input_formatter=lambda x: f"Question: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Produce distinct questions.\n\n"
)
more_info_examples = [
pd.DataFrame([
{
"question": "who plays Carrie Bradshaw in sex and the city?",
"answer": "Caroline \"Carrie\" Bradshaw is a fictional character from the HBO franchise Sex and the City, portrayed by Sarah Jessica Parker."
},
{
"question": "what are the elements in air?",
"answer": "By mole fraction (i.e., by number of molecules), dry air contains 78.08% nitrogen, 20.95% oxygen, 0.93% argon, 0.04% carbon dioxide, and small amounts of other gases"
},
{
"question": "what is HP company?",
"answer": "HP Inc. is an American multinational information technology company headquartered in Palo Alto, California, that develops personal computers (PCs)"
},
{
"question": "when was the last season of FRIENDS released?",
"answer": "The series finale aired on May 6, 2004, and was watched by around 52.5 million American viewers, making it the fifth-most-watched series finale in television history"
}
]),
]
answer = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question.\n\n"
)
answer_question = [
pd.DataFrame([
{
'context': 'The nearest airport to Palm Springs is Indio/Palm Springs (PSP) Airport which is 2.1 miles away. ',
'question': 'what airport is closest to palm springs?',
'answer': 'Palm Springs International Airport'
},
{
'context': 'Martin Luther King earned his Bachelor of Divinity degree from Crozer Theological Seminary, followed by a doctorate in Systematic Theology from Boston University.',
'question': 'what degree did martin luther king get?',
'answer': 'Bachelor of Divinity'
},
{
'context': 'The Niger river runs in a crescent through Libya, Mali, Niger, on the border with Benin and then through Nigeria.',
'question': 'what countries does the niger river flow through?',
'answer': 'Libya'
},
{
'context': 'Puerto Rico is a territory of the United States and uses the U.S. dollar. ',
'question': 'what type of currency is used in puerto rico?',
'answer': 'United States dollar'
},
{
'context': 'kitt was voice most often by William daniels.',
'question': 'who played kitt in knight rider?',
'answer': 'William Daniels'
}
]),
pd.DataFrame([
{
'context': 'leonardo da vinci invented the parachute, the helicopter, double hull, an armored fighting vehicle,',
'question': 'what inventions did leonardo da vinci made?',
'answer': 'Double hull'
},
{
'context': "The French franc (F) was the national currency of France prior to France's adoption of the euro (EUR) in January 2002.",
'question': 'what currency is used in france before euro?',
'answer': 'French franc'
},
{
'context': 'The Isthmus of Panama, contains the country of Panama and the panama canal.',
'question': 'where is isthmus of panama located?',
'answer': 'Costa Rica'
},
{
'context': 'Hurricane Irene was a large and destructive tropical cyclone which affected much of the Caribbean and East Coast',
'question': 'where did hurricane irene?',
'answer': 'Eastern United States'
},
{
'context': 'Rihanna acted in This is the End and Battleship.',
'question': 'what movie did rihanna play in?',
'answer': 'This Is the End'
}
]),
pd.DataFrame([
{
'context': 'st vincent de paul is buried in the 6th arrondisment of Paris.',
'question': 'where is st vincent de paul buried?',
'answer': 'Paris'
},
{
'context': 'Thomas Luther "Luke" Bryan (born July 17, 1976) is an American country singer and songwriter from Leesburg.',
'question': 'where is luke bryan from?',
'answer': 'Leesburg'
},
{
'context': "Klum and Seal got married on 10 May 2005 on a beach in Mexico near Seal's home on Costa Careyes. ",
'question': 'where did heidi klum and seal get married?',
'answer': 'Mexico'},
{
'context': 'Tarantino starred in pulp fiction, grindhouse and others.',
'question': 'what movies did quentin tarantino star in?',
'answer': 'Grindhouse'
},
{
'context': 'Countries that are sometimes considered to be entirely or partially part of the Balkans are Croatia, Serbia, Lake Prespa.',
'question': 'what country is located in the balkan peninsula?',
'answer': 'Lake Prespa'
}
])
]
prefix_select_zeroshot = """Answer the question.\n\n"""
class WebQDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def read_data(self, save_dir, overwrite_data):
return load_hf_data(save_dir, self.task_name, self.val_split, "web_questions", overwrite_data)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
more_info_examples[0],
answer_question[boost_id],
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
prompt_suffix="",
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
entry = expt_log[ind]
pred = entry["pred"]
gold = entry["gold"]
else:
question = row.question
if isinstance(row.answers, str):
label = ast.literal_eval(row.answers)
else:
label = row.answers.tolist()
gold = label
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_question = s_row.question
if isinstance(s_row.answers, str):
label = ast.literal_eval(s_row.answers)
else:
label = s_row.answers.tolist()
icl_str += f"Question: {s_question}\nAnswer: {label[0]}\n\n"
prompt = (
icl_str
+ "Question: {question:}"
+ prompt_suffix
+ "\nAnswer:"
)
if i == 0:
print(prompt.format(question=question))
prompt = prompt.format(question=question)
raw_answer = get_response(
prompt, #prompt.format(question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
stop_token="\n\n",
)
pred = raw_answer.strip("\n").strip().lower()
entry = {
"ind": ind,
"example": question,
"base_prompt": prompt,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append([pred])
labels.append(gold)
metric = accuracy_span_overlap(preds=preds, golds=labels)
return expt_log, metric
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
boost_test, boost_train = [], []
for p in all_boost_preds:
samples = [lf[1] for lf in p]
boost_test.append(samples)
for p in all_boost_train_preds:
samples = [lf[1] for lf in p]
boost_train.append(samples)
preds = self.merge_boosted_preds(boost_test, boost_train, train_labels, expt_log, expt_log_train)
preds = [(x,y) for x,y in zip([p[0][0] for p in all_boost_preds], preds)]
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(accuracy_span_overlap(preds=[p[i] for p in all_boost_preds], golds=labels))
metric = accuracy_span_overlap(preds=preds, golds=labels)
return expt_log, expt_log_train, metric, individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if i == run_limit:
break
question = row.question
if isinstance(row.answers, str):
label = ast.literal_eval(row.answers)
else:
label = row.answers.tolist()
gold = label
prompts_across_boost = []
preds_across_boost = []
# extract context
prompt_suffix = extract(boost_dfs[0][0])
prompt = (
prompt_suffix + "\n\Question: {question:}\nAnswer:"
)
more_info_answer = get_response(
prompt.format(question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
stop_token="\n\n",
)
for boost_examples in boost_dfs:
all_prompts = []
prompt_suffix = answer(boost_examples[1])
prompt = (
prompt_suffix + "\n\nContext: {text:}\nQuestion: {question:}\nAnswer:"
)
if i == 0:
print(prompt.format(text=more_info_answer, question=question))
all_prompts.append(prompt.format(text=more_info_answer, question=question))
raw_answer = get_response(
prompt.format(text=more_info_answer, question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
stop_token="\n\n",
)
pred = raw_answer.split("\n")[0].strip().lower()
prompts_across_boost.append(all_prompts)
preds_across_boost.append((more_info_answer, pred))
entry = {
"ind": ind,
"example": question,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "webq"
data_dir = "webq"
webq = WebQDecomp(task_name, data_dir, val_split="test")
webq.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/webq_final.py |
#!/usr/bin/env python
# coding: utf-8
from pathlib import Path
from tqdm.auto import tqdm
import pandas as pd
from nltk.corpus import stopwords
from datasets import load_dataset
stops = set(stopwords.words("english"))
from sklearn.metrics import classification_report
from utils import get_response, InputOutputPrompt, load_hf_data
from decomposition import Decomposition, get_args
extract_relevant_phrase = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}",
output_formatter=lambda x: f"Extract: {x['extract']}",
required_keys=["passage", "extract"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Extract the phrase containing the pronoun.\n\n"
)
extract_relevant_phrase_examples = [
pd.DataFrame([
{
"passage": "Jane's mom went to the shop to buy Jane a backpack for \"her\" first day of kindergarten.",
"extract": "phrase containing \"her\": \"her\" first day"
},
{
"passage": "The musicians performed in the park and the crowd loved \"them\". The crowd cheered for them.",
"extract": "phrase containing \"them\": crowd loved \"them\""
},
{
"passage": "Jeff gave his son some money because \"he\" wanted to buy lunch.",
"extract": "phrase containing \"he\": \"he\" wanted to buy"
}
]),
pd.DataFrame([
{
"passage": "The dog chased the cat. The cat ran up a tree and \"it\" waited at the top.",
"extract": "phrase containing \"it\": \"it\" waited at the top"
},
{
"passage": "The musicians performed in the park and the crowd loved \"them\". The crowd cheered for them.",
"extract": "phrase containing \"them\": crowd loved \"them\""
},
{
"passage": "John couldn't see the stage with Billy in front of him because \"he\" is so short.",
"extract": "phrase containing \"he\": \"he\" is so short"
}
]),
pd.DataFrame([
{
"passage": "The candle gave some light during the blackout, but after a while \"it\" also burned out.",
"extract": "phrase containing \"it\": \"it\" also burned out"
},
{
"passage": "Mark stocked the pantry with \"his\" son Jack's favorite cookies.",
"extract": "phrase containing \"his\": \"his\" son Jack's"
},
{
"passage": "Mary invited Jenna to \"her\" birthday party, but didn't invite Anna.",
"extract": "phrase containing \"her\": \"her\" birthday party"
}
]),
]
convert_reason_to_q = InputOutputPrompt(
input_formatter=lambda x: f"Input: {x['input']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["input", "question"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the input as a question.\n\n"
)
convert_reason_to_q_examples = [
pd.DataFrame([
{
"input": "it was made of glass",
"question": "What was made of glass?"
},
{
"input": "they are funny",
"question": "Who or what are funny?"
},
{
"input": "he drowned",
"question": "Who drowned?"
},
{
"input": "wrap around them",
"question": "Wrap around who or what?"
},
{
"input": "his cat is black",
"question": "Whose cat is black?"
},
{
"input": "laugh at them",
"question": "Laugh at who?"
},
{
"input": "her friend jennfier",
"question": "Whose friend Jennifer?"
}
]),
pd.DataFrame([
{
"input": "it was made of glass",
"question": "What was made of glass?"
},
{
"input": "they are funny",
"question": "Who or what are funny?"
},
{
"input": "he drowned",
"question": "Who drowned?"
},
{
"input": "wrap around them",
"question": "Wrap around who or what?"
},
{
"input": "his cat is black",
"question": "Whose cat is black?"
},
{
"input": "laugh at them",
"question": "Laugh at who?"
},
{
"input": "her friend jennfier",
"question": "Whose friend Jennifer?"
}
]),
pd.DataFrame([
{
"input": "it was made of glass",
"question": "What was made of glass?"
},
{
"input": "they are funny",
"question": "Who or what are funny?"
},
{
"input": "he drowned",
"question": "Who drowned?"
},
{
"input": "wrap around them",
"question": "Wrap around who or what?"
},
{
"input": "his cat is black",
"question": "Whose cat is black?"
},
{
"input": "laugh at them",
"question": "Laugh at who?"
},
{
"input": "her friend jennfier",
"question": "Whose friend Jennifer?"
}
]),
]
answer_q_in_passage = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passage", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question.\n\n"
)
answer_q_in_passage_examples = [
pd.DataFrame([
{
"passage": "Jane's mom went to the shop to buy Jane a backpack for her first day of kindergarten.",
"question": "Whose first day?",
"answer": "Jane"
},
{
"passage": "Mrs. Jenna told Fred she loved him.",
"question": "Who loved him?",
"answer": "Mrs. Jenna"
},
{
"passage": "Joe gave Mark some money so he could buy lunch.",
"question": "Who could buy lunch?",
"answer": "Mark"
}
]),
pd.DataFrame([
{
"passage": "Joe gave Mark some money so he could buy lunch.",
"question": "Who could buy lunch?",
"answer": "Mark"
},
{
"passage": "Jane's mom went to the shop to buy Jane a backpack for her first day of kindergarten.",
"question": "Whose first day?",
"answer": "Jane"
},
{
"passage": "Mark stocked the pantry with his son Jack's favorite cookies.",
"question": "Whose son?",
"answer": "Mark"
},
]),
pd.DataFrame([
{
"passage": "The candle burned out after some time. It dripped a lot of wax.",
"question": "What dripped?",
"answer": "The candle"
},
{
"passage": "Mark stocked the pantry with his son Jack's favorite cookies.",
"question": "Whose son?",
"answer": "Mark"
},
{
"passage": "Mary invited Jenna to her birthday party.",
"question": "Whose birthday party?",
"answer": "Mary"
}
]),
]
class WSCDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
extract_relevant_phrase_examples[boost_id],
convert_reason_to_q_examples[boost_id],
answer_q_in_passage_examples[boost_id],
]
def read_data(self, save_dir, overwrite_data):
return load_hf_data(save_dir, self.task_name, self.val_split, "SetFit/wsc_fixed", overwrite_data)
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['text']
gold = row['label_text']
if gold == 'False':
gold = 'No'
elif gold == 'True':
gold = 'Yes'
pronoun = row['span2_text']
subject = row['span1_text']
text_toks = text.split(" ")
text_toks_prefix = text_toks[:row['span2_index']]
text_toks_suffix = text_toks[row['span2_index']+len(pronoun.split()):]
text_toks = text_toks_prefix + [f'"{pronoun}"'] + text_toks_suffix
passage = " ".join(text_toks).strip(".").strip() + "."
question = f"Question: In the passage above, does the pronoun \"{pronoun}\" refer to {subject}?"
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_text = s_row['text']
s_gold = s_row['label_text']
if s_gold == 'False':
s_gold = 'No'
elif s_gold == 'True':
s_gold = 'Yes'
s_pronoun = s_row['span2_text']
s_subject = s_row['span1_text']
s_text_toks = s_text.split(" ")
s_text_toks_prefix = s_text_toks[:s_row['span2_index']]
s_text_toks_suffix = s_text_toks[s_row['span2_index']+len(s_pronoun.split()):]
s_text_toks = s_text_toks_prefix + [f'"{s_pronoun}"'] + s_text_toks_suffix
s_passage = " ".join(s_text_toks).strip(".").strip() + "."
s_question = f"Passage: {s_passage}\nQuestion: In the passage above, does the pronoun \"{s_pronoun}\" refer to {s_subject}?"
icl_str += f"{s_question}\nAnswer: {s_gold}\n\n"
prompt = f"{icl_str}Passage: {{passage:}}\n{question}\nAnswer:"
pmp = prompt.format(passage=passage)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.split("\n")
answer = [a for a in answer if a]
if len(answer) <= 0:
answer = ""
else:
answer = answer[0]
answer = " ".join(
[a.strip(",").strip(".").strip() for a in answer.split()]
)
is_yes = "yes" in answer.lower().split()
is_no = "no" in answer.lower().split()
if is_yes and not is_no:
pred = "Yes"
elif is_no and not is_yes:
pred = "No"
else:
pred = ""
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def question_answer(
self,
all_prompts,
boost_exs,
passage,
original_passage,
pronoun,
manifest,
overwrite_manifest,
):
prompt_suffix = all_prompts[0](boost_exs[0])
extract_prompt = (
f"{prompt_suffix}\n\nPassage: {{passage:}}\nExtract: phrase containing \"{{pronoun:}}\": "
)
extract_pmp = extract_prompt.format(passage=passage, pronoun=pronoun)
relevant_phrase = get_response(
extract_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
relevant_phrase = relevant_phrase.split("\n")[0]
relevant_phrase = relevant_phrase.replace('"', '')
prompt_suffix = all_prompts[1](boost_exs[1])
convert_prompt = f"{prompt_suffix}\n\nInput: {{relevant_phrase:}}\nQuestion:"
convert_pmp = convert_prompt.format(relevant_phrase=relevant_phrase)
converted = get_response(
convert_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
converted = converted.split("\n")[0].replace("Question: ", "")
prompt_suffix = all_prompts[2](boost_exs[2])
answer_prompt = f"{prompt_suffix}\n\nPassage: {{passage:}}\nQuestion: {{converted:}}\nAnswer:"
answer_pmp = answer_prompt.format(passage=original_passage, converted=converted)
answer = get_response(
answer_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.split("\n")[0].strip("'s").strip().replace("Answer: ", "").replace("A: ", "").strip()
return answer, extract_pmp, convert_pmp, answer_pmp
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
text = row['text']
gold_answer = row['label_text']
pronoun = row['span2_text']
gold = row['span1_text']
text_toks = text.split(" ")
text_toks_prefix = text_toks[:row['span2_index']]
text_toks_suffix = text_toks[row['span2_index']+len(pronoun.split()):]
text_toks = text_toks_prefix + [f'\"{pronoun}\"'] + text_toks_suffix
passage = " ".join(text_toks).strip(".").strip() + "."
original_passage = text.strip(".").strip() + "."
# gold = question.split("refer to")[-1].replace("?", "").strip().lower()
gold_split = gold.split()
if gold_split[0] in stops:
gold = " ".join(gold_split[1:])
(
qa_answer,
extract_prompt,
convert_prompt,
answer_prompt,
) = self.question_answer(
[extract_relevant_phrase, convert_reason_to_q, answer_q_in_passage],
boost_examples,
passage,
original_passage,
pronoun,
manifest,
overwrite_manifest,
)
all_prompts.append(extract_prompt)
all_prompts.append(convert_prompt)
all_prompts.append(answer_prompt)
if i == 0:
print(extract_prompt)
print(convert_prompt)
print(answer_prompt)
answer_no_stop = " ".join(
[a for a in qa_answer.lower().split() if a not in stops]
).lower()
gold_no_stop = " ".join([a for a in gold.lower().split() if a not in stops]).lower()
answer_no_stop = answer_no_stop.strip("s")
gold_no_stop = gold_no_stop.strip("s")
if (
answer_no_stop.strip() == gold_no_stop.strip()
or gold_no_stop.strip() == answer_no_stop.strip()
):
pred = "True"
else:
pred = "False"
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold_answer,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold_answer)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "super_glue_wsc"
data_dir = "SetFit/wsc_fixed"
decomp = WSCDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/WSC_final.py |
from pathlib import Path
from collections import Counter
import json
from datasets import load_dataset
import re
import pandas as pd
from typing import Callable, List
from manifest import Manifest
class InputOutputPrompt:
def __init__(self,
input_formatter: Callable,
output_formatter: Callable,
required_keys: List,
input_output_sep: str = "\n",
example_sep: str = "\n\n",
instruction: str = ""
):
self.input_formatter = input_formatter
self.output_formatter = output_formatter
self.required_keys = required_keys
self.input_output_sep = input_output_sep
self.example_sep = example_sep
self.instruction = instruction
def __call__(self, input_output_pairs: pd.DataFrame):
examples = []
for _, example in input_output_pairs.iterrows():
examples.append(f"{self.input_formatter(example)}{self.input_output_sep}{self.output_formatter(example)}")
if examples:
input_str = self.example_sep.join(examples)
res = f"{self.instruction}{input_str}"
else:
res = f"{self.instruction}".rstrip()
return res
def __repr__(self):
dummy_ex = pd.DataFrame([{k: f"<{k.upper()}>" for k in self.required_keys}])
st = self(dummy_ex)
return st
def prefix_formatter(ex_keys: List[str], prefix: str, error_on_empty: bool = True) -> str:
def full_prefix_formatter(ex: pd.Series):
for k in ex_keys:
if k in ex:
return f"{prefix} {getattr(ex, k)}"
if error_on_empty:
raise ValueError(f"Example {ex} has no value for any of the keys {ex_keys}")
else:
return f"{prefix}"
return full_prefix_formatter
def get_manifest_session(
client_name="huggingface",
client_engine=None,
client_connection="http://127.0.0.1:5000",
cache_connection=None,
temperature=0,
top_p=1.0,
):
if client_name == "huggingface" and temperature == 0:
params = {
"temperature": 0.001,
"do_sample": False,
"top_p": top_p,
}
elif client_name in {"openai", "ai21"}:
params = {
"temperature": temperature,
"top_p": top_p,
"engine": client_engine,
}
else:
raise ValueError(f"{client_name} is not a valid client name")
manifest = Manifest(
client_name=client_name,
client_connection=client_connection,
cache_name="sqlite",
cache_connection=cache_connection,
session_id=None,
**params,
)
params = manifest.client.get_model_params()
model_name = params["model_name"]
if "engine" in params:
model_name += f"_{params['engine']}"
return manifest, model_name
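# Usage sketch (the connection strings below are assumptions; point them at
# your own manifest server and cache file):
#
#   manifest, model_name = get_manifest_session(
#       client_name="huggingface",
#       client_connection="http://127.0.0.1:5000",
#       cache_connection="/tmp/manifest_cache.sqlite",
#   )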
def get_response(
prompt,
manifest,
overwrite=False,
max_toks=10,
stop_token=None,
gold_choices=[],
verbose=False,
):
prompt = prompt.strip()
if gold_choices:
gold_choices = [" " + g.strip() for g in gold_choices]
response_obj = manifest.run(
prompt, gold_choices=gold_choices, overwrite_cache=overwrite, return_response=True
)
response_obj = response_obj.get_json_response()["choices"][0]
log_prob = response_obj["text_logprob"]
response = response_obj["text"]
else:
response = manifest.run(
prompt,
max_tokens=max_toks,
stop_token=stop_token,
overwrite_cache=overwrite,
)
log_prob = None
if verbose:
print("\n***Prompt***\n", prompt)
print("\n***Response***\n", response)
if log_prob:
return response, log_prob
return response
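# Usage sketch (assumes a live session from get_manifest_session):
#
#   text = get_response("Q: 1+1?\nA:", manifest, max_toks=5, stop_token="\n")
#   # With gold_choices the model scores each candidate instead, and a
#   # (text, log_prob) pair is returned:
#   ans, lp = get_response("Q: 1+1?\nA:", manifest, gold_choices=["2", "3"])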
def load_hf_data(save_dir, task_name, val_split, hf_name, overwrite_data):
save_data = Path(f"{save_dir}/{task_name}/data.feather")
if not save_data.exists() or overwrite_data:
dataset = load_dataset(hf_name)
test_data = dataset[val_split].to_pandas()
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(f"{save_data}")
save_data_train = Path(f"{save_dir}/{task_name}/train_data.feather")
if not save_data_train.exists() or overwrite_data:
dataset = load_dataset(hf_name)
train_data = dataset["train"].to_pandas()
train_data.to_feather(f"{save_data_train}")
else:
print(f"Reading train data from {save_data_train}")
train_data = pd.read_feather(f"{save_data_train}")
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def save_log(task_name, expt_name, log, final_run_dir):
final_run_dir = Path(final_run_dir)
output_fpath = final_run_dir / task_name
output_fpath.mkdir(parents=True, exist_ok=True)
print("Saving to", output_fpath / f"{expt_name}.json")
assert all(a in list(log.values())[0].keys() for a in ["ind","example","pred","gold"])
with open(output_fpath / f"{expt_name}.json", "w") as f:
json.dump(log, f)
def text_f1(preds, golds):
"""Compute average F1 of text spans.
Taken from Squad without prob threshold for no answer.
"""
total_f1 = 0
for pred, gold in zip(preds, golds):
pred_toks = pred.split()
gold_toks = gold.split()
common = Counter(pred_toks) & Counter(gold_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
total_f1 += int(gold_toks == pred_toks)
elif num_same == 0:
total_f1 += 0
else:
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
total_f1 += f1
f1_avg = total_f1 / len(golds)
return f1_avg
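# Worked example (hypothetical strings): pred "the cat sat" vs gold "cat sat"
# shares 2 tokens, so precision = 2/3, recall = 2/2, and F1 = 0.8.
#
#   text_f1(["the cat sat"], ["cat sat"])  # -> 0.8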
def accuracy_span_overlap(preds, golds):
correct = 0
for pred, gold in zip(preds, golds):
found = False
for p in pred:
for g in gold:
if len(p) < len(g):
if p.lower() in g.lower():
found = True
break
else:
if g.lower() in p.lower():
found = True
break
if found: correct += 1
return correct / len(preds)
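# Worked example (hypothetical spans): the shorter string is checked for
# containment in the longer one, case-insensitively.
#
#   accuracy_span_overlap(preds=[["paris"]], golds=[["Paris, France"]])
#   # -> 1.0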
| ama_prompting-main | tasks/utils.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
import pandas as pd
from nltk.corpus import stopwords
stops = set(stopwords.words("english"))
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
questioner = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
input_output_sep="\n",
example_sep="\n\n",
required_keys=["question", "statement"],
instruction="Rewrite the statement as a question.\n\n"
)
questioner_examples = [
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test hard?"
},
{
"statement": "it was a good idea to buy your parents gifts",
"question": "Was it a good idea to buy your parents gifts?"
},
{
"statement": "The 20 cans will arrive in the grocery store tomorrow.",
"question": "Will the 20 cans arrive in the grocery store tomorrow?"
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?"
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?"
}
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test hard?"
},
{
"statement": "it was a good idea to buy your parents gifts",
"question": "Was it a good idea to buy your parents gifts?"
},
{
"statement": "The 20 cans will arrive in the grocery store tomorrow.",
"question": "Will the 20 cans arrive in the grocery store tomorrow?"
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?"
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?"
}
]),
pd.DataFrame([
{
"statement": "tennis can be played on grass courts",
"question": "Can tennis be played on grass courts?",
},
{
"statement": "the artist painted a picture of the apple in a bowl.",
"question": "Did the artist paint a picture of an apple in a bowl?",
},
{
"statement": "mary is unhappy with tim.",
"question": "Is mary unhappy with Tim?",
},
{
"statement": "after school, Jim was going to go to the park",
"question": "Was Jim going to go to the park after school?",
},
]),
pd.DataFrame([
{
"statement": "she prefers kittens over puppies",
"question": "What does she prefer over puppies?\nAnswer: kittens",
},
{
"statement": "Max and his wife went on a trip to Europe",
"question": "Where did Max and his wife go on a trip?\nAnswer: Europe",
},
{
"statement": "jared was born during the war in 1942",
"question": "Jared was born during a war in which year?\nAnswer: 1942",
},
{
"statement": "it took jenna 7 attempts to solve the problem",
"question": "How many attempts did it take Jenna to solve the problem?\nAnswer: 7",
},
]),
]
openended_qa = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['passage']}\n\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
input_output_sep="\n\n",
example_sep="\n\n----\n\n",
required_keys=["question", "statement", 'answer'],
instruction="Answer the question. If there is no evidence in the context, return \"Unknown\".\n\n"
)
openended_qa_examples = [
pd.DataFrame([
{
"passage": "Jenna's 10th birthday was yesterday evening and at least 10 of her friends attended the party.",
"question": "Did 10 friends attend Jenna's party?",
"answer": "Unknown, at least 10"
},
{
"passage": "The bullies attacked John when he was walking through the elementary school parking lot and then got sent to the teacher's office.",
"question": "Did the bullies attack John in the teacher's office?",
"answer": "No, parking lot"
},
{
"passage": "WISS discovered a new monkey disease in a remote tribe in the Amazon rainforrest last week. It was highly contagious.",
"question": "Did WISS discover a new disease?",
"answer": "Yes, new monkey disease"
},
]),
pd.DataFrame([
{
"passage": "Jenna's birthday was yesterday evening and at least 10 of her friends attended the party.",
"question": "Did 10 friends attend Jenna's party?",
"answer": "unknown, at least 10"
},
{
"passage": "The bullies punched John when he was walking through the elementary school parking lot. They punched 3 times.",
"question": "Did the bullies punch John 4 time?",
"answer": "No, 3 times"
},
{
"passage": "WISS discovered a new monkey disease in a remote tribe in the Amazon rainforrest last week. It was highly contagious.",
"question": "Did WISS discover a new species of monkeys?",
"answer": "Unknown"
},
]),
pd.DataFrame([
{
"passage": "The doctor performed surgery at the hospital and then went to the school to pick up her son.",
"question": "Was the surgery successful?",
"answer": "Unknown"
},
{
"passage": "As soon as the book was released, it became a New York Times fiction bestseller.",
"question": "Is the book non-fiction?",
"answer": "No, Fiction bestseller"
},
{
"passage": "During the presidential election polls last week, Jeff had 15% more votes than John",
"question": "Were Jack and John running for president?",
"answer": "Yes, presidential election"
},
]),
pd.DataFrame([
{
"passage": "According to Biraben, the plague was present somewhere in Italy in every year between 1346 and 1671",
"question": "Where was the plague present?",
"answer": "somewhere in Italy"
},
{
"passage": "Jenna's birthday was yesterday evening and at least 10 of her friends attended the party.",
"question": "How many of Jenna's friends attended?",
"answer": "at least 10"
},
{
"passage": "Mitsubishi Motor Corp's vehicle sales fell by 42 percent in June",
"question": "When did Mitsubishi's sales fall?",
"answer": "June"
},
{
"passage": "The bullies attacked in the elementary school parking lot and then got sent to the teacher's office.",
"question": "Who or what did the bullies punch?",
"answer": "Unknown"
},
]),
]
cloze_convertor = InputOutputPrompt(
input_formatter=lambda x: f"Example: {x['passage']}",
output_formatter=lambda x: f"Output: {x['question']}",
input_output_sep="\n",
example_sep="\n\n",
required_keys=["question", "passage"],
instruction=""
)
cloze_examples = [
pd.DataFrame([
{
"passage": "Barrack Obama believes the best novel is Harry Potter.",
"question": "Barrack Obama believes the best novel is Harry",
},
{
"passage": "The girl invited 12 friends to her birthday party last week.",
"question": "The girl invited 12 friends to her birthday ",
},
{
"passage": "Apple computers are worse than Dell computers.",
"question": "Apple computers are worse",
},
{
"passage": "Welcome to New York.",
"question": "Welcome to New"
}
]),
]
cloze_choices = InputOutputPrompt(
input_formatter=lambda x: f"Example: {x['example']}\nList alternatives:\n- {x['alternatives1']}\n- {x['alternatives2']}\n- {x['alternatives3']}",
output_formatter=lambda x: f"",
input_output_sep="",
example_sep="\n\n",
required_keys=["example", "alternatives1", "alternatives2", "alternatives3"],
instruction="Output a list of unique alternatives for each example.\n\n"
)
cloze_choice_examples = [
pd.DataFrame([
{
"example": "Barrack Obama believes the",
"alternatives1": "best novel is Harry Potter",
"alternatives2": "worst book is Harry Potter",
"alternatives3": "United States is great"
},
{
"example":"The Beatles were honored in:",
"alternatives1":"Buckingham Palace",
"alternatives2":"Mexico",
"alternatives3":"Tower of London"
},
{
"example":"Jerry Baker:",
"alternatives1":"is part of a soccer team",
"alternatives2":"is not part of a soccer team",
"alternatives3":"is a character in a book"
},
])
]
cloze_completion = InputOutputPrompt(
input_formatter=lambda x: f"Select One Choice:\n1. {x['alternatives1']}\n2. {x['alternatives2']}\n3. {x['alternatives3']}\n\nPassage: {x['passage']}\n\nThe passage \"Passage\" states: {x['statement']}: \"Choice\":",
output_formatter=lambda x: f"{x['answer']}",
input_output_sep=" ",
example_sep="\n\n----\n\n",
required_keys=["passage", "alternatives1", "alternatives2", "alternatives3", "statement", "answer"],
instruction="Select one choice from the passage.\n\n"
)
cloze_completion_examples = [
pd.DataFrame([
{
"passage": "Microsoft Corporation produces computer software, consumer electronics, and personal computers. It is headquartered at the Microsoft Redmond campus located in Redmond, Washington, United States.",
"alternatives1": "consumer electronics",
"alternatives2": "Play Stations",
"alternatives3": "cameras",
"statement": "Microsoft Corporation sells",
"answer": "consumer electronics"
},
{
"passage":"Sir Elton Hercules John CH CBE is a British singer, pianist and reknowned composer. His nickname is the Rocket Man.",
"alternatives1":"and tall man",
"alternatives2":"and trombone player",
"alternatives3":"and reknowned composer",
"statement": "Sir Elton John is a musician",
"answer": "and reknowned composer"
},
{
"passage":"The Mac versus Windows PC debate has been going on for a long time. Most people say the Windows PC three is superior. It comes down to personal preference.",
"alternatives1":"Lenovo computers",
"alternatives2":"Windows PC three",
"alternatives3":"Dell computers",
"statement": "Apple computers are superior to",
"answer": " Windows PC three"
},
])
]
class RTEDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, data_train, boost_id):
if boost_id < 4:
return [
questioner_examples[boost_id],
openended_qa_examples[boost_id],
]
else:
return [
cloze_examples[0],
cloze_choice_examples[0],
cloze_completion_examples[0]
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
prompt_suffix="",
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
entry = expt_log[ind]
pred = entry["pred"]
gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n\n"
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
if is_yes and (not is_no):
pred = "True"
if is_no and (not is_yes):
pred = "False"
elif not is_no and not is_yes:
pred = "False"
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_question(self, statement, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
question_prompt = f"{prompt_suffix}\n\nStatement: {{statement:}}\nQuestion:"
question_prompt = question_prompt.format(statement=statement).replace("\n\nAnswer:", "\nAnswer:")
chopped_answer = get_response(
question_prompt,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50)
chopped_answer = chopped_answer.split("\n")
question = [ch for ch in chopped_answer if ch][0]
answer = [ch for ch in chopped_answer if ch.startswith("Answer: ")]
if answer:
answer = answer[0].replace(",", "").replace(".", "").replace("?", "").replace("Answer: ", "")
answer = " ".join([a for a in answer.split() if a not in stops])
else:
answer = ''
if "A:" in question:
statement = statement.strip(".")
return f"{statement}. Yes or no?"
return question, answer, quesiton_prompt
def open_qa(self, question, passage, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
qa_prompt = f"{prompt_suffix}\n\n----\n\nContext: {{passage:}}\n\nQuestion: {{question:}}\n\nAnswer:"
qa_prompt = qa_prompt.format(passage=passage, question=question)
answer = get_response(
qa_prompt,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50
)
answer = answer.replace(",", "").replace(".", "").replace("?", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0]
else:
answer = passage
return answer, qa_prompt
def resolve_pred(self, answer, open_answer):
answer = answer.lower()
is_yes = "yes" in answer.split() or "true" in answer.split()
is_no = "no" in answer.split() or "false" in answer.split()
is_maybe = False
answer = answer.replace("-", "")
pred = "False"
if is_yes and (not is_maybe and not is_no) or (answer in open_answer or open_answer in answer):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
return pred
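# Worked examples (hypothetical strings):
#   resolve_pred("yes it shines", "sun")   # -> "True"  (explicit yes)
#   resolve_pred("no", "sun")              # -> "False" (explicit no)
#   resolve_pred("it shines", "shines")    # -> "True"  (answers overlap)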
def get_choices_answer(self, chopped_answer, cuttoff, prompt, boost_ex, manifest, overwrite_manifest, get_choices_prompt=''):
prompt_suffix = prompt(boost_ex)
prompt = f"{prompt_suffix}\n\nExample: {{example:}}\nList alternatives:\n- {{cuttoff:}}\n"
choices_answer = get_response(
prompt.format(example=chopped_answer, cuttoff=cuttoff),
manifest,
overwrite=bool(overwrite_manifest),
max_toks = 30
)
choices_answer = choices_answer.split("\n\n")[0]
choices_answer = choices_answer.split("\n")
choices_answer = [a.replace("- ", "").strip() for a in choices_answer]
choices_answer = [a for a in choices_answer if cuttoff.lower() not in a.lower()]
choices_answer = list(sorted(set(choices_answer)))
choices_answer = choices_answer[:min(len(choices_answer), 2)]
choices_answer = list(sorted(set(choices_answer)))
choices_answer.append(cuttoff)
choices_answer = [ch.strip(".") for ch in choices_answer]
return choices_answer, prompt
def get_chopping(self, question, prompt, boost_ex, manifest, overwrite_manifest, cuttoff_size=2, chopper_prompt=''):
prompt_suffix = prompt(boost_ex)
prompt = f"{prompt_suffix}\n\nExample: {{question:}}\nOutput:"
chopped_answer = get_response(
prompt.format(question=question),
manifest,
overwrite=bool(overwrite_manifest),
max_toks = len(question.split())*4
)
chopped_answer = chopped_answer.split("\n")[0]
chopped_list = chopped_answer.split()
question = question.split()
cuttoff = [t for t in question if t not in chopped_list]
cuttoff_str = " ".join(cuttoff).strip(".")
chopped_list_str = " ".join(chopped_list).strip(".")
if not cuttoff or chopped_list_str.endswith(cuttoff_str):
chopped_list = question[0:-cuttoff_size]
cuttoff = question[-cuttoff_size:]
cuttoff = " ".join(cuttoff)
chopped_answer = " ".join(chopped_list)
cuttoff = cuttoff.strip(".")
return chopped_answer, cuttoff, prompt
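# Fallback sketch (the main path depends on the model's output): if the
# model echoes the whole statement, the last `cuttoff_size` tokens become
# the cutoff, e.g. "The sky is blue." -> chopped "The sky", cutoff "is blue".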
def get_final_selection(self, choices_answer, passage, chopped_answer, prompt, boost_ex, manifest, overwrite_manifest, selector_prompt=''):
prompt_suffix = prompt(boost_ex)
select_choice_str = ""
gold_choice = choices_answer[-1]
other_choices = choices_answer[:-1]
for num, ch in enumerate(choices_answer):
select_choice_str += f"\n{num+1}. {ch}"
prompt = f"{prompt_suffix}\n\n----\n\nSelect one Choice:{{choices_str:}}\n\nPassage: {{passage:}}\nThe passage \"Passage\" states: {{chopped_answer:}} \"Choice\": "
select_answer = get_response(
prompt.format(choices_str=select_choice_str, passage=passage, chopped_answer=chopped_answer),
manifest,
overwrite=bool(overwrite_manifest),
max_toks = max(len(c.split()) for c in choices_answer)
)
select_answer = select_answer.lower()
select_answer = select_answer.split("\n")[0].strip(".")
if select_answer.lower() in gold_choice.lower():
answer = "True"
else:
answer = "False"
return answer, prompt
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
input = row['inputs_pretokenized']
gold = row['targets_pretokenized']
passage = input.split("Question: ")[0].strip("\n")
statement = input.split("Question: ")[-1].replace("True or False?", "")
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_num, boost_examples in enumerate(boost_dfs):
all_prompts = []
if boost_num < 4:
question, proposed_answer, question_final_prompt = self.get_question(
statement, questioner, boost_examples[0], manifest, overwrite_manifest
)
if i == 0:
print("PROMPT:")
print(question_final_prompt)
open_answer, answer_final_prompt = self.open_qa(
question, passage, openended_qa, boost_examples[1], manifest, overwrite_manifest
)
if i == 0:
print("\nPROMPT:")
print(answer_final_prompt)
all_prompts.append(question_final_prompt)
all_prompts.append(answer_final_prompt)
open_answer = open_answer.replace("-", "")
open_answer = " ".join([a for a in open_answer.split() if a not in stops])
if proposed_answer:
answer = proposed_answer.replace("-", "")
answer = " ".join([a for a in answer.split() if a not in stops])
if all(wd in open_answer.lower() for wd in answer.lower().split()) or all(wd in answer.lower() for wd in open_answer.lower().split()):
pred = "True"
else:
pred = 'False'
if not answer.strip():
pred = 'False'
else:
pred = self.resolve_pred(open_answer.lower(), open_answer)
else:
chopped_answer, cuttoff, chopper_prompt = self.get_chopping(
statement, cloze_convertor, boost_examples[0], manifest, overwrite_manifest, cuttoff_size=2)
choices_answer, choices_prompt = self.get_choices_answer(
chopped_answer, cuttoff, cloze_choices, boost_examples[1], manifest, overwrite_manifest)
pred, selector_prompt = self.get_final_selection(
choices_answer, passage, chopped_answer, cloze_completion, boost_examples[2], manifest, overwrite_manifest)
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"example": input,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 5
task_name = "super_glue_rte"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_rte_GPT_3_style/"
decomp = RTEDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/RTE_final.py |
#!/usr/bin/env python
# coding: utf-8
from pathlib import Path
import argparse
from collections import Counter
import pandas as pd
import json
import numpy as np
import datetime
import os
import random
from utils import save_log, get_manifest_session
DATA_DIR = os.environ.get("AMA_DATA", "/home/data")
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=3)
parser.add_argument(
"--num_run", type=int, default=-1, help="Number of rows of test data to run"
)
parser.add_argument(
"--k_shot", type=int, default=3, help="Number of few shot"
)
parser.add_argument(
"--num_boost", type=int, default=3, help="Number of few shot sets to boost over")
parser.add_argument(
"--boost_train_examples", type=int, default=1000, help="Number of training examples to run through for boosting"
)
parser.add_argument(
"--output_metrics_file", type=str, default="decomposition_metrics.json", help="Output file for all metrics."
)
parser.add_argument(
"--save_dir", type=str, default="/home/final_runs/", help="Data directory"
)
parser.add_argument(
"--run_decomp",
type=int,
default=1,
help="Run decomp",
choices=[0, 1],
)
parser.add_argument(
"--run_zeroshot",
type=int,
default=1,
help="Run zeroshot",
choices=[0, 1],
)
parser.add_argument(
"--run_fewshot",
type=int,
default=1,
help="Run fewshot",
choices=[0, 1],
)
parser.add_argument(
"--run_zeroshot_decomp",
type=int,
default=0,
help="Run zero shot decomp",
choices=[0, 1],
)
parser.add_argument(
"--overwrite_boost_exs",
type=int,
default=0,
help="Overwrite boost examples",
choices=[0, 1],
)
parser.add_argument(
"--overwrite_data",
type=int,
default=0,
help="Overwrite saved data examples",
choices=[0, 1],
)
# Manifest
parser.add_argument(
"--client_name",
type=str,
default="huggingface",
help="Client name manifest",
choices=["huggingface", "openai", "ai21"],
)
parser.add_argument(
"--client_engine",
type=str,
default=None,
help="Client engine manifest. Only used for openai/ai21",
choices=["davinci"],
)
parser.add_argument(
"--client_connection",
type=str,
default="http://127.0.0.1:5000",
help="Client connection str",
)
parser.add_argument(
"--cache_connection",
type=str,
default="/home/manifest/final_runs.sqlite",
help="Cache connection str",
)
parser.add_argument(
"--overwrite_manifest",
type=int,
default=0,
help="Overwrite manifest",
choices=[0, 1],
)
return parser.parse_args()
class Decomposition:
def __init__(self, task_name, data_dir, val_split="validation"):
self.task_name = task_name
self.data_dir = data_dir
self.val_split = val_split
def read_data(self, save_dir, overwrite_data):
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
test_data = pd.read_feather(f"{self.data_dir}/{self.val_split}.feather")
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(save_data)
save_data = Path(f"{save_dir}/{self.task_name}/train_data.feather")
if not save_data.exists() or overwrite_data:
train_data = pd.read_feather(f"{self.data_dir}/train.feather")
train_data.to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = pd.read_feather(save_data)
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
return train_data.sample(k_shot)
def get_boost_decomp_examples(self, train_data, boost_id=0):
"""Get boost examples"""
raise NotImplementedError()
def zero_few_baseline(
self, test_data, few_shot_df, manifest, overwrite_manifest, do_few_shot=True
):
"""Zero and few shot baseline"""
raise NotImplementedError()
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
"""Decomposition run"""
raise NotImplementedError()
def merge_boosted_preds(self, boosted_preds, all_boost_train_preds, train_labels, exp_log, expt_log_train, indecisive_ans=None):
"""Merge boosted preds"""
if isinstance(boosted_preds, list):
boosted_preds = np.array(boosted_preds)
if isinstance(all_boost_train_preds, list):
all_boost_train_preds = np.array(all_boost_train_preds)
if isinstance(train_labels, list):
train_labels = np.array(train_labels)
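# Infer the label vocabulary so string predictions can be mapped to signed integers (+1/-1/0) for voting.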
uniq = np.unique(boosted_preds)
pred_map = {}
if "yes" in uniq:
pred_map = {"yes": 1, "no": -1, "neither": 0}
elif "true" in uniq:
pred_map = {"true": 1, "false": -1, "neither": 0}
elif "positive" in uniq:
pred_map = {"positive": 1, "negative": -1, "neutral": 0}
pred_map_inv = {v:k for k,v in pred_map.items()}
use_pred_map = False
if all(p.lower() in pred_map for p in uniq):
use_pred_map = True
if use_pred_map:
# Cast to integers
boosted_preds = np.array([[pred_map[p.lower()] for p in preds] for preds in boosted_preds])
all_boost_train_preds = np.array(
[[pred_map[p.lower()] for p in preds] for preds in all_boost_train_preds]
)
train_labels = np.array([pred_map[p.lower()] for p in train_labels])
if indecisive_ans:
indecisive_ans = pred_map[indecisive_ans.lower()]
# Take majority vote
preds_test = []
for i, voter_preds in enumerate(boosted_preds):
most_common = Counter(voter_preds).most_common(1)[0]
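# A most-common count of 1 means every booster disagreed; fall back to the indecisive answer when one is set.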
if indecisive_ans is not None and len(voter_preds) > 1 and most_common[1] == 1:
majority_vote_pred = indecisive_ans
else:
majority_vote_pred = most_common[0]
if use_pred_map:
majority_vote_pred = pred_map_inv[majority_vote_pred]
preds_test.append(majority_vote_pred)
exp_log[i]["pred"] = majority_vote_pred
# Take majority vote
preds_train = []
for i, voter_preds in enumerate(all_boost_train_preds):
most_common = Counter(voter_preds).most_common(1)[0]
if indecisive_ans is not None and len(voter_preds) > 1 and most_common[1] == 1:
majority_vote_pred = indecisive_ans
else:
majority_vote_pred = most_common[0]
if use_pred_map:
majority_vote_pred = pred_map_inv[majority_vote_pred]
preds_train.append(majority_vote_pred)
expt_log_train[i]["pred"] = majority_vote_pred
return preds_test
def run(self, args):
print(json.dumps(vars(args), indent=4))
random.seed(args.seed)
np.random.seed(args.seed)
save_path = Path(f"{args.save_dir}/{self.task_name}")
save_path.mkdir(parents=True, exist_ok=True)
data_test, data_train = self.read_data(args.save_dir, bool(args.overwrite_data))
# Subsample train for boost exps
if args.boost_train_examples >= 0:
boost_data_train = data_train.head(min(len(data_train), args.boost_train_examples))
else:
boost_data_train = data_train
# Reset indexes for enumerations
boost_data_train = boost_data_train.reset_index(drop=True)
data_test = data_test.reset_index(drop=True)
data_train = data_train.reset_index(drop=True)
num_run = (
min(args.num_run, len(data_test)) if args.num_run > 0 else len(data_test)
)
save_results = True
if num_run != len(data_test):
print("Using {} rows".format(num_run))
data_test = data_test.iloc[:num_run]
save_results = False
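# Open the Manifest client session used for every LM call below.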
runner, model_name = get_manifest_session(
client_name=args.client_name,
client_engine=args.client_engine,
client_connection=args.client_connection,
cache_connection=args.cache_connection,
)
model_name = model_name.replace("/", "_")
print("Model name:", model_name)
# Read in few shot examples
few_shot_path = save_path / f"{args.k_shot}_shot_examples.feather"
if bool(args.overwrite_data) or not few_shot_path.exists():
mini_df = self.get_few_shot_examples(data_train, args.k_shot)
mini_df.reset_index().to_feather(few_shot_path)
else:
print(f"Reading few show examples from {few_shot_path}")
mini_df = pd.read_feather(few_shot_path)
# Read in few shot decomp examples - one data frame per decomp step
boost_examples = []
for i in range(args.num_boost):
boost_examples_per_step = []
# Get all steps
boost_examples_paths = list(save_path.glob(f"boost_examples_{i}_step*.feather"))
if bool(args.overwrite_boost_exs) or not boost_examples_paths or not all(p.exists() for p in boost_examples_paths):
boost_df_steps = self.get_boost_decomp_examples(data_train, boost_id=i)
if not isinstance(boost_df_steps, list) or not isinstance(
boost_df_steps[0], pd.DataFrame
):
raise ValueError("Must return list of dataframes, one per step")
for step, boost_df in enumerate(boost_df_steps):
boost_df.reset_index().to_feather(save_path / f"boost_examples_{i}_step{step}.feather")
print(f"Saving boost examples to", save_path / f"boost_examples_{i}_step{step}.feather")
boost_examples_per_step.append(boost_df)
else:
for boost_examples_p in sorted(boost_examples_paths):
print(f"Reading boost examples from {boost_examples_p}")
boost_examples_per_step.append(pd.read_feather(boost_examples_p))
boost_examples.append(boost_examples_per_step)
today = datetime.datetime.today().strftime("%m%d%Y")
# Default metrics
metric_zero = -1.0
metric_few = -1.0
metric_decomposed = -1.0
metric_decomposed_by_boost = []
metric_zeroshot_decomposed = -1.0
if bool(args.run_zeroshot):
# Zero Shot
run_name = f"{model_name}_0shot"
exp_zero, metric_zero = self.zero_few_baseline(
test_data=data_test,
few_shot_df=mini_df,
manifest=runner,
overwrite_manifest=args.overwrite_manifest,
do_few_shot=False,
)
if save_results:
save_log(self.task_name, run_name, exp_zero, args.save_dir)
if bool(args.run_fewshot):
# Few Shot
run_name = f"{model_name}_{args.k_shot}shot"
exp_few, metric_few = self.zero_few_baseline(
test_data=data_test,
few_shot_df=mini_df,
manifest=runner,
overwrite_manifest=args.overwrite_manifest,
do_few_shot=True,
)
if save_results:
save_log(self.task_name, run_name, exp_few, args.save_dir)
if bool(args.run_decomp):
# Decomp
run_name = f"{model_name}_decomposed_{today}"
exp_decomposed, exp_decomposed_train, metric_decomposed, metric_decomposed_by_boost = self.run_decomposed_prompt(
test_data=data_test, boost_data_train=boost_data_train, boost_dfs=boost_examples, manifest=runner, overwrite_manifest=args.overwrite_manifest
)
if save_results:
save_log(
self.task_name,
run_name,
exp_decomposed,
args.save_dir
)
if exp_decomposed_train:
save_log(
self.task_name,
f"{run_name}_train",
exp_decomposed_train,
args.save_dir
)
# Zero shot decomp
exp_zeroshot_decomposed = []
if bool(args.run_zeroshot_decomp):
run_name = f"{model_name}_decomposed_0shot_{today}"
(
exp_zeroshot_decomposed,
exp_zeroshot_decomposed_train,
metric_zeroshot_decomposed,
_,
) = self.run_decomposed_prompt(
test_data=data_test, boost_data_train=boost_data_train, boost_dfs=[[pd.DataFrame() for _ in range(len(boost_examples[0]))]], manifest=runner, overwrite_manifest=args.overwrite_manifest,
)
if save_results and len(exp_zeroshot_decomposed) > 0:
save_log(
self.task_name,
run_name,
exp_zeroshot_decomposed,
args.save_dir,
)
if exp_zeroshot_decomposed_train:
save_log(
self.task_name,
f"{run_name}_train",
exp_zeroshot_decomposed_train,
args.save_dir,
)
print("Accuracy Zero Shot", metric_zero)
print("Accuracy Few Shot", metric_few)
if len(metric_decomposed_by_boost) > 0:
print("Accuracy by Boost Set Decomposed", metric_decomposed_by_boost)
print("Accuracy by Boost Set Decomposed Average", np.mean(metric_decomposed_by_boost))
print("Accuracy Boost Decomposed", metric_decomposed)
if len(exp_zeroshot_decomposed) > 0:
print("Accuracy Zero Shot Decomposed", metric_zeroshot_decomposed)
metrics = {
"model_name": model_name,
"task_name": self.task_name,
"today": today,
"zero_shot": metric_zero,
"few_shot": metric_few,
"decomposed": metric_decomposed,
"decomposed_by_boost": metric_decomposed_by_boost,
"decomposed_by_boost_avg": np.mean(metric_decomposed_by_boost),
"zero_shot_decomposed": metric_zeroshot_decomposed,
}
output_metrics = Path(args.output_metrics_file)
output_metrics.parent.mkdir(parents=True, exist_ok=True)
with open(output_metrics, "a") as f:
f.write(json.dumps(metrics) + "\n")
print(f"Saved metrics to {output_metrics}")
print(f"Saved final data to", Path(args.save_dir) / self.task_name)
| ama_prompting-main | tasks/decomposition.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
from collections import Counter
import pandas as pd
import random
import numpy as np
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
what_next = InputOutputPrompt(
input_formatter=lambda x: f"Question: {x['example']}",
output_formatter=lambda x: f"{x['continue']}",
required_keys=["example", "continue"],
input_output_sep=" ",
example_sep="\n\n",
instruction="Pick the correct ending for the example.\n\n"
)
what_next_examples = [
pd.DataFrame([
{
"example": "(because 'she took medicine', because 'she got expelled') My roommate was feeling better because?",
"continue": "'she took medicine'",
},
{
"example": "(because 'he does not practice', because 'he is fast') Matt is not good at soccer because?",
"continue": "'he does not practice'",
},
{
"example": "(because 'she was smart', because 'she never did her homework') The girl went to college and graduated with honors because?",
"continue": "'she was smart'",
},
]),
pd.DataFrame([
{
"example": "(so 'he is always tired', so 'he is always sleeping') My dad works very hard so",
"continue": "'he is always tired'",
},
{
"example": "(so 'she threw a party', so 'she took medicine') My roommate was sick so",
"continue": "'she took medicine'",
},
{
"example": "(so 'he played', so 'he cried') Andy's parents got him a new toy so",
"continue": "'he played'",
},
]),
]
question = InputOutputPrompt(
input_formatter=lambda x: f"Question: {x['example']}",
output_formatter=lambda x: f"Answer: {x['continue']}",
required_keys=["example", "continue"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Pick the correct ending for the example.\n\n"
)
question_examples = [
pd.DataFrame([
{
"example": "What best continues the sentence \"My dad often talks about long hours at work because\"?",
"continue": "\"work is hard\"",
},
{
"example": "What best continues the sentence \"My roommate was sick and took medicine and so\"?",
"continue": "\"she felt better\"",
},
{
"example": "What best continues the sentence \"Andy's parents got him a new toy and so\"?",
"continue": "\"he played\"",
},
{
"example": "What best continues the sentence \"My roommate was feeling better because\"?",
"continue": "\"she took medicine\"",
}
])
]
questioner = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["statement", "question"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the statement as a yes/no question.\n\n"
)
questioner_examples = [
pd.DataFrame([
{
"statement": "Jonathan Samuels was born in the 70's.",
"question": "Was Jonathan Samuels born in the 70's?"
},
{
"statement": "Jerry bullied him and called him names",
"question": "Did Jerry bully him and call him names?",
},
{
"statement": "Sam and jade were going to go to the movies",
"question": "Did did Sam and jade go to the movies?",
},
{
"statement": "Chocolate is tasty, when I am feeling hungry.",
"question": "Does chocolate taste good when you are hungry?",
},
{
"statement": "Mark ran fast.",
"question": "Did mark run fast?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test hard?"
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?"
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?"
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?"
}
])
]
openended_qa = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passage", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction=""
)
openended_qa_examples = [
pd.DataFrame([
{
"context": "My aunt is a nurse and she often talks about long hours at work. Last week was especially bad and she was constantly working many hours.",
"question": "Was her work easy?",
"answer": "No, it was hard work."
},
{
"context": "My roommate was sick. She stayed home from work and school. She slept all day long and by the end of the day, she was feeling better.",
"question": "Did the rest help her?",
"answer": "Yes, she slept and felt better."
},
{
"context": "Andy had always wanted a big kids bike. When he turned six Year's old he asked for a bike for his birthday. He did not know how to ride a bike. On Andy's birthday his mother gave him a bike.",
"question": "Did he cry all night?",
"answer": "No, Andy was happy because he got a bike."
},
]),
pd.DataFrame([
{
"context": "It was a beautiful summer day outside. Bob decided to go for a walk at the park. Bob walked along the path and admired the scenery. He found a twenty dollar bill on the ground.",
"question": "Was he disappointed?",
"answer": "No, he was happy he got money."
},
{
"context": "Mike is a busy man. He often eats fast food for breakfast. Mike wanted to enjoy a healthier breakfast. He tried an overnight oatmeal recipe.",
"question": "Did Mike eat the oatmeal?",
"answer": "Yes"
},
{
"context": "Gina's sister cut her ankle on broken glass. The blood ran down her foot and into her shoe. When she saw the blood she ran home. Gina ran behind her, but couldn't keep up.",
"question": "Did Gina's sister go to the doctor?",
"answer": "Yes, because she was bleeding"
},
]),
pd.DataFrame([
{
"context": "My aunt is a nurse she works a lot. Last week was especially bad and she was constantly working many hours.",
"question": "Was her work easy?",
"answer": "No"
},
{
"context": "It was a beautiful day outside. Bob decided to go for a walkk. Bob walked along the path and admired the scenery. He found a twenty dollar bill on the ground.",
"question": "Was he disappointed?",
"answer": "No, he was happy he got money."
},
{
"context": "Mom didn't want to cook dinner tonight. We were all very hungry. She told us to fend for ourselves. We ate cold cereal for dinner tonight.",
"question": "Was everyone upset about the dinner?",
"answer": "Yes, the food was cold"
},
]),
]
sentiment = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['statement']}",
output_formatter=lambda x: f"Sentiment: {x['sentiment']}",
required_keys=["statement", "question"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Is the sentiment of the passage positive, negative, or neutral?\n\n"
)
sentiment_examples = [
pd.DataFrame([
{
"statement": "Mary saw the animal",
"sentiment": "neutral",
},
{
"statement": "the town is quaint , but ultimately too boring and ugly",
"sentiment": "negative",
},
{
"statement": "he's a strong athlete, people come from miles away to watch him compete",
"sentiment": "positive",
},
]),
pd.DataFrame([
{
"statement": "Mary saw the animal",
"sentiment": "neutral",
},
{
"statement": "the town is quaint , but ultimately too boring and ugly",
"sentiment": "negative",
},
{
"statement": "he's a strong athlete, people come from miles away to watch him compete",
"sentiment": "positive",
},
]),
pd.DataFrame([
{
"statement": "Mary saw the animal",
"sentiment": "neutral",
},
{
"statement": "the town is quaint , but ultimately too boring and ugly",
"sentiment": "negative",
},
{
"statement": "he's a strong athlete, people come from miles away to watch him compete",
"sentiment": "positive",
},
])
]
what_next2 = InputOutputPrompt(
input_formatter=lambda x: f"Choices:\n- {x['choice_a']}\n- {x['choice_b']}\n\nPassage: {x['passage']}",
output_formatter=lambda x: f"{x['answer']}",
required_keys=["choice_a", "choice_b", "passage", "answer"],
input_output_sep=" ",
example_sep="\n\n----\n\n",
instruction="Pick the best choice for the passage.\n\n"
)
what_next_examples2 = [
pd.DataFrame([
{
"passage": "My dad often talks about long hours at work. Because?",
"choice_a": "work is hard",
"choice_b": "work is easy",
"answer": "work is hard"
},
{
"passage": "My roommate was sick and took medicine. So?",
"choice_a": "she threw a party",
"choice_b": "she felt better",
"answer": "she felt better"
},
{
"passage": "Andy's parents got him a new toy. So?",
"choice_a": "he played",
"choice_b": "he cried",
"answer": "he played"
}
]),
pd.DataFrame([
{
"passage": "The girl went to college and graduated with honors.",
"choice_a": "She was qualified to get a job.",
"choice_b": "She was qualified to eat pizza.",
"answer": "she was qualified to get a job."
},
{
"passage": "Max bought all his friends cupcakes for the party.",
"choice_a": "They never spoke to him again.",
"choice_b": "They all thanked him.",
"answer": "They all thanked him."
},
{
"passage": "Sam felt so hungry so he bought himself some cheese!",
"choice_a": "After he ate the cheese, he was starving.",
"choice_b": "After he ate the cheese, he felt better.",
"answer": "After he ate the cheese, he felt better."
}
]),
pd.DataFrame([
{
"passage": "Sam and Jade were excited to see the new movie.",
"choice_a": "They went to the theater.",
"choice_b": "They went swimming.",
"answer": "They went to the theater."
},
{
"passage": "Matt is very competitive in soccer.",
"choice_a": "He practices all the time.",
"choice_b": "He loves to lose.",
"answer": "He practices all the time."
},
{
"passage": "She can read the entire book in a single day.",
"choice_a": "She is a slow reader.",
"choice_b": "She is a fast reader.",
"answer": "She is a fast reader."
}
])
]
class COPADecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
total = 0
total_crct = 0
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
entry = expt_log[ind]
pred = entry["pred"]
gold = entry["gold"]
else:
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_text = s_row['inputs_pretokenized'].replace("Pick the more likely continuation to the following sentence:", "").strip("\n")
s_parts = s_text.split(". ")
s_sentence = s_parts[0]
s_transition = s_parts[1]
options = [l for l in s_text.split("\n") if l.startswith("- ")]
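# Rewrite the COPA transition as a natural connective: "as a consequence" -> "so", "as a result of" -> "because".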
if "as a consequence" in s_transition:
s_text = f"{s_sentence} so"
elif "as a result of" in s_transition:
s_text = f"{s_sentence} because"
icl_str += f"Context: {s_text} {s_row['targets_pretokenized']}\n\n"
text = row['inputs_pretokenized']
parts = text.split(". ")
sentence = parts[0]
transition = parts[1]
options = [l for l in text.split("\n") if l.startswith("- ")]
if "as a consequence" in transition:
text = f"{sentence} so"
elif "as a result of" in transition:
text = f"{sentence} because"
text = text.replace("Pick the more likely continuation to the following sentence:", "").strip("\n")
gold = row['targets_pretokenized']
prompt = f"Pick the more likely continuation to the following sentence.\n\n\n{icl_str}Context: {{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer, _ = get_response(
pmp,
manifest,
gold_choices=[options[0].replace("- ", "").strip(), options[1].replace("- ", "").strip()],
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
if answer:
answer = answer[0].replace("-", "").strip()
else:
answer = ''
pred = "".join([a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']])
gold = "".join([a for a in gold if a not in [".", ",", "?", ";", ":", "'", '"']])
crct = gold.lower() == pred.lower()
total += 1
total_crct += crct
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
accuracy = total_crct/total
return expt_log, accuracy
def get_boost_decomp_examples(self, train_data, boost_id):
if boost_id < 1:
return [
what_next_examples[boost_id],
]
elif boost_id < 2:
return [
what_next_examples2[boost_id-1],
]
elif boost_id >= 2:
seed = [1, 2, 3][boost_id-2]
k_shot = 4*seed
random.seed(seed)
np.random.seed(seed)
data_train = pd.DataFrame(train_data)
sub_df = data_train.sample(k_shot)
booster_df = sub_df.sample(frac=1, random_state=0)
print(f"Selected: {len(booster_df)} in context examples.")
return [
booster_df
]
def what_happened_next(self, prompt, boost_ex, example, transition, choice_a, choice_b, word, manifest, overwrite_manifest):
example = example.strip(".")
choice_a = choice_a.lower()
choice_b = choice_b.lower()
transition = transition.strip()
prompt_suffix = prompt(boost_ex)
ex_prompt = f"{prompt_suffix}\n\nQuestion: ({{word:}} \'{{choice_a:}}\', {{word:}} \'{{choice_b:}}\') {{example:}} {{word:}}?"
raw_answer = get_response(
ex_prompt.format(word=word, choice_a=choice_a, choice_b=choice_b, example=example),
manifest,
max_toks= 4*len(choice_a.split()),
overwrite=bool(overwrite_manifest))
answer = [q for q in raw_answer.split("\n") if q][0].lower()
pred = ''
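# Scan n-grams of the model's answer from shortest to longest; the prediction from the longest n-gram that appears in exactly one choice wins.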
for n in range(5,50):
for idx_offset in range(len(answer) - n + 1):
ngram = "".join(answer[idx_offset:idx_offset+n])
if ngram in choice_a and ngram not in choice_b:
pred = choice_a
elif ngram not in choice_a and ngram in choice_b:
pred = choice_b
return pred, ex_prompt
def question_answer(self, prompt, boost_ex, example, transition, choice_a, choice_b, word, manifest, overwrite_manifest):
example = example.strip(".")
choice_a = choice_a.lower()
choice_b = choice_b.lower()
transition = transition.strip()
prompt_suffix = prompt(boost_ex)
ex_prompt = f"{prompt_suffix}\n\nQuestion: What best continues the sentence \"{{example:}}\"?\nAnswer:"
ex_pmp = ex_prompt.format(example=example)
raw_answer, log_prob = get_response(
ex_pmp,
manifest,
gold_choices=[choice_a, choice_b],
max_toks= 4*len(choice_a.split()),
overwrite=bool(overwrite_manifest))
answer = [q for q in raw_answer.split("\n") if q][0].lower()
pred = ''
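# Same longest-unambiguous-ngram matching as above, but emit choice indices '1'/'2'.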
for n in range(5,50):
for idx_offset in range(len(answer) - n + 1):
ngram = "".join(answer[idx_offset:idx_offset+n])
if ngram in choice_a and ngram not in choice_b:
pred = '1'
elif ngram not in choice_a and ngram in choice_b:
pred = '2'
return pred, ex_pmp
def answer_question(self, question, passage, all_prompts, boost_examples, manifest, overwrite_manifest, option=1):
one_at_a_time = all_prompts[1](boost_examples[1])
answer_prompt = f"{one_at_a_time}\n\nPassage: {{passage:}}\nQuestion: {{question:}}\n"
answer = get_response(
answer_prompt.format(passage=passage, question=question),
manifest,
max_toks=50,
overwrite=bool(overwrite_manifest))
answer = answer.replace("Answer: ", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0].replace(",", "").replace(".", "").lower()
else:
answer = ''
pred = ''
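# option encodes which choice the question was built from: with option=1 a 'yes' supports choice_a ('1'); option=2 flips the mapping for choice_b's question.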
if option == 1:
if 'yes' in answer.split():
pred = "1"
elif 'no' in answer.split():
pred = "2"
elif option == 2:
if 'no' in answer.split():
pred = "1"
elif 'yes' in answer.split():
pred = "2"
return pred, answer_prompt
def get_one_by_one(self, example, choice_a, choice_b, all_prompts, boost_examples, manifest, overwrite_manifest):
# construct questions
question_a, questioner_prompt = self.get_question(choice_a, all_prompts, boost_examples, manifest, overwrite_manifest)
question_b, questioner_prompt = self.get_question(choice_b, all_prompts, boost_examples, manifest, overwrite_manifest)
# ask questions
pred_a, answerer_prompt = self.answer_question(question_a, example, all_prompts, boost_examples, manifest, overwrite_manifest, option=1)
pred_b, answerer_prompt = self.answer_question(question_b, example, all_prompts, boost_examples, manifest, overwrite_manifest, option=2)
# reconcile answer
if pred_a == "1" and pred_b == "1":
pred = choice_a
elif pred_a == "2" and pred_b == "2":
pred = choice_b
elif pred_a and not pred_b:
if pred_a == "1":
pred = choice_a
else:
pred = choice_b
elif not pred_a and pred_b:
if pred_b == "1":
pred = choice_a
else:
pred = choice_b
else:
pred = ''
return pred, questioner_prompt, answerer_prompt
def get_sentiment(self, statement, all_prompts, boost_examples, manifest, overwrite_manifest):
sentiment_prompt = all_prompts[0](boost_examples[0])
prompt = f"{sentiment_prompt}\n\nPassage: {{statement:}}\nSentiment: "
raw_answer = get_response(
prompt.format(statement=statement),
manifest,
max_toks=5,
overwrite=bool(overwrite_manifest))
sent = raw_answer.split("\n")[0]
if "positive" in sent:
sent = 1
elif "negative" in sent:
sent = -1
else:
sent = 0
return sent, sentiment_prompt
def combine_sentiments(self, example, choice_a, choice_b, all_prompts, boost_examples, manifest, overwrite_manifest):
# score the sentiment of each choice and of the passage
sentiment_a, sentiment_prompt = self.get_sentiment(choice_a, all_prompts, boost_examples, manifest, overwrite_manifest)
sentiment_b, sentiment_prompt = self.get_sentiment(choice_b, all_prompts, boost_examples, manifest, overwrite_manifest)
sentiment_ex, sentiment_prompt = self.get_sentiment(example, all_prompts, boost_examples, manifest, overwrite_manifest)
# reconcile answer
pred = ''
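# Predict the choice whose sentiment sits closest to the passage's sentiment; equal distances yield no prediction.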
if abs(sentiment_a - sentiment_ex) < abs(sentiment_b - sentiment_ex):
pred = choice_a
elif abs(sentiment_a - sentiment_ex) > abs(sentiment_b - sentiment_ex):
pred = choice_b
return pred, sentiment_prompt
def get_question(self, statement, all_prompts, boost_examples, manifest, overwrite_manifest):
questioner = all_prompts[0](boost_examples[0])
question_prompt = f"{questioner}\n\nStatement: {{statement:}}\n"
question = get_response(
question_prompt.format(statement=statement),
manifest,
max_toks=4*len(statement.split()),
overwrite=bool(overwrite_manifest))
question = question.replace("Question: ", "")
question = [q for q in question.split("\n") if q]
if not question:
question = f"{statement} Yes or no?"
else:
question = question[0]
return question, question_prompt
def get_what_next(self, example, choice_a, choice_b, transition, all_prompts, boost_examples, manifest, overwrite_manifest):
what_next_prompt = all_prompts[0](boost_examples[0])
if "result of":
prompt = f"{what_next_prompt}\n\n----\n\nChoices:\n- {{choice_a:}}\n- {{choice_b:}}\n\nPassage: {{example:}} Because?"
elif "consequence":
prompt = f"{what_next_prompt}\n\n----\n\nChoices:\n- {{choice_a:}}\n- {{choice_b:}}\n\nPassage: {{example:}} So?"
raw_answer = get_response(
prompt.format(choice_a=choice_a, choice_b=choice_b, example=example),
manifest,
max_toks=50,
overwrite=bool(overwrite_manifest))
answer = raw_answer.split("\n")[0].lower()
choice_a = choice_a.lower()
choice_b = choice_b.lower()
pred = ''
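# Longest-unambiguous-ngram matching again: the choice containing the longest answer n-gram absent from the other choice wins.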
for n in range(5,50):
for idx_offset in range(len(answer) - n + 1):
ngram = "".join(answer[idx_offset:idx_offset+n])
if ngram in choice_a and ngram not in choice_b:
pred = choice_a
elif ngram not in choice_a and ngram in choice_b:
pred = choice_b
return pred, what_next_prompt
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['inputs_pretokenized']
text = text.replace("Pick the more likely continuation to the following sentence:", "").strip("\n")
gold = row['targets_pretokenized']
parts = text.split("\n")
statement = parts[0].split(".")[0:-1]
transition = parts[0].split(".")[-1]
example = " ".join(statement)
choice_a = parts[1].replace("-", "").strip()
choice_b = parts[2].replace("-", "").strip()
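# Map the gold continuation onto a stable index ('1' = choice_a, '2' = choice_b) so booster votes can be aggregated.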
gold_idx = -1
if gold.lower() == choice_a.lower():
gold_idx = '1'
else:
gold_idx = '2'
all_prompts = []
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_num, boost_examples in enumerate(boost_dfs):
icl_str = ""
pred = ''
answer2 = None
if boost_num < 1:
all_prompts = []
if 'as a consequence' in transition:
answer, what_next_prompt = self.what_happened_next(question, boost_examples[0], example, transition, choice_a, choice_b, 'and so', manifest, overwrite_manifest)
else:
answer, what_next_prompt = self.what_happened_next(
question, boost_examples[0], example, transition, choice_a, choice_b, 'because', manifest, overwrite_manifest)
if 'as a consequence' in transition:
answer2, what_next_prompt = self.what_happened_next(question, boost_examples[0], example, transition, choice_b, choice_a, 'and so', manifest, overwrite_manifest)
else:
answer2, what_next_prompt = self.what_happened_next(
question, boost_examples[0], example, transition, choice_b, choice_a, 'because', manifest, overwrite_manifest)
if answer != answer2:
answer = ''
all_prompts.append(what_next_prompt)
elif boost_num < 2:
answer, what_next_prompt = self.get_what_next(
example, choice_a, choice_b, transition, [what_next2], boost_examples, manifest, overwrite_manifest
)
answer2, what_next_prompt = self.get_what_next(
example, choice_b, choice_a, transition, [what_next2], boost_examples, manifest, overwrite_manifest
)
if answer != answer2:
answer = ''
all_prompts.append(what_next_prompt)
else:
icl_str = ""
for s_ind, s_row in boost_examples[0].iterrows():
s_text = s_row['inputs_pretokenized'].replace("Pick the more likely continuation to the following sentence:", "").strip("\n")
s_parts = s_text.split(". ")
s_sentence = s_parts[0]
s_transition = s_parts[1]
options = [l for l in s_text.split("\n") if l.startswith("- ")]
if "as a consequence" in s_transition:
s_text = f"{s_sentence} so"
elif "as a result of" in s_transition:
s_text = f"{s_sentence} because"
s_gold = s_row['targets_pretokenized'].lower()
icl_str += f"Context: {s_text} {s_gold}\n\n"
text = row['inputs_pretokenized']
parts = text.split(". ")
sentence = parts[0]
transition = parts[1]
options = [l.lower() for l in text.split("\n") if l.startswith("- ")]
if "as a consequence" in transition:
text = f"{sentence} so"
elif "as a result of" in transition:
text = f"{sentence} because"
text = text.replace("Pick the more likely continuation to the following sentence:", "").strip("\n")
gold = row['targets_pretokenized']
prompt = f"Pick the more likely continuation to the following sentence.\n\n\n{icl_str}Context: {{text:}}"
if i == 0:
print(prompt.format(text=text))
all_prompts.append(prompt)
raw_answer, _ = get_response(
prompt.format(text=text),
manifest,
gold_choices=[options[0].replace("- ", "").strip(), options[1].replace("- ", "").strip()],
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
if answer:
answer = answer[0].replace("-", "").strip()
else:
answer = ''
pred = "".join([a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]).lower()
gold = "".join([a for a in gold if a not in [".", ",", "?", ";", ":", "'", '"']]).lower()
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
preds_across_boost.reverse()
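# Reverse the booster order, then normalize each free-text prediction onto the '1'/'2' index space used for voting.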
mapped_p = []
for p in preds_across_boost:
p_norm = "".join([a for a in p if a not in [".", ",", "?", ";", ":", "'", '"']]).lower()
if not p_norm:
mapped_p.append("")
continue
if p_norm == gold:
mapped_p.append(gold_idx)
elif gold_idx == "1":
mapped_p.append("2")
else:
mapped_p.append("1")
all_boost_preds.append(mapped_p)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": mapped_p,
"gold": gold_idx,
}
expt_log[ind] = entry
labels.append(gold_idx)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 5
task_name = "super_glue_copa"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_copa_more_likely/"
decomp = COPADecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/COPA_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
import random
from datasets import load_dataset
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
##############################################################################################################################
# All prompts
summarize = InputOutputPrompt(
input_formatter=lambda x: f"Product: {x['product']}",
output_formatter=lambda x: f"Summarize: the product \"Product\": {x['summarize']}",
required_keys=["product", "summarize"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Summarize the product.\n\n"
)
summarize_examples = [
pd.DataFrame([
{
"product": "Was unsure when I purchased the DVD what to expect. With real joy I can say that it was worth every cent and I have already watched it several times. The Storyline kept me interested.",
"summarize": "The product is a DVD."
},
{
"product": "These are the best headphones I've ever owned. I recently purchased a replacement pair, as my original set died after several years of intensive use.",
"summarize": "The product is headphones."
},
{
"product": "So these tights are tighter than most tights I own and when I take these off, they leave my legs feeling like they've been squeezed to death.",
"summarize": "The product is tights."
}
]),
pd.DataFrame([
{
"product": "This bra is extremely comfortable, affordable and pretty too! My only complaint, and the reason for 4 stars is that the straps can't be adjusted very much.",
"summarize": "The product is a bra."
},
{
"product": "1/8/10 Have been using this drill and am very pleased. It has tons of torque and the handle comes in handy.",
"summarize": "The product is a drill."
},
{
"product": "I have used the Sanford highlighters for close to 20 years. there are nice. They are almost a disaster when highlighting textbooks.",
"summarize": "The product is a highlighter."
}
]),
pd.DataFrame([
{
"product": "bought a pack of these at a b&m; store, and you'd think pens are pens... especially if you paid a measly $2 for a 12 pack. But negative. These pens I bought were dry.",
"summarize": "The product is a pen."
},
{
"product": "I get a lot of grease on my guitar from lotion, sweat, fingerprints, dust, what have you; I take some of this, spray it on a cloth, give it some elbow grease, and my guitars are shiny as the day it was made.",
"summarize": "The product is a guitar."
},
{
"product": "I purchased this sander nearly one year ago and can't say I have any complaints about it. The dust collection works surprisingly well, though if the bag isn't pushed in all",
"summarize": "The product is a sander."
}
]),
pd.DataFrame([
{
"product": "I have 7 guitars in my cramped little bedroom studio and I quickly ran out of space to hold them easily. Floor space is dominated by my desk and drum set and I wanted my guitars to be out of the way and safe so they didn't get tripped over or dinged.",
"summarize": "The product is guitars."
},
{
"product": "This is a beautifully constructed book. The circus atmosphere is rich and detailed, and it's redolent of its time period. The images are strong and the pace, while not fast, is stately -- perhaps the way an elephant moves??",
"summarize": "The product is a book."
},
{
"product": "I was looking for decent Levi's for a few years and Amazon had them!!! I wanted the stiff unwashed jeans because they last quite a few years.",
"summarize": "The product is jeans."
}
]),
pd.DataFrame([
{
"product": "I get a lot of grease on my guitar from lotion, sweat, fingerprints, dust, what have you; I take some of this, spray it on a cloth, give it some elbow grease, and my guitars are shiny as the day it was made.",
"summarize": "The product is a guitar."
},
{
"product": "This bra is extremely comfortable, affordable and pretty too! My only complaint, and the reason for 4 stars is that the straps can't be adjusted very much.",
"summarize": "The product is a bra."
},
{
"product": "The parts of this book that dealt with the main character in old age, were very insightful and I enjoyed that. But quite honestly had I known that it would detail the abuse of the circus animals and then also the detailed sex acts I would never had purchased this.",
"summarize": "The product is a book."
}
])
]
categorize = InputOutputPrompt(
input_formatter=lambda x: f"Product: {x['product']}\nSummary: {x['summary']}",
output_formatter=lambda x: f"The summary \"Summary\" fits \"Category\": {x['category']}",
required_keys=["product", "summary", "category"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Pick the correct category for the product.\n\n\"Categories\":\n- Amazon Instant Video\n- Books\n- Clothing Shoes and Jewelry\n- Electronics\n- Kindle Store\n- Movies and TV\n- Musical Instruments\n- Office Products\n- Tools and Home Improvement\n\n"
)
categorize_examples = [
pd.DataFrame([
{
"product": "Was unsure when I purchased the DVD what to expect. With real joy I can say that it was worth every cent and I have already watched it several times. The Storyline kept me interested.",
"summary": "The product is a DVD.",
"category": "Amazon Instant Video"
},
{
"product": "These are the best headphones I've ever owned. I recently purchased a replacement pair, as my original set died after several years of intensive use.",
"summary": "The product is headphones.",
"category": "Electronics"
},
{
"product": "So these tights are tighter than most tights I own and when I take these off, they leave my legs feeling like they've been squeezed to death.",
"summary": "The product is tights.",
"category": "Clothing Shoes and Jewelry"
}
]),
pd.DataFrame([
{
"product": "This bra is extremely comfortable, affordable and pretty too! My only complaint, and the reason for 4 stars is that the straps can't be adjusted very much. ",
"summary": "The product is a bra.",
"category": "Clothing Shoes and Jewelry"
},
{
"product": "1/8/10 Have been using this drill and am very pleased. It has tons of torque and the handle comes in handy. ",
"summary": "The product is a drill.",
"category": "Tools and Home Improvement"
},
{
"product": "I have used the Sanford highlighters for close to 20 years. there are nice. They are almost a disaster when highlighting textbooks. ",
"summary": "The product is a highlighter.",
"category": "Office Products"
}
]),
pd.DataFrame([
{
"product": "bought a pack of these at a b&m; store, and you'd think pens are pens... especially if you paid a measly $2 for a 12 pack. But negative. These pens I bought were dry.",
"summary": "The product is a pen.",
"category": "Office Products"
},
{
"product": "I get a lot of grease on my guitar from lotion, sweat, fingerprints, dust, what have you; I take some of this, spray it on a cloth, give it some elbow grease, and my guitars are shiny as the day it was made. ",
"summary": "The product is a guitar.",
"category": "Musical Instruments"
},
{
"product": "I purchased this sander nearly one year ago and can't say I have any complaints about it. The dust collection works surprisingly well, though if the bag isn't pushed in all",
"summary": "The product is a sander.",
"category": "Tools and Home Improvement"
}
]),
pd.DataFrame([
{
"product": "I have 7 guitars in my cramped little bedroom studio and I quickly ran out of space to hold them easily. Floor space is dominated by my desk and drum set and I wanted my guitars to be out of the way and safe so they didn't get tripped over or dinged.",
"summary": "The product is guitars.",
"category": "Musical Instruments"
},
{
"product": "This is a beautifully constructed book. The circus atmosphere is rich and detailed, and it's redolent of its time period. The images are strong and the pace, while not fast, is stately -- perhaps the way an elephant moves??",
"summary": "The product is a book.",
"category": 'Books',
},
{
"product": "I was looking for decent Levi's for a few years and Amazon had them!!! I wanted the stiff unwashed jeans because they last quite a few years.",
"summary": 'The product is jeans',
"category": 'Clothing Shoes and Jewelry',
}
]),
pd.DataFrame([
{
"product": "I get a lot of grease on my guitar from lotion, sweat, fingerprints, dust, what have you; I take some of this, spray it on a cloth, give it some elbow grease, and my guitars are shiny as the day it was made.",
"summary": "The product is a guitar.",
"category": "Musical Instruments"
},
{
"product": "The parts of this book that dealt with the main character in old age, were very insightful and I enjoyed that. But quite honestly had I known that it would detail the abuse of the circus animals and then also the detailed sex acts I would never had purchased this.",
"summary": 'The product is a book.',
"category": 'Books',
},
{
"product": "This bra is extremely comfortable, affordable and pretty too! My only complaint, and the reason for 4 stars is that the straps can't be adjusted very much.",
"summary": "The product is a bra.",
"category": "Clothing Shoes and Jewelry"
}
])
]
description_zeroshot="""
Pick the correct category for the product.
Categories:
- Amazon Instant Video
- Books
- Clothing Shoes and Jewelry
- Electronics
- Kindle Store
- Movies and TV
- Musical Instruments
- Office Products
- Tools and Home Improvement"""
class AmazonProduct(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
summarize_examples[boost_id],
categorize_examples[boost_id],
]
def read_data(self, save_dir, overwrite_data):
random.seed(0)
np.random.seed(0)
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
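# Note: assumes the FLEX 'amazon' dataset script referenced in main() is resolvable by load_dataset.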
dataset = load_dataset('amazon')
all_data = dataset['test']
all_data = pd.DataFrame(all_data).sample(frac=1, random_state=0)
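# Deterministically shuffle, then split 90/10 into a test pool and a train pool for few-shot/boost examples.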
if not save_data.exists() or overwrite_data:
test_data = all_data.iloc[:int(len(all_data)*0.9)]
test_data.reset_index().to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(save_data)
save_data = Path(f"{save_dir}/{self.task_name}/train_data.feather")
if not save_data.exists() or overwrite_data:
train_data = all_data.iloc[int(len(all_data)*0.9):]
train_data.reset_index().to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = pd.read_feather(save_data)
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = ['Clothing_Shoes_and_Jewelry', 'Tools_and_Home_Improvement', 'Office_Products', 'Amazon_Instant_Video', 'Musical_Instruments', 'Books', 'Electronics', 'Kindle_Store']
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["label"] == label].sample(
num_per_class
)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['text']
gold = row['label']
gold = gold.replace("_", " ").strip().replace(",", "")
icl_str = f"{description_zeroshot}"
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_label = s_row['label'].replace("_", " ")
icl_str += f"\n\nProduct: {s_row['text']}\nCategory: {s_label}"
prompt = f"{icl_str}\n\nProduct: {{text:}}\nCategory:"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n\n",
)
answer = answer.split("\n")
answer = [a for a in answer if a]
pred = ''
if answer:
pred = answer[0]
pred = pred.replace("-", "").strip().replace(",", "")
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
labels_clean = [l.replace("_", " ") for l in set(test_data['label'])]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['text']
gold = row['label']
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
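# Step 1: prompt the LM to summarize what product the review is about.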
prompt_suffix = summarize(boost_examples[0])
summary_prompt = f"{prompt_suffix}\n\nProduct: {{text:}}\nSummarize: the product \"Product\":"
summary_pmp = summary_prompt.format(text=text)
output = get_response(
summary_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=25,
)
summary = output.split("\n")[0].split(":")[-1].strip("\n")
all_prompts.append(summary_pmp)
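# Step 2: map the (review, summary) pair onto one of the fixed product categories.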
prompt_suffix = categorize(boost_examples[1])
category_prompt = f"{prompt_suffix}\n\nProduct: {{text:}}\nSummary: {{summary:}}\nThe summary \"Summary\" fits \"Category\":"
category_pmp = category_prompt.format(text=text, summary=summary)
output = get_response(
category_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
)
all_prompts.append(category_pmp)
if i == 0:
print(summary_pmp)
print(category_pmp)
answer = output.split("\n")[0].strip().lower()
answer = answer.replace("-", "").strip()
gold = gold.replace("_", " ").strip().lower()
pred = answer
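# Snap the free-form answer to a known category label via substring match; otherwise keep the raw answer.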
for label in labels_clean:
if label.lower() in answer.lower():
pred = label.lower()
break
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[i] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 3
task_name = "amazon_products"
data_dir = (
f"{DATA_DIR}/amazon_products/"
)
if not Path(data_dir).exists():
raise ValueError(
f"Data directory {data_dir} does not exist."
"Download from https://github.com/allenai/flex/blob/75d6d1cea66df2c8a7e3d429c6af5008ccf1544b/fewshot/hf_datasets_scripts/amazon/amazon.py"
)
decomp = AmazonProduct(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/Amazon_final.py |
#!/usr/bin/env python
# coding: utf-8
from collections import defaultdict
from pathlib import Path
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
answer_prompt = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}\nQuestion: {x['question']}\n{x['choice_question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passage", "question", "choice_question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer if the possible answer is a correct answer to the question.\n\n"
)
answer_prompt_examples = [
pd.DataFrame([
{
"passage": "Sara wanted to play on a baseball team. She had never tried to swing a bat and hit a baseball before. Her Dad gave her a bat and together they went to the park to practice. Sara wondered if she could hit a ball. She wasn't sure if she would be any good. She really wanted to play on a team and wear a real uniform. She couldn't wait to get to the park and test out her bat. When Sara and her Dad reached the park, Sara grabbed the bat and stood a few steps away from her Dad. Sara waited as her Dad pitched the ball to her. Her heart was beating fast. She missed the first few pitches. She felt like quitting but kept trying. Soon she was hitting the ball very far. She was very happy and she couldn't wait to sign up for a real team. Her Dad was very proud of her for not giving up. ",
"question": "Based on the previous passage, Who pitched the ball to Sara and where did it occur? ",
"choice_question": "Is \"Her dad did in the park\" a correct answer?",
"answer": "yes",
},
{
"passage": "The Vice President stated that he called the President to discuss the rules of engagement for the CAP. He recalled feeling that it did no good to establish the CAP unless the pilots had instructions on whether they were authorized to shoot if the plane would not divert. He said the President signed off on that concept. The President said he remembered such a conversation, and that it reminded him of when he had been an interceptor pilot. The President emphasized to us that he had authorized the shootdown of hijacked aircraft. The Vice President's military aide told us he believed the Vice President spoke to the President just after entering the conference room, but he did not hear what they said. Rice, who entered the room shortly after the Vice President and sat next to him, remembered hearing him inform the President, \"Sir, the CAPs are up. Sir, they're going to want to know what to do.\" Then she recalled hearing him say, \"Yes sir.\" She believed this conversation occurred a few minutes, perhaps five, after they entered the conference room. We believe this call would have taken place sometime before 10:10 to 10:15. Among the sources that reflect other important events of that morning, there is no documentary evidence for this call, but the relevant sources are incomplete. Others nearby who were taking notes, such as the Vice President's chief of staff, Scooter Libby, who sat next to him, and Mrs. Cheney, did not note a call between the President and Vice President immediately after the Vice President entered the conference room. At 10:02, the communicators in the shelter began receiving reports from the Secret Service of an inbound aircraft-presumably hijacked-heading toward Washington. That aircraft was United 93. The Secret Service was getting this information directly from the FAA. The FAA may have been tracking the progress of United 93 on a display that showed its projected path to Washington, not its actual radar return. Thus, the Secret Service was relying on projections and was not aware the plane...",
"question": "Based on the previous passage, Why was the Secret Service's information about United 93 flawed? ",
"choice_question": "Is \"The Secret Service Didn't have access to FAA information\" a correct answer?",
"answer": "no",
},
{
"passage": "Patricia Cross and her boyfriend Larry Osborne , two students in a San Francisco school , become expelled for the publication of an off-campus underground paper . As a result , a philosophy professor , Dr. Jonathon Barnett , resigns his teaching position and decides to become an advocate for the counterculture youth movement and , specifically , the use of LSD . The hippies of the Haight-Ashbury district first see him as a hero and then as something even more . Dr. Barnett even makes an appearance on the Joe Pyne TV show to voice his support of the hippie community and the use of LSD . One scheming young man sees the opportunity to build Dr. Barnett as the head of a cult centered around the use of LSD . He hopes to earn profit from the users , Dr. Barnett's speeches known as `` happenings , '' and their lifestyles . At a massive LSD-fueled dance , Patricia begins to have a bad trip Which leads to an argument between her and Pat , ultimately splitting the couple up . After Patricia realizes that she's pregnant , Dr. Barnett advises her to have an abortion , ultimately leading to Patricia attempting suicide . However , Larry saves her and makes the destruction of Dr. Barnett's cult his primary objective . Larry shoots Dr. Barnett from the crowd at one of his massive speeches . As another hippie in attendance calms the audience and Elliot sees his new leader for their cult-like organization , Larry realizes that his assassination of Dr. Barnett simply made him a martyr for the hippie movement . ",
"question": "Based on the previous passage, Why did Dr. Barnett resign from teaching? ",
"choice_question": "Is \"Patricia expulsion\" a correct answer?",
"answer": "yes",
},
{
"passage": "I wondered if that were my case--if I rode out for honour, and not for the pure pleasure of the riding. And I marvelled more to see the two of us, both lovers of one lady and eager rivals, burying for the nonce our feuds, and with the same hope serving the same cause. We slept the night at Aird's store, and early the next morning found Ringan. A new Ringan indeed, as unlike the buccaneer I knew as he was unlike the Quaker. He was now the gentleman of Breadalbane, dressed for the part with all the care of an exquisite. He rode a noble roan, in his Spanish belt were stuck silver-hafted pistols, and a long sword swung at his side. When I presented Grey to him, he became at once the cavalier, as precise in his speech and polite in his deportment as any Whitehall courtier. They talked high and disposedly of genteel matters, and you would have thought that that red-haired pirate had lived his life among proud lords and high-heeled ladies. That is ever the way of the Highlander. He alters like a clear pool to every mood of the sky, so that the shallow observer might forget how deep the waters are. ",
"question": "Based on the previous passage, Who is described as both buccaneer and cavalier? ",
"choice_question": "Is \"Quaker\" a correct answer?",
"answer": "no"
}
]),
pd.DataFrame([
{
"passage": "Earth processes have not changed over time. The way things happen now is the same way things happened in the past. Mountains grow and mountains slowly wear away. The same process is at work the same as it was billions of years ago. As the environment changes, living creatures adapt. They change over time. Some organisms may not be able to adapt. They become extinct. Becoming extinct means they die out completely. Some geologists study the history of the Earth. They want to learn about Earths past. They use clues from rocks and fossils. They use these clues to make sense of events. The goal is to place things in the order they happened. They also want to know how long it took for those events to happen. ",
"question": "Based on the previous passage, Who studies in order to learn about the earth's past? ",
"choice_question": "Is \"Librarians\" a correct answer?",
"answer": "no",
},
{
"passage": "Sam Farragut is a sociopathic business executive in Southern California who forces a team of advertising agency employees to embark on a dangerous dirtbike trip to the Baja California desert in order to compete for his business . The men are Warren Summerfield , a suicidal middle-aged ad executive who has been fired from the agency ; the straightlaced Paul McIlvain who is inattentive to his wife , and brash art designer Maxon who feels suddenly trapped after his girlfriend announces she is pregnant . There are numerous long sequences of motorcycle riding on desert backroads . Summerfield has been having an affair with McIlvian's wife . He has not told his wife that he was fired and is simply serving out his tenure at the agency while looking for a new position . His wife is actually aware of the affair . Farragut convinces the ad men to make the motorcycle journey on the pretext of looking for a location to shoot a commercial . In reality , Farragut is reckless and looking to involve the men in spontaneous edgy adventure of his own manipulation . After they leave , McIlvain's wife suspects that Summerfield is planning to kill himself for the insurance money , but she can not convince Summerfield's wife to instigate a search . The four men travel deeper into Mexico on isolated dirt roads . At one point Summerfield contemplates plunging off a cliff . After being humiliated by a young American couple in a Baja bar , Farragut tracks them down on the beach while accompanied by Maxon . ",
"question": "Based on the previous passage, Under what pretext does a sociopathic company executive organize the motorcycle trip? ",
"choice_question": "Is \"Because he wants to compete for his business, so he suggest looking for a place to shoot a commercial\" a correct answer?",
"answer": "yes",
},
{
"passage": "The mighty fane, with its three massive towers, rises majestically over the red roofs of the town. Its most striking feature is the great Norman screen, running up without buttresses or projections to the parapet and hiding the bases of the square, richly decorated towers of the west front. The plain centre of the screen is the work of Remigius, the first bishop. The rest of it is relieved with rich arcading of Late Norman and Early English periods. The wooden spires which crowned the towers were removed in 1807. In 1192 Hugh of Avalon determined to rebuild the Norman building of Remigius, which an earthquake had shaken. To him we owe the choir and eastern transept. His successors completed the western transept and began the west end of the nave. So much money had to be spent in rebuilding the central tower, which fell in 1239, that the canons could not rebuild the nave entirely, but had to incorporate the Norman end by Remigius. Unfortunately the axis of the west front does not correspond to that of the nave, which is too wide for its height. The low vaulting is a serious defect in the choir built by St. Hugh, but of the superb beauty of the Angel Choir, which encloses his shrine, there can be no doubt. In its richness of sculpture it is one of the masterpieces of Gothic architecture in England. The interior of the cathedral is remarkable for the harmony of its style, which is Lancet-Gothic, and the dim lighting of the nave only adds to its impressiveness. ",
"question": "Based on the previous passage, Who was responsible for initially building the choir and eastern transept and in what year did he start? ",
"choice_question": "Is \"It wasn't the Hugh of Avalon\" a correct answer?",
"answer": "no",
},
{
"passage": "If you beat a dog in Schuylkill County, you'll probably get a $100 fine. If you repeatedly beat a woman, you'll probably get the same fine. In 2001, county judges heard 98 Protection From Abuse cases, finding the defendant guilty in 48 percent of those cases, either after a hearing or through a technical violation or plea. Of those found guilty, the majority were ordered to pay court costs, plus a $100 fine. No defendants were ordered to pay more than a $250 fine for violating the court order. In 27 percent of the cases, the charges were dismissed or the defendant was found not guilty. In the rest of the cases, charges were withdrawn or the matter is not yet resolved. Sarah T. Casey, executive director of Schuylkill Women in Crisis, finds it disturbing that in most cases, the fine for violating a PFA is little more than the fine someone would get for cruelty and abuse toward an animal. \"In most of the counties surrounding Schuylkill County, the penalties given for indirect criminal contempt are much stiffer than those in Schuylkill County,\" Casey said. \"What kind of message are we sending those who repeatedly violate Protection From Abuse orders? That it's OK to abuse women in Schuylkill County, because you'll only get a slap on the wrist?\" Under state law, the minimum fine for contempt of a PFA is $100; the maximum fine is $1,000 and up to six months in jail. Like others who are familiar with how the county's legal system does and doesn't work for victims of domestic violence, Casey believes some changes are in order. Valerie West, a manager/attorney with Mid-Penn Legal Services, with offices in Pottsville and Reading, regularly handles domestic violence cases. She finds fault with the local requirement that a custody order must be established within 30 days after a PFA is filed. West said she feels a custody order should be allowed to stand for the full term of the PFA - up to 18 months - as it does in many other counties in the state. \"It places an undue burden on the plaintiff, in terms of cost, finding...",
"question": "Based on the previous passage, What solution is West offering and how is it different for a plaintiff from what is already being practiced? ",
"choice_question": "Is \"West said she feels a custody order should be allowed to stand for the full term of the PFA - up to 18 months - as it does in many other counties in the state\" a correct answer?",
"answer": "yes"
}
]),
pd.DataFrame([
{
"passage": "Fossils give clues about major geological events. Fossils can also give clues about past climates. Fossils of ocean animals on the top of a mountain? Ocean animals have been found on the Earths tallest mountain. Its hard to believe, but it is true. These fossils were found at the top of Mt. Everest. Mt. Everest is the highest mountain on Earth. These fossils showed that this entire area was once at the bottom of a sea. It can only mean that Mt. Everest was uplifted. In fact, the entire Himalaya mountain range was raised. It was forced up from the collision of two continents. Fossils of plants are found in Antarctica. Now, Antarctica is almost completely covered with ice. Plants do not grow in Antarctica. According to fossils, they once did. This means that Antarctica was once much warmer than it is now. These fossils tell us about Antarcticas past climate. ",
"question": "Based on the previous passage, How were the Himalayas \"uplifted\"? ",
"choice_question": "Is \"The collision of two continents\" a correct answer?",
"answer": "yes",
},
{
"passage": "Fossils give clues about major geological events. Fossils can also give clues about past climates. Fossils of ocean animals on the top of a mountain? Ocean animals have been found on the Earths tallest mountain. Its hard to believe, but it is true. These fossils were found at the top of Mt. Everest. Mt. Everest is the highest mountain on Earth. These fossils showed that this entire area was once at the bottom of a sea. It can only mean that Mt. Everest was uplifted. In fact, the entire Himalaya mountain range was raised. It was forced up from the collision of two continents. Fossils of plants are found in Antarctica. Now, Antarctica is almost completely covered with ice. Plants do not grow in Antarctica. According to fossils, they once did. This means that Antarctica was once much warmer than it is now. These fossils tell us about Antarcticas past climate. ",
"question": "Based on the previous passage, How were the Himalayas \"uplifted\"? ",
"choice_question": "Is \"Magnetic forces\" a correct answer?",
"answer": "no",
},
{
"passage": "If you beat a dog in Schuylkill County, you'll probably get a $100 fine. If you repeatedly beat a woman, you'll probably get the same fine. In 2001, county judges heard 98 Protection From Abuse cases, finding the defendant guilty in 48 percent of those cases, either after a hearing or through a technical violation or plea. Of those found guilty, the majority were ordered to pay court costs, plus a $100 fine. No defendants were ordered to pay more than a $250 fine for violating the court order. In 27 percent of the cases, the charges were dismissed or the defendant was found not guilty. In the rest of the cases, charges were withdrawn or the matter is not yet resolved. Sarah T. Casey, executive director of Schuylkill Women in Crisis, finds it disturbing that in most cases, the fine for violating a PFA is little more than the fine someone would get for cruelty and abuse toward an animal. \"In most of the counties surrounding Schuylkill County, the penalties given for indirect criminal contempt are much stiffer than those in Schuylkill County,\" Casey said. \"What kind of message are we sending those who repeatedly violate Protection From Abuse orders? That it's OK to abuse women in Schuylkill County, because you'll only get a slap on the wrist?\" Under state law, the minimum fine for contempt of a PFA is $100; the maximum fine is $1,000 and up to six months in jail. Like others who are familiar with how the county's legal system does and doesn't work for victims of domestic violence, Casey believes some changes are in order. Valerie West, a manager/attorney with Mid-Penn Legal Services, with offices in Pottsville and Reading, regularly handles domestic violence cases. She finds fault with the local requirement that a custody order must be established within 30 days after a PFA is filed. West said she feels a custody order should be allowed to stand for the full term of the PFA - up to 18 months - as it does in many other counties in the state. \"It places an undue burden on the plaintiff, in terms of cost, finding...",
"question": "Based on the previous passage, What solution is West offering and how is it different for a plaintiff from what is already being practiced? ",
"choice_question": "Is \"West said she feels a custody order should be allowed to stand for the full term of the PFA - up to 18 months - as it does in many other counties in the state\" a correct answer?",
"answer": "yes",
},
{
"passage": "Sam Farragut is a sociopathic business executive in Southern California who forces a team of advertising agency employees to embark on a dangerous dirtbike trip to the Baja California desert in order to compete for his business . The men are Warren Summerfield , a suicidal middle-aged ad executive who has been fired from the agency ; the straightlaced Paul McIlvain who is inattentive to his wife , and brash art designer Maxon who feels suddenly trapped after his girlfriend announces she is pregnant . There are numerous long sequences of motorcycle riding on desert backroads . Summerfield has been having an affair with McIlvian's wife . He has not told his wife that he was fired and is simply serving out his tenure at the agency while looking for a new position . His wife is actually aware of the affair . Farragut convinces the ad men to make the motorcycle journey on the pretext of looking for a location to shoot a commercial . In reality , Farragut is reckless and looking to involve the men in spontaneous edgy adventure of his own manipulation . After they leave , McIlvain's wife suspects that Summerfield is planning to kill himself for the insurance money , but she can not convince Summerfield's wife to instigate a search . The four men travel deeper into Mexico on isolated dirt roads . At one point Summerfield contemplates plunging off a cliff . After being humiliated by a young American couple in a Baja bar , Farragut tracks them down on the beach while accompanied by Maxon . ",
"question": "Based on the previous passage, Under what pretext does a sociopathic company executive organize the motorcycle trip? ",
"choice_question": "Is \"For a getaway trip\" a correct answer?",
"answer": "no"
}
]),
]
##############################################################################################################################
def multirc_metric(preds_by_question, golds_by_question):
assert len(preds_by_question) == len(golds_by_question)
agreement_count = 0
correct_count = 0
predict_count = 0
accuracy_count = 0
total_count = 0
for p_id in range(len(preds_by_question)):
predicted_ans = [int(g.lower() == "yes") for g in preds_by_question[p_id]]
gold_ans = [int(g.lower() == "yes") for g in golds_by_question[p_id]]
assert len(predicted_ans) == len(gold_ans)
total_count += len(predicted_ans)
if all([p == g for p, g in zip(predicted_ans, gold_ans)]):
accuracy_count += 1
predict_count += sum(predicted_ans)
correct_count += sum(gold_ans)
agreement_count += sum([a * b for (a, b) in zip(gold_ans, predicted_ans)])
p1 = (1.0 * agreement_count / predict_count) if predict_count > 0.0 else 1.0
r1 = (1.0 * agreement_count / correct_count) if correct_count > 0.0 else 1.0
acc = (1.0 * accuracy_count / total_count) if total_count > 0.0 else 1.0
return {"precision": p1, "recall": r1, "f1a": 2 * r1 * p1 / (p1 + r1), "accuracy": acc}
class MultiRCDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def transform_df(self, df):
"""Group data by passage for accurate multiRC metrics."""
by_passage_question_df = defaultdict(list)
for _, row in df.iterrows():
passage, question, choice_question = row["inputs_pretokenized"].split("\n")
new_key = passage + "\n" + question
by_passage_question_df[new_key].append({"choice_question": choice_question, "answer": row["targets_pretokenized"]})
for_df = []
for key, d in by_passage_question_df.items():
passage, question = key.split("\n")
for_df.append({"passage": passage, "question": question, "choice_questions": [x["choice_question"] for x in d], "answers": [x["answer"] for x in d]})
return pd.DataFrame(for_df)
def read_data(self, save_dir, overwrite_data):
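        """Transform and cache the test split as feather; reuse the cache when present. The train split is re-transformed on every call."""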
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
test_data = self.transform_df(pd.read_feather(f"{self.data_dir}/{self.val_split}.feather"))
train_data = self.transform_df(pd.read_feather(f"{self.data_dir}/train.feather"))
test_data.to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = self.transform_df(pd.read_feather(f"{self.data_dir}/train.feather"))
test_data = pd.read_feather(f"{save_data}")
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_boost_decomp_examples(self, train_data, boost_id):
return [
answer_prompt_examples[boost_id],
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
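        """Run the zero-/few-shot baseline, answering every sub-question of each
        passage so the grouped MultiRC F1a metric can be computed."""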
expt_log = {}
golds = []
preds = []
cum_ind = 0
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
# Task requires multiple questions per passage for scoring
all_q_preds = []
all_q_golds = []
fs_exs = []
if do_few_shot:
cnt = 0
for s_ind, s_row in few_shot_df.iterrows():
passage = s_row["passage"]
question = s_row["question"]
for choice_q, gold in zip(s_row["choice_questions"], s_row["answers"]):
cnt += 1
fs_exs.append({
"passage": passage,
"question": question,
"choice_question": choice_q,
"answer": gold,
})
# Take one question per passage
break
passage = row["passage"]
question = row["question"]
all_prompts = []
for q_ind, (choice_q, gold) in enumerate(zip(row["choice_questions"], row["answers"])):
exs = fs_exs[:]
exs.append({
"passage": passage,
"question": question,
"choice_question": choice_q,
"answer": ""
})
pmp = answer_prompt(pd.DataFrame(exs))
all_prompts.append(pmp)
res = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=8,
stop_token="\n",
)
all_q_golds.append(gold.strip().lower())
all_q_preds.append(res.strip().lower())
                # row as dict - convert ndarray to list
row_as_dict = row.to_dict()
for k, v in list(row_as_dict.items()):
if isinstance(v, np.ndarray):
row_as_dict[k] = v.tolist()
entry = {
"ind": cum_ind,
"example_ind": ind,
"question_ind": q_ind,
"example": row_as_dict,
"base_prompt": pmp,
"pred": res.strip().lower(),
"gold": gold.strip().lower(),
}
expt_log[cum_ind] = entry
cum_ind += 1
if i == 0:
print(pmp)
preds.append(all_q_preds)
golds.append(all_q_golds)
report = multirc_metric(preds_by_question=preds, golds_by_question=golds)
return expt_log, report["f1a"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest)
# This task has nested labels (i.e. shape # test examples X number sub questions X number boosts)
# Flatten over first two dims for merging and then undo
all_boost_preds_flattened = [pred for pred_q_and_boost in all_boost_preds for pred in pred_q_and_boost]
all_boost_train_preds_flattened = [pred for pred_q_and_boost in all_boost_train_preds for pred in pred_q_and_boost]
train_labels_flattened = [label for label_q in train_labels for label in label_q]
all_boost_preds_flattened = np.array(all_boost_preds_flattened)
all_boost_train_preds_flattened = np.array(all_boost_train_preds_flattened)
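        # e.g. 2 examples x 2 sub-questions x 3 boosts flattens to shape (4, 3);
        # the unflattening loop below restores the per-example nesting.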
        # Merge the flattened boosted predictions with weak supervision (WS)
preds = self.merge_boosted_preds(all_boost_preds_flattened, all_boost_train_preds_flattened, train_labels_flattened, expt_log, expt_log_train)
preds_unflattened = []
cum_i = 0
for i in range(len(all_boost_preds)):
preds_unflattened.append([preds[cum_i + j] for j in range(len(all_boost_preds[i]))])
cum_i += len(all_boost_preds[i])
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0][0])):
individual_accuracies.append(multirc_metric(preds_by_question=[[p[i] for p in pred_set] for pred_set in all_boost_preds], golds_by_question=labels)["f1a"])
metric = multirc_metric(preds_by_question=preds_unflattened, golds_by_question=labels)["f1a"]
return expt_log, expt_log_train, metric, individual_accuracies
def _run_decomp_single_data(
self, test_data, boost_dfs, manifest, overwrite_manifest
):
expt_log = {}
all_boost_preds = []
labels = []
cum_ind = 0
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
all_q_golds = []
all_q_preds = []
passage = row["passage"]
question = row["question"]
            # Answer each sub-question (choice) of the passage in turn
for q_ind, (choice_q, gold) in enumerate(zip(row["choice_questions"], row["answers"])):
all_q_golds.append(gold.strip().lower())
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
prompt_suffix = answer_prompt(boost_examples[0])
pmp = f"{prompt_suffix}\n\nPassage: {passage}\nQuestion: {question}\n{choice_q}\nAnswer:"
                    # Wrap the prompt in a single-element list (one-step decomposition)
prompts_across_boost.append([pmp])
res = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=8,
)
preds_across_boost.append(res.split("\n\n")[0].strip().lower())
all_q_preds.append(preds_across_boost)
                # row as dict - convert ndarray to list
row_as_dict = row.to_dict()
for k, v in list(row_as_dict.items()):
if isinstance(v, np.ndarray):
row_as_dict[k] = v.tolist()
expt_log[cum_ind] = {
"ind": cum_ind,
"example_ind": ind,
"question_ind": q_ind,
"preds_boost": preds_across_boost,
"prompts": prompts_across_boost,
"example": row_as_dict,
"gold": gold.strip().lower(),
}
cum_ind += 1
if i == 0:
for pmp_set in prompts_across_boost:
print("\n".join(pmp_set))
all_boost_preds.append(all_q_preds)
labels.append(all_q_golds)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "multirc"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_multirc_is_a_correct_answer_"
decomp = MultiRCDecomp(task_name, data_dir, val_split="validation")
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/MultiRC_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import random
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
##############################################################################################################################
# All prompts
questioner_prompt = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["question", "statement"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the statement as a yes/no question.\n\n"
)
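# A minimal sketch of how InputOutputPrompt is assumed to render one demo
# (the exact joining logic lives in utils.InputOutputPrompt, not shown here):
#
#   Rewrite the statement as a yes/no question.
#
#   Statement: the balloon popped
#   Question: Did the balloon pop?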
questioner_prompt_examples = [
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
]
extraction_qa = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question. If there is no evidence in the context, return \"Unknown\".\n\n"
)
extraction_qa_examples = [
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect people in Europe?",
"answer": "yes, people in Italy, Europe",
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown",
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown",
},
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy only between 1346 and 1671, and not after that.",
"question": "Based on the context, Was the plague present in Italy during the 2000s?",
"answer": "No, it was present between 1346 and 1671"
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown"
}
]),
pd.DataFrame([
{
"context": "Jenna's 10th birthday was yesterday evening and at least 10 of her friends attended the party.",
"question": "Based on the context, Did 10 friends attend Jenna's party?",
"answer": "Unknown"
},
{
"context": "The bullies attacked John when he was walking through the elementary school parking lot and then got sent to the teacher's office.",
"question": "Based on the context, Did the bullies attack John in the teacher's office?",
"answer": "No, parking lot"
},
{
"context": "WISS discovered a new monkey disease occurring in a remote tribe in the Amazon rainforrest.",
"question": "Based on the context, Did WISS discover a new monkey species?",
"answer": "No, a new monkey disease"
}
]),
pd.DataFrame([
{
"context": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Based on the context, Did she think it was fair?",
"answer": "No"
},
{
"context": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Based on the context, Is inflation good for society?",
"answer": "Unknown"
},
{
"context": "Put yourself out there. The more time you spend dating and socializing, the more likely you will find a boyfriend you like.",
"question": "Based on the context, Does socializing help you find a boyfriend?",
"answer": "Yes"
},
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect people in Europe?",
"answer": "yes, people in Italy, Europe",
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown",
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown",
},
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect over 1,000 people?",
"answer": "yes, 1,200 people",
},
{
"context": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Based on the context, Is inflation good for society?",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown"
}
]),
]
##############################################################################################################################
class ANLIR2Decomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = [' True', ' Neither', ' False']
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["targets_pretokenized"] == label].sample(
num_per_class
)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def _get_boost_decomp_examples(self, train_data, boost_id):
seed = [69, 987][boost_id]
k_shot = 64
random.seed(seed)
np.random.seed(seed)
data_train = pd.DataFrame(train_data)
labels = [' False', ' True', ' Neither']
num_per_class = int(np.ceil(k_shot / len(labels)))
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
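            # Odd seeds draw demos whose label matches `label`; even seeds draw
            # demos whose label differs.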
if seed % 2 == 1:
sub_df = data_train[data_train["targets_pretokenized"] == label].sample(num_per_class, random_state = seed)
elif seed % 2 == 0:
sub_df = data_train[data_train["targets_pretokenized"] != label].sample(num_per_class, random_state = seed)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
booster_df = pd.concat(dfs).sample(frac=1, random_state=0)
print(f"Selected: {len(booster_df)} in context examples.")
return [
booster_df
]
def get_boost_decomp_examples(self, train_data, boost_id):
if boost_id < 3:
return [
questioner_prompt_examples[boost_id],
extraction_qa_examples[boost_id],
]
else:
icl_examples = self._get_boost_decomp_examples(train_data, boost_id-3)[0]
return [
icl_examples
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
golds = []
preds = []
labels = set(test_data["targets_pretokenized"])
labels = [l.lower().strip() for l in labels]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
pred = expt_log[ind]["pred"]
gold = expt_log[ind]["gold"]
else:
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']}{s_row['targets_pretokenized']}\n\n"
text = row["inputs_pretokenized"]
text = text.replace("True, False, or Neither?", "").strip().strip("\n")
text = text + " True, False, or Neither? "
gold = row["targets_pretokenized"]
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a for a in answer if any(l.lower() in a.lower() for l in labels)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
is_maybe = "neither" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
gold = gold.strip().lower()
pred = pred.strip().lower()
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
golds.append(gold)
preds.append(pred)
report = classification_report(golds, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_extraction(self, question, passage, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
if "Based on the context," in prompt_suffix:
question_prefix = " Based on the context,"
else:
question_prefix = ""
extract_prompt = f"{prompt_suffix}\n\nContext: {{passage:}}\nQuestion:{question_prefix} {question}\nAnswer:"
extract_pmp = extract_prompt.format(passage=passage)
answer = get_response(
extract_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.replace(",", "").replace(".", "").replace("?", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0]
else:
answer = passage
return answer, extract_pmp
def get_question(self, statement, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
question_prompt = f"{prompt_suffix}\n\nStatement: {{statement:}}\nQuestion:"
question_pmp = question_prompt.format(statement=statement)
answer = get_response(
question_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.replace("Question: ", "")
answer = [a for a in answer.split("\n") if a]
if answer:
            answer = answer[0].strip()
        else:
            answer = ''
statement = statement.strip().strip(".")
if (
not answer
or statement.lower() == answer.lower()
or not answer.strip().endswith("?")
):
answer = f"{statement}. Yes, no, or unknown?"
answer = answer.split("\n")[0]
return answer, question_pmp
def resolve_pred(self, answer):
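        """Map a free-text answer onto the task labels.

        Illustrative mapping: "yes it is" -> "True", "no" -> "False",
        "unknown" or ambiguous answers -> "Neither".
        """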
is_yes = "yes" in answer.split() or "true" in answer.split()
is_no = "no" in answer.split() or "false" in answer.split()
is_maybe = "maybe" in answer.split() or "maybe" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
return pred
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
        # Merge the boosted predictions with weak supervision (WS)
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train, indecisive_ans="neither")
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
report = classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)
individual_accuracies.append(report["accuracy"])
print(report)
print("\n\n")
report = classification_report(labels, preds, output_dict=True)
print(report)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(
self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit = -1
):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
prompts_across_boost = []
preds_across_boost = []
if i == run_limit:
break
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"].strip()
passage = text.split("\n")[0]
statement = (
text.split("\n")[-1]
.replace("True, False, or Neither?", "")
.strip()
.strip("\n")
.replace("Question: ", "")
)
for boost_num, boost_examples in enumerate(boost_dfs):
all_prompts = []
# question / extract prompt
if boost_num < 3:
question, question_final_prompt = self.get_question(
statement, questioner_prompt, boost_examples[0], manifest, overwrite_manifest
)
all_prompts.append(question_final_prompt)
open_answer_f, extraction_final_prompt = self.get_extraction(
question,
passage,
extraction_qa,
boost_examples[1],
manifest,
overwrite_manifest,
)
all_prompts.append(extraction_final_prompt)
if i == 0:
print("\n".join(all_prompts))
answer_f = open_answer_f.lower()
pred = self.resolve_pred(answer_f)
pred = pred.strip().lower()
preds_across_boost.append(pred)
                # Boost sets 3+: plain in-context learning (no decomposition)
elif boost_num >= 3:
icl_str = ""
for s_ind, s_row in boost_examples[0].iterrows():
if s_row["targets_pretokenized"].strip() == "True":
demo_label = "yes"
elif s_row["targets_pretokenized"].strip() == "False":
demo_label = "no"
else:
demo_label = "unknown"
s_text = s_row["inputs_pretokenized"]
s_passage = s_text.split("\n")[0]
s_statement = (
s_text.split("\n")[-1]
.replace("True, False, or Neither?", "")
.strip()
.strip("\n")
.replace("Question: ", "")
)
icl = f"Statement: {s_statement}\nAnswer: {demo_label}"
icl_str += f"{icl}\n\n"
description = "Is the statement Yes, No, or Unknown?"
prompt = f"{description}\n\n{icl_str}Statement: {{statement:}}\nAnswer:"
pmp = prompt.format(statement=statement)
if i == 0:
print("PMP ICL")
print(pmp)
pred = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n",
)
pred = pred.lower().strip()
pred = pred.replace(".", "").replace(",", "").replace("Label: ", "").replace("Sentiment:", "")
pred = [p for p in pred.split("\n") if p]
if pred:
pred = pred[0]
else:
pred = ""
all_prompts.append(pmp)
prompts_across_boost.append(all_prompts)
pred = self.resolve_pred(pred).lower()
preds_across_boost.append(pred)
gold = gold.strip().lower()
expt_log[ind] = {
"ind": ind,
"preds_boost": preds_across_boost,
"prompts": prompts_across_boost,
"example": text,
"pred": pred,
"gold": gold,
}
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 5
task_name = "anli_r2"
data_dir = f"{DATA_DIR}/P3/data_feather/anli_GPT_3_style_r2"
decomp = ANLIR2Decomp(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
    main()
| ama_prompting-main | tasks/ANLIR2_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import random
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
##############################################################################################################################
# All prompts
questioner_prompt = InputOutputPrompt(
input_formatter=lambda x: f"Statement: {x['statement']}",
output_formatter=lambda x: f"Question: {x['question']}",
required_keys=["question", "statement"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Rewrite the statement as a yes/no question.\n\n"
)
questioner_prompt_examples = [
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
pd.DataFrame([
{
"statement": "most of the light comes from the sun",
"question": "Does most of the light come from the sun?"
},
{
"statement": "the test was not hard",
"question": "Was the test not hard?",
},
{
"statement": "it is a good idea to buy your parents gifts",
"question": "Is it a good idea to buy your parents gifts?",
},
{
"statement": "the balloon popped",
"question": "Did the balloon pop?",
},
{
"statement": "The father and son went camping to California.",
"question": "Did the father and son go camping?",
},
]),
]
extraction_qa = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question. If there is no evidence in the context, return \"Unknown\".\n\n"
)
extraction_qa_examples = [
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect people in Europe?",
"answer": "yes, people in Italy, Europe",
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown",
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown",
},
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy only between 1346 and 1671, and not after that.",
"question": "Based on the context, Was the plague present in Italy during the 2000s?",
"answer": "No, it was present between 1346 and 1671"
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown"
}
]),
pd.DataFrame([
{
"context": "Jenna's 10th birthday was yesterday evening and at least 10 of her friends attended the party.",
"question": "Based on the context, Did 10 friends attend Jenna's party?",
"answer": "Unknown"
},
{
"context": "The bullies attacked John when he was walking through the elementary school parking lot and then got sent to the teacher's office.",
"question": "Based on the context, Did the bullies attack John in the teacher's office?",
"answer": "No, parking lot"
},
{
"context": "WISS discovered a new monkey disease occurring in a remote tribe in the Amazon rainforrest.",
"question": "Based on the context, Did WISS discover a new monkey species?",
"answer": "No, a new monkey disease"
}
]),
pd.DataFrame([
{
"context": "When Judy and Jack went to school, they got in trouble with their teacher for being late. I didn't think it was very fair.",
"question": "Based on the context, Did she think it was fair?",
"answer": "No"
},
{
"context": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Based on the context, Is inflation good for society?",
"answer": "Unknown"
},
{
"context": "Put yourself out there. The more time you spend dating and socializing, the more likely you will find a boyfriend you like.",
"question": "Based on the context, Does socializing help you find a boyfriend?",
"answer": "Yes"
},
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect people in Europe?",
"answer": "yes, people in Italy, Europe",
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown",
},
{
"context": "The term \"matter\" is used throughout physics in a bewildering variety of contexts: for example, one refers to \"condensed matter physics\", \"elementary matter\", \"partonic\" matter, \"dark\" matter, \"anti\"-matter, \"strange\" matter, and \"nuclear\" matter.",
"question": "Based on the context, Is anti-matter made of electrons? ",
"answer": "Unknown",
},
]),
pd.DataFrame([
{
"context": "According to Biraben, the plague was present somewhere in Italy and affected 1,200 people.",
"question": "Based on the context, Did the plague affect over 1,000 people?",
"answer": "yes, 1,200 people",
},
{
"context": "If inflation is occurring, leading to higher prices for basic necessities such as gas by 2 dollars. Do you think that inflation is good for society?",
"question": "Based on the context, Is inflation good for society?",
"answer": "Unknown"
},
{
"context": "Policies aiming at controlling unemployment and in particular at reducing its inequality-associated effects support economic growth.",
"question": "Based on the context, Is confidence a factor in increasing self-esteem?",
"answer": "unknown"
}
]),
]
class ANLIDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = [' False', ' True', ' Neither']
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["targets_pretokenized"] == label].sample(
num_per_class
)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def _get_boost_decomp_examples(self, train_data, boost_id):
seed = [69, 987][boost_id]
k_shot = 64
random.seed(seed)
np.random.seed(seed)
data_train = pd.DataFrame(train_data)
labels = [' True', ' False', ' Neither']
num_per_class = int(np.ceil(k_shot / len(labels)))
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
if seed % 2 == 1:
sub_df = data_train[data_train["targets_pretokenized"] == label].sample(num_per_class, random_state = seed)
elif seed % 2 == 0:
sub_df = data_train[data_train["targets_pretokenized"] != label].sample(num_per_class, random_state = seed)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
booster_df = pd.concat(dfs).sample(frac=1, random_state=0)
print(f"Selected: {len(booster_df)} in context examples.")
return [
booster_df
]
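    # Boost sets 0-2 pair the questioner/extraction prompts; sets 3-4 are the
    # pure in-context-learning demonstrations sampled above.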
def get_boost_decomp_examples(self, train_data, boost_id):
if boost_id < 3:
return [
questioner_prompt_examples[boost_id],
extraction_qa_examples[boost_id],
]
else:
icl_examples = self._get_boost_decomp_examples(train_data, boost_id-3)[0]
return [
icl_examples
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
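        """Run the zero-/few-shot ANLI R3 baseline and return (expt_log, accuracy)."""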
expt_log = {}
golds = []
preds = []
labels = set(test_data["targets_pretokenized"])
labels = [l.lower().strip() for l in labels]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if ind in expt_log:
pred = expt_log[ind]["pred"]
gold = expt_log[ind]["gold"]
else:
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']}{s_row['targets_pretokenized']}\n\n"
text = row["inputs_pretokenized"]
text = text.replace("True, False, or Neither?", "").strip().strip("\n")
text = text + " True, False, or Neither? "
gold = row["targets_pretokenized"]
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=20,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a for a in answer if any(l.lower() in a.lower() for l in labels)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
is_maybe = "neither" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
gold = gold.strip().lower()
pred = pred.strip().lower()
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
golds.append(gold)
preds.append(pred)
report = classification_report(golds, preds, output_dict=True)
return expt_log, report["accuracy"]
def get_extraction(self, question, passage, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
if "Based on the context," in prompt_suffix:
question_prefix = " Based on the context,"
else:
question_prefix = ""
extract_prompt = f"{prompt_suffix}\n\nContext: {{passage:}}\nQuestion:{question_prefix} {question}\nAnswer:"
extract_pmp = extract_prompt.format(passage=passage)
answer = get_response(
extract_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.replace(",", "").replace(".", "").replace("?", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0]
else:
answer = passage
return answer, extract_pmp
def get_question(self, statement, prompt, boost_ex, manifest, overwrite_manifest):
prompt_suffix = prompt(boost_ex)
question_prompt = f"{prompt_suffix}\n\nStatement: {{statement:}}\nQuestion:"
question_pmp = question_prompt.format(statement=statement)
answer = get_response(
question_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=50,
)
answer = answer.replace("Question: ", "")
answer = [a for a in answer.split("\n") if a]
if answer:
answer = answer[0].strip()
else:
answer = ''
statement = statement.strip().strip(".")
if (
not answer
or statement.lower() == answer.lower()
or not answer.strip().endswith("?")
):
answer = f"{statement}. Yes, no, or unknown?"
answer = answer.split("\n")[0]
return answer, question_pmp
def resolve_pred(self, answer):
is_yes = "yes" in answer.split() or "true" in answer.split()
is_no = "no" in answer.split() or "false" in answer.split()
is_maybe = "maybe" in answer.split() or "maybe" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
return pred
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
        # Merge the boosted predictions with weak supervision (WS)
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train, indecisive_ans="neither")
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
report = classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)
individual_accuracies.append(report["accuracy"])
print(report)
print("\n\n")
report = classification_report(labels, preds, output_dict=True)
print(report)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(
self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit = -1
):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
prompts_across_boost = []
preds_across_boost = []
if i == run_limit:
break
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"].strip()
passage = text.split("\n")[0]
statement = (
text.split("\n")[-1]
.replace("True, False, or Neither?", "")
.strip()
.strip("\n")
.replace("Question: ", "")
)
for boost_num, boost_examples in enumerate(boost_dfs):
all_prompts = []
# question / extract prompt
if boost_num < 3:
question, question_final_prompt = self.get_question(
statement, questioner_prompt, boost_examples[0], manifest, overwrite_manifest
)
all_prompts.append(question_final_prompt)
open_answer_f, extraction_final_prompt = self.get_extraction(
question,
passage,
extraction_qa,
boost_examples[1],
manifest,
overwrite_manifest,
)
all_prompts.append(extraction_final_prompt)
if i == 0:
print("\n".join(all_prompts))
answer_f = open_answer_f.lower()
pred = self.resolve_pred(answer_f)
pred = pred.strip().lower()
preds_across_boost.append(pred)
else:
icl_str = ""
for s_ind, s_row in boost_examples[0].iterrows():
if s_row["targets_pretokenized"].strip() == "True":
demo_label = "yes"
elif s_row["targets_pretokenized"].strip() == "False":
demo_label = "no"
else:
demo_label = "unknown"
s_text = s_row["inputs_pretokenized"]
s_passage = s_text.split("\n")[0]
s_statement = (
s_text.split("\n")[-1]
.replace("True, False, or Neither?", "")
.strip()
.strip("\n")
.replace("Question: ", "")
)
icl = f"Statement: {s_statement}\nAnswer: {demo_label}"
icl_str += f"{icl}\n\n"
description = "Is the statement Yes, No, or Unknown?"
prompt = f"{description}\n\n{icl_str}Statement: {{statement:}}\nAnswer:"
pmp = prompt.format(statement=statement)
if i == 0:
print("PMP ICL")
print(pmp)
pred = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n",
)
pred = pred.lower().strip()
pred = pred.replace(".", "").replace(",", "").replace("Label: ", "").replace("Sentiment:", "")
pred = [p for p in pred.split("\n") if p]
if pred:
pred = pred[0]
else:
pred = ""
all_prompts.append(pmp)
prompts_across_boost.append(all_prompts)
pred = self.resolve_pred(pred).lower()
preds_across_boost.append(pred)
gold = gold.strip().lower()
expt_log[ind] = {
"ind": ind,
"preds_boost": preds_across_boost,
"prompts": prompts_across_boost,
"example": text,
"pred": pred,
"gold": gold,
}
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
task_name = "anli_r3"
args.num_boost = 5
data_dir = f"{DATA_DIR}/P3/data_feather/anli_GPT_3_style_r3"
decomp = ANLIDecomp(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/ANLIR3_final.py |
#!/usr/bin/env python
# coding: utf-8
from pathlib import Path
from tqdm.auto import tqdm
import pandas as pd
import numpy as np
import sys
import json
import string
import datetime
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
realtime_qa_path = Path(f"{DATA_DIR}/realtimeqa_public/scripts/utils")
sys.path.append(str(realtime_qa_path))
try:
from tools import f1_score, metric_max_over_ground_truths, fall_back, read_jsonl, check_jsonls
except ModuleNotFoundError:
print(f"realtimeQA tools not found. Please download from realtimeQA repo to {realtime_qa_path}.")
art1_answer = InputOutputPrompt(
input_formatter=lambda x: f"{x['passages']}Question: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passages", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question given the article. Answer \"I don't know\" if you don't know.",
)
all_answer = InputOutputPrompt(
input_formatter=lambda x: f"{x['passages']}Question: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["passages", "question", "answer"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Answer the question given the articles."
)
art1_answer_examples = [
pd.DataFrame([
{
"passages": "Article 1: Walmart is slashing prices on clothing and other products - CNN New York(CNN Business) Many shoppers have pulled back on buying clothing and other discretionary items as the highest inflation in four decades pinches their pocketbooks.",
"question": "Which major US retailer announced this week it is slashing prices on clothing and other products?",
"answer": "\"Walmart\""
},
{
"passages": "Article 1: Oak Fire: California's fast-moving wildfire burns 14,000 acres and ... (CNN) A wildfire raging for a third day Sunday in central California's Mariposa County outside Yosemite National Park has burned more than 14, 000 acres and forced thousands to evacuate from rural communities.",
"question": "A raging wildfire this week forced thousands of people to evacuate communities near which national park?",
"answer": "\"Yosemite National Park\""
},
{
"passages": "Article 1: Frontier Airlines, Spirit Airlines announce budget airline merger Budget airlines Frontier Airlines and Spirit Airlines.",
"question": "Which airline announced a deal this week to buy Spirit Airlines?",
"answer": "\"I don't know\""
}
]),
pd.DataFrame([
{
"passages": "Article 1: During the initial outbreak in Wuhan, the virus and disease were commonly referred to as \"coronavirus\", \"Wuhan coronavirus\", \"the coronavirus outbreak\" and the \"Wuhan coronavirus outbreak\", with the disease sometimes called \"Wuhan pneumonia\".",
"question": "From which country did COVID originate?",
"answer": "\"Wuhan, China\""
},
{
"passages": "Article 1: Philippines earthquake: 7.0-magnitude quake in Abra kills five.",
"question": "Which country was shaken by a 7.0-magnitude earthquake this week?",
"answer": "\"Philippines\""
},
{
"passages": "Article 1: Ten Things You Need to Know Today: 10 August 2022 | The Week UK Domino’s fails in land of the pizza Domino’s Pizza has been forced to pull out of the home of the pizza.",
"question": "What percentage of babies in the UK were born out of wedlock last year?",
"answer": "\"I don't know\""
}
]),
pd.DataFrame([
{
"passages": "Article 1: Japan\'s Sakurajima volcano erupts, prompting evacuation alerts.",
"question": "A volcano eruption in which country recently prompted a Level 5 alert – the highest level – calling for people to evacuate.?",
"answer": "\"Japan\""
},
{
"passages": "Article 1: \'Weekends With Adele\': Caesars Palace Las Vegas to host residency Adele has her fans exclaiming \"Oh My God\" with her latest announcement.",
"question": "Which popular singer will launch a Las Vegas residency later this year?",
"answer": "\"Adele\""
},
{
"passages": "Article 1: As of 2020, the largest selling record label in the U.S. is Sony Music Entertainment.",
"question": "Which artist is best seller 2020?",
"answer": "\"I don't know\""
}
])
]
all_answer_examples = [
pd.DataFrame([
{
"passages": "Article 1: Walmart is slashing prices on clothing and other products - CNN New York(CNN Business) Many shoppers have pulled back on buying clothing and other discretionary items as the highest inflation in four decades pinches their pocketbooks.\nArticle 2: Retail slowdown: Target cuts vendor orders, slashes prices as it ... Associated Press NEW YORK.\nArticle 3: Stores have too much stuff. That means discounts are coming | CNN ... New York(CNN Business).\nArticle 4: GM reports strong sales but says it's prepared for possible recession ... New York (CNN Business).\nArticle 5: Target is ramping up discounts. Here's why - CNN New York(CNN Business).\n",
"question": "Which major US retailer announced this week it is slashing prices on clothing and other products?",
"answer": "\"Walmart\""
},
{
"passages": "Article 1: Article 1: JetBlue announces a deal to buy Spirit Airlines. Fares could surge.\nArticle 2: JetBlue-Spirit merger: Airlines have complaints over flights and fees Christopher Elliott Special to USA TODAY.\nArticle 3: JetBlue announces a deal to buy Spirit Airlines | CNN Business The announcement comes a day after Spirit pulled the plug on a deal to merge with Frontier.\nArticle 4: Spirit and Frontier pull plug on deal, setting stage for JetBlue to buy ... New York (CNN Buiness).\nArticle 5: Frontier Airlines, Spirit Airlines announce budget airline merger Budget airlines Frontier Airlines and Spirit Airlines.\n",
"question": "Which airline announced a deal this week to buy Spirit Airlines?",
"answer": "\"JetBlue\""
},
{
"passages": "Article 1: Oak Fire: California's fast-moving wildfire burns 14,000 acres and ... (CNN) A wildfire raging for a third day Sunday in central California's Mariposa County outside Yosemite National Park has burned more than 14, 000 acres and forced thousands to evacuate from rural communities.\nArticle 2: California Oak Fire: Rapidly-growing fire engulfs homes near ... For more on the fires, \" United Shades of America with W. Kamau Bell \" heads to California to discover how communities are learning to coexist with the frequent destruction.\nArticle 3: 5 things to know for July 25: Wildfires, Ukraine, Monkeypox, Volcano ... If your day doesn't start until you're up to speed on the latest headlines, then let us introduce you to your new favorite morning fix.\nArticle 4: Wildfires in US: 2 firefighting helicopter pilots die in Idaho ... Multiple wildfires raged across the U.S. Saturday, causing deaths, destruction and thousands of forced evacuations.\nArticle 5: Boulder wildfires: Hundreds of homes burn evacuations ordered BOULDER, Colo. — A ferocious wind-driven wildfire on Thursday destroyed hundreds of homes and businesses near Denver, forcing tens of thousands to flee and blanketing the area in smoke.\n",
"question": "A raging wildfire this week forced thousands of people to evacuate communities near which national park?",
"answer": "\"Yosemite National Park\""
}
]),
pd.DataFrame([
{
"passages": "Article 1: During the initial outbreak in Wuhan, the virus and disease were commonly referred to as \"coronavirus\", \"Wuhan coronavirus\", \"the coronavirus outbreak\" and the \"Wuhan coronavirus outbreak\", with the disease sometimes called \"Wuhan pneumonia\".\nArticle 2: The first known outbreak started in Wuhan, Hubei, China, in November 2019.\nArticle 3: A cluster of patients in China\’s Hubei Province, in the city of Wuhan, begin to experience the symptoms of an atypical pneumonia-like illness that does not respond well to standard treatments.\nArticle 4: The World Health Organization(WHO) has released its plan to investigate the origins of the COVID pandemic. The search will start in Wuhan.\nArticle 5: The World Health Organization(WHO) Country Office in China is informed of several cases of a pneumonia of unknown etiology(cause) with symptoms including shortness of breath and fever occurring in Wuhan, China.\n",
"question": "From which country did COVID originate?",
"answer": "\"Wuhan, China\""
},
{
"passages": "Article 1: Philippines earthquake: 7.0-magnitude quake in Abra kills five.\nArticle 2: Haiti earthquakes: Comparing recent quake to deadly 2010 tragedy A decade ago, an earthquake struck just outside Haiti's capital of Port-au-Prince.\nArticle 3: Indonesia earthquake: Death toll rises as Lombok, Bali shaken The death toll rose to 98 after a magnitude 7.0 earthquake rocked the Indonesian island of Lombok and nearby Bali.\nArticle 4: Alaska earthquake: Aftershocks continue to shake Last Frontier Two days after a magnitude 7.0 earthquake struck near Anchorage, Alaska is still shaking.\n",
"question": "Which country was shaken by a 7.0-magnitude earthquake this week?",
"answer": "\"Philippines\""
},
{
"passages": "Article 1: According to latest Office for National Statistics (ONS) data, of 624,828 live births registered, 320,713 were to women who were not married or in a civil partnership at the time – 51.3% of the total.\nArticle 2: Ten Things You Need to Know Today: 10 August 2022 | The Week UK Domino’s fails in land of the pizza Domino’s Pizza has been forced to pull out of the home of the pizza.\nArticle 3: Gay couple sues State Department for denying daughter's citizenship Couple's daughter was born to a surrogate in Britain.\nArticle 4: Ex-Jehovah's Witnesses say church's shunning caused too many.\nArticle 5: Kids before marriage is becoming the norm (and that\'s not good) What\’s wrong with America? Everybody has an answer these days.\n",
"question": "What percentage of babies in the UK were born out of wedlock last year?",
"answer": "\"51.3%\""
}
]),
pd.DataFrame([
{
"passages": "Article 1: Japan\'s Sakurajima volcano erupts, prompting evacuation alerts.\nArticle 2: (CNN) Here\'s a tip if you are among the millions of people quitting your job: Don\'t cash out your 401(k)! As tempting as it may seem, there are other options that will give you better returns in the long run.\nArticle 3: (CNN) Did you take your vitamins this morning? Daily vitamin D and fish oil supplements may help prevent some adults from developing autoimmune disorders such as arthritis and psoriasis.\nArticle 4: (CNN) The federal tax filing season is underway. And even though the IRS still hasn\'t processed millions of returns from last year due to Covid-19 and a lack of funding, there are still ways to help ensure your tax filing experience is hassle-free.\nArticle 5: (CNN) Happy Valentine\'s Day, and happy Conveniently Call In Sick to Work Day for all the Los Angeles Rams fans out there.\n",
"question": "A volcano eruption in which country recently prompted a Level 5 alert – the highest level – calling for people to evacuate.?",
"answer": "\"Japan\""
},
{
"passages": "Article 1: \'Weekends With Adele\': Caesars Palace Las Vegas to host residency Adele has her fans exclaiming \"Oh My God\" with her latest announcement.\nArticle 2: Usher is \'ready to drop\' upcoming album \'Confessions 2\' this year Associated Press LOS ANGELES — Usher has a confession — he’s almost ready to release the sequel to his groundbreaking, epic 2004 album “Confessions.”\nArticle 3: When could Britney Spears start her Vegas residency shows in 2022?\nArticle 4: Backstreet Boys return to Las Vegas for holiday residency Backstreet will be back in Las Vegas.\nArticle 5: Miami Vice postpones Las Vegas residency due to rising COVID risks.\n",
"question": "Which popular singer will launch a Las Vegas residency later this year?",
"answer": "\"Adele\""
},
{
"passages": "Article 1: Best selling artists worldwide and Eminem makes into the Top 10. You can check the lit below: 1 BTS 2 Taylor Swift 3 Drake 4 The Weeknd 5 Billie Eilish.\nArticle 2: Pop group BTS have been named the number one artists of 2020 by the IFPI.\nArticle 3: The market reported total revenues of $21.6 billion (roughly €18.2bn), marking its sixth consecutive year of growth and the highest figure since 2002.\nArticle 4: BTS were the world\'s biggest act overall, marking the first time a South Korean band has topped the global chart.\nArticle 5: Music is an art form, and cultural activity, whose medium is sound. Well, music has no language and is soothing and stress relieving, no matter in whichever language it is.\n",
"question": "Which artist is best seller 2020?",
"answer": "\"pop group BTS\""
}
])
]
# Taken from realtimeQA github repo
# https://github.com/realtimeqa/realtimeqa_public
def get_retrieved_text(retrieved_datum, top_k=5, rm_date_r=False):
search_result = retrieved_datum["search_result"]
retrieved_text = ""
for art_i, article in enumerate(search_result[:top_k]):
if "publish_date" not in article:
continue
date = article["publish_date"]
content = article["text"]
if content == '':
continue
date = datetime.datetime.strptime(date, '%Y/%m/%d')
date = date.strftime("%B %d, %Y")
#first_paraph = content.split("\n\n")[0]
first_paraph = " ".join(content.split("\n\n")[:2])
if "title" in article.keys():
first_paraph = article["title"] + " " + first_paraph
if not rm_date_r:
retrieved_text += "Article on {}: {}\n".format(date, first_paraph)
else:
retrieved_text += "Article: {}\n".format(first_paraph)
return retrieved_text
def read_dates(data_dir, dates):
all_dfs = []
for date in dates:
passages = []
in_file = str(Path(data_dir) / f"{date}_qa.jsonl")
gold_df = pd.DataFrame([json.loads(line) for line in open(in_file)])
gold_df["gold_answers"] = gold_df.apply(lambda x: [x["choices"][int(idx)] for idx in x["answer"]], axis=1)
gcs_file = in_file.replace("_qa.jsonl", "_gcs.jsonl")
dpr_file = in_file.replace("_qa.jsonl", "_dpr.jsonl")
gcs = read_jsonl(gcs_file)
dpr = read_jsonl(dpr_file)
check_jsonls(gcs, dpr)
retrieved_data = fall_back(gcs, dpr)
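# fall_back merges the two retrieval sources (GCS search and DPR) as the
# realtimeQA utilities define; assumed here to prefer one source and fall
# back to the other when a query has no results.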
for q_idx in range(len(gold_df)):
retrieved_text = get_retrieved_text(retrieved_data[q_idx], top_k=5, rm_date_r=True)
passages.append(retrieved_text)
gold_df["passages"] = passages
all_dfs.append(gold_df)
return pd.concat(all_dfs)
class RealtimeQADecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
# We boost by changing passage order
return [
art1_answer_examples[boost_id],
all_answer_examples[boost_id]
]
def read_data(self, save_dir, overwrite_data):
val_dates_for_benchmark = ["20220617", "20220624", "20220701", "20220708", "20220715", "20220722"]
train_dates_for_benchmark = ["20220729", "20220805", "20220812"]
save_data = Path(f"{save_dir}/{self.task_name}/data.jsonl")
if not save_data.exists() or overwrite_data:
test_data = read_dates(self.data_dir, val_dates_for_benchmark)
test_data = test_data.reset_index(drop=True)
test_data.to_feather(f"{save_data}")
else:
test_data = pd.read_feather(f"{save_data}")
save_data = Path(f"{save_dir}/{self.task_name}/train_data.feather")
if not save_data.exists() or overwrite_data:
train_data = read_dates(self.data_dir, train_dates_for_benchmark)
train_data = train_data.reset_index(drop=True)
train_data.to_feather(f"{save_data}")
else:
train_data = pd.read_feather(f"{save_data}")
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
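# These columns come back from the feather file as numpy arrays; convert
# them to plain Python lists before further processing.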
row["answer"] = row["answer"].tolist()
row["choices"] = row["choices"].tolist()
row["gold_answers"] = row["gold_answers"].tolist()
question = row["question_sentence"]
passages = row["passages"]
golds = row["gold_answers"]
assert len(golds) == 1
icl_str = ""
if do_few_shot:
# Taken from realtime_qa github repo
icl_str += f"Question: What is the capital city of Japan?\nAnswer: Tokyo\n\n"
pmp = f"{icl_str}{passages}Question: {question}\nAnswer:"
if i == 0:
print(pmp)
try:
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
except Exception:
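# Assumed failure mode: the full prompt exceeded the model's context window.
# Retry with each article truncated to its first 100 whitespace tokens.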
passage_list = [p for p in passages.split("Article:") if p.strip()]
passage_list = [" ".join(p.split(" ")[:100]) for p in passage_list]
passages = "Article:" + "Article:".join(passage_list)
pmp = f"{passages}Question: {question}\nAnswer:"
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
pred = raw_answer.split("\n")[0].strip()
entry = {
"ind": ind,
"example": row.to_dict(),
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": golds[0],
}
expt_log[ind] = entry
preds.append(pred)
labels.append(golds[0])
metric = np.mean([metric_max_over_ground_truths(f1_score, pred, [gold]) for pred, gold in zip(preds, labels)])
# Compute accuracy
# metric = np.mean([pred == gold for pred, gold in zip(preds, labels)])
return expt_log, metric
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(
np.mean([metric_max_over_ground_truths(f1_score, pred, [gold]) for pred, gold in zip([p[i] for p in all_boost_preds], labels)])
)
metric = np.mean([metric_max_over_ground_truths(f1_score, pred, [gold]) for pred, gold in zip(preds, labels)])
return expt_log, expt_log_train, metric, individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
if i == run_limit:
break
row["answer"] = [d for d in row["answer"]]
row["choices"] = [d for d in row["choices"]]
row["gold_answers"] = [d for d in row["gold_answers"]]
question = row["question_sentence"]
passages = row["passages"]
golds = row["gold_answers"]
assert len(golds) == 1
prompts_across_boost = []
preds_across_boost = []
for boost_idx, boost_examples in enumerate(boost_dfs):
all_prompts = []
passage_list = [p for p in passages.split("Article:") if p.strip()]
passage_list = [" ".join(p.split(" ")[:100]) for p in passage_list]
assert len(passage_list) > 1
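# The first prompt deliberately sees only Article 1, so the model can answer
# "I don't know" and defer to the all-articles prompt below.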
passages_for_prompt = "" .join([f"Article {i+1}:{a}" for i, a in enumerate(passage_list[:1])])
# Art1 answer
icl_str = art1_answer(boost_examples[0])
pmp = f"{icl_str}\n\n{passages_for_prompt}Question: {question}\nAnswer:"
all_prompts.append(pmp)
raw_answer_art1 = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
pred_art1 = raw_answer_art1.split("\n")[0].strip("\"").strip()
icl_str = all_answer(boost_examples[1])
passages_for_prompt = "" .join([f"Article {i+1}:{a}" for i, a in enumerate(passage_list)])
all_pmp = f"{icl_str}\n\n{passages_for_prompt}Question: {question}\nAnswer:"
all_prompts.append(all_pmp)
if i == 0:
print(pmp)
print(all_pmp)
raw_answer_all = get_response(
all_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
pred_all = raw_answer_all.split("\n")[0].strip("\"").strip()
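# Prefer the single-article answer; fall back to the all-articles answer
# only when the first prompt abstains.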
if pred_art1 == "I don't know":
pred = pred_all
else:
pred = pred_art1
pred = pred.translate(str.maketrans('', '', string.punctuation))
pred = pred.lower()
# if pred != golds[0].lower() and golds[0].lower() in passages_for_prompt.lower():
# print("PASSAGES", passages_for_prompt)
# print("QUESTION", question)
# print("ANSWER", golds[0])
# print("PRED", pred)
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": row.to_dict(),
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": golds[0].lower(),
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(golds[0])
return expt_log, all_boost_preds, labels
def main():
args = get_args()
if not Path(realtime_qa_path).exists():
raise ValueError(f"Path {realtime_qa_path} does not exist. Download from realtimeQA repo to this path.")
task_name = "realtime_qa"
data_dir = f"{DATA_DIR}/realtimeqa_public/past/2022"
if not Path(data_dir).exists():
raise ValueError(f"Data dir {data_dir} does not exist. Download from realtimeQA repo.")
decomp = RealtimeQADecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/RealtimeQA_final.py |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
##############################################################################################################################
# All prompts
summarize = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}",
output_formatter=lambda x: f"Summarize: the passage \"Passage\": {x['summarize']}",
required_keys=["passage", "summarize"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Summarize the passage.\n\n"
)
summarize_examples = [
pd.DataFrame([
{
"passage": "China overtakes United States as top destination for foreign investment (AFP). AFP - China overtook the United States as a top global destination for foreign direct investment (FDI) in 2003 while the Asia-Pacific region attracted more investment than any other developing region, a UN report said.",
"summarize": "The passage is about foreign direct investment."
},
{
"passage": "Colangelo resigns as CEO of D-Backs. Jerry Colangelo has resigned his position as chief executive officer of the Arizona Diamondbacks, effective immediately, handing the reins of the organization to CEO Elect Jeff Moorad.",
"summarize": "The passage is about the Arizona Diamondbacks."
},
{
"passage": "3 injured in plant fire in Japan. TOKYO, Aug. 20 (Xinhuanet) -- Fire broke out Friday at a tire plant belonging to Bridgestone Corp. in Amagi, western Fukuoka Prefecture of Japan, leaving 13 people injured.",
"summarize": "The passage is about a plant fire."
}
]),
pd.DataFrame([
{
"passage": "Abramovich faces rich list challenge. Lakshmi Mittal, the Indian-born steel magnate, yesterday staked a claim to overtake Roman Abramovich as Britain's richest man with a 10bn deal to create the world's largest steelmaker.",
"summarize": "The passage is about a 10bn deal."
},
{
"passage": "U.N. Deadlocks on Cloning Ban. The United Nations abandons efforts to ban all cloning and opts for a non-binding resolution. It's a blow to President Bush's efforts to push a ban and a victory for embryonic stem cell researchers. By Kristen Philipkoski",
"summarize": "The passage is about stem cell research."
},
{
"passage": "Tennis: Serena Williams Reaches Finals of China Open. Top seed Serena Williams of the United States has powered her way into the finals of the China Open tennis tournament in Beijing with a straight sets (6-2, 6-3) victory over fourth-seeded Vera Zvonareva of Russia.",
"summarize": "The passage is about tennis."
}
]),
pd.DataFrame([
{
"passage": "San Francisco at Atlanta, 1:05 PM. ATLANTA (Ticker) -- Rookie Noah Lowry looks to win his fourth straight decision when he starts for the San Francisco Giants in the finale of a four-game series with the Atlanta Braves.",
"summarize": "The passage is about the San Francisco Giants."
},
{
"passage": "Suffocation cited in most deaths. At least 84 Muslim protesters died, mostly from suffocation so severe their eyes bled, after being arrested and locked in army trucks following clashes with security forces in the south, officials said yesterday.",
"summarize": "The passage is about Muslim protesters."
},
{
"passage": "Merrill, UBS Up Apple Stock Estimates. As consumers start spending on Christmas, two brokerage houses raised their estimates on Apple Computer (AAPL) stock Monday to more than US $77, predicting.",
"summarize": "The passage is about Apple Stock Estimates."
}
]),
]
categorize = InputOutputPrompt(
input_formatter=lambda x: f"Passage: {x['passage']}\nSummary: {x['summary']}",
output_formatter=lambda x: f"The summary \"Summary\" fits \"Category\": {x['category']}",
required_keys=["passage", "summary", "category"],
input_output_sep="\n",
example_sep="\n\n",
instruction="Pick the correct category for the passage.\n\n\"Categories\":\n- World News\n- Sports\n- Business\n- Technology and Science\n\n"
)
categorize_examples = [
pd.DataFrame([
{
"passage": "China overtakes United States as top destination for foreign investment (AFP). AFP - China overtook the United States as a top global destination for foreign direct investment (FDI) in 2003 while the Asia-Pacific region attracted more investment than any other developing region, a UN report said.",
"summary": "The passage is about foreign direct investment.",
"category": "Business"
},
{
"passage": "Colangelo resigns as CEO of D-Backs. Jerry Colangelo has resigned his position as chief executive officer of the Arizona Diamondbacks, effective immediately, handing the reins of the organization to CEO Elect Jeff Moorad.",
"summary": "The passage is the Arizona Diamondbacks.",
"category": "Sports"
},
{
"passage": "3 injured in plant fire in Japan. TOKYO, Aug. 20 (Xinhuanet) -- Fire broke out Friday at a tire plant belonging to Bridgestone Corp. in Amagi, western Fukuoka Prefecture of Japan, leaving 13 people injured.",
"summary": "The passage is about a plant fire.",
"category": "World News"
}
]),
pd.DataFrame([
{
"passage": "Abramovich faces rich list challenge. Lakshmi Mittal, the Indian-born steel magnate, yesterday staked a claim to overtake Roman Abramovich as Britain's richest man with a 10bn deal to create the world's largest steelmaker.",
"summary": "The passage is about a 10bn deal.",
"category": "Business"
},
{
"passage": "U.N. Deadlocks on Cloning Ban. The United Nations abandons efforts to ban all cloning and opts for a non-binding resolution. It's a blow to President Bush's efforts to push a ban and a victory for embryonic stem cell researchers. By Kristen Philipkoski",
"summary": "The passage is about stem cell research.",
"category": "Technology and Science"
},
{
"passage": "Tennis: Serena Williams Reaches Finals of China Open. Top seed Serena Williams of the United States has powered her way into the finals of the China Open tennis tournament in Beijing with a straight sets (6-2, 6-3) victory over fourth-seeded Vera Zvonareva of Russia.",
"summary": "The passage is about tennis",
"category": "Sports"
}
]),
pd.DataFrame([
{
"passage": "San Francisco at Atlanta, 1:05 PM. ATLANTA (Ticker) -- Rookie Noah Lowry looks to win his fourth straight decision when he starts for the San Francisco Giants in the finale of a four-game series with the Atlanta Braves.",
"summary": "The passage is about the San Francisco Giants.",
"category": "Sports"
},
{
"passage": "Suffocation cited in most deaths. At least 84 Muslim protesters died, mostly from suffocation so severe their eyes bled, after being arrested and locked in army trucks following clashes with security forces in the south, officials said yesterday.",
"summary": "The passage is about Muslim protesters.",
"category": "World News"
},
{
"passage": "Merrill, UBS Up Apple Stock Estimates. As consumers start spending on Christmas, two brokerage houses raised their estimates on Apple Computer (AAPL) stock Monday to more than US $77, predicting",
"summary": "The passage is about Apple Stock Estimates.",
"category": "Business"
}
]),
]
description_zeroshot = """
Pick the correct category for the passage.
Categories:
- World News
- Sports
- Business
- Technology and Science"""
label_dict = {
0: 'World News',
1: 'Sports',
2: 'Business',
3: 'Technology and Science'
}
def format_data(df):
# Pre-processing code from: https://github.com/tonyzhaozh/few-shot-learning
sentences = df['Title'] + ". " + df['Description']
sentences = list(
[item.replace(' #39;s', '\'s').replace(' quot;', "\"").replace('\\', " ").replace(' #39;ll', "'ll") for item
in sentences]) # some basic cleaning
labels = list(df['Class Index'])
labels = [l - 1 for l in labels] # make them 0, 1, 2, 3 instead of 1, 2, 3, 4
return sentences, labels
class AGNews(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def read_data(self, save_dir, overwrite_data):
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
test_data = pd.read_csv(f"{self.data_dir}/{self.val_split}.csv")
test_sentences, test_labels = format_data(test_data)
test_data = pd.DataFrame({
'sentence': test_sentences,
'label': test_labels,
})
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(save_data)
save_data = Path(f"{save_dir}/{self.task_name}/train_data.feather")
if not save_data.exists() or overwrite_data:
train_data = pd.read_csv(f"{self.data_dir}/train.csv")
train_sentences, train_labels = format_data(train_data)
train_data = pd.DataFrame({
'sentence': train_sentences,
'label': train_labels,
})
train_data.to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = pd.read_feather(save_data)
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_boost_decomp_examples(self, train_data, boost_id):
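# Each boost set pairs a summarize demo set with the matching categorize
# demo set, so both stages of the pipeline shift together.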
return [
summarize_examples[boost_id],
categorize_examples[boost_id],
]
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = [0, 1, 2, 3]
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
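# If an even split across the four classes would overshoot k_shot, shrink
# the per-class sample size for the remaining labels.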
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["label"] == label].sample(
num_per_class
)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
print(mini_df.index)
return mini_df
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['sentence']
gold = label_dict[row['label']]
gold = gold.replace("_", " ").strip().replace(",", "")
icl_str = f"{description_zeroshot}"
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_label = label_dict[s_row['label']]
icl_str += f"\n\nPassage: {s_row['sentence']}\nCategory: {s_label}"
prompt = f"{icl_str}\n\nPassage: {{text:}}\nCategory:"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n\n",
)
answer = answer.split("\n")
answer = [a for a in answer if a]
pred = ''
if answer:
pred = answer[0]
pred = pred.replace("-", "").strip().replace(",", "")
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
labels_clean = [v for k, v in label_dict.items()]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['sentence']
gold = label_dict[row['label']]
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
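# Stage 1: ask the model to summarize the passage before classifying it.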
prompt_suffix = summarize(boost_examples[0])
summary_prompt = f"{prompt_suffix}\n\nPassage: {{text:}}\nSummarize: the passage \"Passage\":"
summary_pmp = summary_prompt.format(text=text)
output = get_response(
summary_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=40,
)
summary = output.split("\n")[0].split(":")[-1].strip("\n")
all_prompts.append(summary_pmp)
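# Stage 2: feed the passage plus the generated summary into the category prompt.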
prompt_suffix = categorize(boost_examples[1])
category_prompt = f"{prompt_suffix}\n\nPassage: {{text:}}\nSummary: {{summary:}}\nThe summary \"Summary\" fits \"Category\":"
category_pmp = category_prompt.format(text=text, summary=summary)
output = get_response(
category_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=5,
)
all_prompts.append(category_pmp)
if i == 0:
print(summary_pmp)
print("\n---------------------------------------\n")
print(category_pmp)
answer = output.split("\n")[0].strip().lower()
answer = answer.replace("-", "").strip()
gold = gold.replace("_", " ").strip().lower()
pred = answer
for label in labels_clean:
if label.lower() in answer.lower():
pred = label.lower()
break
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 3
task_name = "agnews"
data_dir = f"{DATA_DIR}/agnews/"
if not Path(data_dir).exists():
raise ValueError(f"Data directory {data_dir} does not exist. Download AGNews from https://github.com/tonyzhaozh/few-shot-learning.")
decomp = AGNews(task_name, data_dir, val_split="test")
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/AGNews_final.py |
#!/usr/bin/env python
# coding: utf-8
from tqdm.auto import tqdm
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
extract = InputOutputPrompt(
input_formatter=lambda x: f"Context: {x['context']}\n\nQuestion: {x['question']}",
output_formatter=lambda x: f"Answer: {x['answer']}",
required_keys=["context", "question", "answer"],
input_output_sep="\n\n",
example_sep="\n\n----\n\n",
instruction="Answer the question using the context.\n\n"
)
extract_examples = [
pd.DataFrame([
{
"context": "Tonic water -- Tonic water (or Indian tonic water) is a carbonated soft drink in which quinine is dissolved. Originally used as a prophylactic against malaria, tonic water usually now has a significantly lower quinine content and is consumed for its distinctive bitter flavor. It is often used in mixed drinks, particularly in gin and tonic.",
"question": "does tonic water still have quinine in it?",
"answer": "yes"
},
{
"context": "Northern bobwhite -- The northern bobwhite, Virginia quail or (in its home range) bobwhite quail (Colinus virginianus) is a ground-dwelling bird native to the United States, Mexico, and the Caribbean. It is a member of the group of species known as New World quails (Odontophoridae). They were initially placed with the Old World quails in the pheasant family (Phasianidae), but are not particularly closely related. The name ``bobwhite'' derives from its characteristic whistling call. Despite its secretive nature, the northern bobwhite is one of the most familiar quails in eastern North America because it is frequently the only quail in its range. Habitat degradation has likely contributed to the northern bobwhite population in eastern North America declining by roughly 85% from 1966-2014. This population decline is apparently range-wide and continuing.",
"question": "is a quail the same as a bobwhite?",
"answer": "yes"
},
{
"context": "United States Department of Homeland Security -- In fiscal year 2017, it was allocated a net discretionary budget of $40.6 billion. With more than 240,000 employees, DHS is the third largest Cabinet department, after the Departments of Defense and Veterans Affairs. Homeland security policy is coordinated at the White House by the Homeland Security Council. Other agencies with significant homeland security responsibilities include the Departments of Health and Human Services, Justice, and Energy",
"question": "is department of homeland security part of dod?",
"answer": "no"
}
]),
pd.DataFrame([
{
"context": "Debit card cashback -- The services are restricted to debit cards where the merchant pays a fixed fee for the transaction, it is not offered on payments by credit card because they would pay a percentage commission on the additional cash amount to their bank or merchant service provider.",
"question": "can i ask for cashback with a credit card?",
"answer": "no"
},
{
"context": "Bundle branch block -- A bundle branch block can be diagnosed when the duration of the QRS complex on the ECG exceeds 120 ms. A right bundle branch block typically causes prolongation of the last part of the QRS complex, and may shift the heart's electrical axis slightly to the right. The ECG will show a terminal R wave in lead V1 and a slurred S wave in lead I. Left bundle branch block widens the entire QRS, and in most cases shifts the heart's electrical axis to the left. The ECG will show a QS or rS complex in lead V1 and a monophasic R wave in lead I. Another normal finding with bundle branch block is appropriate T wave discordance. In other words, the T wave will be deflected opposite the terminal deflection of the QRS complex. Bundle branch block, especially left bundle branch block, can lead to cardiac dyssynchrony. The simultaneous occurrence of left and right bundle branch block leads to total AV block.",
"question": "can you have a right and left bundle branch block?",
"answer": "yes"
},
{
"context": "Windsor Castle -- Queen Victoria and Prince Albert made Windsor Castle their principal royal residence, despite Victoria complaining early in her reign that the castle was ``dull and tiresome'' and ``prison-like'', and preferring Osborne and Balmoral as holiday residences. The growth of the British Empire and Victoria's close dynastic ties to Europe made Windsor the hub for many diplomatic and state visits, assisted by the new railways and steamships of the period. Indeed, it has been argued that Windsor reached its social peak during the Victorian era, seeing the introduction of invitations to numerous prominent figures to ``dine and sleep'' at the castle. Victoria took a close interest in the details of how Windsor Castle was run, including the minutiae of the social events. Few visitors found these occasions comfortable, both due to the design of the castle and the excessive royal formality. Prince Albert died in the Blue Room at Windsor Castle in 1861 and was buried in the Royal Mausoleum built at nearby Frogmore, within the Home Park. The prince's rooms were maintained exactly as they had been at the moment of his death and Victoria kept the castle in a state of mourning for many years, becoming known as the ``Widow of Windsor'', a phrase popularised in the famous poem by Rudyard Kipling. The Queen shunned the use of Buckingham Palace after Albert's death and instead used Windsor Castle as her residence when conducting official business near London. Towards the end of her reign, plays, operas, and other entertainments slowly began to be held at the castle again, accommodating both the Queen's desire for entertainment and her reluctance to be seen in public.",
"question": "is buckingham palace the same as windsor castle?",
"answer": "no"
}
]),
pd.DataFrame([
{
"context": "The Princess and the Goblin (film) -- The Princess and the Goblin (Hungarian: A hercegnő és a kobold) is a 1991 British-Hungarian-American animated musical fantasy film directed by József Gémes and written by Robin Lyons, an adaptation of George MacDonald's 1872 novel of the same name.",
"question": "is the princess and the goblin a disney movie?",
"answer": "no"
},
{
"context": "Field marshal (United Kingdom) -- Field Marshal has been the highest rank in the British Army since 1736. A five-star rank with NATO code OF-10, it is equivalent to an Admiral of the Fleet in the Royal Navy or a Marshal of the Royal Air Force in the Royal Air Force (RAF). A Field Marshal's insignia consists of two crossed batons surrounded by yellow leaves below St Edward's Crown. Like Marshals of the RAF and Admirals of the Fleet, Field Marshals traditionally remain officers for life, though on half-pay when not in an appointment. The rank has been used sporadically throughout its history and was vacant during parts of the 18th and 19th centuries (when all former holders of the rank were deceased). After the Second World War, it became standard practice to appoint the Chief of the Imperial General Staff (later renamed Chief of the General Staff) to the rank on his last day in the post. Army officers occupying the post of Chief of the Defence Staff, the professional head of all the British Armed Forces, were usually promoted to the rank upon their appointment.",
"question": "is there a field marshal in the british army?",
"answer": "yes"
},
{
"context": "Washington, D.C. -- The signing of the Residence Act on July 16, 1790, approved the creation of a capital district located along the Potomac River on the country's East Coast. The U.S. Constitution provided for a federal district under the exclusive jurisdiction of the Congress and the District is therefore not a part of any state. The states of Maryland and Virginia each donated land to form the federal district, which included the pre-existing settlements of Georgetown and Alexandria. Named in honor of President George Washington, the City of Washington was founded in 1791 to serve as the new national capital. In 1846, Congress returned the land originally ceded by Virginia; in 1871, it created a single municipal government for the remaining portion of the District.",
"question": "is washington dc a part of a state?",
"answer": "no"
}
]),
pd.DataFrame([
{
"context": "Legal issues in airsoft -- Under federal law, airsoft guns are not classified as firearms and are legal for all ages. People under the age of 18 are not permitted to buy airsoft guns over the counter in stores. However, a person of any age may use one (with the permission of their parents, of course, for anyone under 18). This is also the case for the laws in each state. However, in some major cities, the definition of a firearm within their respected ordinances includes propulsion by spring or compressed air, thus making airsoft subject to applicable laws. For example, airsoft guns within the state of California can only be bought by a person above the age of 18. However, no laws indicate an age requirement to sell airsoft guns. Generally speaking, toy, look-alike, and imitation firearms must have an orange tip during shipping and transportation.",
"question": "do you have to be 18 to buy airsoft guns?",
"answer": "yes"
},
{
"context": "India national football team -- India has never participated in the FIFA World Cup, although the team did qualify by default for the 1950 World Cup after all the other nations in their qualification group withdrew. However, India withdrew prior to the beginning of the tournament. The team has also appeared three times in the Asia's top football competition, the AFC Asian Cup. Their best result in the competition occurred in 1964 when the team finished as runners-up. India also participate in the SAFF Championship, the top regional football competition in South Asia. They have won the tournament six times since it began in 1993. \nQuestion: has india ever played in fifa world cup.",
"question": "has india ever played in fifa world cup?",
"answer": "no"
},
{
"context": "Pan-American Highway -- The Pan-American Highway is a network of roads measuring about 30,000 kilometres (19,000 mi) in total length. Except for a rainforest break of approximately 160 km (100 mi), called the Darién Gap, the road links almost all of the mainland countries of the Americas in a connected highway system. According to Guinness World Records, the Pan-American Highway is the world's longest ``motorable road''. However, because of the Darién Gap, it is not possible to cross between South America and Central America, alternatively being able to circumnavigate this terrestrial stretch by sea.",
"question": "could you drive from north america to south america?",
"answer": "no"
}
]),
pd.DataFrame([
{
"context": "Appointment and confirmation to the Supreme Court of the United States -- The appointment and confirmation of Justices to the Supreme Court of the United States involves several steps set forth by the United States Constitution, which have been further refined and developed by decades of tradition. Candidates are nominated by the President of the United States and must face a series of hearings in which both the nominee and other witnesses make statements and answer questions before the Senate Judiciary Committee, which can vote to send the nomination to the full United States Senate. Confirmation by the Senate allows the President to formally appoint the candidate to the court.",
"question": "do supreme court justices have to be approved by congress?",
"answer": "no"
},
{
"context": "Glowplug -- Diesel engines, unlike gasoline engines, do not use spark plugs to induce combustion. Instead, they rely solely on compression to raise the temperature of the air to a point where the diesel combusts spontaneously when introduced to the hot high pressure air. The high pressure and spray pattern of the diesel ensures a controlled, complete burn. The piston rises, compressing the air in the cylinder; this causes the air's temperature to rise. By the time the piston reaches the top of its travel path, the temperature in the cylinder is very high. The fuel mist is then sprayed into the cylinder; it instantly combusts, forcing the piston downwards, thus generating power. The pressure required to heat the air to that temperature, however, requires a large and strong engine block.",
"question": "is there a spark plug in diesel engine?",
"answer": "no"
},
{
"context": "Buffy the Vampire Slayer Season Eight -- Buffy the Vampire Slayer Season Eight is a comic book series published by Dark Horse Comics from 2007 to 2011. The series serves as a canonical continuation of the television series Buffy the Vampire Slayer, and follows the events of that show's final televised season. It is produced by Joss Whedon, who wrote or co-wrote three of the series arcs and several one-shot stories. The series was followed by Season Nine in 2011.",
"question": "is there a season 8 of buffy the vampire slayer?",
"answer": "yes"
}
]),
pd.DataFrame([
{
"context": "Uterus -- The uterus (from Latin ``uterus'', plural uteri) or womb is a major female hormone-responsive secondary sex organ of the reproductive system in humans and most other mammals. In the human, the lower end of the uterus, the cervix, opens into the vagina, while the upper end, the fundus, is connected to the fallopian tubes. It is within the uterus that the fetus develops during gestation. In the human embryo, the uterus develops from the paramesonephric ducts which fuse into the single organ known as a simplex uterus. The uterus has different forms in many other animals and in some it exists as two separate uteri known as a duplex uterus.",
"question": "are the womb and the uterus the same thing?",
"answer": "yes"
},
{
"context": "Super Bowl XLVII -- Super Bowl XLVII was an American football game between the American Football Conference (AFC) champion Baltimore Ravens and the National Football Conference (NFC) champion San Francisco 49ers to decide the National Football League (NFL) champion for the 2012 season. The Ravens defeated the 49ers by the score of 34--31, handing the 49ers their first Super Bowl loss in franchise history. The game was played on Sunday, February 3, 2013 at Mercedes-Benz Superdome in New Orleans, Louisiana. This was the tenth Super Bowl to be played in New Orleans, equaling Miami's record of ten in an individual city.",
"question": "did the 49ers win the superbowl in 2012?",
"answer": "no"
},
{
"context": "Blacklight -- A blacklight (or often black light), also referred to as a UV-A light, Wood's lamp, or simply ultraviolet light, is a lamp that emits long-wave (UV-A) ultraviolet light and not much visible light.",
"question": "are black lights and uv lights the same thing?",
"answer": "yes"
}
]),
pd.DataFrame([
{
"context": "2018 Winter Olympics -- In June 2017, Ubisoft announced that it would release an expansion pack for its winter sports video game Steep entitled Road to the Olympics, which features new game modes and content inspired by the 2018 Winter Olympics.",
"question": "will there be a winter olympics video game?",
"answer": "yes"
},
{
"context": "Castor oil -- Castor oil is a vegetable oil obtained by pressing the seeds of the castor oil plant (Ricinus communis). The common name ``castor oil'', from which the plant gets its name, probably comes from its use as a replacement for castoreum, a perfume base made from the dried perineal glands of the beaver (castor in Latin).",
"question": "is vegetable oil and castor oil the same?",
"answer": "no"
},
{
"context": "The Mother (How I Met Your Mother) -- Tracy McConnell, better known as ``The Mother'', is the title character from the CBS television sitcom How I Met Your Mother. The show, narrated by Future Ted, tells the story of how Ted Mosby met The Mother. Tracy McConnell appears in 8 episodes from ``Lucky Penny'' to ``The Time Travelers'' as an unseen character; she was first seen fully in ``Something New'' and was promoted to a main character in season 9. The Mother is played by Cristin Milioti.",
"question": "does how i met your mother ever show ted's wife?",
"answer": "yes"
}
]),
]
prefix_select_zeroshot = """Answer the question."""
class BoolQDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, train_data, boost_id):
return [
extract_examples[boost_id],
]
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
labels = ['No', 'Yes']
num_per_class = int(np.ceil(k_shot / len(labels)))
print(f"Selecting {num_per_class} examples per class.")
dfs = []
total_in_context = 0
for label in labels:
while num_per_class + total_in_context > k_shot:
num_per_class -= 1
sub_df = train_data[train_data["targets_pretokenized"] == label]
sub_df = sub_df.sample(num_per_class)
dfs.append(sub_df)
total_in_context += num_per_class
if total_in_context == k_shot:
break
mini_df = pd.concat(dfs)
return mini_df
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['inputs_pretokenized']
passage = text.split("\nQuestion")[0].strip()
question = text.split("\nQuestion")[-1].split("\nAnswer")[0].strip().strip(":").strip().strip("\n").strip("?").strip()
gold = row['targets_pretokenized']
icl_str = f"{prefix_select_zeroshot}"
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
s_text = s_row['inputs_pretokenized']
s_passage = s_text.split("\nQuestion")[0].strip("\n").strip()
s_question = s_text.split("\nQuestion")[-1].split("\nAnswer")[0].strip().strip(":").strip().strip("\n").strip("?").strip()
icl_str += f"\n\nContext: {s_passage}\nQuestion: {s_question}?\nAnswer: {s_row['targets_pretokenized']}"
prompt = f"{icl_str}\n\nContext: {passage}\nQuestion: {question}?\nAnswer:"
if i == 0:
print(prompt)
answer = get_response(
prompt,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=10,
stop_token="\n\n",
)
answer = answer.strip("\n").lower()
pred = answer.strip()
gold = gold.strip().lower()
entry = {
"ind": ind,
"example": text,
"base_prompt": prompt,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, run_limit=1000)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, run_limit=-1):
expt_log = {}
all_boost_preds = []
labels = []
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
text = row['inputs_pretokenized']
passage = text.split("\nQuestion")[0].strip()
question = text.split("\nQuestion")[-1].split("\nAnswer")[0].replace(
"True or False?", "").strip().strip(":").strip().strip("?").strip()
gold = row['targets_pretokenized']
if i == run_limit:
break
prompts_across_boost = []
preds_across_boost = []
for boost_examples in boost_dfs:
all_prompts = []
prompt_suffix = extract(boost_examples[0])
extract_prompt = f"{prompt_suffix}\n\n----\n\nContext: {{passage:}}\n\nQuestion: {{question:}}?\n\nAnswer:"
extract_pmp = extract_prompt.format(passage=passage, question=question)
output = get_response(
extract_pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=5,
)
all_prompts.append(extract_pmp)
if i == 0:
print(extract_pmp)
answer = output.strip("\n").lower()
answer_lines = [a for a in answer.split("\n") if a]
answer = answer_lines[0] if answer_lines else ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
gold = gold.strip().lower()
pred = answer
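# Map the free-text answer onto a binary label; ambiguous outputs (both or
# neither of "yes"/"no" present) default to "No".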
is_yes = "yes" in pred.split()
is_no = "no" in pred.split()
pred = "No"
if is_yes and (not is_no):
pred = "Yes"
if is_no and (not is_yes):
pred = "No"
pred = pred.lower()
prompts_across_boost.append(all_prompts)
preds_across_boost.append(pred)
entry = {
"ind": ind,
"example": text,
"prompts": prompts_across_boost,
"preds_boost": preds_across_boost,
"gold": gold,
}
expt_log[ind] = entry
all_boost_preds.append(preds_across_boost)
labels.append(gold)
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 5
task_name = "super_glue_boolq"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_boolq_GPT_3_Style/"
boolq = BoolQDecomp(task_name, data_dir)
boolq.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | tasks/BoolQ_final.py |
#!/usr/bin/env python
# coding: utf-8
import os
from tqdm.auto import tqdm
import pandas as pd
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
from collections import defaultdict, Counter
class CBDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, data_train, boost_id):
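# For CB, each boost set is a different P3 prompt template; its in-context
# examples are drawn from that template's own train/validation splits.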
lst = [
'super_glue_cb_claim_true_false_inconclusive',
'super_glue_cb_does_this_imply',
'super_glue_cb_always_sometimes_never',
'super_glue_cb_does_it_follow_that',
'super_glue_cb_guaranteed_true',
'super_glue_cb_take_the_following_as_truth',
'super_glue_cb_justified_in_saying',
'super_glue_cb_should_assume',
'super_glue_cb_GPT_3_style',
'super_glue_cb_can_we_infer',
'super_glue_cb_consider_always_sometimes_never',
'super_glue_cb_guaranteed_possible_impossible',
'super_glue_cb_MNLI_crowdsource',
'super_glue_cb_based_on_the_previous_passage',
'super_glue_cb_must_be_true'
]
file_path = lst[boost_id]
print(f"FILE PATH: {file_path}")
train_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/train.feather")
val_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/validation.feather")
return [
train_data,
val_data
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n\n"
                text = text.replace("True, False, or Neither?", "")
                text = text + ". True, False, or Neither?"
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
is_maybe = "neither" in answer.split() or "unknown" in answer.split() or "maybe" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, do_train=0)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, do_train=1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, do_train=-1):
expt_log = {}
all_boost_preds = []
labels = []
label_name_to_maping = {
"always": "true",
"never": "false",
"sometimes": 'neither',
"true": "true",
"false": "false",
"neither": 'neither',
"no": "false",
"yes": "true",
"maybe": "neither",
"unknown": "neither",
"inconclusive": "neither",
"impossible": "false",
"possible": "neither",
"guaranteed": "true",
}
prompts_across_boost = defaultdict(list)
preds_across_boost = defaultdict(list)
for boost_num, boost_examples in enumerate(boost_dfs):
            if do_train == 1:
                data = boost_examples[0].iloc[:1]
            elif do_train == 0:
                data = boost_examples[1]
            else:
                raise ValueError("Unsupported value for do_train.")
for i, (ind, row) in tqdm(enumerate(data.iterrows()), total=len(data)):
input = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
all_prompts = []
raw_answer = get_response(
input,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
all_prompts.append(input)
                answer = raw_answer.strip().lower()
if answer not in label_name_to_maping:
pred = 'neither'
print("BAD ANSWER", answer)
else:
pred = label_name_to_maping[answer]
prompts_across_boost[i].append(all_prompts)
preds_across_boost[i].append(pred)
                if gold.lower() not in label_name_to_maping:
                    raise ValueError(f"Unexpected gold label: {gold}")
for i, (ind, row) in enumerate(data.iterrows()):
label = row["targets_pretokenized"].lower()
entry = {
"ind": ind,
"prompts": prompts_across_boost[i],
"preds_boost": preds_across_boost[i],
"example": row['inputs_pretokenized'],
"gold": label,
}
expt_log[ind] = entry
            all_boost_preds.append(preds_across_boost[i])
labels.append(label_name_to_maping[label])
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 10
task_name = "cb_t0_variants"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_cb_GPT_3_style/"
decomp = CBDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | ablations/T0_variants/CB_variants.py |
#!/usr/bin/env python
# coding: utf-8
import os
from tqdm.auto import tqdm
import pandas as pd
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
from collections import defaultdict, Counter
class WSCDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, data_train, boost_id):
lst = [
'super_glue_wsc.fixed_GPT_3_Style',
'super_glue_wsc.fixed_Who_or_what_is_are',
'super_glue_wsc.fixed_p_is_are_r',
'super_glue_wsc.fixed_by_p_they_mean',
'super_glue_wsc.fixed_replaced_with',
'super_glue_wsc.fixed_in_other_words',
'super_glue_wsc.fixed_I_think_they_mean',
'super_glue_wsc.fixed_does_p_stand_for',
'super_glue_wsc.fixed_does_the_pronoun_refer_to',
'super_glue_wsc.fixed_the_pronoun_refers_to'
]
file_path = lst[boost_id]
print(f"FILE PATH: {file_path}")
train_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/train.feather")
val_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/validation.feather")
return [
train_data,
val_data
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n\n"
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
                # WSC is a binary yes/no task; map the answer to yes/no rather
                # than the CB-style True/False/Neither labels.
                gold = gold.strip().lower()
                is_yes = "yes" in answer.split()
                is_no = "no" in answer.split()
                pred = ""
                if is_yes and (not is_no):
                    pred = "yes"
                if is_no and (not is_yes):
                    pred = "no"
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, do_train=0)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, do_train=1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, do_train=-1):
expt_log = {}
all_boost_preds = []
labels = []
label_name_to_maping = {
"always": "true",
"never": "false",
"sometimes": 'neither',
"true": "true",
"false": "false",
"neither": 'neither',
"no": "false",
"yes": "true",
"maybe": "neither",
"unknown": "neither",
"inconclusive": "neither",
"impossible": "false",
"possible": "neither",
"guaranteed": "true",
}
prompts_across_boost = defaultdict(list)
preds_across_boost = defaultdict(list)
for boost_num, boost_examples in enumerate(boost_dfs):
            if do_train == 1:
                data = boost_examples[0].iloc[:1]
            elif do_train == 0:
                data = boost_examples[1]
            else:
                raise ValueError("Unsupported value for do_train.")
for i, (ind, row) in tqdm(enumerate(data.iterrows()), total=len(data)):
input = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
all_prompts = []
raw_answer = get_response(
input,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
all_prompts.append(input)
                answer = raw_answer.strip().lower()
                if answer not in label_name_to_maping:
                    pred = ''
                    print("BAD ANSWER", answer)
else:
pred = label_name_to_maping[answer]
prompts_across_boost[i].append(all_prompts)
preds_across_boost[i].append(pred)
                if gold.lower() not in label_name_to_maping:
                    raise ValueError(f"Unexpected gold label: {gold}")
for i, (ind, row) in enumerate(data.iterrows()):
label = row["targets_pretokenized"].lower()
entry = {
"ind": ind,
"prompts": prompts_across_boost[i],
"preds_boost": preds_across_boost[i],
"example": row['inputs_pretokenized'],
"gold": label,
}
expt_log[ind] = entry
            all_boost_preds.append(preds_across_boost[i])
labels.append(label_name_to_maping[label])
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 10
task_name = "wsc_t0_variants"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_wsc.fixed_GPT_3_Style/"
    decomp = WSCDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | ablations/T0_variants/WSC_variants.py |
from pathlib import Path
from collections import Counter
import json
from datasets import load_dataset
import re
import pandas as pd
from typing import Callable, List
from manifest import Manifest
class InputOutputPrompt:
def __init__(self,
input_formatter: Callable,
output_formatter: Callable,
required_keys: List,
input_output_sep: str = "\n",
example_sep: str = "\n\n",
instruction: str = ""
):
self.input_formatter = input_formatter
self.output_formatter = output_formatter
self.required_keys = required_keys
self.input_output_sep = input_output_sep
self.example_sep = example_sep
self.instruction = instruction
def __call__(self, input_output_pairs: pd.DataFrame):
examples = []
for _, example in input_output_pairs.iterrows():
examples.append(f"{self.input_formatter(example)}{self.input_output_sep}{self.output_formatter(example)}")
if examples:
input_str = self.example_sep.join(examples)
res = f"{self.instruction}{input_str}"
else:
res = f"{self.instruction}".rstrip()
return res
def __repr__(self):
dummy_ex = pd.DataFrame([{k: f"<{k.upper()}>" for k in self.required_keys}])
st = self(dummy_ex)
return st
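# A minimal usage sketch for InputOutputPrompt; the formatters and the toy
# question/answer pair below are hypothetical, not the repo's prompt
# definitions. The helper is never called at import time.
def _demo_input_output_prompt():
    demo = InputOutputPrompt(
        input_formatter=lambda ex: f"Question: {ex['question']}",
        output_formatter=lambda ex: f"Answer: {ex['answer']}",
        required_keys=["question", "answer"],
        instruction="Answer the question.\n\n",
    )
    pairs = pd.DataFrame([{"question": "2+2?", "answer": "4"}])
    # Renders: "Answer the question.\n\nQuestion: 2+2?\nAnswer: 4"
    return demo(pairs)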
def prefix_formatter(ex_keys: List[str], prefix: str, error_on_empty: bool = True) -> str:
def full_prefix_formatter(ex: pd.Series):
for k in ex_keys:
if k in ex:
return f"{prefix} {getattr(ex, k)}"
if error_on_empty:
raise ValueError(f"Example {ex} has no value for any of the keys {ex_keys}")
else:
return f"{prefix}"
return full_prefix_formatter
def get_manifest_session(
client_name="huggingface",
client_engine=None,
client_connection="http://127.0.0.1:5000",
cache_connection=None,
temperature=0,
top_p=1.0,
):
if client_name == "huggingface" and temperature == 0:
params = {
"temperature": 0.001,
"do_sample": False,
"top_p": top_p,
}
elif client_name in {"openai", "ai21"}:
params = {
"temperature": temperature,
"top_p": top_p,
"engine": client_engine,
}
else:
raise ValueError(f"{client_name} is not a valid client name")
manifest = Manifest(
client_name=client_name,
client_connection=client_connection,
cache_name="sqlite",
cache_connection=cache_connection,
session_id=None,
**params,
)
params = manifest.client.get_model_params()
model_name = params["model_name"]
if "engine" in params:
model_name += f"_{params['engine']}"
return manifest, model_name
def get_response(
prompt,
manifest,
overwrite=False,
max_toks=10,
stop_token=None,
gold_choices=[],
verbose=False,
):
prompt = prompt.strip()
if gold_choices:
gold_choices = [" " + g.strip() for g in gold_choices]
response_obj = manifest.run(
prompt, gold_choices=gold_choices, overwrite_cache=overwrite, return_response=True
)
response_obj = response_obj.get_json_response()["choices"][0]
log_prob = response_obj["text_logprob"]
response = response_obj["text"]
else:
response = manifest.run(
prompt,
max_tokens=max_toks,
stop_token=stop_token,
overwrite_cache=overwrite,
)
log_prob = None
if verbose:
print("\n***Prompt***\n", prompt)
print("\n***Response***\n", response)
if log_prob:
return response, log_prob
return response
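# Hedged usage sketch for get_response (requires a live Manifest session from
# get_manifest_session; "manifest" below stands for that session object):
#   # Free-form generation, truncated at 10 tokens:
#   text = get_response("Q: What is 2+2?\nA:", manifest, max_toks=10)
#   # Scoring mode: with a non-empty gold_choices list, the call returns a
#   # (choice_text, log_prob) tuple for the most likely choice instead.
#   text, logprob = get_response("The review is", manifest, gold_choices=["positive", "negative"])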
def load_hf_data(save_dir, task_name, val_split, hf_name, overwrite_data):
save_data = Path(f"{save_dir}/{task_name}/data.feather")
if not save_data.exists() or overwrite_data:
dataset = load_dataset(hf_name)
test_data = dataset[val_split].to_pandas()
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(f"{save_data}")
save_data_train = Path(f"{save_dir}/{task_name}/train_data.feather")
if not save_data_train.exists() or overwrite_data:
dataset = load_dataset(hf_name)
train_data = dataset["train"].to_pandas()
train_data.to_feather(f"{save_data_train}")
else:
print(f"Reading train data from {save_data_train}")
train_data = pd.read_feather(f"{save_data_train}")
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def save_log(task_name, expt_name, log, final_run_dir):
final_run_dir = Path(final_run_dir)
output_fpath = final_run_dir / task_name
output_fpath.mkdir(parents=True, exist_ok=True)
print("Saving to", output_fpath / f"{expt_name}.json")
assert all(a in list(log.values())[0].keys() for a in ["ind","example","pred","gold"])
with open(output_fpath / f"{expt_name}.json", "w") as f:
json.dump(log, f)
def text_f1(preds, golds):
"""Compute average F1 of text spans.
Taken from Squad without prob threshold for no answer.
"""
total_f1 = 0
for pred, gold in zip(preds, golds):
pred_toks = pred.split()
gold_toks = gold.split()
common = Counter(pred_toks) & Counter(gold_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
total_f1 += int(gold_toks == pred_toks)
elif num_same == 0:
total_f1 += 0
else:
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
total_f1 += f1
f1_avg = total_f1 / len(golds)
return f1_avg
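# Hedged worked example for text_f1 (the helper below is added for
# illustration only and is never called at import time):
def _demo_text_f1():
    # pred "the cat sat" vs gold "the cat": 2 shared tokens, so
    # precision = 2/3, recall = 2/2, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8
    assert abs(text_f1(["the cat sat"], ["the cat"]) - 0.8) < 1e-9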
def accuracy_span_overlap(preds, golds):
correct = 0
for pred, gold in zip(preds, golds):
found = False
for p in pred:
for g in gold:
if len(p) < len(g):
if p.lower() in g.lower():
found = True
break
else:
if g.lower() in p.lower():
found = True
break
if found: correct += 1
return correct / len(preds)
| ama_prompting-main | ablations/T0_variants/utils.py |
#!/usr/bin/env python
# coding: utf-8
from pathlib import Path
import argparse
from typing import Counter
import pandas as pd
import json
import numpy as np
import datetime
import os
import random
from utils import save_log, get_manifest_session
DATA_DIR = os.environ.get("AMA_DATA", "/home/data")
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=3)
parser.add_argument(
"--num_run", type=int, default=-1, help="Number of rows of test data to run"
)
parser.add_argument(
"--k_shot", type=int, default=3, help="Number of few shot"
)
parser.add_argument(
"--num_boost", type=int, default=3, help="Number of few shot sets to boost over")
parser.add_argument(
"--boost_train_examples", type=int, default=1000, help="Number of training examples to run through for boosting"
)
parser.add_argument(
"--output_metrics_file", type=str, default="decomposition_metrics.json", help="Output file for all metrics."
)
parser.add_argument(
"--save_dir", type=str, default="/home/final_runs/", help="Data directory"
)
parser.add_argument(
"--run_decomp",
type=int,
default=1,
help="Run decomp",
choices=[0, 1],
)
parser.add_argument(
"--run_zeroshot",
type=int,
default=1,
help="Run zeroshot",
choices=[0, 1],
)
parser.add_argument(
"--run_fewshot",
type=int,
default=1,
help="Run fewshot",
choices=[0, 1],
)
parser.add_argument(
"--run_zeroshot_decomp",
type=int,
default=0,
help="Run zero shot decomp",
choices=[0, 1],
)
parser.add_argument(
"--overwrite_boost_exs",
type=int,
default=0,
help="Overwrite boost examples",
choices=[0, 1],
)
parser.add_argument(
"--overwrite_data",
type=int,
default=0,
help="Overwrite saved data examples",
choices=[0, 1],
)
# Manifest
parser.add_argument(
"--client_name",
type=str,
default="huggingface",
help="Client name manifest",
choices=["huggingface", "openai", "ai21"],
)
parser.add_argument(
"--client_engine",
type=str,
default=None,
help="Client engine manifest. Only used for openai/ai21",
choices=["davinci"],
)
parser.add_argument(
"--client_connection",
type=str,
default="http://127.0.0.1:5001",
help="Client connection str",
)
parser.add_argument(
"--cache_connection",
type=str,
default="/home/manifest/final_runs.sqlite",
help="Cache connection str",
)
parser.add_argument(
"--overwrite_manifest",
type=int,
default=0,
help="Overwrite manifest",
choices=[0, 1],
)
return parser.parse_args()
class Decomposition:
def __init__(self, task_name, data_dir, val_split="validation"):
self.task_name = task_name
self.data_dir = data_dir
self.val_split = val_split
def read_data(self, save_dir, overwrite_data):
save_data = Path(f"{save_dir}/{self.task_name}/data.feather")
if not save_data.exists() or overwrite_data:
test_data = pd.read_feather(f"{self.data_dir}/{self.val_split}.feather")
test_data.to_feather(f"{save_data}")
else:
print(f"Reading test data from {save_data}")
test_data = pd.read_feather(save_data)
        save_data = Path(f"{save_dir}/{self.task_name}/train_data.feather")
        if not save_data.exists() or overwrite_data:
            train_data = pd.read_feather(f"{self.data_dir}/train.feather")
            # Cache the train split so the branch below is reachable on reruns.
            train_data.to_feather(f"{save_data}")
else:
print(f"Reading train data from {save_data}")
train_data = pd.read_feather(save_data)
print(f"Test Data Size: {len(test_data)}")
print(f"Train Data Size: {len(train_data)}")
return test_data, train_data
def get_few_shot_examples(self, train_data, k_shot):
"""Get few shot examples"""
return train_data.sample(k_shot)
def get_boost_decomp_examples(self, train_data, boost_i=0):
"""Get boost examples"""
raise NotImplementedError()
def zero_few_baseline(
self, test_data, few_shot_df, manifest, overwrite_manifest, do_few_shot=True
):
"""Zero and few shot baseline"""
raise NotImplementedError()
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
"""Decomposition run"""
raise NotImplementedError()
def merge_boosted_preds(self, boosted_preds, all_boost_train_preds, train_labels, exp_log, expt_log_train, indecisive_ans=None):
"""Merge boosted preds"""
if isinstance(boosted_preds, list):
boosted_preds = np.array(boosted_preds)
if isinstance(all_boost_train_preds, list):
all_boost_train_preds = np.array(all_boost_train_preds)
if isinstance(train_labels, list):
train_labels = np.array(train_labels)
uniq = np.unique(boosted_preds)
pred_map = {}
if "yes" in uniq:
pred_map = {"yes": 1, "no": -1, "neither": 0}
elif "true" in uniq:
pred_map = {"true": 1, "false": -1, "neither": 0}
elif "positive" in uniq:
pred_map = {"positive": 1, "negative": -1, "neutral": 0}
pred_map_inv = {v:k for k,v in pred_map.items()}
use_pred_map = False
if all(p.lower() in pred_map for p in uniq):
use_pred_map = True
if use_pred_map:
# Cast to integers
boosted_preds = np.array([[pred_map[p.lower()] for p in preds] for preds in boosted_preds])
all_boost_train_preds = np.array(
[[pred_map[p.lower()] for p in preds] for preds in all_boost_train_preds]
)
train_labels = np.array([pred_map[p.lower()] for p in train_labels])
if indecisive_ans:
indecisive_ans = pred_map[indecisive_ans.lower()]
# Take majority vote
preds_test = []
for i, voter_preds in enumerate(boosted_preds):
most_common = Counter(voter_preds).most_common(1)[0]
if indecisive_ans and len(voter_preds) > 1 and most_common[1] == 1:
majority_vote_pred = indecisive_ans
else:
majority_vote_pred = most_common[0]
if use_pred_map:
majority_vote_pred = pred_map_inv[majority_vote_pred]
preds_test.append(majority_vote_pred)
exp_log[i]["pred"] = majority_vote_pred
# Take majority vote
preds_train = []
for i, voter_preds in enumerate(all_boost_train_preds):
most_common = Counter(voter_preds).most_common(1)[0]
if indecisive_ans and len(voter_preds) > 1 and most_common[1] == 1:
majority_vote_pred = indecisive_ans
else:
majority_vote_pred = most_common[0]
if use_pred_map:
majority_vote_pred = pred_map_inv[majority_vote_pred]
preds_train.append(majority_vote_pred)
expt_log_train[i]["pred"] = majority_vote_pred
return preds_test
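    # Worked example for the majority vote above (hypothetical votes):
    #   a row ["yes", "no", "yes"] maps to [1, -1, 1]; most_common gives
    #   (1, 2), so the merged prediction is 1 -> "yes".
    #   With indecisive_ans="no" and a row ["yes", "no", "neither"], every
    #   vote appears exactly once (most_common count == 1), so the tie
    #   falls back to the indecisive answer "no".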
def run(self, args):
print(json.dumps(vars(args), indent=4))
random.seed(args.seed)
np.random.seed(args.seed)
save_path = Path(f"{args.save_dir}/{self.task_name}")
save_path.mkdir(parents=True, exist_ok=True)
data_test, data_train = self.read_data(args.save_dir, bool(args.overwrite_data))
# Subsample train for boost exps
if args.boost_train_examples >= 0:
boost_data_train = data_train.head(min(len(data_train), args.boost_train_examples))
else:
boost_data_train = data_train
# Reset indexes for enumerations
boost_data_train = boost_data_train.reset_index(drop=True)
data_test = data_test.reset_index(drop=True)
data_train = data_train.reset_index(drop=True)
num_run = (
min(args.num_run, len(data_test)) if args.num_run > 0 else len(data_test)
)
save_results = True
if num_run != len(data_test):
print("Using {} rows".format(num_run))
data_test = data_test.iloc[:num_run]
save_results = False
runner, model_name = get_manifest_session(
client_name=args.client_name,
client_engine=args.client_engine,
client_connection=args.client_connection,
cache_connection=args.cache_connection,
)
model_name = model_name.replace("/", "_")
print("Model name:", model_name)
# Read in few shot examples
        few_shot_path = save_path / f"{args.k_shot}_shot_examples.feather"
if bool(args.overwrite_data) or not few_shot_path.exists():
mini_df = self.get_few_shot_examples(data_train, args.k_shot)
mini_df.reset_index().to_feather(few_shot_path)
else:
            print(f"Reading few shot examples from {few_shot_path}")
mini_df = pd.read_feather(few_shot_path)
# Read in few shot decomp examples - one data frame per decomp step
boost_examples = []
for i in range(args.num_boost):
boost_examples_per_step = []
# Get all steps
boost_examples_paths = list(save_path.glob(f"boost_examples_{i}_step*.feather"))
if bool(args.overwrite_boost_exs) or not boost_examples_paths or not all(p.exists() for p in boost_examples_paths):
boost_df_steps = self.get_boost_decomp_examples(data_train, boost_id=i)
if not isinstance(boost_df_steps, list) or not isinstance(
boost_df_steps[0], pd.DataFrame
):
raise ValueError("Must return list of dataframes, one per step")
for step, boost_df in enumerate(boost_df_steps):
boost_df.reset_index().to_feather(save_path / f"boost_examples_{i}_step{step}.feather")
                    print("Saving boost examples to", save_path / f"boost_examples_{i}_step{step}.feather")
boost_examples_per_step.append(boost_df)
else:
for boost_examples_p in sorted(boost_examples_paths):
print(f"Reading boost examples from {boost_examples_p}")
boost_examples_per_step.append(pd.read_feather(boost_examples_p))
boost_examples.append(boost_examples_per_step)
today = datetime.datetime.today().strftime("%m%d%Y")
# Default metrics
metric_zero = -1.0
metric_few = -1.0
metric_decomposed = -1.0
metric_decomposed_by_boost = []
metric_zeroshot_decomposed = -1.0
if bool(args.run_zeroshot):
# Zero Shot
run_name = f"{model_name}_0shot"
exp_zero, metric_zero = self.zero_few_baseline(
test_data=data_test,
few_shot_df=mini_df,
manifest=runner,
overwrite_manifest=args.overwrite_manifest,
do_few_shot=False,
)
if save_results:
save_log(self.task_name, run_name, exp_zero, args.save_dir)
if bool(args.run_fewshot):
# Few Shot
run_name = f"{model_name}_{args.k_shot}shot"
exp_few, metric_few = self.zero_few_baseline(
test_data=data_test,
few_shot_df=mini_df,
manifest=runner,
overwrite_manifest=args.overwrite_manifest,
do_few_shot=True,
)
if save_results:
save_log(self.task_name, run_name, exp_few, args.save_dir)
if bool(args.run_decomp):
# Decomp
run_name = f"{model_name}_decomposed_{today}"
exp_decomposed, exp_decomposed_train, metric_decomposed, metric_decomposed_by_boost = self.run_decomposed_prompt(
test_data=data_test, boost_data_train=boost_data_train, boost_dfs=boost_examples, manifest=runner, overwrite_manifest=args.overwrite_manifest
)
if save_results:
save_log(
self.task_name,
run_name,
exp_decomposed,
args.save_dir
)
if exp_decomposed_train:
save_log(
self.task_name,
f"{run_name}_train",
exp_decomposed_train,
args.save_dir
)
# Zero shot decomp
exp_zeroshot_decomposed = []
if bool(args.run_zeroshot_decomp):
run_name = f"{model_name}_decomposed_0shot_{today}"
(
exp_zeroshot_decomposed,
exp_zeroshot_decomposed_train,
metric_zeroshot_decomposed,
_,
) = self.run_decomposed_prompt(
test_data=data_test, boost_data_train=boost_data_train, boost_dfs=[[pd.DataFrame() for _ in range(len(boost_examples[0]))]], manifest=runner, overwrite_manifest=args.overwrite_manifest,
)
if save_results and len(exp_zeroshot_decomposed) > 0:
save_log(
self.task_name,
run_name,
exp_zeroshot_decomposed,
args.save_dir,
)
if exp_zeroshot_decomposed_train:
save_log(
self.task_name,
f"{run_name}_train",
exp_zeroshot_decomposed_train,
args.save_dir,
)
print("Accuracy Zero Shot", metric_zero)
print("Accuracy Few Shot", metric_few)
if len(metric_decomposed_by_boost) > 0:
print("Accuracy by Boost Set Decomposed", metric_decomposed_by_boost)
print("Accuracy by Boost Set Decomposed Average", np.mean(metric_decomposed_by_boost))
print("Accuracy Boost Decomposed", metric_decomposed)
if len(exp_zeroshot_decomposed) > 0:
print("Accuracy Zero Shot Decomposed", metric_zeroshot_decomposed)
metrics = {
"model_name": model_name,
"task_name": self.task_name,
"today": today,
"zero_shot": metric_zero,
"few_shot": metric_few,
"decomposed": metric_decomposed,
"decomposed_by_boost": metric_decomposed_by_boost,
"decomposed_by_boost_avg": np.mean(metric_decomposed_by_boost),
"zero_shot_decomposed": metric_zeroshot_decomposed,
}
output_metrics = Path(args.output_metrics_file)
output_metrics.parent.mkdir(parents=True, exist_ok=True)
with open(output_metrics, "a") as f:
f.write(json.dumps(metrics) + "\n")
print(f"Saved metrics to {output_metrics}")
        print("Saved final data to", Path(args.save_dir) / self.task_name)
| ama_prompting-main | ablations/T0_variants/decomposition.py |
#!/usr/bin/env python
# coding: utf-8
import os
from tqdm.auto import tqdm
import pandas as pd
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
from collections import defaultdict, Counter
class WICDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, data_train, boost_id):
lst = [
"super_glue_wic_affirmation_true_or_false",
"super_glue_wic_GPT_3_prompt",
"super_glue_wic_GPT_3_prompt_with_label",
"super_glue_wic_grammar_homework",
"super_glue_wic_polysemous",
"super_glue_wic_question_context",
"super_glue_wic_question_context_meaning",
"super_glue_wic_question_context_meaning_with_label",
"super_glue_wic_same_sense",
"super_glue_wic_similar_sense",
]
file_path = lst[boost_id]
print(f"FILE PATH: {file_path}")
train_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/train.feather")
val_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/validation.feather")
return [
train_data,
val_data
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n\n"
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "yes" in answer.split()
is_no = "no" in answer.split()
pred = ""
if is_yes and (not is_no):
pred = "yes"
if is_no and (not is_yes):
pred = "no"
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold.strip().lower(),
}
expt_log[ind] = entry
preds.append(pred)
            labels.append(gold.strip().lower())
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, do_train=0)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, do_train=1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, do_train=-1):
expt_log = {}
all_boost_preds = []
labels = []
label_name_to_maping = {
"always": "true",
"never": "false",
"true": "true",
"false": "false",
"no": "false",
"yes": "true",
"impossible": "false",
"guaranteed": "true",
}
prompts_across_boost = defaultdict(list)
preds_across_boost = defaultdict(list)
for boost_num, boost_examples in enumerate(boost_dfs):
            if do_train == 1:
                data = boost_examples[0].iloc[:1]
            elif do_train == 0:
                data = boost_examples[1]
            else:
                raise ValueError("Unsupported value for do_train.")
for i, (ind, row) in tqdm(enumerate(data.iterrows()), total=len(data)):
input = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
all_prompts = []
raw_answer = get_response(
input,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
all_prompts.append(input)
                answer = raw_answer.strip().lower()
if answer not in label_name_to_maping:
pred = ''
print("BAD ANSWER", answer)
else:
pred = label_name_to_maping[answer]
prompts_across_boost[i].append(all_prompts)
preds_across_boost[i].append(pred)
                if gold.lower() not in label_name_to_maping:
                    raise ValueError(f"Unexpected gold label: {gold}")
for i, (ind, row) in enumerate(data.iterrows()):
label = row["targets_pretokenized"].lower()
entry = {
"ind": ind,
"prompts": prompts_across_boost[i],
"preds_boost": preds_across_boost[i],
"example": row['inputs_pretokenized'],
"gold": label,
}
expt_log[ind] = entry
            all_boost_preds.append(preds_across_boost[i])
labels.append(label_name_to_maping[label])
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 9
task_name = "wic_t0_variants"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_wic_GPT_3_prompt/"
decomp = WICDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | ablations/T0_variants/WIC_variants.py |
#!/usr/bin/env python
# coding: utf-8
import os
from tqdm.auto import tqdm
import pandas as pd
from sklearn.metrics import classification_report
from decomposition import Decomposition, get_args, DATA_DIR
from utils import get_response, InputOutputPrompt
from collections import defaultdict, Counter
class RTEDecomp(Decomposition):
def __init__(self, task_name, data_dir, val_split="validation"):
super().__init__(task_name, data_dir, val_split)
def get_boost_decomp_examples(self, data_train, boost_id):
lst = [
'super_glue_rte_GPT_3_style',
'super_glue_rte_does_it_follow_that',
'super_glue_rte_MNLI_crowdsource',
'super_glue_rte_can_we_infer',
'super_glue_rte_justified_in_saying',
'super_glue_rte_does_this_imply',
'super_glue_rte_guaranteed_true',
'super_glue_rte_based_on_the_previous_passage',
'super_glue_rte_should_assume',
'super_glue_rte_must_be_true'
]
file_path = lst[boost_id]
print(f"FILE PATH: {file_path}")
train_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/train.feather")
val_data = pd.read_feather(f"{DATA_DIR}/P3/data_feather/{file_path}/validation.feather")
return [
train_data,
val_data
]
def zero_few_baseline(
self,
test_data,
few_shot_df,
manifest,
overwrite_manifest,
do_few_shot=True,
):
expt_log = {}
preds = []
labels = []
labels_names = set(test_data["targets_pretokenized"])
labels_names = [l.lower().strip() for l in labels_names]
for i, (ind, row) in tqdm(
enumerate(test_data.iterrows()), total=len(test_data)
):
            if ind in expt_log:
                entry = expt_log[ind]
                pred = entry["pred"]
                gold = entry["gold"]
else:
text = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
icl_str = ""
if do_few_shot:
for s_ind, s_row in few_shot_df.iterrows():
icl_str += f"{s_row['inputs_pretokenized']} {s_row['targets_pretokenized']}\n\n\n"
                text = text.replace("True, False, or Neither?", "")
                text = text + ". True, False, or Neither?"
prompt = f"{icl_str}{{text:}}"
pmp = prompt.format(text=text)
if i == 0:
print(pmp)
raw_answer = get_response(
pmp,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
answer = raw_answer.strip().lower()
answer = answer.split("\n")
answer = [a for a in answer if a]
answer = [
a
for a in answer
if any(l.lower() in a.lower() for l in labels_names)
]
if answer:
answer = answer[0]
else:
answer = ""
answer = "".join(
[a for a in answer if a not in [".", ",", "?", ";", ":", "'", '"']]
)
is_yes = "true" in answer.split()
is_no = "false" in answer.split()
is_maybe = "neither" in answer.split() or "unknown" in answer.split() or "maybe" in answer.split()
pred = "Neither"
if is_yes and (not is_maybe and not is_no):
pred = "True"
if is_no and (not is_maybe and not is_yes):
pred = "False"
if is_maybe and (not is_no and not is_yes):
pred = "Neither"
entry = {
"ind": ind,
"example": text,
"base_prompt": pmp,
"raw_answer": raw_answer,
"pred": pred,
"gold": gold,
}
expt_log[ind] = entry
preds.append(pred)
labels.append(gold)
report = classification_report(labels, preds, output_dict=True)
return expt_log, report["accuracy"]
def run_decomposed_prompt(
self, test_data, boost_data_train, boost_dfs, manifest, overwrite_manifest
):
expt_log, all_boost_preds, labels = self._run_decomp_single_data(test_data, boost_dfs, manifest, overwrite_manifest, do_train=0)
expt_log_train, all_boost_train_preds, train_labels = self._run_decomp_single_data(boost_data_train, boost_dfs, manifest, overwrite_manifest, do_train=1)
# Do WS
preds = self.merge_boosted_preds(all_boost_preds, all_boost_train_preds, train_labels, expt_log, expt_log_train)
# Get accuracies across all boost sets
individual_accuracies = []
for i in range(len(all_boost_preds[0])):
individual_accuracies.append(classification_report(labels, [p[i] for p in all_boost_preds], output_dict=True)["accuracy"])
report = classification_report(labels, preds, output_dict=True)
return expt_log, expt_log_train, report["accuracy"], individual_accuracies
def _run_decomp_single_data(self, test_data, boost_dfs, manifest, overwrite_manifest, do_train=-1):
expt_log = {}
all_boost_preds = []
labels = []
label_name_to_maping = {
"always": "true",
"never": "false",
"true": "true",
"false": "false",
"no": "false",
"yes": "true",
"impossible": "false",
"guaranteed": "true",
}
prompts_across_boost = defaultdict(list)
preds_across_boost = defaultdict(list)
for boost_num, boost_examples in enumerate(boost_dfs):
            if do_train == 1:
                data = boost_examples[0].iloc[:1000]
            elif do_train == 0:
                data = boost_examples[1]
            else:
                raise ValueError("Unsupported value for do_train.")
for i, (ind, row) in tqdm(enumerate(data.iterrows()), total=len(data)):
input = row["inputs_pretokenized"]
gold = row["targets_pretokenized"]
all_prompts = []
raw_answer = get_response(
input,
manifest,
overwrite=bool(overwrite_manifest),
max_toks=30,
)
all_prompts.append(input)
                answer = raw_answer.strip().lower()
if answer not in label_name_to_maping:
pred = 'neither'
print("BAD ANSWER", answer)
else:
pred = label_name_to_maping[answer]
prompts_across_boost[i].append(all_prompts)
preds_across_boost[i].append(pred)
                if gold.lower() not in label_name_to_maping:
                    raise ValueError(f"Unexpected gold label: {gold}")
for i, (ind, row) in enumerate(data.iterrows()):
label = row["targets_pretokenized"].lower()
entry = {
"ind": ind,
"prompts": prompts_across_boost[i],
"preds_boost": preds_across_boost[i],
"example": row['inputs_pretokenized'],
"gold": label,
}
expt_log[ind] = entry
            all_boost_preds.append(preds_across_boost[i])
labels.append(label_name_to_maping[label])
return expt_log, all_boost_preds, labels
def main():
args = get_args()
args.num_boost = 10
task_name = "rte_t0_variants"
data_dir = f"{DATA_DIR}/P3/data_feather/super_glue_rte_GPT_3_style/"
decomp = RTEDecomp(task_name, data_dir)
decomp.run(args)
if __name__ == "__main__":
main()
| ama_prompting-main | ablations/T0_variants/RTE_variants.py |
import numpy as np
import itertools
import matplotlib.pyplot as plt
import scipy.stats
class Ising():
    def __init__(self, m, potentials, thetas=None, vals=(-1, 1)) -> None:
self.m = m
self.v = m + 1 # total number of vertices
self.potentials = potentials
self.vals = vals
#TODO support values in 0, 1
if thetas is not None:
assert len(thetas) >= len(potentials), f"Need to specify at least {len(potentials)} theta parameters."
self.thetas = thetas
else:
self.thetas = np.random.rand(len(potentials))
self.support = np.array(list(map(list, itertools.product(vals, repeat=self.v))))
self._make_pdf()
self._make_cdf()
self._get_means()
self._get_balance()
self._get_accs()
def _exponential_family(self, labels):
x = 0.0
for i in range(len(self.potentials)):
x += self.thetas[i] * labels[self.potentials[i]].prod()
return np.exp(x)
def _make_pdf(self):
p = np.zeros(len(self.support))
for i, labels in enumerate(self.support):
p[i] = self._exponential_family(labels)
self.z = sum(p)
self.pdf = p/self.z
def _make_cdf(self):
self.cdf = np.cumsum(self.pdf)
def joint_p(self, C, values):
p = 0.0
for k, labels in enumerate(self.support):
flag = True
for i in range(len(C)):
prod = labels[C[i]].prod()
if prod != values[i]:
flag = False
if flag == True:
p += self.pdf[k]
return p
def expectation(self, C):
return self.vals[0] * self.joint_p(C, self.vals[0] * np.ones(len(C))) + self.vals[1] * self.joint_p(C, self.vals[1] * np.ones(len(C)))
def _get_means(self):
self.means = np.zeros(self.m)
for k in range(self.m):
self.means[k] = self.expectation([[k]])
def _get_balance(self):
self.balance = self.joint_p([[self.m]], [1])
def _get_accs(self):
"""
self.accs[k, i, j] = Pr(lf_k = j | y = i) (i, j scaled to -1, 1 if needed)
"""
self.accs = np.zeros((self.m, 2, 2))
for k in range(self.m):
self.accs[k, 1, 1] = self.joint_p([[k], [self.m]], [self.vals[1], self.vals[1]]) / self.balance
self.accs[k, 0, 0] = self.joint_p([[k], [self.m]], [self.vals[0], self.vals[0]]) / (1 - self.balance)
self.accs[k, 1, 0] = 1 - self.accs[k, 1, 1]
self.accs[k, 0, 1] = 1 - self.accs[k, 0, 0]
def sample(self):
r = np.random.random_sample()
smaller = np.where(self.cdf < r)[0]
if len(smaller) == 0:
i = 0
else:
i = smaller.max() + 1
return self.support[i]
def make_data(self, n, has_label = True):
L = np.zeros((n, self.m))
gold = np.zeros(n)
for i in range(n):
l = self.sample()
L[i, :] = l[:self.m]
if has_label:
gold[i] = l[self.m]
return L.astype(int), gold.astype(int)
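# Hedged usage sketch for Ising (helper for illustration only; the parameters
# are arbitrary, not tuned): m = 3 weak labelers, vertex 3 is the latent label
# y. Coupling each labeler only to y makes them conditionally independent
# given y. The helper is never called at import time.
def _demo_ising():
    potentials = [[3], [0, 3], [1, 3], [2, 3]]
    pgm = Ising(3, potentials, thetas=np.random.rand(4))
    votes, gold = pgm.make_data(100)  # votes: (100, 3) in {-1, 1}
    return votes, gold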
def est_accs(m, vote, gold):
# compute pr(lf | y) accuracies. Each prompt has 4 values (2x2)
# we need to do this on the train/dev set
classes = [0, 1]
gold_idxs = [np.where(gold == -1)[0], np.where(gold == 1)[0]]
accs = np.zeros((m, 2, 2)) # [i, j, k] = Pr(prompt_i = j| y = k)
for p in range(m):
for i in classes:
for j in classes:
accs[p, i, j] = len(np.where(vote[gold_idxs[i], p] == 2*j-1)[0]) / len(gold_idxs[i])
return accs
def est_balance(gold, n):
return len(np.where(gold == 1)[0]) / n
# Pr(lf votes, y)
def get_cond_probs(m, votes, y, accs, balance):
pr_y = balance if y == 1 else 1 - balance
prod = pr_y
for i in range(m):
prod *= accs[i, y, int(0.5*(votes[i] + 1))] # this assumes everything is independent
return prod
# Pr(y = 1 | lf votes)
def get_probs(m, votes, accs, balance):
pos = get_cond_probs(m, votes, 1, accs, balance)
neg = get_cond_probs(m, votes, 0, accs, balance)
if pos == 0:
return 0
else:
return pos / (pos + neg)
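# Worked posterior example for get_probs (hypothetical numbers; the helper is
# for illustration only and is never called at import time):
def _demo_get_probs():
    # One voter, balance 0.5, accuracy 0.8 for both classes: a single positive
    # vote gives Pr(y=1 | vote) = (0.5 * 0.8) / (0.5 * 0.8 + 0.5 * 0.2) = 0.8.
    accs = np.zeros((1, 2, 2))
    accs[0] = [[0.8, 0.2], [0.2, 0.8]]  # accs[0, y, j] = Pr(vote = 2j - 1 | y)
    assert abs(get_probs(1, np.array([1]), accs, 0.5) - 0.8) < 1e-9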
def pick_best_prompt(m, vote, gold, n):
# overall accuracies Pr(lf_p = y) on test (we don't know these)
overall_train_acc = np.zeros(m)
for i in range(m):
overall_train_acc[i] = len(np.where((vote[:, i] == gold) == True)[0])/n
return overall_train_acc.argmax()
def main():
# number of weak labels
m = 5
# total number of vertices
v = m + 1
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
thetas = np.random.rand(30)
# all conditionally independent
potentials = [[5], [0], [1], [4], [0, 5], [1, 5], [2, 5], [3, 5], [4, 5]]
pgm = Ising(m, potentials, thetas)
n_train = 10000
vote_train, gold_train = pgm.make_data(n_train)
n_test = 1000
vote_test, gold_test = pgm.make_data(n_test)
accs = est_accs(m, vote_train, gold_train)
balance = est_balance(gold_train, n_train)
nb_output = np.zeros(n_test) # naive bayes
mv_output = np.zeros(n_test)
nb_err = 0
mv_err = 0
for i in range(n_test):
nb_output[i] = 2*np.round(get_probs(m, vote_test[i], accs, balance))-1
if nb_output[i] != gold_test[i]:
nb_err += 1
# note: play around with MV tie breaking strategy
        if len(np.where(vote_test[i] == 1)[0]) > m / 2:
            mv_output[i] = 1
        elif len(np.where(vote_test[i] == 1)[0]) < m / 2:
            mv_output[i] = -1
        else:
            # exact tie (only possible for even m): break it randomly
            mv_output[i] = 2*np.random.randint(0, 2)-1
if mv_output[i] != gold_test[i]:
mv_err += 1
nb_acc = 1 - (nb_err / n_test)
mv_acc = 1 - (mv_err / n_test)
best_prompt = pick_best_prompt(m, vote_train, gold_train, n_train)
best_prompt_acc = len(np.where((vote_test[:, best_prompt] == gold_test) == True)[0]) / n_test
print(f"Naive bayes: {nb_acc}")
print(f"Best prompt: {best_prompt_acc}")
print(f"Majority vote: {mv_acc}")
if __name__ == "__main__":
main() | ama_prompting-main | boosting/pgm.py |
import networkx as nx
import numpy as np
from itertools import chain, product, combinations
from scipy.sparse import issparse
import more_itertools
import torch
class DependentPGM:
"""
This class describes a PGM learned from labeled data with specified edge structure.
Args:
edges: list of edges that are dependent
train_votes: n x m array of votes in {0, 1}
train_gold: n array of true labels in {0, 1}
"""
def __init__(
self, edges, train_votes, train_gold, abstains = False, classes = [0, 1], abstain_value = -1) -> None:
"""
Initialize the PGM by computing its junction tree factorization (c_tree and c_data)
and by computing individual LF accuracy and class balance.
"""
self.edges = edges
self.train_votes = train_votes
self.train_gold = train_gold
self.classes = classes
self.k = len(classes)
assert len(np.unique(self.train_gold)) == self.k
self.abstains = abstains
assert len(np.unique(self.train_votes)) == int(abstains) + self.k
self.abstain_value = abstain_value
self.n, self.m = self.train_votes.shape
self.nodes = np.arange(self.m)
self.higher_order = len(edges) != 0
# construct data structures containing dependency graph information (maximal cliques and separator sets)
self._set_clique_tree()
self._set_clique_data()
# compute LF accuracies and class balance
self._get_accs_and_cb()
def _get_scaled(self):
if self.classes == [0, 1]:
self.train_votes_scaled = 2*self.train_votes - 1
self.train_gold_scaled = 2*self.train_gold - 1
if self.abstains:
self.train_votes_scaled[self.train_votes == self.abstain_value] = 0
else:
self.train_votes_scaled = self.train_votes
self.train_gold_scaled = self.train_gold
def _set_clique_tree(self):
G1 = nx.Graph()
G1.add_nodes_from(self.nodes)
G1.add_edges_from(self.edges)
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise nx.NetworkXError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes():
for j in G2.nodes():
S = G2.nodes[i]["members"].intersection(G2.nodes[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
        # Use a *maximum* spanning tree of G2 so that the separator sets
        # (clique intersections) retained in the junction tree are maximal.
        self.c_tree = nx.maximum_spanning_tree(G2)
def _set_clique_data(self):
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
self.c_data = dict()
for i in range(self.m):
self.c_data[i] = {
"vertices": [i],
"max_cliques": set( # which max clique i belongs to
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.nodes[j]["members"]
]
),
}
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if self.higher_order:
counter = 0
for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.nodes[item]
C_type = "node"
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
C_type = "edge"
else:
raise ValueError(item)
members = list(C["members"])
nc = len(members)
# Else add one column for each possible value
if nc != 1:
# Add to self.c_data as well
#idx = counter + m
self.c_data[tuple(members)] = {
"vertices": members,
"max_cliques": set([item]) if C_type == "node" else set(item),
}
counter += 1
def _get_accs_and_cb(self):
classes = [0, 1]
self.gold_idxs = [np.where(self.train_gold == c)[0] for c in classes]
        self.accs = np.zeros((self.m, 2))  # accs[p, y] = Pr(vote_p = 1 | y)
for p in range(self.m):
for i in classes:
self.accs[p, i] = len(np.where(self.train_votes[self.gold_idxs[i], p] == 1)[0]) / len(self.gold_idxs[i])
self.accs = np.clip(self.accs, 0.0001, 0.9999)
self.balance = len(self.gold_idxs[1]) / self.n
def get_clique_probs(self, idxs, vals, y):
"""
Computes marginal probability over voters indexed by idx, Pr(votes_idxs = vals | y).
"""
truth_matrix = np.ones(len(self.gold_idxs[y])).astype(bool)
for i, lf in enumerate(idxs):
truth_matrix = np.logical_and(truth_matrix, self.train_votes[self.gold_idxs[y], lf] == vals[i])
if len(np.where(truth_matrix == True)[0]) == 0:
return 0.00001
return len(np.where(truth_matrix == True)[0]) / len(self.gold_idxs[y])
    def get_cond_probs(self, votes, y):
        """
        Computes the joint probability Pr(votes, y) via the junction tree
        factorization: Pr(y) times the product of clique marginals
        Pr(clique | y) over maximal cliques, divided by each separator-set
        marginal raised to (degree - 1).
        """
        pr_y = self.balance if y == 1 else 1 - self.balance
        prod = pr_y
        # Multiply in one marginal per maximal clique of the dependency graph.
        for i in self.c_tree.nodes():
            node = self.c_tree.nodes[i]
            members = list(node['members'])
            if len(members) == 1:
                v = members[0]
                prod *= votes[v] * self.accs[v, y] + (1 - votes[v]) * (1 - self.accs[v, y])
            else:
                prod *= self.get_clique_probs(members, votes[members], y)
        # Divide out separator-set marginals to avoid double counting.
        for i in self.c_tree.edges():
            edge = self.c_tree.edges[i]
            members = list(edge['members'])
            if len(members) == 1:
                v = members[0]
                deg = len(self.c_data[v]['max_cliques'])
                prod /= (votes[v] * self.accs[v, y] + (1 - votes[v]) * (1 - self.accs[v, y]))**(deg-1)
            else:
                deg = len(self.c_data[tuple(members)]['max_cliques'])
                prod /= (self.get_clique_probs(members, votes[members], y))**(deg-1)
        return prod
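    # Worked factorization sketch (hypothetical 3-voter dependency graphs):
    #   edges = [(0, 1)]: maximal cliques {0, 1} and {2} share no separator, so
    #     Pr(votes | y) = Pr(v0, v1 | y) * Pr(v2 | y)
    #   edges = [(0, 1), (1, 2)]: cliques {0, 1} and {1, 2} share the
    #   separator {1}, so
    #     Pr(votes | y) = Pr(v0, v1 | y) * Pr(v1, v2 | y) / Pr(v1 | y)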
def get_probs(self, votes):
"""
Computes the probability Pr(y = 1 | votes).
"""
pos = self.get_cond_probs(votes, 1)
neg = self.get_cond_probs(votes, 0)
if pos == 0:
return 0
else:
return pos / (pos + neg)
def evaluate(self, test_votes, test_gold):
"""
Using our learned PGM, output rounded estimates of Pr(y = 1 | votes) and computes its accuracy.
Args:
test_votes: vote array to perform inference on in {0, 1}
test_gold: true labels to compare to in {0, 1}
"""
n_test = len(test_votes)
output_rounded = np.zeros(n_test)
output_probs = np.zeros(n_test)
err = 0
for i in range(n_test):
output_probs[i] = self.get_probs(test_votes[i])
output_rounded[i] = np.round(output_probs[i])
err += output_rounded[i] != test_gold[i]
accuracy = 1 - err / n_test
return output_probs, output_rounded, accuracy
def is_triangulated(nodes, edges):
"""
If a graph is triangulated (e.g. if a junction tree factorization exists).
"""
G1 = nx.Graph()
G1.add_nodes_from(nodes)
G1.add_edges_from(edges)
return nx.is_chordal(G1)
def structure_learning(m, votes, gold, acc_theta, classes = [0, 1], l1_lambda=0.2):
"""
Structure learning algorithm (Ising model selection) from Ravikumar (2010).
Args:
- votes: n_train x m array of training votes
- gold: n_train array of gold labels on the training data
- acc_theta: E[vote_i y] (where vote and y are scaled to [-1, 1]). This is a scaled version of accuracy that we will initialize some of the
parameters in our PGM with in order to specify that we don't want to optimize over the edges between votes and y.
We only are learning edges among votes!
- classes: the list of classes the data can take on.
- l1_lambda: l1 regularization strength
"""
# scale the data
classes = np.sort(np.unique(gold))
vote_classes = np.sort(np.unique(votes))
if 0 in classes and 1 in classes:
votes_scaled = 2*votes - 1
gold_scaled = 2*gold - 1
if len(vote_classes) == len(classes) + 1:
votes_scaled[votes == -1] = 0
else:
votes_scaled = votes
gold_scaled = gold
acc_theta = torch.from_numpy(acc_theta).type(torch.FloatTensor)
all_thetas = np.zeros((m, m)) # learned thetas from alg
# for each prompt, we fit a logistic regression model on it with prompt_i's output as the response variable and all otehr prompt outputs as the covariates.
# big_theta is a vector of weights that denote dependence on each prompt (0 is independence).
for v in range(m):
print(f"Learning neighborhood of vertex {v}.")
if len(classes) == 2:
big_theta = learn_neighborhood(m, v, votes_scaled, gold_scaled, acc_theta, l1_lambda)
else:
big_theta = learn_neighborhood_multi(m, v, votes_scaled, gold_scaled, acc_theta, l1_lambda, classes)
all_thetas[v] = big_theta
return all_thetas
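# Hedged usage sketch for structure_learning (the shapes and the
# postprocessing step are assumptions, not the repo's exact pipeline):
#   votes: (n, m) training votes, gold: (n,) labels, acc_theta: (m,) vector of
#   E[vote_i * y] estimates on the {-1, 1}-scaled data.
#   thetas = structure_learning(m, votes, gold, acc_theta, l1_lambda=0.2)
#   Large off-diagonal entries of thetas flag candidate dependency edges; a
#   common choice is to keep edge (i, j) when both thetas[i, j] and
#   thetas[j, i] exceed a threshold, then pass the edge list to DependentPGM.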
# v is the vertex whose neighborhood graph we are estimating
def learn_neighborhood(m, vertex, votes, gold, accs, l1_lambda, epochs = 50000):
"""
Learn the neighborhood graph for a vertex.
Args:
- m: number of prompts
- vertex: the index of the prompt we are selecting as the response variable
- votes: votes on training data
- gold: gold label of training data
- accs: training accuracies of each prompt we use to initialize the PGM parameters with
- l1_lambda: regularization strength
- epochs: number of iterations
"""
n = len(gold)
vote_y = np.concatenate((votes, gold.reshape(n, 1)), axis=1)
xr = vote_y[:, vertex]
x_notr = np.delete(vote_y, vertex, axis=1)
xr = torch.from_numpy(xr).type(torch.FloatTensor)
x_notr = torch.from_numpy(x_notr).type(torch.FloatTensor)
theta = torch.zeros(m) # last index is for accuracy between vertex and y
theta[m - 1] = accs[vertex] # initialize this to be the train accuracy. We do want this to be an optimizable variable still though.
theta.requires_grad_()
optimizer = torch.optim.SGD([theta], lr=0.0001)
for t in range(epochs):
optimizer.zero_grad()
# logistic regression from Ravikumar et al
fx = (torch.log(torch.exp(torch.matmul(x_notr, theta))
+ torch.exp(-torch.matmul(x_notr, theta))).mean())
loss = fx - torch.multiply(xr, x_notr.T).mean(dim=1).dot(theta) + l1_lambda * torch.linalg.vector_norm(theta[:m], ord=1)
loss.backward()
optimizer.step()
#if t % 1000 == 0:
# print(f"Loss: {loss}")
big_theta = np.concatenate([theta.detach().numpy()[:vertex], [0], theta.detach().numpy()[vertex:m - 1]])
return big_theta
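# The loss above is the l1-regularized pseudo-likelihood objective of
# Ravikumar et al. (2010): with response x_r and covariates x_{-r},
#   loss(theta) = mean_n log(exp(x_{-r} . theta) + exp(-x_{-r} . theta))
#                 - mean_n (x_r * x_{-r}) . theta + lambda * ||theta||_1
# Its minimizer recovers the signed neighborhood of vertex r in the Ising
# model; zero entries indicate no edge.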
# v is the vertex whose neighborhood graph we are estimating
def learn_neighborhood_multi(m, vertex, votes, gold, accs, l1_lambda, classes, epochs = 50000):
# votes: in range {0, ... k}
n = len(gold)
vote_y = np.concatenate((votes, gold.reshape(n, 1)), axis=1)
xr = vote_y[:, vertex]
x_notr = np.delete(vote_y, vertex, axis=1)
xr = torch.from_numpy(xr).type(torch.FloatTensor)
x_notr = torch.from_numpy(x_notr).type(torch.FloatTensor)
theta = torch.zeros(m) # last index is for accuracy between vertex and y
theta[m - 1] = accs[vertex] # initialize this
theta.requires_grad_()
optimizer = torch.optim.SGD([theta], lr=0.0001)
for t in range(epochs):
optimizer.zero_grad()
# logistic regression from Ravikumar et al
mu = 0
for i in range(x_notr.shape[1]):
# mu = \sum_i theta_i * \sum_data sign{x_r = x_i}
mu += (2*(xr == x_notr[:, i])-1).type(torch.FloatTensor).mean() * theta[i]
fx = 0
for k in classes:
# \sum_y exp( \sum_i theta_i sign(x_i = y)) "normalization"
fx += torch.exp(torch.matmul((2*(x_notr == k)-1).type(torch.FloatTensor), theta)).mean()
loss = fx - mu + l1_lambda * torch.linalg.vector_norm(theta[:m], ord=1)
loss.backward()
optimizer.step()
#if t % 1000 == 0:
# print(f"Loss: {loss}")
big_theta = np.concatenate([theta.detach().numpy()[:vertex], [0], theta.detach().numpy()[vertex:m - 1]])
return big_theta
def main():
# load data
vote_arr_train = np.load('./data/youtube-spam/train_votes.npy').T
vote_arr_test = np.load('./data/youtube-spam/test_votes.npy').T
gold_arr_train = np.load('./data/youtube-spam/train_gold.npy').T
gold_arr_test = np.load('./data/youtube-spam/test_gold.npy').T
# vote_arr_train = np.concatenate((vote_arr_train[:, 0: 2], vote_arr_train[:, 4:]), axis=1)
# vote_arr_test = np.concatenate((vote_arr_test[:, 0: 2], vote_arr_test[:, 4:]), axis=1)
n_train, num_prompts = vote_arr_train.shape
# make validation set
np.random.seed(4)
    val_idxs = np.random.choice(np.arange(n_train), size=28, replace=False)
vote_arr_val = vote_arr_train[val_idxs, :]
vote_arr_train = np.delete(vote_arr_train, val_idxs, axis=0)
gold_arr_val = gold_arr_train[val_idxs]
gold_arr_train = np.delete(gold_arr_train, val_idxs)
    nodes = np.arange(num_prompts)
# specify edgeset
# edges =[(0, 1)]
#model = DependentPGM(edges, vote_arr_train, gold_arr_train)
#probs, output, acc = model.evaluate(vote_arr_test, gold_arr_test)
#print(acc)
# Brute-force iteration through a bunch of edges
all_edges = list(combinations(nodes, 2))
small_edgesets = list(more_itertools.powerset(all_edges))
#small_edgesets = list(combinations(all_edges, 0)) + list(combinations(all_edges, 1)) + list(combinations(all_edges, 2)) + list(combinations(all_edges, 3))
scores = np.zeros(len(small_edgesets))
for i, edgeset in enumerate(small_edgesets):
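        # more_itertools.powerset yields subsets in increasing order of size,
        # so it is safe to stop once edgesets grow past 4 edges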
if len(edgeset) > 4:
break
if not is_triangulated(nodes, edgeset):
continue
model = DependentPGM(edgeset, vote_arr_train, gold_arr_train)
probs, output, scores[i] = model.evaluate(vote_arr_val, gold_arr_val)
if i % 100 == 0:
print(f"Edgeset: {edgeset} \n score: {scores[i]}")
print(f"Best edgeset score: {scores.max()}")
print(f"Best edgeset: {small_edgesets[scores.argmax()]}")
edges = small_edgesets[scores.argmax()]
vote_arr_train = np.concatenate((vote_arr_train, vote_arr_val))
gold_arr_train = np.concatenate((gold_arr_train, gold_arr_val))
model = DependentPGM(edges, vote_arr_train, gold_arr_train)
probs, output, acc = model.evaluate(vote_arr_test, gold_arr_test)
print(f"Final model accuracy: {acc}")
if __name__ == "__main__":
main()
| ama_prompting-main | boosting/binary_deps.py |
"""This script contains code to execute different methods"""
from sklearn.metrics import accuracy_score
import numpy as np
from snorkel.labeling.model import LabelModel
from snorkel.utils import probs_to_preds
import itertools
import math
import torch
import collections
from sklearn.linear_model import LogisticRegression
import networkx as nx
class Aggregator:
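    """
    Aggregates the votes of m predictors (prompts) into a single prediction via a
    collection of weak-supervision baselines: majority vote, pick-best, naive Bayes,
    FlyingSquid, Snorkel's label model, data programming, junction-tree inference,
    logistic regression, and exponential weighting.

    Illustrative usage (votes are (n, m) int arrays, golds are (n,) int arrays):
        agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
        probs, acc = agg.naive_bayes()
    """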
def __init__(self, train_votes, train_gold, test_votes, test_gold, abstains = False, classes=[0, 1], abstain_value = -1) -> None:
# set votes and golds
self.train_votes = train_votes
self.train_gold = train_gold
self.test_votes = test_votes
self.test_gold = test_gold
self.n_train, self.m = train_votes.shape
self.n_test = len(test_gold)
# in some cases, we need a validation set split from the training data
np.random.seed(0)
indices = np.random.permutation(self.n_train)
n_val = int(self.n_train / 5) # 20% of the training dataset
val_idx, train_idx = indices[:n_val], indices[n_val:]
self.train_no_val_votes = self.train_votes[train_idx, :]
self.val_votes = self.train_votes[val_idx, :]
self.train_no_val_gold = self.train_gold[train_idx]
self.val_gold = self.train_gold[val_idx]
# check classes
self.classes = classes
self.k = len(classes)
# print(np.unique(self.train_gold))
# print(np.unique(classes))
assert len(np.unique(self.train_gold)) == self.k
assert len(np.unique(self.test_gold)) == self.k
# check if abstains
self.abstains = abstains
#assert len(np.unique(self.train_votes)) == int(abstains) + self.k
#assert len(np.unique(self.test_votes)) == int(abstains) + self.k
self.abstain_value = abstain_value
self.vote_classes = self.classes.copy()
if abstains:
assert self.abstain_value in self.train_votes
assert self.abstain_value in self.test_votes
self.vote_classes.append(self.abstain_value)
self.nodes = np.arange(self.m)
# construct scaled arrays (for binary)
self._get_scaled()
# get true accuracies on train and test
self._get_train_acc()
self._get_test_acc()
# estimate some parameters
self._estimate_balance()
self._estimate_coverage()
self._estimate_accs()
self._estimate_test_accs()
self._estimate_symmetric_accs()
self._estimate_fs_accs()
def _get_scaled(self):
"""
For binary tasks defined with classes [0, 1] and abstain -1, we construct scaled versions with classes [-1, 1] and abstain 0.
Scaled versions of the data are used as input to certain methods that assume an Ising model (such as FlyingSquid).
"""
if self.classes == [0, 1]:
self.train_votes_scaled = 2*self.train_votes - 1
self.test_votes_scaled = 2*self.test_votes - 1
self.train_no_val_votes_scaled = 2*self.train_no_val_votes - 1
self.val_votes_scaled = 2*self.val_votes - 1
self.train_gold_scaled = 2*self.train_gold - 1
self.test_gold_scaled = 2*self.test_gold - 1
self.train_no_val_gold_scaled = 2*self.train_no_val_gold - 1
self.val_gold_scaled = 2*self.val_gold - 1
if self.abstains:
self.train_votes_scaled[self.train_votes == self.abstain_value] = 0
self.test_votes_scaled[self.test_votes == self.abstain_value] = 0
self.train_no_val_votes_scaled[self.train_no_val_votes == self.abstain_value] = 0
self.val_votes_scaled[self.val_votes == self.abstain_value] = 0
else:
self.train_votes_scaled = self.train_votes
self.test_votes_scaled = self.test_votes
self.train_no_val_votes_scaled = self.train_no_val_votes
self.val_votes_scaled = self.val_votes
self.train_gold_scaled = self.train_gold
self.test_gold_scaled = self.test_gold
self.train_no_val_gold_scaled = self.train_no_val_gold
self.val_gold_scaled = self.val_gold
def _set_clique_tree(self, edgeset):
"""
Constructs a data structure c_tree that contains nodes and edges of the junction tree.
Args:
edgeset: List of tuples (i, j) for i, j = {0, ..., m}
"""
G1 = nx.Graph()
G1.add_nodes_from(self.nodes)
G1.add_edges_from(edgeset)
self.higher_order = len(edgeset) != 0
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise nx.NetworkXError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes():
for j in G2.nodes():
S = G2.nodes[i]["members"].intersection(G2.nodes[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
        # use a maximum spanning tree of G2 so that separator sets are maximal
        self.c_tree = nx.maximum_spanning_tree(G2)
def _set_clique_data(self):
"""
Creates a data structure c_data which maps cliques and separator sets to their maximal clique.
"""
self.c_data = dict()
for i in range(self.m):
self.c_data[i] = {
"vertices": [i],
"max_cliques": set( # which max clique i belongs to
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.nodes[j]["members"]
]
),
}
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if self.higher_order:
counter = 0
for item in itertools.chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.nodes[item]
C_type = "node"
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
C_type = "edge"
else:
raise ValueError(item)
members = list(C["members"])
nc = len(members)
# Else add one column for each possible value
if nc != 1:
# Add to self.c_data as well
#idx = counter + m
self.c_data[tuple(members)] = {
"vertices": members,
"max_cliques": set([item]) if C_type == "node" else set(item),
}
counter += 1
def _estimate_balance(self):
""" Estimates the class balance Pr(y) on training data. Sets self.balance to be an array of length k.
"""
self.gold_idxs = [np.where(self.train_gold == c)[0] for c in self.classes]
self.balance = np.array([len(self.gold_idxs[c])/self.n_train for c in range(self.k)])
def _estimate_accs(self):
""" Computes Pr(vote_i | y) on training data. Each prompt has k x k values.
We ignore the abstaining case Pr(vote_i = 0 | y), since this is handled by self.coverage.
"""
k_votes = self.k
vote_classes = self.classes
self.nb_accs = np.zeros((self.m, self.k, k_votes)) # [i, j, k] = Pr(prompt_i = j| y = k)
for p in range(self.m):
for i in range(self.k):
for j in range(k_votes):
vc = vote_classes[j]
self.nb_accs[p, i, j] = len(np.where(self.train_votes[self.gold_idxs[i], p] == vc)[0]) / len(self.gold_idxs[i])
        # clip values to [0.0001, 0.9999]
        self.nb_accs[self.nb_accs >= 1] = 0.9999
        self.nb_accs[self.nb_accs == 0] = 0.0001
def _estimate_test_accs(self):
self.gold_test_idxs = [np.where(self.test_gold == c)[0] for c in self.classes]
self.nb_test_accs = np.zeros((self.m, self.k, self.k)) # [i, j, k] = Pr(prompt_i = j| y = k)
for p in range(self.m):
for i in range(self.k):
for j in range(self.k):
vc = self.classes[j]
self.nb_test_accs[p, i, j] = len(np.where(self.test_votes[self.gold_test_idxs[i], p] == vc)[0]) / len(self.gold_test_idxs[i])
        # clip values to [0.0001, 0.9999]
        self.nb_test_accs[self.nb_test_accs >= 1] = 0.9999
        self.nb_test_accs[self.nb_test_accs == 0] = 0.0001
def _estimate_symmetric_accs(self):
""" Computes Pr(vote_i | y) on training data similarly to above, but assumes Pr(vote_i = c | y = c) = Pr(vote_i = y),
independent of what the value of y is. Then, Pr(vote_i = c' | y = c) = (1 - Pr(vote_i = y)) / (k - 1) (uniform assumption)
"""
self.sym_accs = np.zeros((self.m, self.k, self.k))
for i in range(self.m):
for j in range(self.k):
for k in range(self.k):
if j == k:
self.sym_accs[i, j, k] = self.train_acc[i] # Pr(lf_i = c | y = c) = Pr(lf = y)
else:
self.sym_accs[i, j, k] = (self.coverage[i] - self.train_acc[i])/(self.k - 1) # divide uniformly among other classes
def _estimate_coverage(self):
""" Computes Pr(vote_i != 0) (coverage) and Pr(vote_i = 0 | y) for each y (abstain_rate).
"""
# Pr(vote_i != 0)
self.coverage = np.array([len(np.where(self.train_votes[:, p] != self.abstain_value)[0]) / self.n_train for p in range(self.m)])
# Pr(vote_i = 0 | y)
self.abstain_rate = np.zeros((self.m, self.k))
for i in range(self.m):
for j in range(self.k):
self.abstain_rate[i, j] = len(np.where(self.train_votes[self.gold_idxs[j], i] == self.abstain_value)[0]) / len(self.gold_idxs[j])
def _estimate_fs_accs(self, on_all_data = True):
""" Estimates Pr(vote_i | y = 0, 1) using FlyingSquid algorithm.
Args:
- on_test: If we use the unlabeled test dataset or the labeled train dataset. Default is True.
This version of FlyingSquid only handles the binary case (and is called on one-vs-all for multiclass) and works with scaled data.
"""
if self.k > 2:
return
if on_all_data:
votes = np.concatenate((self.train_votes_scaled, self.test_votes_scaled))
n = self.n_train + self.n_test
else:
votes = self.test_votes_scaled
n = self.n_test
if self.abstains:
# compute M[i, j] = E[vote_i * vote_j | vote_i, vote_j not abstaining]
M = np.zeros((self.m, self.m))
for i in range(self.m):
for j in range(self.m):
no_abstains = np.where(np.logical_and(votes[:, i] != 0, votes[:, j] != 0))[0]
M[i, j] = votes[no_abstains, i].dot(votes[no_abstains, j]) / len(no_abstains)
else:
# M[i, j] = E[vote_i * vote_j]
M = votes.T.dot(votes)/n
triplets = list(itertools.combinations(np.arange(self.m), 3)) # all possible combinations of triplets
self.fs_accs = np.zeros((self.m, 2, 2))
total = math.comb(self.m-1, 2)
# average over every combination of triplets
for (i, j, k) in triplets:
a = np.zeros(3)
a[0] = 0.5*(np.sqrt(np.abs(M[i, j] * M[i, k] / M[j, k]))+1)
a[1] = 0.5*(np.sqrt(np.abs(M[j, k] * M[i, j] / M[i, k]))+1)
a[2] = 0.5*(np.sqrt(np.abs(M[i, k] * M[j, k] / M[i, j]))+1)
# edge cases
a[np.where(np.isnan(a))[0]] = 0.5
a[np.where(np.isinf(a))[0]] = 1
self.fs_accs[i, 1, 1] += a[0]
self.fs_accs[j, 1, 1] += a[1]
self.fs_accs[k, 1, 1] += a[2]
self.fs_accs /= total
self.fs_accs[self.fs_accs > 1] = 0.9999
# Flying Squid assumes symmetry, Pr(vote_i = 1 | y = 1) = Pr(vote_i = -1 | y = -1)
self.fs_accs[:, 0, 0] = self.fs_accs[:, 1, 1]
self.fs_accs[:, 1, 0] = 1 - self.fs_accs[:, 1, 1]
self.fs_accs[:, 0, 1] = 1 - self.fs_accs[:, 1, 1]
def _get_train_acc(self):
""" Compute Pr(vote_i = y) on the training data.
"""
self.train_acc = (self.train_votes.T == self.train_gold).mean(axis=1)
self.train_no_val_acc = (self.train_no_val_votes.T == self.train_no_val_gold).mean(axis=1)
def _get_test_acc(self):
""" Compute Pr(vote_i = y) on the test data.
"""
self.test_acc = (self.test_votes.T == self.test_gold).mean(axis=1)
def pick_best(self):
"""Use the predictor with the best performance on the train set.
"""
self.best_prompt = np.argmax(self.train_acc)
test_preds = self.test_votes[:, self.best_prompt]
return accuracy_score(self.test_gold, test_preds)
def majority_vote(self):
"""Take a majority vote over predictors. Current implementation ignores abstains.
When there is a tie, we pick the prompt with the lowest index.
When all prompts abstain, we just return the most common label argmax_y Pr(y).
"""
test_preds = np.zeros(self.n_test)
for i in range(self.n_test):
# Majority vote discards abstains if any
if self.abstains:
voters = self.test_votes[i, self.test_votes[i] != self.abstain_value]
else:
voters = self.test_votes[i]
counts = collections.Counter(voters)
if len(counts) == 0:
# e.g. all prompts abstain --> return most common class label
test_preds[i] = self.balance.argmax()
else:
test_preds[i] = counts.most_common(1)[0][0]
return test_preds.astype(int), accuracy_score(self.test_gold, test_preds)
def get_clique_probs(self, idxs, vals, y, symmetric = False):
"""
Computes marginal probability over votes indexed by idx, Pr(votes_idxs = vals | y), using training data.
"""
if symmetric:
truth_matrix = np.ones(self.n_train).astype(bool)
agree = np.where(vals == y)[0]
disagree = np.where((np.logical_and(vals != self.abstain_value, vals != y)) == True)[0]
for i, lf in enumerate(idxs):
if i in agree:
truth_matrix = np.logical_and(truth_matrix, self.train_votes[:, lf] == self.train_gold)
elif i in disagree:
truth_matrix = np.logical_and(truth_matrix, self.train_votes[:, lf] != self.train_gold)
else:
truth_matrix = np.logical_and(truth_matrix, self.train_votes[:, lf] == self.abstain_value)
else:
truth_matrix = np.ones(len(self.gold_idxs[y])).astype(bool)
for i, lf in enumerate(idxs):
truth_matrix = np.logical_and(truth_matrix, self.train_votes[self.gold_idxs[y], lf] == vals[i])
if len(np.where(truth_matrix == True)[0]) == 0:
return 0.00001
if symmetric:
return len(np.where(truth_matrix == True)[0]) / self.n_train
else:
return len(np.where(truth_matrix == True)[0]) / len(self.gold_idxs[y])
def get_clique_probs_unlabeled(self, idxs, on_all_data=True):
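        """
        Estimates an accuracy-style parameter for the clique indexed by idxs from
        unlabeled votes, using the moment E[prod of votes] and the class balance for E[Y].
        """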
if on_all_data:
votes = np.concatenate((self.train_votes_scaled, self.test_votes_scaled))
n = self.n_train + self.n_test
else:
votes = self.test_votes_scaled
n = self.n_test
l = len(idxs)
e_y = 2*self.balance[1] - 1
vote_moment = votes[:, idxs].prod(axis=1).mean()
        if l % 2 == 0:
            # E[Y] * E[lfs] = E[lfs Y]
            acc = vote_moment * e_y
        else:
            acc = vote_moment / e_y
        return acc
def get_cond_probs(self, votes, y, accs, edgeset = None, symmetric=False, abstains_symmetric = True):
""" Computes the probability Pr(votes, y) assuming conditional independence.
Args:
- votes: m element array of votes in {-1, 0, ..., k-1}
- y: the value of the label, in {0, ..., k - 1}
- accs: the accuracy array, e.g. directly learned from data or from FlyingSquid
- edgeset: set of edges to factorize probability with
- abstains_symmetric: do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0) or not?
"""
pr_y = self.balance[y]
prod = pr_y
if edgeset is None:
# in this case, do not need junction tree factorization. Just multiply accs together
for p in range(len(votes)):
if self.abstains and votes[p] == self.abstain_value:
if abstains_symmetric:
# we can drop Pr(lf_i = 0 | y) since it appears the same amount of times in numerator and denominator
prod *= (1 - self.coverage[p])
# print(f"multiplying by abstain on {p}: {1 - self.coverage[p]}")
else:
prod *= self.abstain_rate[p, y]
else:
# print(f"multiplying by accuracy Pr(vote_{p} = {votes[p]} | y = {y}): {accs[p, y, votes[p]]}")
prod *= accs[p, y, votes[p]] # this assumes everything is independent
else:
# multiply over maximal cliques
for i in self.c_tree.nodes():
node = self.c_tree.nodes[i]
members = list(node['members'])
if len(members) == 1:
v = members[0]
if self.abstains and votes[v] == self.abstain_value:
if abstains_symmetric:
prod *= (1 - self.coverage[v])
# print(f"multiplying by abstain on {v}: {1 - self.coverage[v]}")
else:
#print("multiplying by abstains")
prod *= self.abstain_rate[v, y]
else:
#print(f"multiplying by accuracy of {v}: {accs[v, y, votes[v]] }")
prod *= accs[v, y, votes[v]]
# print(f"multiplying by Pr(vote_{v} = {votes[v]} | y = {y}): {accs[v, y, votes[v]]}")
else:
#print(f"multiplying by prob over clique {members}: {self.get_clique_probs(members, votes[members], y, symmetric)}")
prod *= self.get_clique_probs(members, votes[members], y, symmetric)
# divide over separator sets
for i in self.c_tree.edges():
edge = self.c_tree.edges[i]
members = list(edge['members'])
if len(members) == 1:
v = members[0]
deg = len(self.c_data[v]['max_cliques'])
if self.abstains and votes[v] == self.abstain_value:
if abstains_symmetric:
prod /= (1 - self.coverage[v])**(deg - 1)
else:
if self.abstain_rate[v, y] == 0:
                            prod /= 0.000001**(deg - 1) # edge case
else:
prod /= self.abstain_rate[v, y]**(deg - 1)
else:
#print(f"Dividing by symmetric accuracy of {v}")
prod /= accs[v, y, votes[v]]**(deg - 1)
else:
#print(f"Dividing by prob over clique {members}: {self.get_clique_probs(members, votes[members], y, symmetric)}")
deg = len(self.c_data[tuple(members)]['max_cliques'])
prod /= (self.get_clique_probs(members, votes[members], y, symmetric))**(deg-1)
return prod
def get_probs(self, votes, accs, edgeset = None, symmetric=False, abstains_symmetric = True):
""" Computes the probability Pr(y | votes) using Bayes Rule over Pr(votes, y).
Args:
- votes: m element array of votes in {-1, 0, ..., k-1}
- accs: the accuracy array, e.g. directly learned from data or from FlyingSquid
- edgeset: set of edges to factorize probability with
- abstains_symmetric: do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0) or not?
"""
p = np.zeros(self.k)
for i in range(self.k):
p[i] = self.get_cond_probs(votes, self.classes[i], accs, edgeset, symmetric, abstains_symmetric)
p /= p.sum() # normalization
return p
def naive_bayes(self, accs = None, symmetric = False, abstains_symmetric=True):
""" Naive bayes estimation.
Estimate Pr(vote_i | y) from training data and use that to compute Pr(y = 1 | votes).
Assumes conditional independence.
Args:
- accs: the accuracies [m x k x k] we estimate with
- symmetric: Do we assume Pr(vote_i = c | y = c) = Pr(vote_i = y) for all c?
- abstains_symmetric: Do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0)? This is
reasonable when an abstain is due to a systematic error in the prompt that doesn't depend on the label of the data.
"""
test_preds = []
test_probs = []
if symmetric:
accs = self.sym_accs
else:
if accs is None:
accs = self.nb_accs
for votes in self.test_votes:
prob = self.get_probs(votes, accs, symmetric=symmetric, abstains_symmetric=abstains_symmetric)
test_probs.append(prob)
test_preds.append(np.argmax(prob))
return test_probs, accuracy_score(self.test_gold, test_preds)
def junction_tree(self, edgeset, symmetric=False, abstains_symmetric=True, data='test'):
""" Junction tree estimation.
Estimate Pr(vote_i | y) from training data and use that to compute Pr(y = 1 | votes).
Assumes edgeset structure.
Args:
- edgeset: List of tuples (i, j) for i, j in {0, ..., m} denoting edges to factorize distribution with.
- symmetric: Do we assume Pr(vote_i = c | y = c) = Pr(vote_i = y) for all c?
- abstains_symmetric: Do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0)? This is
reasonable when an abstain is due to a systematic error in the prompt that doesn't depend on the label of the data.
"""
# construct auxiliary data structures
self._set_clique_tree(edgeset)
self._set_clique_data()
# Get preds
preds = []
probs = []
        if data == 'val':
            votes = self.val_votes
            gold = self.val_gold
        elif data == 'test':
votes = self.test_votes
gold = self.test_gold
else:
votes = self.train_votes
gold = self.train_gold
if symmetric:
accs = self.sym_accs
else:
accs = self.nb_accs
for v in votes:
            prob = self.get_probs(v, accs, edgeset, symmetric=False, abstains_symmetric=abstains_symmetric)
probs.append(prob)
preds.append(np.argmax(prob))
return probs, accuracy_score(gold, preds)
def conditional_entropy(self, votes, edgeset=None):
"""
Computes H(Y | votes) ~= -1/n sum_i sum_y' Pr(y = y' | votes_j) log Pr(y = y' | votes_j).
Uses learned distribution as true one over Pr(y | votes)
This computation is independent of aggregation approach.
It uses direct estimation on the training dataset to learn the PGM.
"""
ce = 0
if edgeset is not None:
self._set_clique_tree(edgeset)
self._set_clique_data()
#print("Votes 1")
#print(votes)
for i, vote in enumerate(votes):
# compute Pr(y | lf) for all y. We are treating this estimated probability as the true distribution.
prob_vector = self.get_probs(vote, self.nb_test_accs, edgeset, symmetric=False, abstains_symmetric=True)
# print(prob_vector, vote, i)
# print(prob_vector, vote)
for j in range(self.k):
if prob_vector[j] == 0:
continue
# print(vote, j, prob_vector[j])
ce += prob_vector[j] * np.log(prob_vector[j])
return -ce/len(votes)
def conditional_entropy_singleton(self, probs, gold, edgeset=None):
"""
Computes H(Y | WS output) = -1/n sum_i sum_j Pr(y-hat = y_j | lfs(x_i)) * sum_k Pr(y = y_k | y-hat = y_j) log Pr(y = y_k | y-hat = y_j)
"""
# First, compute WS estimate y-hat over dataset
        preds = np.argmax(probs, axis=1)  # WS point estimates y-hat
# Now estimate Pr(y | y-hat) (k by k) matrix
y_accs = np.zeros((self.k, self.k))
ws_idxs = [np.where(preds == c)[0] for c in self.classes]
for i in range(self.k):
for j in range(self.k):
y_accs[i, j] = len(np.where(gold[ws_idxs[i]] == self.classes[j])[0]) / len(ws_idxs[i])
# print(y_accs)
# finally, compute entropy: 1/n sum_i sum_j Pr(y-hat = y_j | lfs(x_i)) * sum_k Pr(y = y_k | y-hat = y_j) log Pr(y = y_k | y-hat = y_j)
ce = 0
for i in range(len(probs)):
for j in range(self.k):
for c in range(self.k):
y_prob = y_accs[c, j]
if y_prob == 0:
continue
ce += probs[i, j] * y_prob * np.log(y_prob)
return -ce/len(probs)
def conditional_entropy_mv(self, edgeset=None):
"""
Computes H(Y | MV output) = -1/n sum_i sum_k Pr(y = y_k | y-hat_i) log Pr(y = y_k | y-hat_i)
"""
# First, compute MV estimate y-hat over dataset
preds, _ = self.majority_vote()
# Now estimate Pr(y | y-hat) (k by k) matrix
y_accs = np.zeros((self.k, self.k))
ws_idxs = [np.where(preds == c)[0] for c in self.classes]
for i in range(self.k):
for j in range(self.k):
y_accs[i, j] = len(np.where(self.test_gold[ws_idxs[i]] == self.classes[j])[0]) / len(ws_idxs[i])
ce = 0
for i, vote in enumerate(self.test_votes):
v_pred = preds[i]
for j in range(self.k):
y_prob = y_accs[v_pred, j]
if y_prob == 0:
continue
ce += y_prob * np.log(y_prob)
return -ce/len(self.test_votes)
def cross_entropy_conditional(self, votes, golds, edgeset):
"""
Computes -1/n sum_i log Pr(y-hat = y | votes_j). This is the standard notion of CE loss.
"""
ce = 0
self._set_clique_tree(edgeset)
self._set_clique_data()
for i, vote in enumerate(votes):
# compute Pr(y | lf)
prob = self.get_probs(vote, self.nb_accs, edgeset, symmetric=False, abstains_symmetric=True)
ce += np.log(prob[golds[i]])
return -ce/len(votes)
def cross_entropy(self, votes, golds, edgeset):
"""
Computes -1/n sum_i log Pr(y-hat = y, votes_j), minimizing cross entropy over the joint distribution of Y, votes.
"""
ce = 0
self._set_clique_tree(edgeset)
self._set_clique_data()
for i, vote in enumerate(votes):
# compute Pr(votes, y)
prob = self.get_cond_probs(vote, golds[i], self.nb_accs, edgeset, symmetric=False, abstains_symmetric=True)
ce += np.log(prob)
return -ce/len(votes)
def cross_entropy_no_label(self, votes, edgeset):
"""
Computes -1/n sum_j log Pr(votes_j), minimizing cross entropy over the distribution of votes
"""
ce = 0
self._set_clique_tree(edgeset)
self._set_clique_data()
for i, vote in enumerate(votes):
# compute Pr(votes, y)
prob = 0
for c in self.classes:
                prob += self.get_cond_probs(vote, c, self.nb_accs, edgeset, symmetric=False, abstains_symmetric=True)
ce += np.log(prob)
return -ce/len(votes)
def flying_squid(self, abstains_symmetric=True):
""" FlyingSquid algorithm requires no labeled data (except for estimating class balance).
Assumes conditional independence (for now) and symmetric accuracies.
That is, Pr(vote_i = 1 | y = 1) = Pr(vote_i = 0 | y = 0).
Args:
- abstains_symmetric: Do we assume Pr(vote_i = 0 | y) = Pr(vote_i = 0)? This is
reasonable when an abstain is due to a systematic error in the prompt that doesn't depend on the label of the data.
"""
assert self.k == 2, "not implemented for more than 2 classes!"
assert 0 in self.classes
assert 1 in self.classes
# Get preds
test_preds = []
test_probs = []
for votes in self.test_votes:
prob = self.get_probs(votes, self.fs_accs, abstains_symmetric=abstains_symmetric)
test_probs.append(prob)
test_preds.append(np.argmax(prob))
return test_probs, accuracy_score(self.test_gold, test_preds)
def snorkel_lm(self, on_all_data=True):
""" Use Snorkel AI's label model. Under the hood: Metal "forward" algorithm.
"""
#assert self.k == 2, "not implemented for more than 2 classes!"
#assert 0 in self.classes
#assert 1 in self.classes
if on_all_data:
votes = np.concatenate((self.train_votes, self.test_votes))
n = self.n_train + self.n_test
else:
votes = self.test_votes
n = self.n_test
label_model = LabelModel(cardinality=self.k)
label_model.fit(L_train=votes, n_epochs=500, log_freq=100, seed=0)
probs_test = label_model.predict_proba(self.test_votes)
test_preds = np.argmax(probs_test, axis=1)
params = label_model.get_conditional_probs()
return params, accuracy_score(self.test_gold, test_preds)
def dp_learn_params(self, with_label=False, seed=0, lr=0.0001, epochs = 1000):
""" Learn the data programming parameters alpha and beta.
Args:
- with_label: Do we use y or not? If using label, use the train set and do MLE on Pr(y, votes);
else use the test set and do MLE on Pr(votes).
- seed: random seed for Pytorch.
- lr: learning rate
- epochs: number of epochs
Returns:
- alpha: parameter corresponding to accuracy Pr(vote_i = y) (symmetric)!
- beta: parameter corresponding to coverage Pr(vote_i != 0) (symmetric)!
"""
if with_label:
votes = self.train_votes_scaled
gold = self.train_gold_scaled
else:
votes = self.test_votes_scaled
gold = self.test_gold_scaled
torch.manual_seed(seed)
x = torch.from_numpy(votes).type(torch.FloatTensor)
# Initialize Parameters
alpha = torch.rand(self.m, requires_grad=True)
beta = torch.tensor(self.coverage, requires_grad = False).type(torch.FloatTensor) # we do not optimize beta for now
optimizer = torch.optim.SGD([alpha], lr=lr)
for t in range(epochs):
optimizer.zero_grad()
mu_1 = torch.prod((x == 1) * beta.multiply(alpha) + (x == -1) * beta.multiply(1 - alpha) + (x == 0) * (1 - beta), dim = 1)
mu_neg1 = torch.prod((x == -1) * beta.multiply(alpha) + (x == 1) * beta.multiply(1 - alpha) + (x == 0) * (1 - beta), dim=1)
if with_label:
# use the label information in MLE
snorkel_loss = -torch.log(mu_1[np.where(gold == 1)[0]]).sum() - torch.log(mu_neg1[np.where(gold == -1)[0]]).sum()
else:
# 50-50 for y = 1 vs -1
snorkel_loss = -torch.log(0.5*mu_1 + 0.5*mu_neg1).sum()
snorkel_loss.backward()
optimizer.step()
#if t % 100 == 0:
# print('Loss', snorkel_loss, 'alpha', alpha, 'beta', beta)
with torch.no_grad():
alpha.clamp_(0.5, 1) # assume that accuracy is better than random
beta.clamp_(0, 1) # coverage should be between 0 and 1
return alpha, beta
def data_programming(self, with_label=False, seed=0, lr = 0.0001, epochs=1000):
""" Data programming algorithm.
Args:
- with_label: Do we use y or not? If using label, use the train set and do MLE on Pr(y, votes);
else use the test set and do MLE on Pr(votes).
- seed: random seed for Pytorch.
- lr: learning rate
- epochs: number of epochs
"""
assert self.k == 2, "not implemented for more than 2 classes!"
assert 0 in self.classes
assert 1 in self.classes
# we don't need betas, will just cancel out when doing inference
alpha, beta = self.dp_learn_params(with_label, seed, lr, epochs)
alpha = alpha.detach().numpy()
if np.any(np.isnan(alpha)):
raise ValueError("SGD failed to converge.")
dp_accs = np.zeros((self.m, 2, 2))
dp_accs[:, 1, 1] = dp_accs[:, 0, 0] = alpha
dp_accs[:, 1, 0] = dp_accs[:, 0, 1] = 1 - alpha
if with_label:
self.dp_label_accs = dp_accs
else:
self.dp_nolabel_accs = dp_accs
# Get preds
test_preds = []
test_probs = []
for votes in self.test_votes:
prob = self.get_probs(votes, dp_accs)
test_probs.append(prob[1])
test_preds.append(np.argmax(prob))
return test_probs, accuracy_score(self.test_gold, test_preds)
def logistic_regression(self, pairwise=True, singleton=False, scaling=True, max_iter=100):
"""
Logistic regression baseline.
Args:
- pairwise: if true, we scale everything to [-1, 1] and look at vote_i * vote_j as (m choose 2) new features that
explicitly model their agreement and disagreement.
- singleton: do we include the original votes as features
- scaling: do logistic regression over [-1, 1] or [0, 1]
- max_iter: maximum number of iterations for sklearn LR algorithm.
"""
if scaling:
train_votes = self.train_no_val_votes_scaled
val_votes = self.val_votes_scaled
test_votes = self.test_votes_scaled
train_gold = self.train_no_val_gold_scaled
val_gold = self.val_gold_scaled
test_gold = self.test_gold_scaled
else:
train_votes = self.train_no_val_votes
val_votes = self.val_votes
test_votes = self.test_votes
train_gold = self.train_no_val_gold
val_gold = self.val_gold
test_gold = self.test_gold
if pairwise:
# get pairwise data
pair_idxs = list(itertools.combinations(np.arange(self.m), 2))
pairs_train = np.zeros((len(train_gold), len(pair_idxs)))
pairs_val = np.zeros((len(val_gold), len(pair_idxs)))
pairs_test = np.zeros((self.n_test, len(pair_idxs)))
for count, (i, j) in enumerate(pair_idxs):
pairs_train[:, count] = train_votes[:, i] * train_votes[:, j]
pairs_val[:, count] = val_votes[:, i] * val_votes[:, j]
pairs_test[:, count] = test_votes[:, i] * test_votes[:, j]
if not singleton:
train_votes = pairs_train
val_votes = pairs_val
test_votes = pairs_test
else:
train_votes = np.concatenate((train_votes, pairs_train), axis=1)
val_votes = np.concatenate((val_votes, pairs_val), axis=1)
test_votes = np.concatenate((test_votes, pairs_test), axis=1)
best_val = -1
best_test = -1
best_reg = -1
# grid search over regularization parameter using validation set
for c in [0.001, 0.01, 0.1, 0.25, 0.5, 5, 10, 100, 1000, 2000]:
clf = LogisticRegression(random_state=0, penalty='l1', solver='liblinear', fit_intercept=False, multi_class='ovr', C=c, max_iter=max_iter).fit(train_votes, train_gold)
test_score = clf.score(test_votes, test_gold)
val_score = clf.score(val_votes, val_gold)
if val_score >= best_val:
best_val = val_score
best_test = test_score
best_reg = c
clf = LogisticRegression(random_state=0, penalty='l1', solver='liblinear', fit_intercept=False, multi_class='ovr', C=best_reg, max_iter=max_iter).fit(train_votes, train_gold)
return clf.coef_, clf.predict_proba(test_votes), best_test
def exp_weight(self, option=1, etas=[0.25, 0.5, 1, 2, 4, 8, 16, 32]):
"""
Weighting rule 1: Pr(y | votes) ~ sum_i 1{vote_i = y} * exp(-eta*loss_i)
Weighting rule 2: Pr(y | votes) ~ exp(sum_i eta * accuracy * vote_i * y) (scaled to -1, 1)
Args:
- option: which weighting rule to use
- etas: list of temperature hyperparameters
"""
test_preds = []
test_probs = []
# grid search
best_eta = -1
best_acc = 0
for eta in etas:
val_preds = []
if option == 1:
weights = np.exp(-eta * (1 - self.train_no_val_acc))
else:
weights = eta*self.train_no_val_acc
for votes in self.val_votes:
if option == 1:
scores = np.array([weights[votes == y].sum() for y in self.classes])
else:
scores = np.array([np.exp((2*((votes == y).astype(int))-1).dot(weights)) for y in self.classes] )
                if scores.sum() == 0:
# return prior
val_preds.append(np.argmax(self.balance))
else:
val_preds.append(np.argmax(scores))
val_acc = accuracy_score(self.val_gold, val_preds)
            if val_acc > best_acc:
                best_acc = val_acc
                best_eta = eta
if option == 1:
weights = np.exp(-best_eta * (1 - self.train_no_val_acc))
else:
weights = best_eta*self.train_no_val_acc
for votes in self.test_votes:
if option == 1:
scores = np.array([weights[votes == y].sum() for y in self.classes])
else:
scores = np.array([np.exp((2*((votes == y).astype(int))-1).dot(weights)) for y in self.classes] )
            if scores.sum() == 0:
                # fall back to the prior
                test_probs.append(self.balance[1])
                test_preds.append(np.argmax(self.balance))
else:
scores /= scores.sum()
test_probs.append(scores[1])
test_preds.append(np.argmax(scores))
return test_probs, accuracy_score(self.test_gold, test_preds)
class MultiAggregator(Aggregator):
def __init__(self, train_votes, train_gold, test_votes, test_gold, classes, abstains=False, abstain_value=-1) -> None:
super().__init__(train_votes, train_gold, test_votes, test_gold, abstains, classes, abstain_value)
def flying_squid(self, abstains_symmetric=True):
"""
For multi-class, FlyingSquid reduces into one-vs-all subproblems and picking the highest Pr(y | votes) from each of those.
"""
probs = np.zeros((self.n_test, self.k))
for i, c in enumerate(self.classes):
train_votes_c = np.where(self.train_votes == c, 1, 0)
train_votes_c[self.train_votes == self.abstain_value] = -1 # keep the abstains
train_gold_c = np.where(self.train_gold == c, 1, 0)
test_votes_c = np.where(self.test_votes == c, 1, 0)
test_votes_c[self.test_votes == self.abstain_value] = -1
test_gold_c = np.where(self.test_gold == c, 1, 0)
agg = Aggregator(train_votes_c, train_gold_c, test_votes_c, test_gold_c, self.abstains, classes=[0, 1])
fs_probs, fs_acc = agg.flying_squid(abstains_symmetric)
probs[:, i] = np.array(fs_probs)[:, 1]
test_preds = np.argmax(probs, axis=1)
return probs, accuracy_score(self.test_gold, test_preds)
def data_programming(self, epochs=1000, with_label=False, seed=0, lr=0.0001):
"""
For multi-class, data programming reduces into one-vs-all subproblems and picking the highest Pr(y | votes) from each of those.
"""
probs = np.zeros((self.n_test, self.k))
# one versus all
for i, c in enumerate(self.classes):
train_votes_c = np.where(self.train_votes == c, 1, 0)
train_votes_c[self.train_votes == self.abstain_value] = -1
train_gold_c = np.where(self.train_gold == c, 1, 0)
test_votes_c = np.where(self.test_votes == c, 1, 0)
test_votes_c[self.test_votes == self.abstain_value] = -1
test_gold_c = np.where(self.test_gold == c, 1, 0)
agg = Aggregator(train_votes_c, train_gold_c, test_votes_c, test_gold_c, self.abstains, classes=[0, 1])
probs[:, i], _ = agg.data_programming(with_label, seed, lr, epochs)
test_preds = np.argmax(probs, axis=1)
return accuracy_score(self.test_gold, test_preds)
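# Illustrative usage sketch (not part of the original pipeline): builds synthetic
# binary votes in which each of three voters copies the gold label ~80% of the
# time, then compares two aggregation baselines. All sizes, seeds, and accuracy
# values below are assumptions chosen only to exercise the Aggregator API.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    gold_train = rng.integers(0, 2, size=200)
    gold_test = rng.integers(0, 2, size=100)

    def noisy(gold):
        # each voter agrees with the gold label with probability 0.8, else flips it
        return np.where(rng.random(len(gold)) < 0.8, gold, 1 - gold)

    votes_train = np.stack([noisy(gold_train) for _ in range(3)], axis=1)
    votes_test = np.stack([noisy(gold_test) for _ in range(3)], axis=1)
    agg = Aggregator(votes_train, gold_train, votes_test, gold_test)
    print("majority vote accuracy:", agg.majority_vote()[1])
    print("naive Bayes accuracy:", agg.naive_bayes()[1])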
| ama_prompting-main | boosting/methods.py |
import argparse
import numpy as np
import json
import os
import cvxpy as cp
import scipy as sp
import datetime
from methods import Aggregator
from metal.label_model import LabelModel
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--task_name", type=str, required=True)
parser.add_argument("--data_dir", type=str, default="../ama_logs/ama_final_runs")
parser.add_argument("--symmetric", action="store_true")
parser.add_argument("--model_prefix", type=str, default="EleutherAI_gpt-j-6B")
parser.add_argument("--override_date", type=str, default=None)
args = parser.parse_args()
return args
def get_data(task_name, data_dir, model_prefix, override_date=None):
"""
Load in dataset from task_name depending on where files are saved.
"""
task_dir = os.path.join(data_dir, task_name)
date = datetime.datetime.today().strftime("%m%d%Y") if not override_date else override_date
print(f"Loading runs from {date}")
dpath = os.path.join(task_dir, f"{model_prefix}_decomposed_{date}.json")
train_dpath = os.path.join(task_dir, f"{model_prefix}_decomposed_{date}_train.json")
print(dpath)
print(train_dpath)
if task_name in ["super_glue_rte"]:
label_name_to_int = {"True": 1, "False": 0}
elif task_name == "amazon":
label_name_to_int = {'amazon instant video':0,
'books':1,
'clothing shoes and jewelry':2,
'electronics':3,
'kindle store':4,
'movies and tv':5,
'musical instruments':6,
'office products':7,
'tools and home improvement':8}
elif task_name == 'wic':
label_name_to_int = {"Yes": 1, "No": 0}
elif task_name == 'super_glue_wsc':
label_name_to_int = {"True": 1, "False": 0}
elif task_name == "sst2":
label_name_to_int = {"positive": 1, "negative": 0, "neutral": 0}
elif task_name == "super_glue_boolq":
label_name_to_int = {"true": 1, "false": 0}
elif task_name in ["story_cloze", "story_cloze_v2", "story_cloze_v3"]:
label_name_to_int = {"2": 1, "1": 0}
elif task_name in ["anli_r1", "anli_r2", "anli_r3"]:
label_name_to_int = {"true": 1, "false": 0, "neither": 2}
elif task_name == "MR" or task_name == "mr":
label_name_to_int = {"positive": 1, "negative": 0}
elif task_name == "multirc":
label_name_to_int = {"yes": 1, "no": 0}
elif task_name == "super_glue_cb":
label_name_to_int = {"true": 0, "false": 2, "neither": 1}
elif task_name == "super_glue_copa":
label_name_to_int = {"1": 1, "2": 0}
elif task_name == "drop":
label_name_to_int = {"true": 1, "false": 0}
elif task_name == "super_glue_record":
label_name_to_int = {"true": 1, "false": 0}
elif task_name == "ag_news":
label_name_to_int = {"world news": 0, "sports": 1, "business": 2, "technology and science": 3}
elif task_name == "dbpedia":
label_name_to_int = {"company": 0, "educational institution": 1, "artist": 2, "athlete": 3,
"office holder": 4, "mean of transportation": 5, "building": 6, "natural place": 7,
"village": 8, "animal": 9, "plant": 10, "album": 11, "film": 12, "written work": 13}
else:
raise ValueError("Unsupported task!")
test_data = json.load(open(dpath))
train_data = json.load(open(train_dpath))
print(train_data['0']['example'])
print(train_data['0']['gold'])
n_test = len(test_data)
n_train = len(train_data)
m = len(test_data['0']['preds_boost'])
test_votes = np.zeros((n_test, m))
test_gold = np.zeros(n_test)
for i in range(n_test):
test_votes[i] = np.array([label_name_to_int[ans] if ans in label_name_to_int else -1 for ans in test_data[str(i)]['preds_boost']])
test_gold[i] = label_name_to_int[str(test_data[str(i)]['gold'])]
test_votes = test_votes.astype(int)
test_gold = test_gold.astype(int)
train_votes = np.zeros((n_train, m))
train_gold = np.zeros(n_train)
for i in range(n_train):
train_votes[i] = np.array([label_name_to_int[ans] if ans in label_name_to_int else -1 for ans in train_data[str(i)]['preds_boost']])
train_gold[i] = label_name_to_int[str(train_data[str(i)]['gold'])]
train_votes = train_votes.astype(int)
train_gold = train_gold.astype(int)
return train_votes, train_gold, test_votes, test_gold
def get_top_deps_from_inverse_sig(J, k):
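    """
    Returns up to k candidate dependency edges (i, j), taken from the
    largest-magnitude entries of J and skipping mirrored duplicates (j, i).
    """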
m = J.shape[0]
deps = []
sorted_idxs = np.argsort(np.abs(J), axis=None)
idxs = sorted_idxs[-k:]
for idx in idxs:
i = int(np.floor(idx / m))
j = idx % m
if (j, i) in deps:
continue
deps.append((i, j))
return deps
def learn_structure(L):
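    """
    Robust-PCA-style structure learning: decomposes the (inverse) covariance of the
    votes L into a sparse component S, capturing dependencies between voters, plus
    a low-rank component explained by the latent label, and returns the sparse part
    with a zeroed diagonal.
    """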
m = L.shape[1]
n = float(np.shape(L)[0])
sigma_O = (np.dot(L.T,L))/(n-1) - np.outer(np.mean(L,axis=0), np.mean(L,axis=0))
    # symmetrize the empirical covariance for numerical stability
    O = 1/2*(sigma_O+sigma_O.T)
O_root = np.real(sp.linalg.sqrtm(O))
# low-rank matrix
L_cvx = cp.Variable([m,m], PSD=True)
# sparse matrix
S = cp.Variable([m,m], PSD=True)
# S-L matrix
R = cp.Variable([m,m], PSD=True)
#reg params
lam = 1/np.sqrt(m)
gamma = 1e-8
objective = cp.Minimize(0.5*(cp.norm(R @ O_root, 'fro')**2) - cp.trace(R) + lam*(gamma*cp.pnorm(S,1) + cp.norm(L_cvx, "nuc")))
constraints = [R == S - L_cvx, L_cvx>>0]
prob = cp.Problem(objective, constraints)
result = prob.solve(verbose=False, solver=cp.SCS)
opt_error = prob.value
#extract dependencies
J_hat = S.value
if J_hat is None:
raise ValueError("CVXPY failed to solve the structured learning problem, use result without dependencies.")
for i in range(m):
J_hat[i, i] = 0
return J_hat
def learn_structure_multiclass(L, k):
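    """
    One-vs-all structure learning for k classes: binarizes the votes for each class
    and returns a (k, m, m) stack of the recovered sparse components.
    """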
m = L.shape[1]
J_hats = np.zeros((k, m, m))
for c in range(k):
all_votes_c = np.where(L == c, 1, 0)
J_hats[c] = learn_structure(all_votes_c)
return J_hats
def get_min_off_diagonal(J_hat):
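    """
    Returns the smallest absolute off-diagonal entry of J_hat, used as a sanity
    check that structure learning recovered a sparse dependency graph.
    """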
J_hat_copy = J_hat.copy()
for i in range(len(J_hat_copy)):
J_hat_copy[i, i] = np.inf
return np.abs(J_hat_copy).min()
def main():
args = get_args()
task_name = args.task_name
data_dir = args.data_dir
symmetric = args.symmetric
train_votes, train_gold, test_votes, test_gold = get_data(task_name, data_dir, args.model_prefix, args.override_date)
classes = np.sort(np.unique(test_gold))
vote_classes = np.sort(np.unique(test_votes))
n_train, m = train_votes.shape
n_test = len(test_votes)
k = len(classes)
abstains = len(vote_classes) == len(classes) + 1
print(f"Abstains: {abstains}")
m = test_votes.shape[1]
    all_votes = np.concatenate((train_votes, test_votes))
label_model = LabelModel(k=k, seed=123)
# scale to 0, 1, 2 (0 is abstain)
test_votes_scaled = (test_votes + np.ones((n_test, m))).astype(int)
test_gold_scaled = (test_gold + np.ones(n_test)).astype(int)
train_votes_scaled = (train_votes + np.ones((n_train, m))).astype(int)
train_gold_scaled = (train_gold + np.ones(n_train)).astype(int)
all_votes_scaled = np.concatenate((train_votes_scaled, test_votes_scaled))
label_model.train_model(all_votes_scaled, Y_dev=train_gold_scaled, abstains=abstains, symmetric=False, n_epochs=10000, log_train_every=50, lr=0.00001)
print('Trained Label Model Metrics (No deps):')
scores = label_model.score((test_votes_scaled, test_gold_scaled), metric=['accuracy','precision', 'recall', 'f1'])
print(scores)
all_votes_no_abstains = np.where(all_votes == -1, 0, all_votes)
if len(classes) == 2:
J_hat = learn_structure(all_votes_no_abstains)
else:
J_hats = learn_structure_multiclass(all_votes_no_abstains, len(classes))
J_hat = J_hats.mean(axis=0)
# if values in J are all too large, then everything is connected / structure learning isn't learning the right thing. Don't model deps then
min_entry = get_min_off_diagonal(J_hat)
if min_entry < 1:
deps = get_top_deps_from_inverse_sig(J_hat, 1)
print("Recovered dependencies: ", deps)
label_model.train_model(all_votes_scaled, Y_dev=train_gold_scaled, abstains=abstains, symmetric=symmetric, n_epochs=80000, log_train_every=50, lr=0.000001, deps=deps)
print('Trained Label Model Metrics (with deps):')
scores = label_model.score((test_votes_scaled, test_gold_scaled), metric=['accuracy', 'precision', 'recall', 'f1'])
print(scores)
try:
lm_probs = label_model.predict_proba(test_votes_scaled)
        agg = Aggregator(test_votes, test_gold, test_votes, test_gold, abstains, classes=[0, 1])
print("H(Y | WS output):")
print(agg.conditional_entropy_singleton(lm_probs, test_gold))
    except Exception:
        print("Failed to produce conditional entropy value: H(Y | WS output).")
if __name__ == "__main__":
main()
| ama_prompting-main | boosting/run_ws.py |
import numpy as np
import itertools
def get_probabilties(num_lfs, num_examples, predictions, label_name_to_int):
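    """
    Computes per-prompt empirical accuracies and the conditional table
    accs[p, i, j] = Pr(prompt_p = j | y = i) (label -1 indexed at 0) from a
    predictions dict with 'chosen_answers_lst' and 'gold' fields; answers missing
    from label_name_to_int are mapped to 0.
    """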
lf_array = np.zeros((num_lfs, num_examples))
golds = []
# Collect golds and preds
for i, (k, item) in enumerate(predictions.items()):
preds = item['chosen_answers_lst']
preds_mapped = []
for p in preds:
if p in label_name_to_int:
preds_mapped.append(label_name_to_int[p])
else:
preds_mapped.append(0)
preds = preds_mapped.copy()
for lf_num, p in zip(range(num_lfs), preds):
lf_array[lf_num][i] = p
gold = label_name_to_int[item['gold']]
golds.append(gold)
golds = np.array(golds)
neg_indices, pos_indices = [np.where(golds == -1)[0], np.where(golds == 1)[0]]
indices = {
-1: neg_indices,
1: pos_indices
}
    # Accuracies
lf_accuracies = []
for i in range(num_lfs):
lf_accuracies.append(np.sum(golds == np.array(lf_array[i]))/num_examples)
print(f"LF Accs: {lf_accuracies}")
# [i, j, k] = Pr(prompt_i = j| y = k)
classes = label_name_to_int.values()
accs = np.zeros((num_lfs, len(classes), len(classes)))
for p in range(num_lfs):
for i in classes:
for j in classes:
j_idx = j
if j == -1:
j_idx = 0
i_idx = i
if i == -1:
i_idx = 0
accs[p, i_idx, j_idx] = len(np.where(lf_array[p, indices[i]] == j)[0]) / len(indices[i])
# Compute probabilities
pos_probs = []
for i in range(num_lfs):
sub_preds = lf_array[i][pos_indices]
sub_golds = golds[pos_indices]
pos_probs.append(np.sum(sub_golds == np.array(sub_preds))/len(pos_indices))
print(f"Pos Probs: {pos_probs}")
neg_probs = []
for i in range(num_lfs):
sub_preds = lf_array[i][neg_indices]
sub_golds = golds[neg_indices]
neg_probs.append(np.sum(sub_golds == np.array(sub_preds))/len(neg_indices))
print(f"Neg Probs: {neg_probs}\n\n")
return lf_accuracies, accs, pos_probs, neg_probs, golds, indices
""" Independence Assumption: take the product of probabilities as p(L1, L2, ..., LK | y) """
# Pr(y = 1 | lf votes)
def get_cond_probs(votes, y, indices_train, golds_train, accs_train, num_lfs_test):
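    """
    Computes the joint Pr(votes, y) = Pr(y) * prod_i Pr(vote_i | y) under the
    conditional independence assumption, using accuracies estimated on train.
    """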
prop_pos = len(indices_train[1])/len(golds_train)
pr_y = prop_pos if y == 1 else 1 - prop_pos
    prod = pr_y
    if y == -1:
        y = 0  # the accuracy table indexes the negative class at 0
    for i in range(num_lfs_test):
        prod *= accs_train[i, y, votes[i]]
return prod
# Pr(y = 1 | lf votes)
def get_probs(votes, indices_train, golds_train, acc_train, num_lfs_test):
votes = [max(v, 0) for v in votes]
numerator = get_cond_probs(votes, 1, indices_train, golds_train, acc_train, num_lfs_test)
denominator = numerator + get_cond_probs(votes, -1, indices_train, golds_train, acc_train, num_lfs_test)
return numerator / denominator
def get_nb_accuracy(num_examples_test, num_lfs_test, predictions_test, label_name_to_int, golds_test, indices_train, golds_train, accs_train):
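    """
    Scores naive-Bayes aggregation on the test predictions: rounds Pr(y = 1 | votes)
    to a hard label per example and returns (accuracy, predicted labels).
    """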
output = np.zeros(num_examples_test)
errors = 0
for i, (k, item) in enumerate(predictions_test.items()):
votes = item['chosen_answers_lst']
votes_mapped = []
for v in votes:
if v in label_name_to_int:
votes_mapped.append(label_name_to_int[v])
else:
votes_mapped.append(0)
votes = votes_mapped.copy()
probs = np.round(get_probs(votes, indices_train, golds_train, accs_train, num_lfs_test))
output[i] = probs
# Mean squared error
g = golds_test[i]
if golds_test[i] == -1:
g = 0
error = np.abs(output[i] - g)**2
errors += error
accuracy = 1 - (errors / num_examples_test)
return accuracy, output
def estimate_matrix(m, n, L):
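    """
    Returns the empirical second-moment matrix E[l l^T], the covariance matrix,
    and the mean vote vector computed over the n rows of L.
    """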
E_prod = np.zeros((m, m))
l_avg = np.zeros(m)
for i in range(n):
l = L[i, :]
l_avg += l
E_prod += np.outer(l, l)
l_avg = l_avg/n
E_prod = E_prod/n
cov = E_prod - np.outer(l_avg, l_avg)
return (E_prod, cov, l_avg)
def get_vote_vectors(num_samples, num_lfs, predictions, label_name_to_int):
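    """
    Builds per-example vote vectors from the predictions dict: with the gold label
    appended (vectors), without it (vectors_no_y), and the gold labels on their own.
    """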
vectors = np.zeros((num_samples, num_lfs+1), float)
vectors_no_y = np.zeros((num_samples, num_lfs), float)
labels_vector = np.zeros((num_samples, 1), float)
for i, p in enumerate(predictions.values()):
votes = p['chosen_answers_lst']
votes_mapped = []
for v in votes:
if v in label_name_to_int:
votes_mapped.append(label_name_to_int[v])
else:
votes_mapped.append(0)
votes = votes_mapped.copy()
# votes = [max(v, 0) for v in votes]
gold = p['gold']
gold = label_name_to_int[gold]
vectors_no_y[i] = np.array(votes)
vectors[i] = np.array(votes + [gold]) #- lf_accuracies_train
labels_vector[i] = np.array([gold])
print(f"Shape: {vectors.shape}")
print(f"Sample: {vectors[0]}")
return vectors, vectors_no_y, labels_vector
def get_feature_vector(vote_vectors, include_pairwise=False, include_singletons=True):
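    """
    Expands vote vectors into features: optionally the raw votes (singletons) and/or
    all pairwise products vote_i * vote_j, one row per example.
    """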
feature_vectors = []
for votes in vote_vectors:
if include_singletons:
feature_vector = list(votes[:])
else:
feature_vector = []
if include_pairwise:
for subset in itertools.combinations(votes[:], 2):
feature_vector.append(subset[0] * subset[1])
feature_vectors.append(feature_vector)
X = np.matrix(feature_vectors)
return X | ama_prompting-main | boosting/utils.py |
import numpy as np
import itertools
import scipy.stats
import math
import networkx as nx
from itertools import chain
from methods import Aggregator
from binary_deps import structure_learning
from binary_deps import DependentPGM
from sklearn.metrics import log_loss, accuracy_score
class Ising:
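    """
    Exponential-family (Ising) model over m voter variables plus one label y,
    parametrized by theta coefficients on the given potentials (vertex index m
    denotes y). Supports exact marginals over the 2^(m+1) support and sampling.
    """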
    def __init__(self, m, potentials, thetas=None, vals=[-1, 1]) -> None:
self.m = m
self.v = m + 1 # total number of vertices
self.potentials = potentials
self.vals = vals
#TODO support values in 0, 1
if thetas is not None:
assert len(thetas) >= len(potentials), f"Need to specify at least {len(potentials)} theta parameters."
self.thetas = thetas
else:
self.thetas = np.random.rand(len(potentials))
# 2^v size support over y and all prompts
self.support = np.array(list(map(list, itertools.product(vals, repeat=self.v))))
# 2^m size support over all prompts
self.support_no_y = np.array(list(map(list, itertools.product(vals, repeat=self.m))))
self.n_vals = len(self.support)
self._make_pdf()
self._make_cdf()
self._get_means()
self._get_balance()
self._get_accs()
# set graph true graph structure
self._get_edges_nodes()
self.c_tree = self._set_clique_tree(self.edges)
self.c_data = self._set_clique_data(self.c_tree)
def _get_edges_nodes(self):
self.nodes = np.arange(self.m)
self.edges = [p for p in self.potentials if len(p) == 2 and self.m not in p]
if self.edges != []:
self.higher_order = True
else:
self.higher_order = False
def _exponential_family(self, labels):
x = 0.0
for i in range(len(self.potentials)):
x += self.thetas[i] * labels[self.potentials[i]].prod()
return np.exp(x)
def _make_pdf(self):
p = np.zeros(len(self.support))
for i, labels in enumerate(self.support):
p[i] = self._exponential_family(labels)
self.z = sum(p)
self.pdf = p/self.z
def _make_cdf(self):
self.cdf = np.cumsum(self.pdf)
def joint_p(self, C, values):
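        """
        Computes Pr(prod of labels over C[i] = values[i] for all i) by summing
        the pdf over the full support.
        """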
p = 0.0
for k, labels in enumerate(self.support):
flag = True
for i in range(len(C)):
prod = labels[C[i]].prod()
if prod != values[i]:
flag = False
if flag == True:
p += self.pdf[k]
return p
def expectation(self, C):
return self.vals[0] * self.joint_p(C, self.vals[0] * np.ones(len(C))) + self.vals[1] * self.joint_p(C, self.vals[1] * np.ones(len(C)))
def _get_means(self):
self.means = np.zeros(self.m)
for k in range(self.m):
self.means[k] = self.expectation([[k]])
def _get_balance(self):
self.balance = self.joint_p([[self.m]], [1])
def _get_covariance_y(self):
self.cov = np.zeros((self.m + 1, self.m + 1))
def aug_covariance(self, rvs):
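        """
        Returns the covariance matrix of the +/-1-valued products indexed by rvs,
        computed from pairwise agreement probabilities.
        """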
l = len(rvs)
M = np.zeros((l, l))
for i in range(l):
for j in range(i + 1, l):
M[i, j] = self.joint_p([rvs[i], rvs[j]], [1, 1]) + self.joint_p([rvs[i], rvs[j]], [-1, -1])
for i in range(l):
for j in range(i + 1):
if i != j:
M[i, j] = M[j, i]
else:
M[i, j] = 1
M = 2*M - 1
mu = np.zeros(l)
for i in range(l):
mu[i] = self.joint_p([rvs[i]], [1])
mu = 2*mu - 1
return M - np.outer(mu, mu)
def aug_covariance_y(self, rvs, y):
p_y = self.balance if y == 1 else 1 - self.balance
l = len(rvs)
M = np.zeros((l, l))
for i in range(l):
for j in range(i + 1, l):
M[i, j] = (self.joint_p([rvs[i], rvs[j], [self.m]], [1, 1, y]) + self.joint_p([rvs[i], rvs[j], [self.m]], [-1, -1, y])) / p_y
for i in range(l):
for j in range(i + 1):
if i != j:
M[i, j] = M[j, i]
else:
M[i, j] = 1
M = 2*M - 1
mu = np.zeros(l)
for i in range(l):
mu[i] = self.joint_p([rvs[i], [self.m]], [1, y]) / p_y
mu = 2*mu - 1
return M - np.outer(mu, mu)
def _get_accs(self):
"""
self.accs[k, i, j] = Pr(lf_k = j | y = i) (i, j scaled to -1, 1 if needed)
"""
self.accs = np.zeros((self.m, 2, 2))
for k in range(self.m):
self.accs[k, 1, 1] = self.joint_p([[k], [self.m]], [self.vals[1], self.vals[1]]) / self.balance
self.accs[k, 0, 0] = self.joint_p([[k], [self.m]], [self.vals[0], self.vals[0]]) / (1 - self.balance)
self.accs[k, 1, 0] = 1 - self.accs[k, 1, 1]
self.accs[k, 0, 1] = 1 - self.accs[k, 0, 0]
def sample(self):
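        """
        Draws one configuration (votes and label) from the model by inverse-CDF
        sampling over the enumerated support.
        """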
r = np.random.random_sample()
smaller = np.where(self.cdf < r)[0]
if len(smaller) == 0:
i = 0
else:
i = smaller.max() + 1
return self.support[i]
def make_data(self, n, has_label = True):
L = np.zeros((n, self.m))
gold = np.zeros(n)
for i in range(n):
l = self.sample()
L[i, :] = l[:self.m]
if has_label:
gold[i] = l[self.m]
return L.astype(int), gold.astype(int)
def _set_clique_tree(self, edges):
G1 = nx.Graph()
G1.add_nodes_from(self.nodes)
G1.add_edges_from(edges)
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise NotImplementedError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes():
for j in G2.nodes():
S = G2.nodes[i]["members"].intersection(G2.nodes[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
        # use a maximum spanning tree of G2 so that separator sets are maximal
        return nx.maximum_spanning_tree(G2)
def _set_clique_data(self, c_tree):
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
c_data = dict()
for i in range(self.m):
c_data[i] = {
"vertices": [i],
"max_cliques": set( # which max clique i belongs to
[
j
for j in c_tree.nodes()
if i in c_tree.nodes[j]["members"]
]
),
}
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if self.higher_order:
counter = 0
for item in chain(c_tree.nodes(), c_tree.edges()):
if isinstance(item, int):
C = c_tree.nodes[item]
C_type = "node"
elif isinstance(item, tuple):
C = c_tree[item[0]][item[1]]
C_type = "edge"
else:
raise ValueError(item)
members = list(C["members"])
nc = len(members)
# Else add one column for each possible value
if nc != 1:
# Add to self.c_data as well
#idx = counter + m
c_data[tuple(members)] = {
"vertices": members,
"max_cliques": set([item]) if C_type == "node" else set(item),
}
counter += 1
return c_data
def get_cond_probs(self, votes, y, edgeset = None):
"""
Computes the probability Pr(votes | y).
"""
pr_y = self.balance if y == 1 else 1 - self.balance
prod = pr_y
votes_scaled = 2*votes - 1
y_scaled = 2*y - 1
if edgeset is not None:
c_tree = self._set_clique_tree(edgeset)
c_data = self._set_clique_data(c_tree)
else:
c_tree = self.c_tree
c_data = self.c_data
for i in c_tree.nodes():
node = c_tree.nodes[i]
members = list(node['members'])
if len(members) == 1:
v = members[0]
prod *= self.accs[v, y, votes[v]]
else:
# prod *= self.get_clique_probs(members, votes[members], y)
member_votes = np.append(votes_scaled[members], y_scaled)
members = [[m] for m in members] + [[self.m]]
clique_probs = self.joint_p(members, member_votes)/self.joint_p([[self.m]], [y_scaled])
#print("clique probs")
#print(members, member_votes)
#print(self.joint_p(members, member_votes))
#print(clique_probs)
prod *= clique_probs
for i in c_tree.edges():
edge = c_tree.edges[i]
members = list(edge['members'])
if len(members) == 1:
v = members[0]
deg = len(c_data[v]['max_cliques'])
prod /= (self.accs[v, y, votes[v]])**(deg-1)
else:
deg = len(c_data[tuple(members)]['max_cliques'])
# prod /= (self.get_clique_probs(members, votes[members], y))**(deg-1)
                member_votes = np.append(votes_scaled[members], y_scaled)
members = [[m] for m in members] + [[self.m]]
clique_probs = self.joint_p(members, member_votes)/self.joint_p([[self.m]], [y_scaled])
prod /= clique_probs**(deg-1)
return prod
def get_probs(self, votes, edgeset = None):
"""
Computes the probability Pr(y = 1 | votes).
"""
pos = self.get_cond_probs(votes, 1, edgeset)
neg = self.get_cond_probs(votes, 0, edgeset)
if pos == 0:
return 0
else:
return pos / (pos + neg)
def cross_entropy(self, edgeset):
ce = 0
for i in range(self.n_vals):
votes_unscaled = (0.5*(self.support[i, :self.m]+1)).astype(int)
y_unscaled = int(0.5*(self.support[i, self.m]+1))
ce += self.pdf[i] * np.log(self.get_cond_probs(votes_unscaled, y_unscaled, edgeset))
return -ce
def cross_entropy_conditional(self, edgeset):
ce = 0
for i in range(self.n_vals):
votes_unscaled = (0.5*(self.support[i, :self.m]+1)).astype(int)
y_unscaled = int(0.5*(self.support[i, self.m]+1))
prob = self.get_probs(votes_unscaled, edgeset)
if y_unscaled == 0:
prob = 1 - prob
ce += self.pdf[i] * np.log(prob)
return -ce
def cross_entropy_no_label(self, edgeset):
ce = 0
for i in range(len(self.support_no_y)):
sequence = self.support_no_y[i]
sequence_scaled = (0.5*(sequence+1)).astype(int) # scale back to 0/1
voters = [[i] for i in np.arange(self.m)]
true_prob = self.joint_p(voters, sequence)
pos = self.get_cond_probs(sequence_scaled, 1, edgeset)
neg = self.get_cond_probs(sequence_scaled, 0, edgeset)
ce += true_prob * np.log(pos + neg)
return -ce
def to01(labels):
return (0.5*(labels + 1)).astype(int)
# MV and picking the best prompt should do well when things are conditionally independent and equally same
def test0():
m = 3
thetas = [0, 0.5, 0.5, 0.5]
# all conditionally independent, some singletons are fine
potentials = [[3], [0, 3], [1, 3], [2, 3]]
pgm = Ising(m, potentials, thetas)
# make data
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, sym_acc = agg.naive_bayes(symmetric=True)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"Naive bayes (symmetric): {sym_acc}") # should be worse!
print(f"Test passed: {nb_acc == mv_acc}\n ")
def test1():
m = 5
# randomly parametrize exponential family to determine accuracies and correlations
np.random.seed(2)
thetas = np.random.rand(30)
# all conditionally independent, some singletons are fine
potentials = [[5], [0], [1], [4], [0, 5], [1, 5], [2, 5], [3, 5], [4, 5]]
pgm = Ising(m, potentials, thetas)
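    # The singleton voter potentials ([0], [1], [4]) bias those voters toward
    # one output regardless of y, so Pr(v_i = 1 | y = 1) != Pr(v_i = -1 | y = -1).
    # The symmetric naive Bayes variant assumes those two quantities match,
    # which is plausibly why it underperforms here.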
# make data
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, sym_acc = agg.naive_bayes(symmetric=True)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"Naive bayes (symmetric): {sym_acc}") # should be worse!
print(f"Test passed: {nb_acc >= max(mv_acc, pb_acc) and sym_acc < nb_acc}\n ")
def test2():
m = 3
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
np.random.seed(3)
thetas = np.random.rand(30)
# all conditionally independent
potentials = [[3], [0, 3], [1, 3], [2, 3]]
pgm = Ising(m, potentials, thetas)
n_train = 100
train_votes, train_gold = pgm.make_data(n_train)
n_test = 100
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, sym_acc = agg.naive_bayes(symmetric=True)
_, fs_acc = agg.flying_squid()
print(pgm.joint_p([[3]], [1]))
print(pgm.balance)
print(pgm.expectation([[3]]), pgm.expectation([[0]]), pgm.expectation([[0, 3]]))
print(pgm.expectation([[3]])*pgm.expectation([[0, 3]]))
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"Naive bayes (symmetric): {sym_acc}")
print(f"FlyingSquid: {fs_acc}")
print(f"Test passed: {fs_acc >= max(mv_acc, pb_acc) and nb_acc == sym_acc}\n")
def test3():
m = 3
np.random.seed(2)
thetas = [0.5, 0.1, 0.4, 0.4]
# all conditionally independent
potentials = [[3], [0, 3], [1, 3], [2, 3]]
pgm = Ising(m, potentials, thetas)
# print(pgm.balance)
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
#print(agg.fs_accs, agg.nb_accs, pgm.accs)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
_, nb_acc = agg.naive_bayes()
_, fs_acc = agg.flying_squid()
_, dp_nolabel_acc = agg.data_programming(with_label=False)
_, dp_label_acc = agg.data_programming(with_label=True)
#print(agg.dp_learn_params(with_label=False))
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"FlyingSquid: {fs_acc}")
print(f"Data Programming (no label): {dp_nolabel_acc}")
print(f"Data Programming (with label): {dp_label_acc}")
    assert 0.69 <= mv_acc <= 0.7 and 0.69 <= pb_acc <= 0.7, "MV and pick best should be 0.692 and 0.694."
    assert 0.77 <= min(nb_acc, fs_acc, dp_nolabel_acc, dp_label_acc) <= 0.79, "All methods should have accuracy 0.78."
print(f"Test passed: {min(nb_acc, fs_acc, dp_nolabel_acc, dp_label_acc) >= max(mv_acc, pb_acc)}\n")
def test4():
m = 3
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
np.random.seed(3)
thetas = np.random.rand(30)
thetas[0] = 0.1
thetas[1] = 0.2
thetas[2] = 0.01
thetas[3] = 0.1
    thetas[4] = 0.5  # strong (0, 1) pairwise dependence
potentials = [[3], [0, 3], [1, 3], [2, 3], [0, 1]]
pgm = Ising(m, potentials, thetas)
print(pgm.joint_p([[0], [1], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[1], [3]], [1, 1]) / pgm.balance**2)
print(pgm.joint_p([[0], [2], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[2], [3]], [1, 1]) / pgm.balance**2)
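    # The four prints above compare Pr(v_i, v_j | y = 1) against
    # Pr(v_i | y = 1) * Pr(v_j | y = 1): the (0, 1) pair should differ
    # (dependent, via the [0, 1] potential) while the (0, 2) pair should
    # match (conditionally independent).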
n_train = 10000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
print(pgm.expectation([[3]]), pgm.expectation([[0, 1]]), pgm.expectation([[0, 1, 3]]))
print(pgm.expectation([[3]])*pgm.expectation([[0, 1]]))
edgeset = [(0, 1)]
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
nb_probs, nb_acc = agg.naive_bayes()
fs_probs, fs_acc = agg.flying_squid()
jt_probs, jt_acc = agg.junction_tree(edgeset)
jt_sym_probs, jt_sym_acc = agg.junction_tree(edgeset, symmetric=True)
# print(agg.fs_accs, agg.nb_accs, pgm.accs)
#print(pgm.joint_p([[0], [1], [3]], [1, 1, 1]) / pgm.balance)
#print(pgm.joint_p([[0], [1], [3]], [-1, -1, -1]) / pgm.balance)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {nb_acc}")
print(f"FlyingSquid: {fs_acc}")
print(f"Junction tree (with deps): {jt_acc}")
print(f"Junction tree (with deps, symmetric): {jt_sym_acc}")
print(agg.get_probs(np.array([1, 1, 0]), agg.sym_accs, edgeset=[(0, 1)], symmetric=True, abstains_symmetric=False))
print(agg.get_probs(np.array([1, 1, 0]), agg.nb_accs, edgeset=[(0, 1)], symmetric=False, abstains_symmetric=False))
print(pgm.get_probs(np.array([1, 1, 0])))
fail = False
for i, votes in enumerate(test_votes):
if np.abs(pgm.get_probs(votes) - jt_probs[i][1]) > 0.05:
print(votes)
print(pgm.get_probs(votes), jt_probs[i][1])
fail = True
#print(pgm.get_probs(votes), nb_probs[i], test_gold[i])
#print(np.round(pgm.get_probs(votes)), np.round(fs_probs[i]), test_gold[i])
if fail:
print("Test failed.")
else:
print("Test passed.")
def test5():
m = 3
# randomly parametrize exponential family to determine accuracies and correlations
#theta = np.random.rand()
#theta_cliques = (np.random.randint(0, 2, 5)*2 - 1)*theta
#theta = np.random.rand()
#theta_cliques = [1, 1, 1, 1, 1, 1, 1]
    np.random.seed(3)  # seeds 6, 9, and 10 also work
thetas = np.random.rand(30)
thetas[0] = 0
thetas[1] = 0.1
thetas[2] = 0.6
thetas[3] = 0.1
    thetas[4] = 0.6  # strong (0, 1) pairwise dependence
potentials = [[3], [0, 3], [1, 3], [2, 3], [0, 1]]
pgm = Ising(m, potentials, thetas)
print(pgm.joint_p([[0], [1], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[1], [3]], [1, 1]) / pgm.balance**2)
print(pgm.joint_p([[0], [2], [3]], [1, 1, 1])/pgm.balance)
print(pgm.joint_p([[0], [3]], [1, 1]) * pgm.joint_p([[2], [3]], [1, 1]) / pgm.balance**2)
n_train = 10000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 100
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
edgeset = [(0, 1)]
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
_, mv_acc = agg.majority_vote()
pb_acc = agg.pick_best()
nb_probs, nb_acc = agg.naive_bayes()
fs_probs, fs_acc = agg.flying_squid()
jt_probs, jt_acc = agg.junction_tree(edgeset)
jt_sym_probs, jt_sym_acc = agg.junction_tree(edgeset, symmetric=True)
print(f"Majority vote: {mv_acc}")
print(f"Pick the best: {pb_acc}")
print(f"Naive bayes: {accuracy_score(test_gold, np.array(nb_probs).argmax(axis=1))}")
print(f"FlyingSquid: {accuracy_score(test_gold, np.array(fs_probs).argmax(axis=1))}")
print(f"Junction tree (with deps): {accuracy_score(test_gold, np.array(jt_probs).argmax(axis=1))}")
print(f"Junction tree (with deps, symmetric): {accuracy_score(test_gold, np.array(jt_sym_probs).argmax(axis=1))}")
print(f"NB log loss {log_loss(test_gold, nb_probs)}")
print(f"FS log loss {log_loss(test_gold, fs_probs)}")
print(f"JT log loss {log_loss(test_gold, jt_probs)}")
print(agg.get_probs(np.array([1, 1, 0]), agg.sym_accs, edgeset=[(0, 1)], symmetric=True, abstains_symmetric=False))
print(agg.get_probs(np.array([1, 1, 0]), agg.nb_accs, edgeset=[(0, 1)], symmetric=False, abstains_symmetric=False))
print(pgm.get_probs(np.array([1, 1, 0])))
jt_probs = np.array(jt_probs)
jt_sym_probs = np.array(jt_sym_probs)
fail = False
pgm_probs = np.zeros(len(test_votes))
for i, votes in enumerate(test_votes):
pgm_probs[i] = pgm.get_probs(votes)
avg_jt_err = np.linalg.norm(pgm_probs - jt_probs[:, 1]) / n_test
avg_jt_sym_err = np.linalg.norm(pgm_probs - jt_sym_probs[:, 1]) / n_test
if avg_jt_err > 0.05:
fail = True
if avg_jt_err < avg_jt_sym_err:
print(avg_jt_err, avg_jt_sym_err)
        fail = True
if fail:
print("Test failed.")
else:
print("Test passed.")
def test6():
m = 3
np.random.seed(5)
thetas = np.random.rand(30)
# model some edges - see if we can recover it
potentials = [[3], [0, 3], [1, 3], [2, 3], [0, 1], [0, 2]]
pgm = Ising(m, potentials, thetas)
# make big datasets
n_train = 1000
train_votes, train_gold = pgm.make_data(n_train)
n_test = 1000
test_votes, test_gold = pgm.make_data(n_test)
train_votes = to01(train_votes)
train_gold = to01(train_gold)
test_votes = to01(test_votes)
test_gold = to01(test_gold)
agg = Aggregator(train_votes, train_gold, test_votes, test_gold)
# overall accuracies Pr(lf_p = y) on train
acc_theta = np.zeros(m)
for i in range(m):
acc_theta[i] = len(np.where((train_votes[:, i] == train_gold) == True)[0])/n_train
acc_theta = 2*acc_theta - 1
all_thetas = structure_learning(m, train_votes, train_gold, acc_theta)
print(all_thetas)
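    # all_thetas is assumed to be an m x m matrix of recovered pairwise
    # parameters: a larger |theta_ij| indicates a stronger candidate
    # dependency edge (i, j). The loop below greedily grows an edgeset from
    # the largest-magnitude entries and scores each candidate with the three
    # cross-entropy objectives.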
#idx = np.argsort(all_thetas, axis=None)[-1]
#i = int(np.round(idx / m))
#j = idx % m
#print(f"Recovered edge: ({i}, {j})") # should be (0, 1)
ce = np.ones(m*m) * np.inf
ce_cond = np.ones(m*m) * np.inf
ce_nolabel = np.ones(m*m) * np.inf
true_ce = np.ones(m*m) * np.inf
true_ce_cond = np.ones(m*m) * np.inf
true_ce_nolabel = np.ones(m*m) * np.inf
neighborhood_size = len(all_thetas.flatten())
all_edgesets = []
for n in range(neighborhood_size):
print(f"edgeset size is {n}")
# try edgeset of size n
if n != 0:
idxs = np.argsort(np.abs(all_thetas), axis=None)[-n:]
edgeset = []
for idx in idxs:
i = int(np.floor(idx / m))
j = idx % m
# print(all_thetas[i, j])
# print(f"Recovered edge: ({i}, {j})") # should be (0, 1)
edgeset.append((i, j))
else:
edgeset = []
print(edgeset)
all_edgesets.append(edgeset)
try:
ce[n] = agg.cross_entropy(train_votes, train_gold, edgeset)
ce_cond[n] = agg.cross_entropy_conditional(train_votes, train_gold, edgeset)
ce_nolabel[n] = agg.cross_entropy_no_label(test_votes, edgeset)
true_ce[n] = pgm.cross_entropy(edgeset)
true_ce_cond[n] = pgm.cross_entropy_conditional(edgeset)
true_ce_nolabel[n] = pgm.cross_entropy_no_label(edgeset)
except nx.NetworkXError:
# skip if proposed graph is not triangulated
pass
print(ce)
print(ce_cond)
print(ce_nolabel)
best_ce = ce.argmin()
best_ce_cond = ce_cond.argmin()
best_ce_nolabel = ce_nolabel.argmin()
print(f"Recovered edgeset using MLE: {all_edgesets[best_ce]}")
print(f"Recovered edgeset using MLE (conditional): {all_edgesets[best_ce_cond]}")
print(f"Recovered edgeset using MLE (no labels): {all_edgesets[best_ce_nolabel]}")
print(true_ce)
print(true_ce_cond)
print(true_ce_nolabel)
def main():
#test0()
#test1()
#test2()
#test3()
test4()
# test5()
#test6()
if __name__ == "__main__":
main()
| ama_prompting-main | boosting/make_pgm.py |
""" Running and Scoring the AMA Diagnostics """
import os
import json
from collections import Counter
from tqdm import tqdm
import openai
from manifest import Manifest
openai.api_key = "" # Find this on the OpenAI Website
from datasets import load_metric
rouge = load_metric("rouge")
######################### HELPER FUNCTIONS #########################
def rougeL(preds, labels):
return rouge.compute(predictions=preds, references=labels)['rougeL'].mid.fmeasure
def text_f1(preds=[], labels=[]):
"""Compute average F1 of text spans.
Taken from Squad without prob threshold for no answer.
"""
total_f1 = 0
for pred, gold in zip(preds, labels):
pred_toks = pred.split()
gold_toks = gold.split()
common = Counter(pred_toks) & Counter(gold_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
total_f1 += int(gold_toks == pred_toks)
elif num_same == 0:
total_f1 += 0
else:
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
total_f1 += f1
f1_avg = total_f1 / len(labels)
return f1_avg
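# A worked example of the token-overlap F1 above (hypothetical strings, not
# drawn from any diagnostic dataset):
#   pred = "the red car", gold = "red car" -> 2 shared tokens
#   precision = 2/3, recall = 2/2, F1 = 2*(2/3*1)/(2/3 + 1) = 0.8
assert abs(text_f1(preds=["the red car"], labels=["red car"]) - 0.8) < 1e-9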
def get_response(manifest, prompt, max_toks=10, temperature = 0, gold_choices=[], model_name="manifest", engine="text-davinci-002", logit_bias={}):
prompt = prompt.strip()
if model_name == 'openai':
if logit_bias:
completion = openai.Completion.create(engine=engine, prompt=prompt, temperature=temperature, top_p=1, max_tokens=max_toks, logprobs=5, logit_bias=logit_bias)
else:
completion = openai.Completion.create(engine=engine, prompt=prompt, temperature=temperature, top_p=1, max_tokens=max_toks, logprobs=5)
response = completion.choices[0].text
if model_name == "manifest":
if gold_choices:
max_len = max([len(g.split()) for g in gold_choices])
else:
max_len = 0
max_token_args = ({"max_tokens": min(max_toks,8 * len(max_len),)}
if gold_choices is None
else {}
)
if gold_choices:
response = manifest.run(prompt, gold_choices=gold_choices,overwrite_cache=False,**max_token_args,)
else:
response = manifest.run(prompt, max_tokens=max_toks, overwrite_cache=False)
return response
def get_manifest_session(
client_name="huggingface",
client_connection="http://127.0.0.1:5000",
cache_connection=None,
temperature=0,
top_p=1.0,
):
if client_name == "huggingface" and temperature == 0:
params = {
"temperature": 0.001,
"do_sample": False,
"top_p": top_p,
}
elif client_name in {"openai", "ai21"}:
params = {
"temperature": temperature,
"top_p": top_p,
}
else:
raise ValueError(f"{client_name} is not a valid client name")
manifest = Manifest(
client_name=client_name,
client_connection=client_connection,
cache_name="sqlite",
cache_connection=cache_connection,
**params,
)
model_name = manifest.client.get_model_params()["model_name"]
return manifest, model_name
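# Hypothetical usage of the two helpers above (assumes a manifest server is
# reachable at the default client_connection, http://127.0.0.1:5000):
#   manifest, model_name = get_manifest_session()
#   answer = get_response(manifest, "Q: What is 2 + 2?\nA:", max_toks=5)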
######################### SCORE FUNCTIONS #########################
""" A "blah" maps to category: blah """
def selection_easy(dataset, manifest):
preds = []
for i, (ind, row) in tqdm(enumerate(dataset.items())):
prefix = row['input']
pred = get_response(manifest, prefix, max_toks=50)
pred = [p for p in pred.split("\n") if p][0].strip()
preds.append(pred)
if i == 0:
print(prefix)
print(f"PRED: {pred}")
    # the candidate label set is assumed identical across rows (taken from the last one)
    labels = [l.strip() for l in row['labels']]
    valid_preds = [p for p in preds if p in labels]
    return len(valid_preds)/len(dataset)
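# Each diagnostic JSON file is assumed (from the fields the scorers access)
# to map an example id to a record of the form:
#   {"0": {"input": "<prompt text>", "labels": [...], "output": ...}, ...}
# where 'labels' lists the valid categories and 'output' holds the gold answer(s).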
""" Does the model pick one of the given choices? """
def selection_hard(dataset, manifest):
preds = []
for i, (ind, row) in tqdm(enumerate(dataset.items())):
prefix = row['input']
pred = get_response(manifest, prefix, max_toks=50)
        pred = pred.split("\n")[0]
preds.append(pred)
if i == 0:
print(prefix)
print(f"PRED: {pred}")
valid = 0
for (ind, row), pred in zip(dataset.items(), preds):
choices = row['output']
if pred.lower().strip(".") in [c.lower().strip(".") for c in choices]:
valid += 1
return valid/len(dataset)
""" Does the model generate three choices? """
def text_generation(dataset, manifest):
preds = []
for i, (ind, row) in tqdm(enumerate(dataset.items())):
prefix = row['input']
pred = get_response(manifest, prefix, max_toks=50)
pred = pred.split("\n\n")[0]
pred = pred.split("\n")
pred = list(set([a.replace("- ", "").strip() for a in pred]))
preds.append(pred)
if i == 0:
print(prefix)
print(f"PRED: {pred}")
valid = 0
for pred in preds:
if len(pred) == 2:
valid += 1
return valid/len(dataset)
""" Does the model faithfully transform the statement to a question? """
def question_generation(dataset, manifest):
preds = []
for i, (ind, row) in tqdm(enumerate(dataset.items())):
prefix = row['input']
pred = get_response(manifest, prefix, max_toks=50)
        pred = pred.split("\n")[0]
preds.append(pred)
if i == 0:
print(prefix)
print(f"PRED: {pred}")
outputs = [row['output'] for ind, row in dataset.items()]
score = rougeL(preds=preds, labels = outputs)
return score
""" Does the model faithfully choose the sentence with the entity name? """
""" Does the model faithfully answer given a keyword for extraction? """
def extraction(dataset, manifest):
preds = []
for i, (ind, row) in tqdm(enumerate(dataset.items())):
prefix = row['input']
pred = get_response(manifest, prefix, max_toks=50)
        pred = pred.split("\n")[0]
preds.append(pred)
if i == 0:
print(prefix)
print(f"PRED: {pred}")
outputs = [row['output'] for ind, row in dataset.items()]
score = text_f1(preds=preds, labels = outputs)
return score
def main():
data_dir = "data/"
synthetics = {
'selection_easy':selection_easy,
'selection_hard':selection_hard,
'extraction':extraction,
"text_generation": text_generation,
"question_generation": question_generation
}
manifest, model_name = get_manifest_session()
synthetic_scores = {}
for synthetic, function in synthetics.items():
print(f"RUNNING {synthetic}")
with open(f"{data_dir}/{synthetic}.json") as f:
dataset = json.load(f)
score = function(dataset, manifest)
synthetic_scores[synthetic] = score
print(f"SCORE: {score}")
print(synthetic_scores)
model_name = model_name.replace("/", "_")
    os.makedirs("results", exist_ok=True)
with open(f"results/{model_name}_results.json", "w") as f:
json.dump(synthetic_scores, f)
print(f"Saved to: results/{model_name}_results.json")
if __name__ == "__main__":
main()
| ama_prompting-main | diagnostics/run_diagnostics.py |
from setuptools import setup, find_packages
setup(name='stratification', version='1.0', packages=find_packages())
| hidden-stratification-master | setup.py |
import os
import torch
from stratification.harness import GEORGEHarness
from stratification.utils.utils import set_seed, init_cuda
from stratification.utils.parse_args import get_config
def main():
config = get_config()
use_cuda = config['use_cuda'] and torch.cuda.is_available()
set_seed(config['seed'], use_cuda) # set seeds for reproducibility
init_cuda(config['deterministic'], config['allow_multigpu'])
torch.multiprocessing.set_sharing_strategy('file_system')
harness = GEORGEHarness(config, use_cuda=use_cuda)
harness.save_full_config(config)
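    # GEORGE pipeline sketch (mirrors the stages below; the exact reduction
    # and clustering algorithms come from the config, so they may vary):
    #   1. train an ERM model on superclass labels and save its activations
    #   2. dimensionality-reduce the saved activations
    #   3. cluster the reduced features per superclass to estimate subclasses
    #   4. retrain with the cluster assignments as subclass labels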
first_mode = 'erm' if (config['mode'] == 'george') else config['mode']
dataloaders = harness.get_dataloaders(config, mode=first_mode)
num_classes = dataloaders['train'].dataset.get_num_classes('superclass')
model = harness.get_nn_model(config, num_classes=num_classes, mode=first_mode)
activ_done = config['activations_dir'] != 'NONE'
rep_done = config['representation_dir'] != 'NONE'
cluster_done = config['cluster_dir'] != 'NONE'
rep_done = rep_done or cluster_done # if we already have clusters, don't need to do reduction step
activ_done = activ_done or rep_done # don't need to get activations if we already have reduced ones
    if config['classification_config']['eval_only']:
        assert activ_done
if config['cluster_dir'] != 'NONE':
dataloaders = harness.get_dataloaders(
config, mode=first_mode,
subclass_labels=os.path.join(config['cluster_dir'], 'clusters.pt')
if os.path.isdir(config['cluster_dir']) else config['cluster_dir'])
# Train a model with ERM
if activ_done and not (config['classification_config']['eval_only']
or config['classification_config']['save_act_only']):
erm_dir = config['activations_dir']
else:
if config['classification_config']['eval_only'] or config['classification_config'][
'save_act_only']:
erm_dir = config['activations_dir']
model_path = os.path.join(erm_dir,
f'{config["classification_config"]["eval_mode"]}_model.pt')
print(f'Loading model from {model_path}...')
model.load_state_dict(torch.load(model_path)['state_dict'])
erm_dir = harness.classify(config['classification_config'], model, dataloaders,
mode=first_mode)
if config['classification_config']['eval_only'] or config['classification_config'][
'save_act_only']:
exit()
if config['mode'] == 'george':
if not config['classification_config']['bit_pretrained'] and not rep_done:
model.load_state_dict(torch.load(os.path.join(erm_dir, 'best_model.pt'))['state_dict'])
set_seed(config['seed'], use_cuda)
# Dimensionality-reduce the model activations
if rep_done:
reduction_dir = config['representation_dir']
else:
reduction_model = harness.get_reduction_model(config, nn_model=model)
reduction_dir = harness.reduce(config['reduction_config'], reduction_model,
inputs_path=os.path.join(erm_dir, 'outputs.pt'))
# Cluster the per-superclass features
if cluster_done:
cluster_dir = config['cluster_dir']
else:
cluster_model = harness.get_cluster_model(config)
cluster_dir = harness.cluster(config['cluster_config'], cluster_model,
inputs_path=os.path.join(reduction_dir, 'outputs.pt'))
set_seed(config['seed'], use_cuda) # reset random state
dataloaders = harness.get_dataloaders(
config, mode='george', subclass_labels=os.path.join(cluster_dir, 'clusters.pt'))
model = harness.get_nn_model(config, num_classes=num_classes, mode='george')
# Train the final (GEORGE) model
george_dir = harness.classify(config['classification_config'], model, dataloaders,
mode='george')
if __name__ == '__main__':
main()
| hidden-stratification-master | stratification/run.py |