#!/usr/bin/env python
with open("AI-progress-metrics.html") as html_file:
html = html_file.read()
html = html.replace("https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js",
"js/require-2.1.10.min.js")
html = html.replace("https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js",
"js/require-2.0.3.min.js")
# MathJax is not a single script that's easy to relocate. Eventually we may want to follow
# http://docs.mathjax.org/en/latest/start.html#installing-your-own-copy-of-mathjax
# but for now, we don't actually use MathJax so let's remove it.
#html = html.replace("https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML",
# "js/MathJax.js")
html = html.replace('<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML"></script>', '')
assert 'script src="http' not in html, "HTMLified Notebook appears to contain unhandled 3rd party JS, please fix sanitise.py"
assert 'src="http' not in html, "HTMLified Notebook appears to contain unhandled 3rd party embed, please fix sanitise.py"
with open("AI-progress-metrics.html", "w") as html_file:
html_file.write(html)
print "Sanitised AI-progress-metrics.html, apprently successfully"
| AI-metrics-master | sanitise.py |
from math import log
# Different metrics and measurements of progress are made on very different types of scales,
# so we have some helper functions to regularise these a little, letting us tell (for instance)
# whether progress on some metric appears to be accelerating or decelerating.
# Interface:
# improvement(score1, score2): returns a consistent measure of how much better score2 is than score1
# pseudolinear(score): returns a modified version of score where we would expect vaguely linear progress
class Linear():
offset = (2,-2)
axis_label = "Score"
col_label = "Score"
def improvement(self, score1, score2):
return score2 - score1
def pseudolinear(self, score):
return score
class AtariLinear():
offset = (2,-2)
axis_label = "Score"
col_label = "Raw Score"
def improvement(self, score1, score2):
return score2 - score1
def pseudolinear(self, score):
return score
linear = Linear()
score = Linear()
atari_linear = AtariLinear()
class ELO:
offset = (2,-2)
axis_label = "ELO rating"
col_label = "ELO"
def improvement(self, score1, score2):
"""
Normalise an ELO score
An ELO increase of 400 improves your odds by 10x, so we could justify something like
return 10.0 ** ((score2 - score1)/400.)
However, it seems that at least for chess ELO progress has been roughly linear over
time, both for humans and computers (though with different coefficients). Perhaps this
tracks exponential increases in ability to search the game's state space, driven directly
by Moore's law on the computer side, and indirectly for humans by access to better training
        tools and more extensive libraries of past play.
So for now let's treat this as linear? But ELO is not a chess-specific measure, and in other
contexts we may want to do exponentiation as documented above?
"""
return score2 - score1
def pseudolinear(self, score):
return score
elo = ELO()
class ErrorRate:
"""Many labelling contests use these measures"""
offset = (2,2)
axis_label = "Error rate"
col_label = "Error"
def improvement(self, score1, score2):
        # improvement is the log of the ratio of error rates, i.e. log(score1) - log(score2)
return log(score1) - log(score2)
def pseudolinear(self, score):
# error rate 1 => 0
# error rate 0 => infinity
return -log(score)
error_rate = ErrorRate()
# some problems have performance measured in bits per X (bits per character, bits per pixel, etc),
# reflecting the amount of information necessary for a model to accurately encode something from a corpus.
# Lower is better and zero is infinitely good, so we can re-use the error rate math for now (though
# scores above 1 are possible)
bits_per_x = ErrorRate()
bits_per_x.axis_label = "Model Entropy"
bits_per_x.col_label = "Model<br>Entropy"
# perplexity is 2 to the bits_per_x
perplexity = ErrorRate()
perplexity.axis_label = "Perplexity"
perplexity.col_label = "Perplexity"
class CorrectPercent:
"100 - error rate"
offset = (3,-6)
axis_label = "Percentage correct"
col_label = "% correct"
def erate(self, score):
return (100. - score)/100.
def improvement(self, score1, score2):
return score2 - score1
def pseudolinear(self, score):
from math import log
return -log(self.erate(score))
correct_percent = CorrectPercent()
class BLEUScore:
"50 is a perfect BLEU score, meaning a system produces exact matches to professional human translations"
offset = (3,-6)
axis_label = "BLEU score"
col_label = "BLEU"
def erate(self, score):
return (50. - score)/50.
def improvement(self, score1, score2):
return score2 - score1
def pseudolinear(self, score):
from math import log
return -log(self.erate(score))
bleu_score = BLEUScore()
class ErrorPercent:
"100 * error rate"
offset = (3,-6)
axis_label = "Percentage error"
col_label = "% error"
def erate(self, score):
return score/100.
def improvement(self, score1, score2):
return score1 - score2
def pseudolinear(self, score):
from math import log
return log(self.erate(score))
error_percent = ErrorPercent()
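# A minimal usage sketch (assumes nothing beyond the classes above); running
# `python scales.py` directly exercises the improvement()/pseudolinear() interface.
if __name__ == "__main__":
    # On a linear scale, improvement is just the difference between scores.
    assert linear.improvement(50, 70) == 20
    # On an error-rate scale, halving the error counts the same wherever it happens
    # (improvement is a log-ratio), and pseudolinear grows as the error shrinks.
    assert abs(error_rate.improvement(0.10, 0.05) - error_rate.improvement(0.02, 0.01)) < 1e-9
    assert error_rate.pseudolinear(0.01) > error_rate.pseudolinear(0.10)
    # bits-per-X reuses the same math; perplexity is 2 ** bits_per_x, so going from
    # 2.0 to 1.0 bits per character is the same as perplexity dropping from 4.0 to 2.0.
    assert bits_per_x.improvement(2.0, 1.0) > 0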
| AI-metrics-master | scales.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
from math import log
import datetime
import json
import re
import sys
import traceback
from lxml.cssselect import CSSSelector
from matplotlib import pyplot as plt
from matplotlib import markers
#from IPython.display import HTML
import lxml.html
import matplotlib as mpl
import numpy
import requests
from scales import linear
try:
import seaborn as sns
except ImportError:
print("Seaborn style not installed")
date = datetime.date
problems = {}
metrics = {}
measurements = set() # we don't try to guarantee unique names for these, so use a set
all_attributes = set()
class Problem:
def __init__(self, name, attributes=[], solved=False, url=None):
self.name = name
self.attributes = attributes
for a in attributes:
global all_attributes
all_attributes.add(a)
self.subproblems = []
self.superproblems = []
self.metrics = []
self.solved = solved
self.url = url
global problems, metrics
problems[name] = self
def add_subproblem(self, other_problem):
# add this other problem as a subproblem of us
other_problem.superproblems.append(self)
self.subproblems.append(other_problem)
def metric(self, *args, **kwargs):
m = Metric(*args, **kwargs)
m.parent = self
self.metrics.append(m)
return m
def check_solved(self):
if all(m.solved for m in self.metrics + self.subproblems):
self.solved = True
for p in self.superproblems:
p.check_solved()
def __str__(self):
return "Problem({0})".format(self.name)
def print_structure(self, indent=0):
print(indent * " " + str(self))
for m in self.metrics:
print((indent + 4) * " " + str(m))
for p in self.subproblems:
p.print_structure(indent + 4)
def tables(self):
return render_tables(sorted(self.metrics, key=lambda m:m.name))
def graphs(self):
for m in sorted(self.metrics, key=lambda m:m.name):
m.graph()
mpl.rcParams["legend.fontsize"] = u"x-small"
mpl.rcParams["xtick.labelsize"] = u"xx-small"
mpl.rcParams["ytick.labelsize"] = u"x-small"
class Metric:
def __init__(self, name, url=None, solved=False, notes="", scale=linear, target=None, target_source=None,
parent=None, changeable=False, axis_label=None, target_label=None):
self.name = name
self.measures = []
self.solved = solved
self.url = url
self.notes = notes
self.scale = scale
self.target = target
self.target_source = target_source # Source for human-level performance number
self.changeable = changeable # True if a metric changes over time
self.graphed = False
global metrics
metrics[name] = self
self.parent = parent
self.target_label = target_label
self.axis_label = ( axis_label if axis_label
else self.scale.axis_label if hasattr(self.scale, "axis_label")
else self.name)
# primarily used by the table() method
self.data_url = self.find_edit_url(3) # 3 is stack depth for a problem.metric() call
self.data_path = None
def __str__(self):
solved = "SOLVED" if self.solved else "?" if not self.target else "not solved"
return "{0:<60}{1}".format("Metric(%s)" % self.name, solved)
def __repr__(self):
return 'Metric("{0}")'.format(self.name)
def measure(self, *args, **kwargs):
try:
m = Measurement(*args, **kwargs)
except AssertionError:
print("WARNING, failed to create measurement", args, kwargs)
traceback.print_exc()
return None
m.metric = self
if self.target:
if self.target_source == m.url and self.target == m.value:
print("Skipping apparent human performance (target_source) paper", m.url)
return None
if self.scale.improvement(self.target, m.value) >= 0:
self.solved = True
self.parent.check_solved()
self.measures.append(m)
self.data_url = self.find_edit_url()
return m
def find_edit_url(self, depth=2):
"Some magic hackery to find what file and line number a Metric was defined on and produce an edit link"
try:
# Deliberately trigger an exception so that we can inspect the stack
import nosuchthing
except:
# find where the most recent .measure call happened. The stack looks like this:
# 0. Metric.find_edit_url;
# 1. Metric.measure;
# 2. somemetric.measure() in a data/*.py file
# (So depth defaults to 2)
tb_frame = sys._getframe(depth)
line = tb_frame.f_lineno
filename = tb_frame and tb_frame.f_code and tb_frame.f_code.co_filename
if filename:
self.data_path = filename
return "https://github.com/AI-metrics/AI-metrics/edit/master/{0}#L{1}".format(filename, line)
else:
return "https://github.com/AI-metrics/AI-metrics"
def table(self):
if len(self.measures) < 2:
return u""
# TODO: split out CSS
table_html = [u'<table style="width: 100%">']
table_html.append(u"<caption>{0}</caption>".format(self.name))
col = 'style="background-color: #f7f7f7"'
table_html.append(u"<tr {1}><th>Date</th><th>Algorithm</th><th>{0}</th><th>Paper / Source</th></tr>".format(self.scale.col_label, col))
widest_alg = max(len(m.name) for m in self.measures)
alg_bound = u'style="width: 25%"' if widest_alg >= 45 else ""
for n, m in enumerate(self.measures):
bgcol = u'style="background-color: #f7f7f7"' if n % 2 == 1 else ''
table_html.append(u"<tr {0}>".format(bgcol))
table_html.append(u'<td align="center" style="width: 11%">{0}</td>'.format(m.date))
table_html.append(u'<td align="center" {1}>{0}</td>'.format(m.name, alg_bound))
table_html.append(u'<td align="center">{0} {1}</td>'.format(m.value, m.represent_uncertainty()))
source = u' (<a href="{0}">source code</a>)'.format(m.replicated_url) if m.replicated_url else ""
alglink = u' (algorithm from <a href="{0}">{1}</a>)'.format(m.algorithm_src_url, m.src_name) if m.src_name else ''
pname = m.papername if m.papername else m.url
table_html.append(u'<td align="center"><a href=\"{0}\">{1}</a>{2}{3}</td>'.format(m.url, pname, source, alglink))
table_html.append(u"</tr>")
table_html.append(u"</table>")
github_link = [u'''
<div style="text-align: right; font-style: italic" class="edit-links">
<a target="_blank" href="{0}">Edit/add data on GitHub</a>
<a target="_blank" href="/edit/{1}" style="display: none" class="local-edit">Edit locally</a>
</div>'''.format(self.data_url, self.data_path)]
html = u"".join(table_html + github_link)
return html
def graph(self, size=(7,5), scale=1.0, keep=False, reuse=None, title=None, llabel=None, fcol=None, pcol=None):
"Spaghetti code graphing function."
if len(self.measures) < 2:
return
if reuse:
subplot = reuse
else:
fig = plt.figure(dpi=300)
fig.set_size_inches((7*scale, 5*scale))
#fig.add_subplot(111).set_ylabel(self.name)
subplot = fig.add_subplot(111)
subplot.set_ylabel(self.axis_label)
subplot.set_title(title if title else self.name)
#fig.add_subplot(111).set_ylabel(self.name)
# dashed line for "solved" / strong human performance
if self.target:
target_label = ( self.target_label if self.target_label
else "Human performance" if self.parent and "agi" in self.parent.attributes
else "Target")
start = min([self.measures[0].date] + [m.min_date for m in self.measures if m.min_date])
end = max([self.measures[-1].date] + [m.max_date for m in self.measures if m.max_date])
plt.plot_date([start, end], 2 * [self.target], "r--", label=target_label)
self.measures.sort(key=lambda m: (m.date, m.metric.scale.pseudolinear(m.value)))
# scatter plot of results in the literature
available_markers = markers.MarkerStyle().markers
for n, m in enumerate(self.measures):
kwargs = {"c": pcol if pcol else "r"}
if self.target and self.scale.improvement(self.target, m.value) >= 0:
kwargs["c"] = "b"
if m.not_directly_comparable or self.changeable:
kwargs["c"] = "#000000"
if "*" in available_markers:
kwargs["marker"] = "*"
if m.withdrawn:
if "X" in available_markers:
kwargs["marker"] = "X"
kwargs["c"] = "#aaaaaa"
plt.plot_date([m.date], [m.value], **kwargs)
plt.annotate('%s' % m.label, xy=(m.date, m.value), xytext=m.offset or m.metric.scale.offset, fontsize=scale * 6, textcoords='offset points')
# cases where either results or dates of publication are uncertain
kwargs = {"c": "#80cf80", "linewidth": scale*1.0, "capsize": scale*1.5, "capthick": scale*0.5, "dash_capstyle": 'projecting'}
if m.min_date or m.max_date:
before = (m.date - m.min_date) if m.min_date else datetime.timedelta(0)
after = (m.max_date - m.date) if m.max_date else datetime.timedelta(0)
kwargs["xerr"] = numpy.array([[before], [after]])
if self.measures[n].value != self.measures[n].minval:
kwargs["yerr"] = numpy.array([[m.value - self.measures[n].minval], [self.measures[n].maxval - m.value]])
if "xerr" in kwargs or "yerr" in kwargs:
subplot.errorbar(m.date, m.value, **kwargs)
# line graph of the frontier of best results
if not self.changeable:
best = self.measures[0].value
frontier_x, frontier_y = [], []
for m in self.measures:
if self.scale.improvement(best, m.value) >= 0 and not m.withdrawn and not m.not_directly_comparable:
frontier_x.append(m.date)
frontier_y.append(m.value)
xy = (m.date, m.value)
best = m.value
kwargs = {"label": llabel} if llabel else {}
if fcol:
kwargs["c"] = fcol
plt.plot_date(frontier_x, frontier_y, "g-", **kwargs)
plt.legend()
self.graphed = True
if keep:
return subplot
else:
plt.show()
def render_tables(metrics):
"Jupyter Notebook only lets you call HTML() once per cell; this function emulates doing it several times"
table_html = u""
for m in metrics:
html = m.table()
if html is not None:
table_html += html
#HTML(table_html)
return table_html
def canonicalise(url):
if not url:
return ""
if url.startswith("http://arxiv.org"):
url = url.replace("http", "https")
if url.startswith("https://arxiv.org/pdf/"):
url = url.replace("pdf", "abs", 1)
url = url.replace(".pdf", "", 1)
return url
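# For example (following the replacements above):
#   canonicalise("http://arxiv.org/pdf/1412.6071.pdf") -> "https://arxiv.org/abs/1412.6071"
#   canonicalise("https://arxiv.org/abs/1412.6071")    -> unchanged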
# dates of conferences help us date papers from the "Are We There Yet" dataset
conference_dates = {"ICML 2016": date(2016, 6, 19),
"NIPS 2015": date(2015, 12, 7),
"ICLR 2014": date(2014, 4, 14),
"ICML 2012": date(2012, 6, 26),
"ICML 2013": date(2013, 6, 16),
"ICML 2014": date(2014, 6, 21),
"IJCNN 2015": date(2015, 7, 12),
"CVPR 2012": date(2012, 6, 16),
"NIPS 2012": date(2012, 12, 3),
"CVPR 2015": date(2015, 6, 8),
"NIPS 2011": date(2011, 12, 17),
"NIPS 2014": date(2014, 12, 8),
"TUM-I1222 2013": date(2013, 10, 29),
"WMT 2014": date(2014, 2, 24),
"ECCV 2012": date(2012, 10, 7)}
conferences_wanted = defaultdict(lambda: 0)
offline = False
try:
r = requests.get('http://arxiv.org/abs/1501.02876')
if str(r.status_code).startswith("4"):
offline = True
print("Arxiv blocked!")
except requests.ConnectionError:
print("In Offline mode!")
offline = True
class Measurement:
def __init__(self, d, value, name, url, algorithms=[], uncertainty=0, minval=None, maxval=None, opensource=False, replicated="",
papername=None, venue=None, min_date=None, max_date=None, algorithm_src_url=None, withdrawn=False,
not_directly_comparable=False, long_label=False, notes="", offset=None):
self.date = d
self.value = value
assert isinstance(value, float) or isinstance(value, int), "Measurements on metrics need to be numbers"
self.name = name
# For papers on arxiv, always use the abstract link rather than the PDF link
self.url = canonicalise(url)
assert self.url or papername, "Measurements must have a URL or a paper name"
self.min_date = min_date
self.max_date = max_date
self.algorithm_src_url = canonicalise(algorithm_src_url)
if algorithm_src_url and not min_date:
_, prev_dates, _ = ade.get_paper_data(self.algorithm_src_url)
if prev_dates:
self.min_date = min(prev_dates.values())
self.determine_src_name()
self.uncertainty = uncertainty
self.minval = minval if minval else value - uncertainty
self.maxval = maxval if maxval else value + uncertainty
self.opensource = opensource
self.not_directly_comparable = not_directly_comparable
self.replicated_url = replicated
self.long_label = long_label
self.algorithms = algorithms
self.offset = offset
self.notes = notes
arxiv_papername, arxiv_dates, arxiv_withdrawn = ade.get_paper_data(self.url)
self.withdrawn = withdrawn or arxiv_withdrawn
if "arxiv.org" in self.url and not offline:
assert arxiv_dates, "Failed to extract arxiv dates for "+ self.url
self.papername = papername if papername else arxiv_papername
self.determine_paper_dates(d, arxiv_dates, venue)
self.set_label()
global measurements
measurements.add(self)
def determine_src_name(self):
"Figure out the name of a prior paper this result is based on, if applicable"
if self.algorithm_src_url:
self.src_name, _, _ = ade.get_paper_data(self.algorithm_src_url)
if not self.src_name:
self.src_name = self.algorithm_src_url
else:
self.src_name = None
def set_label(self):
self.label = self.name
if self.withdrawn and not "withdrawn" in self.label.lower():
self.label = "WITHDRAWN " + self.label
if len(self.label) >= 28 and not self.long_label:
self.label = self.label[:25] + "..."
year_re=re.compile(r"([0-9][0-9][0-9][0-9])")
def determine_paper_dates(self, d, arxiv_dates, venue):
"""
Try to figure out when a result was obtained, and our uncertainty on that.
:param datetime.date d: date supplied at paper entry time. We may not be able to trust this if the paper had multiple versions
and the person doing the entry didn't specify which version they got their result numbers from :/
:param dict arxiv_dates: either None or a dict like {"1": date(2017,1,13), "2": date(2017, 3, 4)...}
        :param venue: for Rodrigo Benenson's data, a publication venue like "ICML 2016" or "arXiv 2014"
"""
# begin by trusting whoever entered the data
self.date = d
# but warn if it doesn't match arXiv dates
adates = sorted(arxiv_dates.values()) if arxiv_dates else []
if arxiv_dates and d:
            if d < min(adates) or d > max(adates):
print("WARNING, date", self.date, "for", self.url, end="")
print("does not match any of the arXiv versions (%s)" % " ".join(str(s) for s in arxiv_dates.values()))
if arxiv_dates:
if len(arxiv_dates) == 1:
if not self.date:
self.date = adates[0]
else:
# multiple arxiv dates means the url wasn't versioned, and we might not have gotten the date exactly right
self.min_date = self.min_date if self.min_date else min(adates)
self.max_date = self.max_date if self.max_date else max(adates)
if not self.date:
midrange = datetime.timedelta(days=0.5 * (self.max_date - self.min_date).days)
self.date = self.min_date + midrange
elif venue and not self.date:
# if all we have is a conference / journal, we might be able to still figure something out..
if venue.upper() in conference_dates:
self.date = conference_dates[venue]
else:
conferences_wanted[venue] += 1
year = int(self.year_re.search(venue).groups(0)[0])
self.date = date(year, 7, 1)
self.min_date = date(year, 1, 1)
self.max_date = date(year, 12, 31)
if not self.date:
print(d, arxiv_dates, venue)
assert self.date, "Need a date for paper {0} {1}".format(self.url, self.papername)
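    # Worked example of the fall-backs above (hypothetical inputs, for illustration only):
    # with d=None and two arXiv versions, min_date/max_date become the v1/v2 dates and
    # self.date becomes their midpoint; with no arXiv dates but venue="ICML 2014",
    # self.date comes from conference_dates; an unlisted venue such as "arXiv 2014"
    # falls back to 1 July of that year, with min_date/max_date spanning the whole year.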
def represent_uncertainty(self):
"Printable error bars for this value"
err = u""
if self.uncertainty:
err = u"± {0}".format(self.uncertainty)
elif not self.value == self.minval == self.maxval:
err = super_digits(u"+" + str(self.maxval)) + u" " + sub_digits(u"-" + str(self.minval))
return err
super_digits = lambda s: u''.join(dict(zip(u".-+0123456789", u"⋅⁻⁺⁰¹²³⁴⁵⁶⁷⁸⁹")).get(c, c) for c in s)
sub_digits = lambda s: u''.join(dict(zip(u".-+0123456789", u".₋₊₀₁₂₃₄₅₆₇₈₉")).get(c, c) for c in s)
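# e.g. super_digits(u"+0.5") -> u"⁺⁰⋅⁵" and sub_digits(u"-0.5") -> u"₋₀.₅";
# represent_uncertainty() uses these to render asymmetric max/min bounds compactly.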
#print canonicalise('http://arxiv.org/pdf/1412.6806.pdf')
#cifar10.measure(None, 96.53, 'Fractional Max-Pooling', 'http://arxiv.org/abs/1412.6071',
# papername='Fractional Max-Pooling', venue='arXiv 2015')
#cifar10.measure(None, 95.59, 'Striving for Simplicity: The All Convolutional Net',
# 'http://arxiv.org/pdf/1412.6806.pdf', papername='Striving for Simplicity: The All Convolutional Net', venue='ICLR 2015')
# simple hooks for letting us save & restore datetime.date objects in a JSON cache
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.date):
return str(obj)
        return super(DateEncoder, self).default(obj)
def parse_date(json_dict):
if "dates" in json_dict:
for v, date_str in json_dict["dates"].items():
json_dict["dates"][v] = datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
return json_dict
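# A cache entry, as written by ArxivDataExtractor.save_cache() via DateEncoder and
# re-read through parse_date(), looks roughly like (illustrative shape only):
#   {"https://arxiv.org/abs/XXXX.XXXXX": {"name": "Some paper title",
#                                         "dates": {"1": "2017-01-13", "2": "2017-03-04"},
#                                         "withdrawn": true}}   # "withdrawn" only present if detected
# with the date strings converted back into datetime.date objects on load.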
class ArxivDataExtractor:
def __init__(self):
try:
with open(".paper_cache.json") as f:
self.cache = json.load(f, object_hook=parse_date)
except:
print("Failed to load local paper cache, trying a network copy...")
try:
req = requests.get('https://raw.githubusercontent.com/AI-metrics/master_text/master/.paper_cache.json')
self.cache = json.loads(req.content, object_hook=parse_date)
except:
traceback.print_exc()
print("(Continuing with an empty cache)")
self.cache = {}
self.arxiv_re = re.compile(r"\[[0-9v.]+\] (.*)")
def save_cache(self):
try:
with open(".paper_cache.json", "w") as f:
json.dump(self.cache, f, indent=4, sort_keys=True, cls=DateEncoder)
except:
traceback.print_exc()
print("Not able to save cache")
ends_with_version = re.compile(r".*v([0-9])+$")
def arxiv_link_version(self, url):
m = self.ends_with_version.match(url)
if m:
return m.group(1)
else:
return None
multiwhitespace = re.compile(r"\s+") # gets rid of newlines
def get_paper_data(self, url):
"Ask arxiv for a (papername, {version:date}) if we don't know it"
if not url:
return (None, None, None)
if url in self.cache:
c = self.cache[url]
return (c["name"], c.get("dates"), c.get("withdrawn", False))
try:
req = requests.get(url)
except requests.ConnectionError:
print("Failed to fetch", url)
#traceback.print_exc()
return (None, None, None)
record = {}
tree = lxml.html.fromstring(req.content)
withdrawn = self.detect_withdrawn(tree, url)
if withdrawn:
record["withdrawn"] = True
#papername = CSSSelector("title")(tree)[0].text_content()
papername = tree.findtext('.//title')
dates = None
if papername:
papername = self.multiwhitespace.sub(" ", papername)
match = self.arxiv_re.match(papername)
if match:
papername = match.groups(0)[0]
v = self.arxiv_link_version(url)
dates = self.get_submission_dates(tree, v)
record["dates"] = dates
record["name"] = papername
self.cache[url] = record
print("Caching paper name:", papername)
self.save_cache()
return papername, dates, withdrawn
def detect_withdrawn(self, tree, url):
comment = CSSSelector(".tablecell.comments")(tree)
if comment:
comment = comment[0].text_content()
if "withdrawn" in comment.lower():
print("Paper", url, "appears to be withdrawn!")
return True
return False
version_re = re.compile(r"\[v([0-9]+)\] (.*[0-9][0-9][0-9][0-9]) ")
def get_submission_dates(self, arxiv_tree, queried_version):
links = CSSSelector("div.submission-history")(arxiv_tree)[0]
versions = {}
#print "Parsing", links.text_content()
for line in links.text_content().split("\n"):
match = self.version_re.match(line)
if match:
version, d = match.group(1), match.group(2)
d = datetime.datetime.strptime(d,'%a, %d %b %Y').date()
versions[version] = d
if queried_version == version:
return {version: d}
#print version, date
return versions
ade = ArxivDataExtractor()
#ade.get_paper_data("https://arxiv.org/abs/1501.02876")
| AI-metrics-master | taxonomy.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
from data.video_games import *
import re
# Machinery for importing both copy-and-pasted and (where necessary) OCR'd tables from various Atari research papers
# Copying and pasting tables from PDFs produces very weird results sometimes, so we make no promises that there aren't
# any forms of weirdness here.
# The common case is that PDF tables paste column-wise; but some are row-wise so we have machinery for both.
# COLUMN-WISE RESULT TABLES:
wang_table_2 = """GAMES
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Defender
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Frostbite
Gopher
Gravitar
H.E.R.O.
Ice Hockey
James Bond
Kangaroo
Krull
Kung-Fu Master
Montezuma’s Revenge
Ms. Pac-Man
Name This Game
Phoenix
Pitfall!
Pong
Private Eye
Q*Bert
River Raid
Road Runner
Robotank
Seaquest
Skiing
Solaris
Space Invaders
Star Gunner
Surround
Tennis
Time Pilot
Tutankham
Up and Down
Venture
Video Pinball
Wizard Of Wor
Yars Revenge
Zaxxon
NO. ACTIONS
18
10
7
9
14
4
18
18
9
18
6
18
4
18
18
9
18
6
18
9
18
3
18
8
18
18
18
18
18
18
14
18
9
6
8
18
3
18
6
18
18
18
18
3
18
6
18
5
18
10
8
6
18
9
10
18
18
R ANDOM
227.8
5.8
222.4
210.0
719.1
12,850.0
14.2
2,360.0
363.9
123.7
23.1
0.1
1.7
2,090.9
811.0
10,780.5
2,874.5
152.1
-18.6
0.0
-91.7
0.0
65.2
257.6
173.0
1,027.0
-11.2
29.0
52.0
1,598.0
258.5
0.0
307.3
2,292.3
761.4
-229.4
-20.7
24.9
163.9
1,338.5
11.5
2.2
68.4
-17,098.1
1,236.3
148.0
664.0
-10.0
-23.8
3,568.0
11.4
533.4
0.0
16,256.9
563.5
3,092.9
32.5
HUMAN
7,127.7
1,719.5
742.0
8,503.3
47,388.7
29,028.1
753.1
37,187.5
16,926.5
2,630.4
160.7
12.1
30.5
12,017.0
7,387.8
35,829.4
18,688.9
1,971.0
-16.4
860.5
-38.7
29.6
4,334.7
2,412.5
3,351.4
30,826.4
0.9
302.8
3,035.0
2,665.5
22,736.3
4,753.3
6,951.6
8,049.0
7,242.6
6,463.7
14.6
69,571.3
13,455.0
17,118.0
7,845.0
11.9
42,054.7
-4,336.9
12,326.7
1,668.7
10,250.0
6.5
-8.3
5,229.2
167.6
11,693.2
1,187.5
17,667.9
4,756.5
54,576.9
9,173.3
DQN
1,620.0
978.0
4,280.4
4,359.0
1,364.5
279,987.0
455.0
29,900.0
8,627.5
585.6
50.4
88.0
385.5
4,657.7
6,126.0
110,763.0
23,633.0
12,149.4
-6.6
729.0
-4.9
30.8
797.4
8,777.4
473.0
20,437.8
-1.9
768.5
7,259.0
8,422.3
26,059.0
0.0
3,085.6
8,207.8
8,485.2
-286.1
19.5
146.7
13,117.3
7,377.6
39,544.0
63.9
5,860.6
-13,062.3
3,482.8
1,692.3
54,282.0
-5.6
12.2
4,870.0
68.1
9,989.9
163.0
196,760.4
2,704.0
18,098.9
5,363.0
DDQN
3,747.7
1,793.3
5,393.2
17,356.5
734.7
106,056.0
1,030.6
31,700.0
13,772.8
1,225.4
68.1
91.6
418.5
5,409.4
5,809.0
117,282.0
35,338.5
58,044.2
-5.5
1,211.8
15.5
33.3
1,683.3
14,840.8
412.0
20,130.2
-2.7
1,358.0
12,992.0
7,920.5
29,710.0
0.0
2,711.4
10,616.0
12,252.5
-29.9
20.9
129.7
15,088.5
14,884.5
44,127.0
65.1
16,452.7
-9,021.8
3,067.8
2,525.5
60,142.0
-2.9
-22.8
8,339.0
218.4
22,972.2
98.0
309,941.9
7,492.0
11,712.6
10,163.0
DUEL
4,461.4
2,354.5
4,621.0
28,188.0
2,837.7
382,572.0
1,611.9
37,150.0
12,164.0
1,472.6
65.5
99.4
345.3
7,561.4
11,215.0
143,570.0
42,214.0
60,813.3
0.1
2,258.2
46.4
0.0
4,672.8
15,718.4
588.0
20,818.2
0.5
1,312.5
14,854.0
11,451.9
34,294.0
0.0
6,283.5
11,971.1
23,092.2
0.0
21.0
103.0
19,220.3
21,162.6
69,524.0
65.3
50,254.2
-8,857.4
2,250.8
6,427.3
89,238.0
4.4
5.1
11,666.0
211.4
44,939.6
497.0
98,209.5
7,855.0
49,622.1
12,944.0
P RIOR .
4,203.8
1,838.9
7,672.1
31,527.0
2,654.3
357,324.0
1,054.6
31,530.0
23,384.2
1,305.6
47.9
95.6
373.9
4,463.2
8,600.0
141,161.0
31,286.5
71,846.4
18.5
2,093.0
39.5
33.7
4,380.1
32,487.2
548.5
23,037.7
1.3
5,148.0
16,200.0
9,728.0
39,581.0
0.0
6,518.7
12,270.5
18,992.7
-356.5
20.6
200.0
16,256.5
14,522.3
57,608.0
62.6
26,357.8
-9,996.9
4,309.0
2,865.8
63,302.0
8.9
0.0
9,197.0
204.6
16,154.1
54.0
282,007.3
4,802.0
11,357.0
10,469.0
PRIOR. DUEL.
3,941.0
2,296.8
11,477.0
375,080.0
1,192.7
395,762.0
1,503.1
35,520.0
30,276.5
3,409.0
46.7
98.9
366.0
7,687.5
13,185.0
162,224.0
41,324.5
72,878.6
-12.5
2,306.4
41.3
33.0
7,413.0
104,368.2
238.0
21,036.5
-0.4
812.0
1,792.0
10,374.4
48,375.0
0.0
3,327.3
15,572.5
70,324.3
0.0
20.9
206.0
18,760.3
20,607.6
62,151.0
27.5
931.6
-19,949.9
133.4
15,311.5
125,117.0
1.2
0.0
7,553.0
245.9
33,879.1
48.0
479,197.0
12,352.0
69,618.1
13,886.0"""
wang_table_3 = """GAMES
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Defender
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Frostbite
Gopher
Gravitar
H.E.R.O.
Ice Hockey
James Bond
Kangaroo
Krull
Kung-Fu Master
Montezuma’s Revenge
Ms. Pac-Man
Name This Game
Phoenix
Pitfall!
Pong
Private Eye
Q*Bert
River Raid
Road Runner
Robotank
Seaquest
Skiing
Solaris
Space Invaders
Star Gunner
Surround
Tennis
Time Pilot
Tutankham
Up and Down
Venture
Video Pinball
Wizard Of Wor
Yars Revenge
Zaxxon
N O . A CTIONS
18
10
7
9
14
4
18
18
9
18
6
18
4
18
18
9
18
6
18
9
18
3
18
8
18
18
18
18
18
18
14
18
9
6
8
18
3
18
6
18
18
18
18
3
18
6
18
5
18
10
8
6
18
9
10
18
18
R ANDOM
128.3
11.8
166.9
164.5
871.3
13,463.0
21.7
3,560.0
254.6
196.1
35.2
-1.5
1.6
1,925.5
644.0
9,337.0
1,965.5
208.3
-16.0
-81.8
-77.1
0.1
66.4
250.0
245.5
1,580.3
-9.7
33.5
100.0
1,151.9
304.0
25.0
197.8
1,747.8
1,134.4
-348.8
-18.0
662.8
183.0
588.3
200.0
2.4
215.5
-15,287.4
2,047.2
182.6
697.0
-9.7
-21.4
3,273.0
12.7
707.2
18.0
20,452.0
804.0
1,476.9
475.0
HUMAN
6,371.3
1,540.4
628.9
7,536.0
36,517.3
26,575.0
644.5
33,030.0
14,961.0
2,237.5
146.5
9.6
27.9
10,321.9
8,930.0
32,667.0
14,296.0
3,442.8
-14.4
740.2
5.1
25.6
4,202.8
2,311.0
3,116.0
25,839.4
0.5
368.5
2,739.0
2,109.1
20,786.8
4,182.0
15,375.0
6,796.0
6,686.2
5,998.9
15.5
64,169.1
12,085.0
14,382.2
6,878.0
8.9
40,425.8
-3,686.6
11,032.6
1,464.9
9,528.0
5.4
-6.7
5,650.0
138.3
9,896.1
1,039.0
15,641.1
4,556.0
47,135.2
8,443.0
DQN
634.0
178.4
3,489.3
3,170.5
1,458.7
292,491.0
312.7
23,750.0
9,743.2
493.4
56.5
70.3
354.5
3,973.9
5,017.0
98,128.0
15,917.5
12,550.7
-6.0
626.7
-1.6
26.9
496.1
8,190.4
298.0
14,992.9
-1.6
697.5
4,496.0
6,206.0
20,882.0
47.0
1,092.3
6,738.8
7,484.8
-113.2
18.0
207.9
9,271.5
4,748.5
35,215.0
58.7
4,216.7
-12,142.1
1,295.4
1,293.8
52,970.0
-6.0
11.1
4,786.0
45.6
8,038.5
136.0
154,414.1
1,609.0
4,577.5
4,412.0
DDQN
1,033.4
169.1
6,060.8
16,837.0
1,193.2
319,688.0
886.0
24,740.0
17,417.2
1,011.1
69.6
73.5
368.9
3,853.5
3,495.0
113,782.0
27,510.0
69,803.4
-0.3
1,216.6
3.2
28.8
1,448.1
15,253.0
200.5
14,892.5
-2.5
573.0
11,204.0
6,796.1
30,207.0
42.0
1,241.3
8,960.3
12,366.5
-186.7
19.1
-575.5
11,020.8
10,838.4
43,156.0
59.1
14,498.0
-11,490.4
810.0
2,628.7
58,365.0
1.9
-7.8
6,608.0
92.2
19,086.9
21.0
367,823.7
6,201.0
6,270.6
8,593.0
D UEL
1,486.5
172.7
3,994.8
15,840.0
2,035.4
445,360.0
1,129.3
31,320.0
14,591.3
910.6
65.7
77.3
411.6
4,881.0
3,784.0
124,566.0
33,996.0
56,322.8
-0.8
2,077.4
-4.1
0.2
2,332.4
20,051.4
297.0
15,207.9
-1.3
835.5
10,334.0
8,051.6
24,288.0
22.0
2,250.6
11,185.1
20,410.5
-46.9
18.8
292.6
14,175.8
16,569.4
58,549.0
62.0
37,361.6
-11,928.0
1,768.4
5,993.1
90,804.0
4.0
4.4
6,601.0
48.0
24,759.2
200.0
110,976.2
7,054.0
25,976.5
10,164.0
P RIOR .
1,334.7
129.1
6,548.9
22,484.5
1,745.1
330,647.0
876.6
25,520.0
31,181.3
865.9
52.0
72.3
343.0
3,489.1
4,635.0
127,512.0
23,666.5
61,277.5
16.0
1,831.0
9.8
28.9
3,510.0
34,858.8
269.5
20,889.9
-0.2
3,961.0
12,185.0
6,872.8
31,676.0
51.0
1,865.9
10,497.6
16,903.6
-427.0
18.9
670.7
9,944.0
11,807.2
52,264.0
56.2
25,463.7
-10,169.1
2,272.8
3,912.1
61,582.0
5.9
-5.3
5,963.0
56.9
12,157.4
94.0
295,972.8
5,727.0
4,687.4
9,474.0
P RIOR . D UEL .
823.7
238.4
10,950.6
364,200.0
1,021.9
423,252.0
1,004.6
30,650.0
37,412.2
2,178.6
50.4
79.2
354.6
5,570.2
8,058.0
127,853.0
34,415.0
73,371.3
-10.7
2,223.9
17.0
28.2
4,038.4
105,148.4
167.0
15,459.2
0.5
585.0
861.0
7,658.6
37,484.0
24.0
1,007.8
13,637.9
63,597.0
-243.6
18.4
1,277.6
14,063.0
16,496.8
54,630.0
24.7
1,431.2
-18,955.8
280.6
8,978.0
127,073.0
-0.2
-13.2
4,871.0
108.6
22,681.3
29.0
447,408.6
10,471.0
58,145.9
11,320.0"""
# Absorb the data from https://arxiv.org/abs/1703.03864v1
# Copy and paste from Table 3:
es_table3 = """Game
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Frostbite
Gopher
Gravitar
Ice Hockey
Kangaroo
Krull
Montezumas Revenge
Name This Game
Phoenix
Pit Fall
Pong
Private Eye
Q Bert
River Raid
Road Runner
Robotank
Seaquest
Skiing
Solaris
Space Invaders
Star Gunner
Tennis
Time Pilot
Tutankham
Up and Down
Venture
Video Pinball
Wizard of Wor
Yars Revenge
Zaxxon
DQN
570.2
133.4
3332.3
124.5
697.1
76108.0
176.3
17560.0
8672.4
NaN
41.2
25.8
303.9
3773.1
3046.0
50992.0
12835.2
-21.6
475.6
-2.3
25.8
157.4
2731.8
216.5
-3.8
2696.0
3864.0
50.0
5439.9
NaN
NaN
16.2
298.2
4589.8
4065.3
9264.0
58.5
2793.9
NaN
NaN
1449.7
34081.0
-2.3
5640.0
32.4
3311.3
54.0
20228.1
246.0
NaN
831.0
A3C FF, 1 day
182.1
283.9
3746.1
6723.0
3009.4
772392.0
946.0
11340.0
13235.9
1433.4
36.2
33.7
551.6
3306.5
4669.0
101624.0
84997.5
0.1
-82.2
13.6
0.1
180.1
8442.8
269.5
-4.7
106.0
8066.6
53.0
5614.0
28181.8
-123.0
11.4
194.4
13752.3
10001.2
31769.0
2.3
2300.2
-13700.0
1884.8
2214.7
64393.0
-10.2
5825.0
26.1
54525.4
19.0
185852.6
5278.0
7270.8
2659.0
ES FF, 1 hour
994.0
112.0
1673.9
1440.0
1562.0
1267410.0
225.0
16600.0
744.0
686.0
30.0
49.8
9.5
7783.9
3710.0
26430.0
1166.5
0.2
95.0
-49.0
31.0
370.0
582.0
805.0
-4.1
11200.0
8647.2
0.0
4503.0
4041.0
0.0
21.0
100.0
147.5
5009.0
16590.0
11.9
1390.0
-15442.5
2090.0
678.5
1470.0
-4.5
4970.0
130.3
67974.0
760.0
22834.8
3480.0
16401.7
6380.0"""
bellemare_figure_14 = """GAMES
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Defender
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Frostbite
Gopher
Gravitar
H.E.R.O.
Ice Hockey
James Bond
Kangaroo
Krull
Kung-Fu Master
Montezuma’s Revenge
Ms. Pac-Man
Name This Game
Phoenix
Pitfall!
Pong
Private Eye
Q*Bert
River Raid
Road Runner
Robotank
Seaquest
Skiing
Solaris
Space Invaders
Star Gunner
Surround
Tennis
Time Pilot
Tutankham
Up and Down
Venture
Video Pinball
Wizard Of Wor
Yars’ Revenge
Zaxxon
RANDOM
227.8
5.8
222.4
210.0
719.1
12,850.0
14.2
2,360.0
363.9
123.7
23.1
0.1
1.7
2,090.9
811.0
10,780.5
2,874.5
152.1
-18.6
0.0
-91.7
0.0
65.2
257.6
173.0
1,027.0
-11.2
29.0
52.0
1,598.0
258.5
0.0
307.3
2,292.3
761.4
-229.4
-20.7
24.9
163.9
1,338.5
11.5
2.2
68.4
-17,098.1
1,236.3
148.0
664.0
-10.0
-23.8
3,568.0
11.4
533.4
0.0
16,256.9
563.5
3,092.9
32.5
HUMAN
7,127.7
1,719.5
742.0
8,503.3
47,388.7
29,028.1
753.1
37,187.5
16,926.5
2,630.4
160.7
12.1
30.5
12,017.0
7,387.8
35,829.4
18,688.9
1,971.0
-16.4
860.5
-38.7
29.6
4,334.7
2,412.5
3,351.4
30,826.4
0.9
302.8
3,035.0
2,665.5
22,736.3
4,753.3
6,951.6
8,049.0
7,242.6
6,463.7
14.6
69,571.3
13,455.0
17,118.0
7,845.0
11.9
42,054.7
-4,336.9
12,326.7
1,668.7
10,250.0
6.5
-8.3
5,229.2
167.6
11,693.2
1,187.5
17,667.9
4,756.5
54,576.9
9,173.3
DQN
1,620.0
978.0
4,280.4
4,359.0
1,364.5
279,987.0
455.0
29,900.0
8,627.5
585.6
50.4
88.0
385.5
4,657.7
6,126.0
110,763.0
23,633.0
12,149.4
-6.6
729.0
-4.9
30.8
797.4
8,777.4
473.0
20,437.8
-1.9
768.5
7,259.0
8,422.3
26,059.0
0.0
3,085.6
8,207.8
8,485.2
-286.1
19.5
146.7
13,117.3
7,377.6
39,544.0
63.9
5,860.6
-13,062.3
3,482.8
1,692.3
54,282.0
-5.6
12.2
4,870.0
68.1
9,989.9
163.0
196,760.4
2,704.0
18,098.9
5,363.0
DDQN
3,747.7
1,793.3
5,393.2
17,356.5
734.7
106,056.0
1,030.6
31,700.0
13,772.8
1,225.4
68.1
91.6
418.5
5,409.4
5,809.0
117,282.0
35,338.5
58,044.2
-5.5
1,211.8
15.5
33.3
1,683.3
14,840.8
412.0
20,130.2
-2.7
1,358.0
12,992.0
7,920.5
29,710.0
0.0
2,711.4
10,616.0
12,252.5
-29.9
20.9
129.7
15,088.5
14,884.5
44,127.0
65.1
16,452.7
-9,021.8
3,067.8
2,525.5
60,142.0
-2.9
-22.8
8,339.0
218.4
22,972.2
98.0
309,941.9
7,492.0
11,712.6
10,163.0
DUEL
4,461.4
2,354.5
4,621.0
28,188.0
2,837.7
382,572.0
1,611.9
37,150.0
12,164.0
1,472.6
65.5
99.4
345.3
7,561.4
11,215.0
143,570.0
42,214.0
60,813.3
0.1
2,258.2
46.4
0.0
4,672.8
15,718.4
588.0
20,818.2
0.5
1,312.5
14,854.0
11,451.9
34,294.0
0.0
6,283.5
11,971.1
23,092.2
0.0
21.0
103.0
19,220.3
21,162.6
69,524.0
65.3
50,254.2
-8,857.4
2,250.8
6,427.3
89,238.0
4.4
5.1
11,666.0
211.4
44,939.6
497.0
98,209.5
7,855.0
49,622.1
12,944.0
PRIOR. DUEL.
3,941.0
2,296.8
11,477.0
375,080.0
1,192.7
395,762.0
1,503.1
35,520.0
30,276.5
3,409.0
46.7
98.9
366.0
7,687.5
13,185.0
162,224.0
41,324.5
72,878.6
-12.5
2,306.4
41.3
33.0
7,413.0
104,368.2
238.0
21,036.5
-0.4
812.0
1,792.0
10,374.4
48,375.0
0.0
3,327.3
15,572.5
70,324.3
0.0
20.9
206.0
18,760.3
20,607.6
62,151.0
27.5
931.6
-19,949.9
133.4
15,311.5
125,117.0
1.2
0.0
7,553.0
245.9
33,879.1
48.0
479,197.0
12,352.0
69,618.1
13,886.0
C51
3,166
1,735
7,203
406,211
1,516
841,075
976
28,742
14,074
1,645
81.8
97.8
748
9,646
15,600
179,877
47,092
130,955
2.5
3,454
8.9
33.9
3,965
33,641
440
38,874
-3.5
1,909
12,853
9,735
48,192
0.0
3,415
12,542
17,490
0.0
20.9
15,095
23,784
17,322
55,839
52.3
266,434
-13,901
8,342
5,747
49,095
6.8
23.1
8,329
280
15,612
1,520
949,604
9,300
35,050
10,513"""
mnih_2013_table_1 = """Random
Sarsa [3]
Contingency [4]
DQN
Human
HNeat Best [8]
HNeat Pixel [8]
DQN Best
B. Rider Breakout Enduro Pong Q*bert Seaquest S. Invaders
354
996
1743
4092
7456
3616
1332
5184
1.2
5.2
6
168
31
52
4
225
0
129
159
470
368
106
91
661
−20.4
−19
−17
20
−3
19
−16
21
157
614
960
1952
18900
1800
1325
4500
110
665
723
1705
28010
920
800
1740
179
271
268
581
3690
1720
1145
1075"""
van_hasselt_2016_table1 = """Game
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Defender
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Frostbite
Gopher
Gravitar
H.E.R.O.
Ice Hockey
James Bond
Kangaroo
Krull
Kung-Fu Master
Montezuma’s Revenge
Ms. Pacman
Name This Game
Phoenix
Pitfall
Pong
Private Eye
Q*Bert
River Raid
Road Runner
Robotank
Seaquest
Skiing
Solaris
Space Invaders
Star Gunner
Surround
Tennis
Time Pilot
Tutankham
Up and Down
Venture
Video Pinball
Wizard of Wor
Yars Revenge
Zaxxon
Random
227.80
5.80
222.40
210.00
719.10
12850.00
14.20
2360.00
363.90
123.70
23.10
0.10
1.70
2090.90
811.00
10780.50
2874.50
152.10
−18.60
0.00
−91.70
0.00
65.20
257.60
173.00
1027.00
−11.20
29.00
52.00
1598.00
258.50
0.00
307.30
2292.30
761.40
−229.40
−20.70
24.90
163.90
1338.50
11.50
2.20
68.40
−17098.10
1236.30
148.00
664.00
−10.00
−23.80
3568.00
11.40
533.40
0.00
16256.90
563.50
3092.90
32.50
Human
7127.70
1719.50
742.00
8503.30
47388.70
29028.10
753.10
37187.50
16926.50
2630.40
160.70
12.10
30.50
12017.00
7387.80
35829.40
18688.90
1971.00
−16.40
860.50
−38.70
29.60
4334.70
2412.50
3351.40
30826.40
0.90
302.80
3035.00
2665.50
22736.30
4753.30
6951.60
8049.00
7242.60
6463.70
14.60
69571.30
13455.00
17118.00
7845.00
11.90
42054.70
−4336.90
12326.70
1668.70
10250.00
6.50
−8.30
5229.20
167.60
11693.20
1187.50
17667.90
4756.50
54576.90
9173.30
Double DQN
3747.70
1793.30
5393.20
17356.50
734.70
106056.00
1030.60
31700.00
13772.80
1225.40
68.10
91.60
418.50
5409.40
5809.00
117282.00
35338.50
58044.20
−5.50
1211.80
15.50
33.30
1683.30
14840.80
412.00
20130.20
−2.70
1358.00
12992.00
7920.50
29710.00
0.00
2711.40
10616.00
12252.50
−29.90
20.90
129.70
15088.50
14884.50
44127.00
65.10
16452.70
−9021.80
3067.80
2525.50
60142.00
−2.90
−22.80
8339.00
218.40
22972.20
98.00
309941.90
7492.00
11712.60
10163.00
Double DQN with Pop-Art
3213.50
782.50
9011.60
18919.50
2869.30
340076.00
1103.30
8220.00
8299.40
1199.60
102.10
99.30
344.10
49065.80
775.00
119679.00
11099.00
63644.90
−11.50
2002.10
45.10
33.40
3469.60
56218.20
483.50
14225.20
−4.10
507.50
13150.00
9745.10
34393.00
0.00
4963.80
15851.20
6202.50
−2.60
20.60
286.70
5236.80
12530.80
47770.00
64.30
10932.30
−13585.10
4544.80
2589.70
589.00
−2.50
12.10
4870.00
183.90
22474.40
1172.00
56287.00
483.00
21409.50
14402.00"""
# COLUMN-ORIENTED processing
remove_re = re.compile(r"['’!\.]")
underscore_re = re.compile(r"[ \-\*]")
def game_metric_name(s):
"Calculate the name of the Metric() object from a game's name"
name = s.strip().lower()
name = remove_re.sub("", name)
name = underscore_re.sub("_", name)
name = name.replace("pac_man", "pacman") # the papers are inconsistent; "Pac-Man" is most correct but pacman most pythonic
return name + "_metric"
verb = False # Set to True for debugging
TSIZE = 57 # Number of games reported in the more recent papers
def get_game_metric(metric_name, human_name, target, target_source):
"""Get a reference to the metric object for a game, creating it if necessary."""
metric = globals().get(metric_name, None)
if not metric:
if verb: print("Creating metric for", human_name, "target: " + str(target) if target else "")
metric = simple_games.metric("Atari 2600 " + human_name, target=target,
scale=atari_linear, target_source=target_source)
globals()[metric_name] = metric
return metric
def get_column(raw, n, size=TSIZE):
assert isinstance(raw, list), "Not a list: {0}".format(type(raw))
start_pos = n * (size + 1) # Size + headers
name = raw[start_pos]
data = raw[start_pos + 1:start_pos + size + 1]
return name, data
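# Illustration of the column layout get_column() assumes: each pasted column is
# (size + 1) consecutive lines, i.e. one header line followed by `size` scores. So once
# noop_data = wang_table_2.split("\n") below, get_column(noop_data, 0) returns
# ("GAMES", [...57 game names...]) and get_column(noop_data, 3) returns ("HUMAN", [...57 scores...]).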
def ingest_column(src, n, paper_url, alg=None, extras={}, size=TSIZE):
algorithm, data = get_column(src, n, size=size)
_, games = get_column(src, 0, size=size)
if verb and algorithm.lower() not in alg.lower():
print(u"# {0} not in {1}".format(algorithm, alg))
for i, score in enumerate(data):
# Maybe someone should fix Python's float() function...
score = float(score.replace(",", "").replace('\xe2\x88\x92', "-").replace("−", "-"))
game = game_metric_name(games[i])
metric = get_game_metric(game, games[i], targets[i], "https://arxiv.org/abs/1509.06461")
if verb: print(u'{0}.measure(None, {1}, "{2}", url="{3}"{4})'.format(game, score, alg, paper_url, extras if extras else ""))
metric.measure(None, score, alg, url=paper_url, **extras)
noop_data = wang_table_2.split("\n")
human_start_data = wang_table_3.split("\n")
es_data = es_table3.split("\n")
distributional_data = bellemare_figure_14.split("\n")
early_data = mnih_2013_table_1.split("\n")
pop_art_data = van_hasselt_2016_table1.split("\n")
# Weirdly, the noop start human performance is consistently better than the human start human performance data
# Is this because it's newer and at a higher standard? Or because the recorded human starts consistently hamper strong
# human play?
_, human_noop = get_column(noop_data, 3)
human_noop = [float(score.replace(",", "")) for score in human_noop]
_, human_human = get_column(human_start_data, 3)
human_human = [float(score.replace(",", "")) for score in human_human]
targets = [max(scores) for scores in zip(human_noop, human_human)]
#ingest_column(early_data, 2, "https://arxiv.org/abs/1312.5602", u"SARSA(λ)",
# {"algorithm_src_url": "https://arxiv.org/abs/1207.4708v1"}, size=7)
ingest_column(es_data, 3, "https://arxiv.org/abs/1703.03864v1", "ES FF (1 hour) noop", size=51)
ingest_column(distributional_data, 7, "https://arxiv.org/abs/1707.06887v1", "C51 noop")
ingest_column(pop_art_data, 4, "https://arxiv.org/abs/1602.07714v1", "DDQN+Pop-Art noop")
ingest_column(noop_data, 4, "https://arxiv.org/abs/1509.06461v1", "DQN noop",
{"algorithm_src_url": "https://web.stanford.edu/class/psych209/Readings/MnihEtAlHassibis15NatureControlDeepRL.pdf",
"min_date": date(2015, 2, 26)})
ingest_column(human_start_data, 4, "https://arxiv.org/abs/1509.06461v1", "DQN hs",
{"algorithm_src_url": "https://web.stanford.edu/class/psych209/Readings/MnihEtAlHassibis15NatureControlDeepRL.pdf",
"min_date": date(2015, 2, 26)})
# v1 of the DDQN paper reported only "untuned" results
# TODO import those "untuned" results. May require OCR due to missing table columns...
# v3 of that paper added "tuned" results for the hs condition
# However this (presumably tuned) DDQN noop data is included by Wang et al but seems NOT to be in the DDQN paper.
# Conjecture, did Wang et al first report it?
ingest_column(noop_data, 5, "https://arxiv.org/abs/1511.06581v1", "DDQN (tuned) noop",
{"algorithm_src_url": "https://arxiv.org/abs/1509.06461v3"})
ingest_column(human_start_data, 5, "https://arxiv.org/abs/1509.06461v3", alg="DDQN (tuned) hs")
ingest_column(noop_data, 6, "https://arxiv.org/abs/1511.06581v1", alg="Duel noop")
ingest_column(human_start_data, 6, "https://arxiv.org/abs/1511.06581v1", alg="Duel hs")
ingest_column(noop_data, 7, "https://arxiv.org/abs/1511.05952", alg="Prior noop")
ingest_column(human_start_data, 7, "https://arxiv.org/abs/1511.05952", alg="Prior hs")
ingest_column(noop_data, 8, "https://arxiv.org/abs/1511.06581v3", "Prior+Duel noop",
{"algorithm_src_url":"https://arxiv.org/abs/1511.05952"})
ingest_column(human_start_data, 8, "https://arxiv.org/abs/1509.06461v3", "Prior+Duel hs",
{"algorithm_src_url":"https://arxiv.org/abs/1511.05952"})
# ROW-ORIENTED DATA
# The row parsers are harder and need to be customized per table, essentially
# OCR output:
mnih_extended_table_2 = """Game Random Play Best Linear Learner Contingency (SARSA) Human DQN (± std) Normalized DQN (% Human)
Alien 227.8 939.2 103.2 6875 3069 (±1093) 42.7%
Amidar 5.8 103.4 183.6 1676 739.5 (±3024) 43.9%
Assault 222.4 628 537 1496 3359 (±775) 246.2%
Asterix 210 987.3 1332 8503 6012 (±1744) 70.0%
Asteroids 719.1 907.3 89 13157 1629 (±542) 7.3%
Atlantis 12850 62687 852.9 29028 85641 (±17600) 449.9%
Bank Heist 14.2 190.8 67.4 734.4 429.7 (±650) 57.7%
Battle Zone 2360 15820 16.2 37800 26300 (±7725) 67.6%
Beam Rider 363.9 929.4 1743 5775 6846 (±1619) 119.8%
Bowling 23.1 43.9 36.4 154.8 42.4 (±88) 14.7%
Boxing 0.1 44 9.8 4.3 71.8 (±8.4) 1707.9%
Breakout 1.7 5.2 6.1 31.8 401.2 (±26.9) 1327.2%
Centipede 2091 8803 4647 11963 8309 (±5237) 63.0%
Chopper Command 811 1582 16.9 9882 6687 (±2916) 64.8%
Crazy Climber 10781 23411 149.8 35411 114103 (±22797) 419.5%
Demon Attack 152.1 520.5 0 3401 9711 (±2406) 294.2%
Double Dunk -18.6 -13.1 -16 -15.5 -18.1 (±2.6) 17.1%
Enduro 0 129.1 159.4 309.6 301.8 (±24.6) 97.5%
Fishing Derby -91.7 -89.5 -85.1 5.5 -0.8 (±19.0) 93.5%
Freeway 0 19.1 19.7 29.6 30.3 (±0.7) 102.4%
Frostbite 65.2 216.9 180.9 4335 328.3 (±250.5) 6.2%
Gopher 257.6 1288 2368 2321 8520 (±3279) 400.4%
Gravitar 173 387.7 429 2672 306.7 (±223.9) 5.3%
H.E.R.O. 1027 6459 7295 25763 19950 (±158) 76.5%
Ice Hockey -11.2 -9.5 -3.2 0.9 -1.6 (±2.5) 79.3%
James Bond 29 202.8 354.1 406.7 576.7 (±175.5) 145.0%
Kangaroo 52 1622 8.8 3035 6740 (±2959) 224.2%
Krull 1598 3372 3341 2395 3805 (±1033) 277.0%
Kung-Fu Master 258.5 19544 29151 22736 23270 (±5955) 102.4%
Montezuma's Revenge 0 10.7 259 4367 0 (±0) 0.0%
Ms. Pacman 307.3 1692 1227 15693 2311 (±525) 13.0%
Name This Game 2292 2500 2247 4076 7257 (±547) 278.3%
Pong -20.7 -19 -17.4 9.3 18.9 (±1.3) 132.0%
Private Eye 24.9 684.3 86 69571 1788 (±5473) 2.5%
Q*Bert 163.9 613.5 960.3 13455 10596 (±3294) 78.5%
River Raid 1339 1904 2650 13513 8316 (±1049) 57.3%
Road Runner 11.5 67.7 89.1 7845 18257 (±4268) 232.9%
Robotank 2.2 28.7 12.4 11.9 51.6 (±4.7) 509.0%
Seaquest 68.4 664.8 675.5 20182 5286 (±1310) 25.9%
Space Invaders 148 250.1 267.9 1652 1976 (±893) 121.5%
Star Gunner 664 1070 9.4 10250 57997 (±3152) 598.1%
Tennis -23.8 -0.1 0 -8.9 -2.5 (±1.9) 143.2%
Time Pilot 3568 3741 24.9 5925 5947 (±1600) 100.9%
Tutankham 11.4 114.3 98.2 167.6 186.7 (±41.9) 112.2%
Up and Down 533.4 3533 2449 9082 8456 (±3162) 92.7%
Venture 0 66 0.6 1188 380 (±238.6) 32.0%
Video Pinball 16257 16871 19761 17298 42684 (±16287) 2539.4%
Wizard of Wor 563.5 1981 36.9 4757 3393 (±2019) 67.5%
Zaxxon 32.5 3365 21.4 9173 4977 (±1235) 54.1%"""
nature_rows = mnih_extended_table_2.split("\n")[1:]
name_re = re.compile(r'[^0-9\t]+')
for row in nature_rows:
match = name_re.match(row)
game = game_metric_name(match.group(0))
rest = name_re.sub("", row, 1)
cols = rest.split()
random, bll, sarsa, human, dqn, dqn_err, norm = cols
dqn, sarsa, bll = float(dqn), float(sarsa), float(bll)
dqn_err = float(re.search("[0-9]+", dqn_err).group(0))
globals()[game].measure(date(2015, 2, 26), dqn, 'Nature DQN',
url='https://web.stanford.edu/class/psych209/Readings/MnihEtAlHassibis15NatureControlDeepRL.pdf',
papername="Human-level control through deep reinforcement learning",
uncertainty=dqn_err)
# The Mnih et al. Nature paper attributes these SARSA results to this paper,
# but the paper doesn't actually seem to include them?
globals()[game].measure(date(2012, 7, 14), sarsa, 'SARSA',
url='https://www.aaai.org/ocs/index.php/AAAI/AAAI12/paper/view/5162',
papername="Investigating Contingency Awareness Using Atari 2600 Games")
globals()[game].measure(None, bll, 'Best linear',
url='https://arxiv.org/abs/1207.4708v1')
if verb:
print("{0}.measure(None, {1}, 'Nature DQN', papername='Human-level control through deep reinforcement learning' "
"url='https://web.stanford.edu/class/psych209/Readings/MnihEtAlHassibis15NatureControlDeepRL.pdf'"
", uncertainty={2})".format(game, dqn, dqn_err))
a3c_table_s3 = """Game DQN Gorila Double Dueling Prioritized A3C FF, 1 day A3C FF A3C LSTM
Alien 570.2 813.5 1033.4 1486.5 900.5 182.1 518.4 945.3
Amidar 133.4 189.2 169.1 172.7 218.4 283.9 263.9 173.0
Assault 3332.3 1195.8 6060.8 3994.8 7748.5 3746.1 5474.9 14497.9
Asterix 124.5 3324.7 16837.0 15840.0 31907.5 6723.0 22140.5 17244.5
Asteroids 697.1 933.6 1193.2 2035.4 1654.0 3009.4 4474.5 5093.1
Atlantis 76108.0 629166.5 319688.0 445360.0 593642.0 772392.0 911091.0 875822.0
Bank Heist 176.3 399.4 886.0 1129.3 816.8 946.0 970.1 932.8
Battle Zone 17560.0 19938.0 24740.0 31320.0 29100.0 11340.0 12950.0 20760.0
Beam Rider 8672.4 3822.1 17417.2 14591.3 26172.7 13235.9 22707.9 24622.2
Berzerk 1011.1 910.6 1165.6 1433.4 817.9 862.2
Bowling 41.2 54.0 69.6 65.7 65.8 36.2 35.1 41.8
Boxing 25.8 74.2 73.5 77.3 68.6 33.7 59.8 37.3
Breakout 303.9 313.0 368.9 411.6 371.6 551.6 681.9 766.8
Centipede 3773.1 6296.9 3853.5 4881.0 3421.9 3306.5 3755.8 1997.0
Chopper Command 3046.0 3191.8 3495.0 3784.0 6604.0 4669.0 7021.0 10150.0
Crazy Climber 50992.0 65451.0 113782.0 124566.0 131086.0 101624.0 112646.0 138518.0
Defender 27510.0 33996.0 21093.5 36242.5 56533.0 233021.5
Demon Attack 12835.2 14880.1 69803.4 56322.8 73185.8 84997.5 113308.4 115201.9
Double Dunk -21.6 -11.3 -0.3 -0.8 2.7 0.1 -0.1 0.1
Enduro 475.6 71.0 1216.6 2077.4 1884.4 -82.2 -82.5 -82.5
Fishing Derby -2.3 4.6 3.2 -4.1 9.2 13.6 18.8 22.6
Freeway 25.8 10.2 28.8 0.2 27.9 0.1 0.1 0.1
Frostbite 157.4 426.6 1448.1 2332.4 2930.2 180.1 190.5 197.6
Gopher 2731.8 4373.0 15253.0 20051.4 57783.8 8442.8 10022.8 17106.8
Gravitar 216.5 538.4 200.5 297.0 218.0 269.5 303.5 320.0
H.E.R.O. 12952.5 8963.4 14892.5 15207.9 20506.4 28765.8 32464.1 28889.5
Ice Hockey -3.8 -1.7 -2.5 -1.3 -1.0 -4.7 -2.8 -1.7
James Bond 348.5 444.0 573.0 835.5 3511.5 351.5 541.0 613.0
Kangaroo 2696.0 1431.0 11204.0 10334.0 10241.0 106.0 94.0 125.0
Krull 3864.0 6363.1 6796.1 8051.6 7406.5 8066.6 5560.0 5911.4
Kung-Fu Master 11875.0 20620.0 30207.0 24288.0 31244.0 3046.0 28819.0 40835.0
Montezuma’s Revenge 50.0 84.0 42.0 22.0 13.0 53.0 67.0 41.0
Ms. Pacman 763.5 1263.0 1241.3 2250.6 1824.6 594.4 653.7 850.7
Name This Game 5439.9 9238.5 8960.3 11185.1 11836.1 5614.0 10476.1 12093.7
Phoenix 12366.5 20410.5 27430.1 28181.8 52894.1 74786.7
Pit Fall -186.7 -46.9 -14.8 -123.0 -78.5 -135.7
Pong 16.2 16.7 19.1 18.8 18.9 11.4 5.6 10.7
Private Eye 298.2 2598.6 -575.5 292.6 179.0 194.4 206.9 421.1
Q*Bert 4589.8 7089.8 11020.8 14175.8 11277.0 13752.3 15148.8 21307.5
River Raid 4065.3 5310.3 10838.4 16569.4 18184.4 10001.2 12201.8 6591.9
Road Runner 9264.0 43079.8 43156.0 58549.0 56990.0 31769.0 34216.0 73949.0
Robotank 58.5 61.8 59.1 62.0 55.4 2.3 32.8 2.6
Seaquest 2793.9 10145.9 14498.0 37361.6 39096.7 2300.2 2355.4 1326.1
Skiing -11490.4 -11928.0 -10852.8 -13700.0 -10911.1 -14863.8
Solaris 810.0 1768.4 2238.2 1884.8 1956.0 1936.4
Space Invaders 1449.7 1183.3 2628.7 5993.1 9063.0 2214.7 15730.5 23846.0
Star Gunner 34081.0 14919.2 58365.0 90804.0 51959.0 64393.0 138218.0 164766.0
Surround 1.9 4.0 -0.9 -9.6 -9.7 -8.3
Tennis -2.3 -0.7 -7.8 4.4 -2.0 -10.2 -6.3 -6.4
Time Pilot 5640.0 8267.8 6608.0 6601.0 7448.0 5825.0 12679.0 27202.0
Tutankham 32.4 118.5 92.2 48.0 33.6 26.1 156.3 144.2
Up and Down 3311.3 8747.7 19086.9 24759.2 29443.7 54525.4 74705.7 105728.7
Venture 54.0 523.4 21.0 200.0 244.0 19.0 23.0 25.0
Video Pinball 20228.1 112093.4 367823.7 110976.2 374886.9 185852.6 331628.1 470310.5
Wizard of Wor 246.0 10431.0 6201.0 7054.0 7451.0 5278.0 17244.0 18082.0
Yars Revenge 6270.6 25976.5 5965.1 7270.8 7157.5 5615.5
Zaxxon 831.0 6159.4 8593.0 10164.0 9501.0 2659.0 24622.0 23519.0"""
# Caption:
# Table S3. Raw scores for the human start condition (30 minutes emulator time). DQN scores taken from (Nair et al.,
# 2015). Double DQN scores taken from (Van Hasselt et al., 2015), Dueling scores from (Wang et al., 2015) and
# Prioritized scores taken from (Schaul et al., 2015)
a3c_rows = a3c_table_s3.split("\n")[1:]
for row in a3c_rows:
cols = row.split("\t")
game, dqn, gorila, ddqn, duel, prior, a3c_ff_1, a3c_ff, a3c_lstm = cols
game1 = game_metric_name(game)
metric = get_game_metric(game1, game, None, None)
for alg, score in [("A3C FF (1 day) hs", a3c_ff_1), ("A3C FF hs", a3c_ff), ("A3C LSTM hs", a3c_lstm)]:
score = float(score)
metric.measure(None, score, alg, url="https://arxiv.org/abs/1602.01783")
if verb: print('{0}.measure(None, {1}, "{2}", url="{3}")'.format(game1, score, alg, "https://arxiv.org/abs/1602.01783"))
try:
score = float(gorila)
metric.measure(None, score, "Gorila", url="https://arxiv.org/abs/1507.04296")
if verb: print('{0}.measure(None, {1}, "Gorila", url="{2}")'.format(game1, score, "https://arxiv.org/abs/1507.04296"))
except ValueError:
if verb: print("No Gorila score for", game)
# vim: set list:listchars=tab:!·,trail:·
| AI-metrics-master | scrapers/atari.py |
AI-metrics-master | scrapers/__init__.py |
|
#!/usr/bin/env ipython
import json
import os
import re
import lxml
from lxml.cssselect import CSSSelector
import requests
from taxonomy import offline
from data.vision import *
# So we can pipe the output of this code somewhere
os.environ["PYTHONIOENCODING"] = "utf-8"
# Rodrigo Benenson's "Are We There Yet?" data!
reimport_awty = True
if reimport_awty and not offline:
# Fetch the summary page
awty_url = "https://rodrigob.github.io/are_we_there_yet/build/"
req = requests.get(awty_url)
page = req.content.replace("</html>", "", 1) # There is a crazy weird </html> near the top of the page that breaks everything
tree = lxml.html.fromstring(page)
from urlparse import urlparse
awty_datasets = {}
if reimport_awty and not offline:
for e in CSSSelector('div.span7')(tree):
#print dir(e)
node = e.getchildren()[0].getchildren()[0]
link = node.attrib["href"]
metric_name = node.text_content()
print "%40s" % metric_name, link
awty_datasets[metric_name] = urlparse(link)
try:
got = json.load(open(".awty_cache.json"))
except:
got = {}
# For lots of AWTY data, there's no date but there is a conference, so we can look up the dates that way.
conf_dates = {}
def parse_awty_dataset(name, link, verbose=False):
if offline or not reimport_awty: return []
print "# Handling", repr(name), link.geturl()
if link.path not in got:
page = requests.get(awty_url + link.path).content
tree = lxml.html.fromstring(page.replace("</html>", "", 1))
got[link.path] = tree
else:
tree = got[link.path]
#print dir(tree)
#print "fragment:", link.fragment
#print page
results_section = CSSSelector("div#" + link.fragment)(tree)[0]
rows = CSSSelector("tr")(results_section)
results = []
for r in rows[1:]:
result, paperlink, journal, details = CSSSelector("td")(r)
result, papername, journal = [e.text_content() for e in (result, paperlink, journal)]
notes = CSSSelector("div")(details)
notes = notes[0].text_content().strip() if notes else ""
notes = re.sub("\s+", " ", notes, flags=re.UNICODE)
assert isinstance(notes, str) or isinstance(notes, unicode), "Expecting stringy notes %s " % type(notes)
if "%" not in result:
print "# Skipping", result, papername, journal
continue
if verbose:
print "%6s" % result, "%90s" % papername, "%10s" %journal, notes
e = CSSSelector("a")(paperlink)
paper_url = e[0].attrib["href"] if e else None
results.append((result, papername, paper_url, journal, notes))
return results
percent_re = re.compile(r'([0-9.]+) *% *(\(?±([0-9\.]+))?')
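# percent_re pulls the value (group 1) and optional uncertainty (group 3) out of
# AWTY result strings, e.g. "71.0 % (±0.5)" -> ("71.0", "0.5") and "57.3 %" -> ("57.3", None).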
done = {}
def ingest_awty_dataset(name, metric, label, regex=percent_re):
if offline or not reimport_awty:
#print "Offline, not ingesting", name
return None
done[name] = True
for n, (result, papername, paper_url, journal, notes) in enumerate(parse_awty_dataset(name, awty_datasets[name])):
try:
match = regex.match(result)
value = float(match.group(1))
except AttributeError:
print "result", result, "does not parse"
continue
try:
uncertainty = float(match.group(3)) if match.group(3) else 0.0
except IndexError:
uncertainty = 0.0
#if "Graph Cut based inference" in papername or "Spatial and Global Constraints Really" in papername or (paper_url and'http://research.microsoft.com/en-us/um/people/pkohli/papers/lrkt_eccv2010.pdf' in paper_url):
print "%s.measure(%s, %r, %r, url=%r, papername=%r, uncertainty=%r, venue=%r, notes=%r)" % (
label, None, value, papername, paper_url, papername, uncertainty, journal, notes)
try:
metric.measure(None, value, papername, url=paper_url, papername=papername,
uncertainty=uncertainty, venue=journal, notes=notes)
except requests.ConnectionError, e:
print "Network error on {0} ({1}), skipping:".format(paper_url, papername)
print e
msrc21_pc = image_classification.metric("MSRC-21 image semantic labelling (per-class)", "http://jamie.shotton.org/work/data.html", scale=correct_percent)
msrc21_pp = image_classification.metric("MSRC-21 image semantic labelling (per-pixel)", "http://jamie.shotton.org/work/data.html", scale=correct_percent)
cifar100 = image_classification.metric("CIFAR-100 Image Recognition", "https://www.cs.toronto.edu/~kriz/cifar.html", scale=correct_percent)
cifar10 = image_classification.metric("CIFAR-10 Image Recognition", "https://www.cs.toronto.edu/~kriz/cifar.html", scale=correct_percent, target=94, target_source="http://karpathy.github.io/2011/04/27/manually-classifying-cifar10/")
svhn = image_classification.metric("Street View House Numbers (SVHN)", "http://ufldl.stanford.edu/housenumbers/", scale=error_percent, target=2.0, target_source="http://ufldl.stanford.edu/housenumbers/nips2011_housenumbers.pdf")
# We declare MNIST solved because the gap between best performance and human performance appears to be less than the uncertainty in human performance
mnist = image_classification.metric("MNIST handwritten digit recognition", "http://yann.lecun.com/exdb/mnist/", scale=error_percent, target=0.2, target_source="http://people.idsia.ch/~juergen/superhumanpatternrecognition.html", solved=True)
# This AWTY URL is broken
mnist.measure(date(2013,2,28), 0.52, 'COSFIRE', 'http://www.rug.nl/research/portal/files/2390194/2013IEEETPAMIAzzopardi.pdf', papername='Trainable COSFIRE Filters for Keypoint Detection and Pattern Recognition')
# AWTY transcribes what's in this paper, but the numbers appear to be in wildly different units from everything else:
for i, m in enumerate(mnist.measures):
if m.url == "http://personal.ie.cuhk.edu.hk/~ccloy/files/aaai_2015_target_coding.pdf":
del mnist.measures[i]
stl10 = image_classification.metric("STL-10 Image Recognition", "https://cs.stanford.edu/~acoates/stl10/", scale=correct_percent)
if not offline: ingest_awty_dataset('STL-10', stl10, 'stl10')
leeds_sport_poses = image_classification.metric("Leeds Sport Poses")
if reimport_awty and not offline:
ingest_awty_dataset('SVHN', svhn, 'svhn')
ingest_awty_dataset('CIFAR-100', cifar100, 'cifar100')
ingest_awty_dataset('CIFAR-10', cifar10, 'cifar10')
ingest_awty_dataset('MNIST', mnist, 'mnist')
ingest_awty_dataset('MSRC-21', msrc21_pc, 'msrc21_pc')
ingest_awty_dataset('MSRC-21', msrc21_pp, 'msrc21_pp', regex=re.compile("[0-9.]+ *% */ * ([0-9.]+) *%"))
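# Finally, verbosely dump any AWTY datasets that are hosted on the summary page itself
# (relative links, i.e. no URL scheme) and that weren't ingested above, as a reminder
# of what is still unhandled.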
for name, link in awty_datasets.items():
if not link.scheme and name not in done:
parse_awty_dataset(name, link, verbose=True)
| AI-metrics-master | scrapers/awty.py |
import os
import re
import string
import urllib2
from BeautifulSoup import BeautifulSoup
from dateutil import parser
import mistune
from data.acoustics import speech_recognition
from scales import error_rate # or error_percent?
from taxonomy import offline
# needs to be manually updated when more metrics are added
h2_name_map = {
"LibriSpeech" : "librispeech",
"WSJ" : "wsj",
"Switchboard Hub5'00" : "swb_hub_500",
"Fisher" : "fisher",
"CHiME (noisy speech)" : "chime",
"TIMIT" : "timit"
}
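# To scrape an additional table from the wer_are_we README, add its heading to the
# map above, e.g. (hypothetical entry):
#   "TED-LIUM" : "tedlium",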
def get_metrics(h2_name, first_row, file_output, scale = "error_percent", targets = (None, None), target_source = None):
metric_names = []
name = h2_name_map[h2_name]
if name == "":
raise SystemExit("Error! Need to add h2_name to h2_name_map to be able to parse!")
for column, target in zip(first_row, targets):
column = str(column.getText()).translate(None, string.punctuation)
metric_name = str(name + "_" + "_".join(re.findall(r"[\w']+", column))).replace("test", "")
metric_names.append(metric_name)
if targets != (None, None):
s = "{0} = speech_recognition.metric(name=\"{1} {2}\", scale={3}, target={4}, target_source=\"{5}\")\n".format(metric_name, name, column, scale, target, target_source)
else:
s = "{0} = speech_recognition.metric(name=\"{1} {2}\", scale={3})\n".format(metric_name, name, column, scale)
file_output += s
return (metric_names, file_output)
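# get_metrics appends one declaration per column to file_output, roughly of the form
# (illustrative; the identifier is derived from the heading and column text):
#   <metric_name> = speech_recognition.metric(name="<table> <column>", scale=error_percent)
# with target=... and target_source=... added once a "Humans" row has supplied targets.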
def add_measures(metric_names, row):
def row_data(row):
columns = row.findAll('td')
l = len(columns)
url = columns[l-3].find('a', href=True)['href']
columns = map(lambda x: x.getText(), columns)
date = str(parser.parse(columns[l-2]).date()).split("-")
date = "date({0}, {1}, {2})".format(int(date[0]), int(date[1]), int(date[2]))
r = {
'name': columns[l-1].encode('ascii', 'ignore'),
'date': date,
'url': url,
'values': map(lambda x: x.strip('%'), columns[:-3])
}
return r
data = row_data(row)
notes = data['name']
targets = []
if notes == "Humans":
targets = data['values']
return ([], targets, data['url'])
table = []
for metric_name, value in zip(metric_names, data['values']):
if not value:
continue
s = "{0}.measure({1}, {2}, '{3}', '{4}')\n".format(metric_name, data['date'], value, notes, data['url'])
table.append(s)
return (table, targets, '')
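# add_measures returns the generated .measure() lines for one result row, e.g.
# (illustrative): "swb_hub_500_SWB.measure(date(2017, 3, 1), 5.5, 'Some paper', 'http://...')\n"
# A "Humans" row instead yields its values as targets, and main() only emits metric
# declarations after seeing one, so each wer_are_we table is assumed to contain a Humans row.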
def main():
md = urllib2.urlopen('https://raw.githubusercontent.com/syhw/wer_are_we/master/README.md').read()
bs = BeautifulSoup(mistune.markdown(md))
wer_data_file = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/wer.py"))
file_output = "# The file was autogenerated by ../scrapers/wer.py\n\nfrom datetime import date\n\nfrom data.acoustics import speech_recognition, swb_hub_500\nfrom scales import *\n\n"
wer_metrics = []
for table, header in zip(bs.findAll('table'), bs.findAll('h3')):
header = header.getText()
rows = table.findAll('tr')
metric_data = get_metrics(header, rows[0].findAll('th')[:-3], file_output)
metric_names = metric_data[0]
wer_metrics += metric_names
table_data = []
for row in rows:
if row.findAll('td') == []:
continue
measure_data, targets, target_source = add_measures(metric_names, row)
if not targets:
table_data += measure_data
elif not measure_data:
metric_data = get_metrics(header, rows[0].findAll('th')[:-3], file_output, targets = targets, target_source = target_source)
file_output = metric_data[1]
file_output += "".join(sorted(table_data))
file_output = file_output + "\n\nwer_metrics=[" + ", ".join(wer_metrics) + "]"
with open(wer_data_file, 'wb') as f:
f.write(file_output)
if not offline:
main()
| AI-metrics-master | scrapers/wer.py |
# Absorb the data from https://arxiv.org/abs/1703.03864v1
# Copy and paste from Table 3:
table3 = """Game
Alien
Amidar
Assault
Asterix
Asteroids
Atlantis
Bank Heist
Battle Zone
Beam Rider
Berzerk
Bowling
Boxing
Breakout
Centipede
Chopper Command
Crazy Climber
Demon Attack
Double Dunk
Enduro
Fishing Derby
Freeway
Frostbite
Gopher
Gravitar
Ice Hockey
Kangaroo
Krull
Montezumas Revenge
Name This Game
Phoenix
Pit Fall
Pong
Private Eye
Q Bert
River Raid
Road Runner
Robotank
Seaquest
Skiing
Solaris
Space Invaders
Star Gunner
Tennis
Time Pilot
Tutankham
Up and Down
Venture
Video Pinball
Wizard of Wor
Yars Revenge
Zaxxon
DQN
570.2
133.4
3332.3
124.5
697.1
76108.0
176.3
17560.0
8672.4
NaN
41.2
25.8
303.9
3773.1
3046.0
50992.0
12835.2
-21.6
475.6
-2.3
25.8
157.4
2731.8
216.5
-3.8
2696.0
3864.0
50.0
5439.9
NaN
NaN
16.2
298.2
4589.8
4065.3
9264.0
58.5
2793.9
NaN
NaN
1449.7
34081.0
-2.3
5640.0
32.4
3311.3
54.0
20228.1
246.0
NaN
831.0
A3C FF, 1 day
182.1
283.9
3746.1
6723.0
3009.4
772392.0
946.0
11340.0
13235.9
1433.4
36.2
33.7
551.6
3306.5
4669.0
101624.0
84997.5
0.1
-82.2
13.6
0.1
180.1
8442.8
269.5
-4.7
106.0
8066.6
53.0
5614.0
28181.8
-123.0
11.4
194.4
13752.3
10001.2
31769.0
2.3
2300.2
-13700.0
1884.8
2214.7
64393.0
-10.2
5825.0
26.1
54525.4
19.0
185852.6
5278.0
7270.8
2659.0
ES FF, 1 hour
994.0
112.0
1673.9
1440.0
1562.0
1267410.0
225.0
16600.0
744.0
686.0
30.0
49.8
9.5
7783.9
3710.0
26430.0
1166.5
0.2
95.0
-49.0
31.0
370.0
582.0
805.0
-4.1
11200.0
8647.2
0.0
4503.0
4041.0
0.0
21.0
100.0
147.5
5009.0
16590.0
11.9
1390.0
-15442.5
2090.0
678.5
1470.0
-4.5
4970.0
130.3
67974.0
760.0
22834.8
3480.0
16401.7
6380.0"""
data = table3.split("\n")
names = data[1:52]
dqn = data[53:104]
a3cs = data[105:156]
ess = data[157:]
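# Layout of the pasted table (descriptive): data[0] is the "Game" header, data[1:52]
# are the 51 game names, and the column headers "DQN", "A3C FF, 1 day" and
# "ES FF, 1 hour" sit at indices 52, 104 and 156, each followed by its 51 scores
# (the DQN column is sliced out but not used below).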
for name, a3c, es in zip(names, a3cs, ess):
metric_name = name.lower().replace(" ", "_") + "_metric"
a3c_score = float(a3c)
es_score = float(es)
print metric_name + ".measure(None, "+ `es_score` + ', "ES (1 hour)", url="https://arxiv.org/abs/1703.03864v1")'
print metric_name + ".measure(None, "+ `a3c_score` + ', "A3C FF (1 day)", url="https://arxiv.org/abs/1703.03864v1", algorithm_src_url="https://arxiv.org/pdf/1602.01783.pdf", min_date=date(2016,2,4))'
| AI-metrics-master | scrapers/es.py |
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
abstract_strategy_games = Problem("Abstract strategy games", ["agi", "abstract-games"])
playing_with_hints = Problem("Playing abstract games with extensive hints", ["abstract-games"], solved=True)
abstract_strategy_games.add_subproblem(playing_with_hints)
playing_with_hints.notes = """
Complex abstract strategy games have been solved to super-human levels
by computer systems with extensive rule-hinting and heuristics,
in some cases combined with machine learning techniques.
"""
computer_chess = playing_with_hints.metric("Computer Chess", scale=elo, target=2882, target_label="Best human play", target_source="https://en.wikipedia.org/w/index.php?title=Comparison_of_top_chess_players_throughout_history&oldid=777500496#Elo_system")
computer_go = playing_with_hints.metric("Computer Go", scale=elo, target=3632, target_label="Best human play", target_source="https://www.goratings.org/en/history/")
computer_go.solved = True # until we get proper data
# For some caveats, see https://en.wikipedia.org/w/index.php?title=Chess_engine&oldid=764341963#Ratings
# We could script ingestion of data from CCRL, or get data from Katja
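# A minimal sketch of what scripted CCRL ingestion might look like (hypothetical;
# assumes the 40/40 rating list is an HTML table whose cells include rank, engine
# name and rating -- the real page layout would need checking before use):
#
#   import requests, lxml.html
#   tree = lxml.html.fromstring(requests.get("http://www.computerchess.org.uk/ccrl/4040/").content)
#   for row in tree.cssselect("table tr")[1:]:
#       cells = [c.text_content().strip() for c in row.cssselect("td")]
#       # e.g. computer_chess.measure(date.today(), int(cells[2]), cells[1], url="http://www.computerchess.org.uk/ccrl/4040/")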
computer_chess.measure(date(1997,5,11), 2725, "Deep Blue", uncertainty=25, url="https://www.quora.com/What-was-Deep-Blues-Elo-rating")
computer_chess.measure(date(2006,5,27), 2995, "Rybka 1.1 64bit", uncertainty=25, url="https://web.archive.org/web/20060531091049/http://www.computerchess.org.uk/ccrl/4040/rating_list_all.html")
computer_chess.measure(date(2010,8,7), 3269, "Rybka 4 64bit", uncertainty=22, url="https://web.archive.org/web/20100923131123/http://www.computerchess.org.uk/ccrl/4040/rating_list_all.html")
computer_chess.measure(date(2013,7,20), 3248, "Houdini 3 64bit", uncertainty=16, url="https://web.archive.org/web/20130415000000*/http://www.computerchess.org.uk/ccrl/4040/rating_list_all.html")
computer_chess.measure(date(2015,7,4), 3332, "Komodo 9", uncertainty=24, url="https://web.archive.org/web/20150708104805/http://www.computerchess.org.uk/ccrl/4040/rating_list_all.html")
computer_chess.measure(date(2017,2,27), 3393, "Stockfish", uncertainty=50, url="https://web.archive.org/web/20170227044521/http://www.computerchess.org.uk/ccrl/4040/")
# Wikipedia has some nice data here:
computer_chess.measure(date(1984,12,31), 1631, "Novag Super Constellation 6502 4 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1985,12,31), 1827, "Mephisto Amsterdam 68000 12 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1986,12,31), 1827, "Mephisto Amsterdam 68000 12 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1987,12,31), 1923, "Mephisto Dallas 68020 14 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1988,12,31), 1993, "Mephisto MM 4 Turbo Kit 6502 16 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1989,12,31), 2027, "Mephisto Portorose 68020 12 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1990,12,31), 2138, "Mephisto Portorose 68030 36 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1991,12,31), 2127, "Mephisto Vancouver 68030 36 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1992,12,31), 2174, "Chess Machine Schroder 3.0 ARM2 30 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1993,12,31), 2235, "Mephisto Genius 2.0 486/50-66 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1995,12,31), 2306, "MChess Pro 5.0 Pentium 90 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1996,12,31), 2337, "Rebel 8.0 Pentium 90 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1997,12,31), 2418, "HIARCS 6.0 49MB P200 MMX", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1998,12,31), 2460, "Fritz 5.0 PB29% 67MB P200 MMX", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(1999,12,31), 2594, "Chess Tiger 12.0 DOS 128MB K6-2 450 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2000,12,31), 2607, "Fritz 6.0 128MB K6-2 450 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2001,12,31), 2709, "Chess Tiger 14.0 CB 256MB Athlon 1200", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2002,12,31), 2759, "Deep Fritz 7.0 256MB Athlon 1200 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2003,12,31), 2791, "Shredder 7.04 UCI 256MB Athlon 1200 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2004,12,31), 2800, "Shredder 8.0 CB 256MB Athlon 1200 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2005,12,31), 2808, "Shredder 9.0 UCI 256MB Athlon 1200 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2006,12,31), 2902, "Rybka 1.2 256MB Athlon 1200 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2007,12,31), 2935, "Rybka 2.3.1 Arena 256MB Athlon 1200 MHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2008,12,31), 3238, "Deep Rybka 3 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2009,12,31), 3232, "Deep Rybka 3 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2010,12,31), 3227, "Deep Rybka 3 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2011,12,31), 3216, "Deep Rybka 4 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2012,12,31), 3221, "Deep Rybka 4 x64 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2013,12,31), 3241, "Komodo 5.1 MP x64 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2014,12,31), 3295, "Komodo 7.0 MP x64 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2015,12,31), 3334, "Stockfish 6 MP x64 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
computer_chess.measure(date(2016,12,31), 3366, "Komodo 9.1 MP x64 2GB Q6600 2.4 GHz", url="https://en.wikipedia.org/wiki/Swedish_Chess_Computer_Association#Rating_list_year-end_leaders")
mastering_historical_games = Problem("Superhuman mastery of arbitrary abstract strategy games", ["super", "abstract-games"])
abstract_strategy_games.add_subproblem(mastering_historical_games)
mastering_chess = mastering_historical_games.metric("mastering chess")
mastering_chess.notes = """
Beating all humans at chess, given a corpus of past play amongst masters,
but no human-crafted policy constraints and heuristics. This will probably fall out
immediately once learning_abstract_game_rules is solved, since playing_with_hints
has been solved.
"""
# Are there any published metrics for these yet?
learning_abstract_game_rules = Problem("Learning the rules of complex strategy games from examples", ["agi", "abstract-games"])
abstract_strategy_games.add_subproblem(learning_abstract_game_rules)
learning_chess = learning_abstract_game_rules.metric("learning chess")
learning_chess.notes = """
Chess software contains hard-coded policy constraints for valid play; this metric is whether RL
or other agents can correctly build those policy constraints from examples or oracles"""
learning_go = learning_abstract_game_rules.metric("learning go")
learning_go.notes = """
Go software contains policy constraints for valid play and evaluating the number of
liberties for groups. This metric is whether RL or other agents can correctly build those
policy constraints from examples or oracles"""
learning_arbitrary_abstract_games = Problem("Play an arbitrary abstract game, first learning the rules", ["agi", "abstract-games"])
abstract_strategy_games.add_subproblem(learning_arbitrary_abstract_games)
#computer_chess.graph()
| AI-metrics-master | data/strategy_games.py |
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
vision = Problem("Vision", ["agi", "vision", "world-modelling"])
image_comprehension = Problem("Image comprehension", ["agi", "vision", "language", "world-modelling"])
image_classification = Problem("Image classification", ["vision", "agi"])
image_classification.add_subproblem(image_comprehension)
vision.add_subproblem(image_classification)
imagenet = image_classification.metric("Imagenet Image Recognition", "http://image-net.org", scale=error_rate, target=0.051)
imagenet.notes = """
Correctly label images from the Imagenet dataset. As of 2016, this includes:
- Object localization for 1000 categories.
- Object detection for 200 fully labeled categories.
- Object detection from video for 30 fully labeled categories.
- Scene classification for 365 scene categories (Joint with MIT Places team) on Places2 Database http://places2.csail.mit.edu.
- Scene parsing for 150 stuff and discrete object categories (Joint with MIT Places team).
"""
imagenet.measure(date(2010,8,31), 0.28191, "NEC UIUC", "http://image-net.org/challenges/LSVRC/2010/results")
imagenet.measure(date(2011,10,26), 0.2577, "XRCE","http://image-net.org/challenges/LSVRC/2011/results")
imagenet.measure(date(2012,10,13), 0.16422, "AlexNet / SuperVision",
"http://image-net.org/challenges/LSVRC/2012/results.html", algorithm_src_url="https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks")
imagenet.measure(date(2013,11,14), 0.11743, "Clarifai","http://www.image-net.org/challenges/LSVRC/2013/results.php")
imagenet.measure(date(2014,8,18), 0.07405, "VGG", "http://image-net.org/challenges/LSVRC/2014/index")
imagenet.measure(date(2015,12,10), 0.03567, "MSRA", "http://image-net.org/challenges/LSVRC/2015/results", algorithms=["residual-networks"])
imagenet.measure(date(2016,9,26), 0.02991, "Trimps-Soushen", "http://image-net.org/challenges/LSVRC/2016/results")
imagenet.measure(date(2017,7,21), 0.02251, "SE-ResNet152 / WMW", "http://image-net.org/challenges/LSVRC/2017/results")
# Test automatic detection of withdrawn papers
imagenet.measure(None, 0.0458, "withdrawn", "https://arxiv.org/abs/1501.02876")
video_classification = Problem("Recognise events in videos")
vision.add_subproblem(video_classification)
video_classification.metric("YouTube-8M video labelling", url="https://research.google.com/youtube8m/")
# The VQA paper breaks human performance down by real/abstract image so we need to compute the overall number...
# Also they don't seem to have human performance numbers for VQA multiple choice?
vqa_abstract_human_performance = 87.49
vqa_real_human_performance = 83.3
vqa_real_oe = image_comprehension.metric("COCO Visual Question Answering (VQA) real images 1.0 open ended", url="http://visualqa.org/",
target=vqa_real_human_performance, target_source="https://arxiv.org/abs/1505.00468", scale=correct_percent)
vqa_real_mc = image_comprehension.metric("COCO Visual Question Answering (VQA) real images 1.0 multiple choice", url="http://visualqa.org/", scale=correct_percent, solved=False)
vqa_abstract_oe = image_comprehension.metric("COCO Visual Question Answering (VQA) abstract images 1.0 open ended", url="http://visualqa.org/",
target=vqa_abstract_human_performance, target_source="https://arxiv.org/abs/1505.00468", scale=correct_percent)
vqa_abstract_mc = image_comprehension.metric("COCO Visual Question Answering (VQA) abstract 1.0 multiple choice", url="http://visualqa.org/", scale=correct_percent, solved=False)
# other visual question answering metrics (we don't have data for these yet)
# For a survey: https://arxiv.org/pdf/1607.05910
image_comprehension.metric("Toronto COCO-QA", url="http://www.cs.toronto.edu/~mren/imageqa/data/cocoqa/" )
image_comprehension.metric("DAQUAR", url="https://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal-computing/research/vision-and-language/visual-turing-challenge/", scale=correct_percent, target=60.27, target_source="https://arxiv.org/abs/1505.02074")
visual_genome_pairs = image_comprehension.metric("Visual Genome (pairs)", url="http://visualgenome.org", scale=correct_percent, axis_label="Top-1 precision")
visual_genome_subjects = image_comprehension.metric("Visual Genome (subjects)", url="http://visualgenome.org", scale=correct_percent, axis_label="Top-1 precision")
visual7w = image_comprehension.metric("Visual7W", url="https://arxiv.org/abs/1511.03416", scale=correct_percent)
image_comprehension.metric("FM-IQA", url="http://idl.baidu.com/FM-IQA.html")
image_comprehension.metric("Visual Madlibs", url="http://tamaraberg.com/visualmadlibs/")
vqa_real_oe.measure(date(2015,12,15), 55.89, "iBOWIMG baseline", url="https://arxiv.org/abs/1512.02167")
vqa_real_mc.measure(date(2015,12,15), 61.97, "iBOWIMG baseline", url="https://arxiv.org/abs/1512.02167")
vqa_real_oe.measure(None, 58.24, "SMem-VQA", url="https://arxiv.org/abs/1511.05234v2")
# not so clear what the number in the SANv1 paper was...
#vqa_real_oe.measure(None, 57.6, "SAN(2,CNN)", url="https://arxiv.org/abs/1511.02274v1")
vqa_real_oe.measure(None, 58.9, "SAN", url="https://arxiv.org/abs/1511.02274v2")
vqa_real_oe.measure(None, 59.5, "CNN-RNN", url="https://arxiv.org/abs/1603.02814v1")
vqa_real_oe.measure(None, 59.5, "FDA", url="https://arxiv.org/abs/1604.01485v1")
vqa_real_mc.measure(None, 64.2, "FDA", url="https://arxiv.org/abs/1604.01485v1")
vqa_real_oe.measure(None, 62.1, "HQI+ResNet", url="https://arxiv.org/abs/1606.00061v1")
vqa_real_mc.measure(None, 66.1, "HQI+ResNet", url="https://arxiv.org/abs/1606.00061v1")
vqa_real_oe.measure(None, 58.2, "LSTM Q+I", url="https://arxiv.org/abs/1505.00468v1")
vqa_real_mc.measure(None, 63.1, "LSTM Q+I", url="https://arxiv.org/abs/1505.00468v1")
vqa_real_oe.measure(None, 63.2, "joint-loss", url="https://arxiv.org/abs/1606.03647")
vqa_real_mc.measure(None, 67.3, "joint-loss", url="https://arxiv.org/abs/1606.03647")
vqa_real_oe.measure(None, 66.5, "MCB 7 att.", url="https://arxiv.org/abs/1606.01847v1", replicated="https://github.com/akirafukui/vqa-mcb")
vqa_real_mc.measure(None, 70.1, "MCB 7 att.", url="https://arxiv.org/abs/1606.01847v1", replicated="https://github.com/akirafukui/vqa-mcb")
visual7w.measure(None, 62.2, "MCB+Att.", url="https://arxiv.org/abs/1606.01847v1")
vqa_abstract_mc.measure(None, 61.41, "LSTM blind", url="http://visualqa.org/amc.html",
algorithm_src_url="https://arxiv.org/abs/1511.05099", venue="vqa competition 2016")
vqa_abstract_oe.measure(None, 57.19, "LSTM blind", url="http://visualqa.org/aoe.html",
algorithm_src_url="https://arxiv.org/abs/1511.05099", venue="vqa competition 2016")
vqa_abstract_mc.measure(None, 69.21, "LSTM + global features", url="http://visualqa.org/amc.html",
algorithm_src_url="https://arxiv.org/abs/1511.05099", venue="vqa competition 2016")
vqa_abstract_oe.measure(None, 65.02, "LSTM + global features", url="http://visualqa.org/aoe.html",
algorithm_src_url="https://arxiv.org/abs/1511.05099", venue="vqa competition 2016")
vqa_abstract_mc.measure(None, 71.18, "Dualnet ensemble", url="http://visualqa.org/amc.html",
algorithm_src_url="https://arxiv.org/abs/1606.06108", venue="vqa competition 2016")
vqa_abstract_oe.measure(None, 69.73, "Dualnet ensemble", url="http://visualqa.org/aoe.html",
algorithm_src_url="https://arxiv.org/abs/1606.06108", venue="vqa competition 2016")
vqa_real_mc.measure(None, 66.33, "MRN", url="https://arxiv.org/abs/1606.01455v1")
vqa_real_oe.measure(None, 61.84, "MRN + global features", url="https://arxiv.org/abs/1606.01455v1")
vqa_abstract_mc.measure(None, 74.37, "Graph VQA", url="https://arxiv.org/abs/1609.05600v1")
vqa_abstract_oe.measure(None, 70.42, "Graph VQA", url="https://arxiv.org/abs/1609.05600v1")
vqa_real_oe.measure(None, 64.2, "N2NMN", "https://arxiv.org/abs/1704.05526v2", replicated="http://ronghanghu.com/n2nmn/")
vqa2_real_oe = image_comprehension.metric("COCO Visual Question Answering (VQA) real images 2.0 open ended", url="http://visualqa.org/", scale=correct_percent)
vqa2_real_oe.measure(None, 54.22, "d-LSTM+nI", url="https://arxiv.org/abs/1612.00837v1", algorithm_src_url="https://github.com/VT-vision-lab/VQA_LSTM_CNN", min_date=date(2015,12,14))
vqa2_real_oe.measure(None, 62.27, "MCB", url="https://arxiv.org/abs/1612.00837v1", algorithm_src_url="https://arxiv.org/abs/1606.01847v1")
vqa2_real_oe.measure(None, 70.34, "Up-Down", url="https://arxiv.org/abs/1707.07998v1")
vqa2_real_oe.measure(date(2017,7,26), 68.16, "HDU-USYD-UNCC", url="http://www.visualqa.org/roe_2017.html")
vqa2_real_oe.measure(date(2017,7,26), 68.07, "DLAIT", url="http://www.visualqa.org/roe_2017.html")
visual7w.measure(None, 72.53, "CMN", url="https://arxiv.org/abs/1611.09978v1")
visual_genome_pairs.measure(None, 28.52, "CMN", url="https://arxiv.org/abs/1611.09978v1")
visual_genome_subjects.measure(None, 44.24, "CMN", url="https://arxiv.org/abs/1611.09978v1")
| AI-metrics-master | data/vision.py |
"Data files for the AI Progress Measurement Notebook"
| AI-metrics-master | data/__init__.py |
# -*- coding: utf-8 -*-
"Vision data generated by scrapers/awty.py and then edited by hand"
from data.vision import *
# Data imported from Rodrigo Benenson's "Who is the Best at X / Are we there
# yet?" (https://rodrigob.github.io/are_we_there_yet/build/#about)
msrc21_pc = image_classification.metric("MSRC-21 image semantic labelling (per-class)", "http://jamie.shotton.org/work/data.html", scale=correct_percent)
msrc21_pp = image_classification.metric("MSRC-21 image semantic labelling (per-pixel)", "http://jamie.shotton.org/work/data.html", scale=correct_percent)
cifar100 = image_classification.metric("CIFAR-100 Image Recognition", "https://www.cs.toronto.edu/~kriz/cifar.html", scale=correct_percent)
cifar10 = image_classification.metric("CIFAR-10 Image Recognition", "https://www.cs.toronto.edu/~kriz/cifar.html", scale=correct_percent, target=94, target_source="http://karpathy.github.io/2011/04/27/manually-classifying-cifar10/")
svhn = image_classification.metric("Street View House Numbers (SVHN)", "http://ufldl.stanford.edu/housenumbers/", scale=error_percent, target=2.0, target_source="http://ufldl.stanford.edu/housenumbers/nips2011_housenumbers.pdf")
# We declare MNIST solved because the gap between best performance and human performance appears to be less than the uncertainty in human performance
mnist = image_classification.metric("MNIST handwritten digit recognition", "http://yann.lecun.com/exdb/mnist/", scale=error_percent, target=0.2, target_source="http://people.idsia.ch/~juergen/superhumanpatternrecognition.html", solved=True)
stl10 = image_classification.metric("STL-10 Image Recognition", "https://cs.stanford.edu/~acoates/stl10/", scale=correct_percent)
leeds_sport_poses = image_classification.metric("Leeds Sport Poses")
# Data not in the AWTY scrape at all
# This AWTY URL is broken
mnist.measure(date(2013,2,28), 0.52, 'COSFIRE', 'http://www.cs.rug.nl/~george/articles/PAMI2013.pdf', papername='Trainable COSFIRE Filters for Keypoint Detection and Pattern Recognition')
mnist.measure(None, 0.38, 'Fitnet-LSUV-SVM', url='http://arxiv.org/abs/1511.06422', papername='All you need is a good init', uncertainty=0.0, venue='ICLR 2015')
# -------------------------------------------------
# additional, newer data on the AWTY problems
# -------------------------------------------------
cifar100.measure(None, 100 - 22.71, "ResNet-1001", url="https://arxiv.org/pdf/1603.05027", uncertainty=0.22)
cifar10.measure(None, 100 - 4.62, "ResNet-1001", url="https://arxiv.org/pdf/1603.05027", uncertainty=0.20)
cifar100.measure(None, 69.0, "NiN+Superclass+CDJ", url="https://arxiv.org/abs/1706.02003")
stl10.measure(None, 77.79, u"CC-GAN²", url="https://arxiv.org/abs/1611.06430v1", uncertainty=0.8)
cifar10.measure(None, 100 - 5.62, "ResNet+ELU", url="https://arxiv.org/pdf/1604.04112.pdf")
cifar100.measure(None, 100 - 26.55, "ResNet+ELU", url="https://arxiv.org/pdf/1604.04112.pdf")
cifar10.measure(None, 95.6, "Evolution ensemble", url="https://arxiv.org/pdf/1703.01041.pdf")
cifar100.measure(None, 77.0, "Evolution", url="https://arxiv.org/pdf/1703.01041.pdf")
# ---------------------------------------------------------------------------------
# Originally generated by scrapers/awty.py
# Algorithm names hurriedly edited to improve graphs; please fix them where they
# don't match what's used in the literature
# Handling 'STL-10' classification_datasets_results.html#53544c2d3130
stl10.measure(None, 74.33, 'SWWAE', url='http://arxiv.org/abs/1506.02351', papername='Stacked What-Where Auto-encoders', uncertainty=0.0, venue='arXiv 2015', notes='')
stl10.measure(None, 74.1, 'Convolutional Clustering', url='http://arxiv.org/abs/1511.06241', papername='Convolutional Clustering for Unsupervised Learning', uncertainty=0.0, venue='arXiv 2015', notes='3 layers + multi dict. With 2 layers, reaches 71.4%')
stl10.measure(None, 73.15, 'Deep Representation Learning with Target Coding', url='http://personal.ie.cuhk.edu.hk/~ccloy/files/aaai_2015_target_coding.pdf', papername='Deep Representation Learning with Target Coding', uncertainty=0.0, venue='AAAI 2015', notes='')
stl10.measure(None, 72.8, 'Discriminative Unsupervised Feature Learning with Convolutional Neural Networks', url='http://papers.nips.cc/paper/5548-discriminative-unsupervised-feature-learning-with-convolutional-neural-networks.pdf', papername='Discriminative Unsupervised Feature Learning with Convolutional Neural Networks', uncertainty=0.0, venue='NIPS 2014', notes='Unsupervised feature learning + linear SVM')
stl10.measure(None, 70.2, 'An Analysis of Unsupervised Pre-training in Light of Recent Advances', url='http://arxiv.org/abs/1412.6597', papername='An Analysis of Unsupervised Pre-training in Light of Recent Advances', uncertainty=0.0, venue='ICLR 2015', notes='Unsupervised pre-training, with supervised fine-tuning. Uses dropout and data-augmentation.')
stl10.measure(None, 70.1, 'Multi-Task Bayesian Optimization', url='http://hips.seas.harvard.edu/files/swersky-multi-nips-2013.pdf', papername='Multi-Task Bayesian Optimization', uncertainty=0.0, venue='NIPS 2013', notes='Also uses CIFAR-10 training data')
stl10.measure(None, 68.23, 'C-SVDDNet', url='http://arxiv.org/abs/1412.7259', papername='C-SVDDNet: An Effective Single-Layer Network for Unsupervised Feature Learning', uncertainty=0.0, venue='arXiv 2014', notes='')
stl10.measure(None, 68.0, 'DFF Committees', url='http://arxiv.org/abs/1406.5947', papername='Committees of deep feedforward networks trained with few data', uncertainty=0.0, venue='arXiv 2014', notes='')
stl10.measure(None, 67.9, 'Nonnegativity Constraints ', url='http://jmlr.org/proceedings/papers/v32/line14.pdf', papername='Stable and Efficient Representation Learning with Nonnegativity Constraints ', uncertainty=0.0, venue='ICML 2014', notes=u'3-layers + multi-dict. 5 \xb1 0.5 with 3-layers only. 6 \xb1 0.6 with 1-layers only.')
stl10.measure(None, 64.5, 'RGB-D Based Object Recognition', url='http://homes.cs.washington.edu/~lfb/paper/iser12.pdf', papername='Unsupervised Feature Learning for RGB-D Based Object Recognition', uncertainty=0.0, venue='ISER 2012', notes='Hierarchical sparse coding using Matching Pursuit and K-SVD')
stl10.measure(None, 62.32, 'CKN', url='http://arxiv.org/abs/1406.3332', papername='Convolutional Kernel Networks', uncertainty=0.0, venue='arXiv 2014', notes='No data augmentation.')
stl10.measure(None, 62.3, 'Discriminative Learning of Sum-Product Networks', url='http://homes.cs.washington.edu/~rcg/papers/dspn.pdf', papername='Discriminative Learning of Sum-Product Networks', uncertainty=0.0, venue='NIPS 2012', notes='')
stl10.measure(None, 61.0, 'No more meta-parameter tuning in unsupervised sparse feature learning', url='http://arxiv.org/abs/1402.5766', papername='No more meta-parameter tuning in unsupervised sparse feature learning', uncertainty=0.0, venue='arXiv 2014', notes='')
stl10.measure(None, 61.0, 'Simulated Fixations', url='http://papers.nips.cc/paper/4730-deep-learning-of-invariant-features-via-simulated-fixations-in-video', papername='Deep Learning of Invariant Features via Simulated Fixations in Video', uncertainty=0.0, venue='NIPS 2012 2012', notes='')
stl10.measure(None, 60.1, 'Receptive Fields', url='http://www.stanford.edu/~acoates/papers/coatesng_nips_2011.pdf', papername='Selecting Receptive Fields in Deep Networks ', uncertainty=0.0, venue='NIPS 2011', notes='')
stl10.measure(None, 58.7, 'Invariant Representations with Local Transformations', url='http://web.eecs.umich.edu/~honglak/icml12-invariantFeatureLearning.pdf', papername='Learning Invariant Representations with Local Transformations', uncertainty=0.0, venue='ICML 2012', notes='')
stl10.measure(None, 58.28, 'Pooling-Invariant', url='http://arxiv.org/pdf/1302.5056v1.pdf', papername='Pooling-Invariant Image Feature Learning ', uncertainty=0.0, venue='arXiv 2012', notes='1600 codes, learnt using 2x PDL')
stl10.measure(None, 56.5, 'Deep Learning of Invariant Features via Simulated Fixations in Video', url='http://ai.stanford.edu/~wzou/nips_ZouZhuNgYu12.pdf', papername='Deep Learning of Invariant Features via Simulated Fixations in Video', uncertainty=0.0, venue='NIPS 2012', notes='Trained also with video (unrelated to STL-10) obtained 61%')
# Handling 'SVHN' classification_datasets_results.html#5356484e
svhn.measure(None, 1.69, 'Tree+Max-Avg pooling', url='http://arxiv.org/abs/1509.08985', papername='Generalizing Pooling Functions in Convolutional Neural Networks: Mixed, Gated, and Tree', uncertainty=0.0, venue='AISTATS 2016', notes='Single model without data augmentation')
svhn.measure(None, 1.76, 'CMsC', url='http://arxiv.org/abs/1511.05635', papername='Competitive Multi-scale Convolution', uncertainty=0.0, venue='arXiv 2015', notes='')
svhn.measure(None, 1.77, 'RCNN-96', url='http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_004.pdf', papername='Recurrent Convolutional Neural Network for Object Recognition', uncertainty=0.0, venue='CVPR 2015', notes='Without data augmentation')
svhn.measure(None, 1.81, 'BNM NiN', url='http://arxiv.org/abs/1511.02583', papername='Batch-normalized Maxout Network in Network', uncertainty=0.0, venue='arXiv 2015', notes='(k=5 maxout pieces in each maxout unit).')
svhn.measure(None, 1.92, 'DSN', url='http://vcl.ucsd.edu/~sxie/2014/09/12/dsn-project/', papername='Deeply-Supervised Nets', uncertainty=0.0, venue='arXiv 2014', notes='')
svhn.measure(None, 1.92, 'MLR DNN', url='http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7258343', papername='Multi-Loss Regularized Deep Neural Network', uncertainty=0.0, venue='CSVT 2015', notes='Based on NiN architecture.')
svhn.measure(None, 1.94, 'Regularization of Neural Networks using DropConnect', url='http://cs.nyu.edu/~wanli/dropc/', papername='Regularization of Neural Networks using DropConnect', uncertainty=0.0, venue='ICML 2013', notes='')
svhn.measure(None, 1.97, 'MIM', url='http://arxiv.org/abs/1508.00330', papername='On the Importance of Normalisation Layers in Deep Learning with Piecewise Linear Activation Units', uncertainty=0.08, venue='arXiv 2015', notes='')
#svhn.measure(None, 2.0, 'Estimated human performance', url='http://ufldl.stanford.edu/housenumbers/nips2011_housenumbers.pdf', papername='Estimated human performance', uncertainty=0.0, venue='NIPS 2011', notes='Based on the paper that introduced the dataset Reading Digits in Natural Images with Unsupervised Feature Learning, section 5.')
#Skipping apparent human performance (target_source) paper http://ufldl.stanford.edu/housenumbers/nips2011_housenumbers.pdf
svhn.measure(None, 2.15, 'BinaryConnect', url='http://papers.nips.cc/paper/5647-binaryconnect-training-deep-neural-networks-with-binary-weights-during-propagations.pdf', papername='BinaryConnect: Training Deep Neural Networks with binary weights during propagations', uncertainty=0.0, venue='NIPS 2015', notes='')
svhn.measure(None, 2.16, 'DCNN', url='http://openreview.net/document/0c571b22-f4b6-4d58-87e4-99d7de42a893#0c571b22-f4b6-4d58-87e4-99d7de42a893', papername='Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks', uncertainty=0.0, venue='ICLR 2014', notes='For classification of individual digits with a single network, error rate is 2.16%. For classification of the entire digit sequence (first paper doing this): error rate of 3.97%.')
svhn.measure(None, 2.35, 'NiN', url='http://openreview.net/document/9b05a3bb-3a5e-49cb-91f7-0f482af65aea#9b05a3bb-3a5e-49cb-91f7-0f482af65aea', papername='Network in Network', uncertainty=0.0, venue='ICLR 2014', notes='NIN + Dropout The code for NIN available at https://github.com/mavenlin/cuda-convnet')
svhn.measure(None, 2.38, 'ReNet', url='http://arxiv.org/abs/1505.00393', papername='ReNet: A Recurrent Neural Network Based Alternative to Convolutional Networks', uncertainty=0.0, venue='arXiv 2015', notes='')
svhn.measure(None, 2.47, 'Maxout', url='http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf', papername='Maxout Networks', uncertainty=0.0, venue='ICML 2013', notes='This result was obtained using convolution but not any synthetic transformations of the training data.')
svhn.measure(None, 2.8, 'Stochastic Pooling', url='http://arxiv.org/pdf/1301.3557.pdf', papername='Stochastic Pooling for Regularization of Deep Convolutional Neural Networks', uncertainty=0.0, venue='arXiv 2013', notes='64-64-128 Stochastic Pooling')
svhn.measure(None, 3.96, 'FLSCNN', url='http://arxiv.org/abs/1503.04596', papername='Enhanced Image Classification With a Fast-Learning Shallow Convolutional Neural Network', uncertainty=0.0, venue='arXiv 2015', notes='No data augmentation')
svhn.measure(None, 4.9, u'Convolutional neural networks applied to house numbers digit classification', url='http://yann.lecun.com/exdb/publis/pdf/sermanet-icpr-12.pdf', papername='Convolutional neural networks applied to house numbers digit classification', uncertainty=0.0, venue='ICPR 2012', notes='ConvNet / MS / L4 / Padded')
# Handling 'CIFAR-100' classification_datasets_results.html#43494641522d313030
cifar100.measure(None, 75.72, 'Exponential Linear Units', url='http://arxiv.org/abs/1511.07289', papername='Fast and Accurate Deep Network Learning by Exponential Linear Units', uncertainty=0.0, venue='arXiv 2015', notes='Without data augmentation.')
cifar100.measure(None, 75.7, 'SSCNN', url='http://arxiv.org/abs/1409.6070', papername='Spatially-sparse convolutional neural networks', uncertainty=0.0, venue='arXiv 2014', notes='')
cifar100.measure(None, 73.61, 'Fractional MP', url='http://arxiv.org/abs/1412.6071', papername='Fractional Max-Pooling', uncertainty=0.0, venue='arXiv 2015', notes='Uses 12 passes at test time. Reaches 68.55% when using a single pass at test time. Uses data augmentation during training.')
cifar100.measure(None, 72.6, 'Tuned CNN', url='http://arxiv.org/abs/1502.05700', papername='Scalable Bayesian Optimization Using Deep Neural Networks', uncertainty=0.0, venue='ICML 2015', notes='')
cifar100.measure(None, 72.44, 'CMsC', url='http://arxiv.org/abs/1511.05635', papername='Competitive Multi-scale Convolution', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar100.measure(None, 72.34, 'Fitnet4-LSUV', url='http://arxiv.org/abs/1511.06422', papername='All you need is a good init', uncertainty=0.0, venue='ICLR 2015', notes='Using RMSProp optimizer')
cifar100.measure(None, 71.14, 'BNM NiN', url='http://arxiv.org/abs/1511.02583', papername='Batch-normalized Maxout Network in Network', uncertainty=0.0, venue='arXiv 2015', notes='(k=5 maxout pieces in each maxout unit).')
cifar100.measure(None, 70.8, 'MIM', url='http://arxiv.org/abs/1508.00330', papername='On the Importance of Normalisation Layers in Deep Learning with Piecewise Linear Activation Units', uncertainty=0.2, venue='arXiv 2015', notes='')
cifar100.measure(None, 69.17, 'NiN+APL', url='http://arxiv.org/abs/1412.6830', papername='Learning Activation Functions to Improve Deep Neural Networks', uncertainty=0.0, venue='ICLR 2015', notes='Uses a piecewise linear activation function. 69.17% accuracy with data augmentation and 65.6% accuracy without data augmentation.')
cifar100.measure(None, 69.12, 'SWWAE', url='http://arxiv.org/abs/1506.02351', papername='Stacked What-Where Auto-encoders', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar100.measure(None, 68.53, 'MLR DNN', url='http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7258343', papername='Multi-Loss Regularized Deep Neural Network', uncertainty=0.0, venue='CSVT 2015', notes='With data augmentation, 65.82% without. Based on NiN architecture.')
cifar100.measure(None, 68.4, 'Spectral Representations for Convolutional Neural Networks', url='http://papers.nips.cc/paper/5649-spectral-representations-for-convolutional-neural-networks.pdf', papername='Spectral Representations for Convolutional Neural Networks', uncertainty=0.0, venue='NIPS 2015', notes='')
cifar100.measure(None, 68.25, 'RCNN-96', url='http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_004.pdf', papername='Recurrent Convolutional Neural Network for Object Recognition', uncertainty=0.0, venue='CVPR 2015', notes='')
cifar100.measure(None, 67.76, 'VDN', url='http://people.idsia.ch/~rupesh/very_deep_learning/', papername='Training Very Deep Networks', uncertainty=0.0, venue='NIPS 2015', notes='Best result selected on test set. 67.61% average over multiple trained models.')
cifar100.measure(None, 67.68, 'DCNN+GFE', url='http://www.isip.uni-luebeck.de/fileadmin/uploads/tx_wapublications/hertel_ijcnn_2015.pdf', papername='Deep Convolutional Neural Networks as Generic Feature Extractors', uncertainty=0.0, venue='IJCNN 2015', notes='feature extraction part of convnet is trained on imagenet (external training data), classification part is trained on cifar-100')
cifar100.measure(None, 67.63, 'Tree+Max-Avg pooling', url='http://arxiv.org/abs/1509.08985', papername='Generalizing Pooling Functions in Convolutional Neural Networks: Mixed, Gated, and Tree', uncertainty=0.0, venue='AISTATS 2016', notes='Single model without data augmentation')
cifar100.measure(None, 67.38, 'HD-CNN', url='https://sites.google.com/site/homepagezhichengyan/home/hdcnn', papername='HD-CNN: Hierarchical Deep Convolutional Neural Network for Large Scale Visual Recognition', uncertainty=0.0, venue='ICCV 2015', notes='')
cifar100.measure(None, 67.16, 'Universum Prescription', url='http://arxiv.org/abs/1511.03719', papername='Universum Prescription: Regularization using Unlabeled Data', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar100.measure(None, 66.29, 'ACN', url='http://arxiv.org/pdf/1412.6806.pdf', papername='Striving for Simplicity: The All Convolutional Net', uncertainty=0.0, venue='ICLR 2014', notes='')
cifar100.measure(None, 66.22, 'Deep Networks with Internal Selective Attention through Feedback Connections', url='http://papers.nips.cc/paper/5276-deep-networks-with-internal-selective-attention-through-feedback-connections.pdf', papername='Deep Networks with Internal Selective Attention through Feedback Connections', uncertainty=0.0, venue='NIPS 2014', notes='')
cifar100.measure(None, 65.43, 'DSN', url='http://vcl.ucsd.edu/~sxie/2014/09/12/dsn-project/', papername='Deeply-Supervised Nets', uncertainty=0.0, venue='arXiv 2014', notes='Single model, without data augmentation.')
cifar100.measure(None, 64.77, 'Deep Representation Learning with Target Coding', url='http://personal.ie.cuhk.edu.hk/~ccloy/files/aaai_2015_target_coding.pdf', papername='Deep Representation Learning with Target Coding', uncertainty=0.0, venue='AAAI 2015', notes='')
cifar100.measure(None, 64.32, 'NiN', url='http://openreview.net/document/9b05a3bb-3a5e-49cb-91f7-0f482af65aea#9b05a3bb-3a5e-49cb-91f7-0f482af65aea', papername='Network in Network', uncertainty=0.0, venue='ICLR 2014', notes='NIN + Dropout The code for NIN available at https://github.com/mavenlin/cuda-convnet')
cifar100.measure(None, 63.15, 'Tree Priors', url='http://www.cs.toronto.edu/~nitish/treebasedpriors.pdf', papername='Discriminative Transfer Learning with Tree-based Priors', uncertainty=0.0, venue='NIPS 2013', notes=u'The baseline Convnet + max pooling + dropout reaches 62.80% (without any tree prior).')
cifar100.measure(None, 61.86, 'DNN+Probabilistic Maxout', url='http://openreview.net/document/28d9c3ab-fe88-4836-b898-403d207a037c#28d9c3ab-fe88-4836-b898-403d207a037c', papername='Improving Deep Neural Networks with Probabilistic Maxout Units', uncertainty=0.0, venue='ICLR 2014', notes='')
cifar100.measure(None, 61.43, 'Maxout Networks', url='http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf', papername='Maxout Networks', uncertainty=0.0, venue='ICML 2013', notes='Uses convolution. Does not use dataset agumentation.')
cifar100.measure(None, 60.8, 'Stable and Efficient Representation Learning with Nonnegativity Constraints ', url='http://jmlr.org/proceedings/papers/v32/line14.pdf', papername='Stable and Efficient Representation Learning with Nonnegativity Constraints ', uncertainty=0.0, venue='ICML 2014', notes='3-layers + multi-dict. 7 with 3-layers only. 3 with 1-layers only.')
cifar100.measure(None, 59.75, 'RReLU', url='http://arxiv.org/pdf/1505.00853.pdf', papername='Empirical Evaluation of Rectified Activations in Convolution Network', uncertainty=0.0, venue='ICML workshop 2015', notes='Using Randomized Leaky ReLU')
cifar100.measure(None, 57.49, 'Stochastic Pooling', url='http://arxiv.org/pdf/1301.3557.pdf', papername='Stochastic Pooling for Regularization of Deep Convolutional Neural Networks', uncertainty=0.0, venue='arXiv 2013', notes='')
cifar100.measure(None, 56.29, 'Smooth Pooling Regions', url='http://www.d2.mpi-inf.mpg.de/content/learning-smooth-pooling-regions-visual-recognition', papername='Smooth Pooling Regions', uncertainty=0.0, venue='BMVC 2013', notes='No data augmentation.')
cifar100.measure(None, 54.23, 'Receptive Field Learning', url='http://www.eecs.berkeley.edu/~jiayq/assets/pdf/cvpr12_pooling.pdf', papername='Beyond Spatial Pyramids: Receptive Field Learning for Pooled Image Features', uncertainty=0.0, venue='CVPR 2012', notes='')
# Handling 'CIFAR-10' classification_datasets_results.html#43494641522d3130
cifar10.measure(None, 96.53, 'Fractional MP', url='http://arxiv.org/abs/1412.6071', papername='Fractional Max-Pooling', uncertainty=0.0, venue='arXiv 2015', notes='Uses 100 passes at test time. Reaches 95.5% when using a single pass at test time, and 96.33% when using 12 passes.. Uses data augmentation during training.')
cifar10.measure(None, 95.59, 'ACN', url='http://arxiv.org/pdf/1412.6806.pdf', papername='Striving for Simplicity: The All Convolutional Net', uncertainty=0.0, venue='ICLR 2015', notes='92% without data augmentation, 92.75% with small data augmentation, 95.59% when using agressive data augmentation and larger network.')
cifar10.measure(None, 94.16, 'Fitnet4-LSUV', url='http://arxiv.org/abs/1511.06422', papername='All you need is a good init', uncertainty=0.0, venue='ICLR 2016', notes='Only mirroring and random shifts, no extreme data augmentation. Uses thin deep residual net with maxout activations.')
#cifar10.measure(None, 94.0, 'Lessons learned from manually classifying CIFAR-10', url='http://karpathy.github.io/2011/04/27/manually-classifying-cifar10/', papername='Lessons learned from manually classifying CIFAR-10', uncertainty=0.0, venue='unpublished 2011', notes='Rough estimate from a single individual, over 400 training images (~1% of training data).')
#Skipping apparent human performance (target_source) paper http://karpathy.github.io/2011/04/27/manually-classifying-cifar10/
cifar10.measure(None, 93.95, 'Tree+Max-Avg pooling', url='http://arxiv.org/abs/1509.08985', papername='Generalizing Pooling Functions in Convolutional Neural Networks: Mixed, Gated, and Tree', uncertainty=0.0, venue='AISTATS 2016', notes='Single model with data augmentation, 92.38% without.')
cifar10.measure(None, 93.72, 'SSCNN', url='http://arxiv.org/abs/1409.6070', papername='Spatially-sparse convolutional neural networks', uncertainty=0.0, venue='arXiv 2014', notes='')
cifar10.measure(None, 93.63, 'Tuned CNN', url='http://arxiv.org/abs/1502.05700', papername='Scalable Bayesian Optimization Using Deep Neural Networks', uncertainty=0.0, venue='ICML 2015', notes='')
cifar10.measure(None, 93.57, 'DRL', url='http://arxiv.org/abs/1512.03385', papername='Deep Residual Learning for Image Recognition', uncertainty=0.0, venue='arXiv 2015', notes='Best performance reached with 110 layers. Using 1202 layers leads to 92.07%, 56 layers lead to 93.03%.')
cifar10.measure(None, 93.45, 'Exponential Linear Units', url='http://arxiv.org/abs/1511.07289', papername='Fast and Accurate Deep Network Learning by Exponential Linear Units', uncertainty=0.0, venue='arXiv 2015', notes='Without data augmentation.')
cifar10.measure(None, 93.34, 'Universum Prescription', url='http://arxiv.org/abs/1511.03719', papername='Universum Prescription: Regularization using Unlabeled Data', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar10.measure(None, 93.25, 'BNM NiN', url='http://arxiv.org/abs/1511.02583', papername='Batch-normalized Maxout Network in Network', uncertainty=0.0, venue='arXiv 2015', notes='(k=5 maxout pieces in each maxout unit). Reaches 92.15% without data augmentation.')
cifar10.measure(None, 93.13, 'CMsC', url='http://arxiv.org/abs/1511.05635', papername='Competitive Multi-scale Convolution', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar10.measure(None, 92.91, 'RCNN-96', url='http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_004.pdf', papername='Recurrent Convolutional Neural Network for Object Recognition', uncertainty=0.0, venue='CVPR 2015', notes='Reaches 91.31% without data augmentation.')
cifar10.measure(None, 92.49, 'NiN+APL', url='http://arxiv.org/abs/1412.6830', papername='Learning Activation Functions to Improve Deep Neural Networks', uncertainty=0.0, venue='ICLR 2015', notes='Uses an adaptive piecewise linear activation function. 92.49% accuracy with data augmentation and 90.41% accuracy without data augmentation.')
cifar10.measure(None, 92.45, 'cifar.torch', url='http://torch.ch/blog/2015/07/30/cifar.html', papername='cifar.torch', uncertainty=0.0, venue='unpublished 2015', notes='Code available at https://github.com/szagoruyko/cifar.torch')
cifar10.measure(None, 92.4, 'VDN', url='http://people.idsia.ch/~rupesh/very_deep_learning/', papername='Training Very Deep Networks', uncertainty=0.0, venue='NIPS 2015', notes='Best result selected on test set. 92.31% average over multiple trained models.')
cifar10.measure(None, 92.23, 'SWWAE', url='http://arxiv.org/abs/1506.02351', papername='Stacked What-Where Auto-encoders', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar10.measure(None, 91.88, 'MLR DNN', url='http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7258343', papername='Multi-Loss Regularized Deep Neural Network', uncertainty=0.0, venue='CSVT 2015', notes='With data augmentation, 90.45% without. Based on NiN architecture.')
cifar10.measure(None, 91.78, 'DSN', url='http://vcl.ucsd.edu/~sxie/2014/09/12/dsn-project/', papername='Deeply-Supervised Nets', uncertainty=0.0, venue='arXiv 2014', notes='Single model, with data augmentation: 91.78%. Without data augmentation: 90.22%.')
cifar10.measure(None, 91.73, 'BinaryConnect', url='http://papers.nips.cc/paper/5647-binaryconnect-training-deep-neural-networks-with-binary-weights-during-propagations.pdf', papername='BinaryConnect: Training Deep Neural Networks with binary weights during propagations', uncertainty=0.0, venue='NIPS 2015', notes='These results were obtained without using any data-augmentation.')
cifar10.measure(None, 91.48, 'MIM', url='http://arxiv.org/abs/1508.00330', papername='On the Importance of Normalisation Layers in Deep Learning with Piecewise Linear Activation Units', uncertainty=0.2, venue='arXiv 2015', notes='')
cifar10.measure(None, 91.4, 'Spectral Representations for Convolutional Neural Networks', url='http://papers.nips.cc/paper/5649-spectral-representations-for-convolutional-neural-networks.pdf', papername='Spectral Representations for Convolutional Neural Networks', uncertainty=0.0, venue='NIPS 2015', notes='')
cifar10.measure(None, 91.2, 'NiN', url='http://openreview.net/document/9b05a3bb-3a5e-49cb-91f7-0f482af65aea#9b05a3bb-3a5e-49cb-91f7-0f482af65aea', papername='Network In Network', uncertainty=0.0, venue='ICLR 2014', notes='The code for NIN available at https://github.com/mavenlin/cuda-convnet NIN + Dropout 89.6% NIN + Dropout + Data Augmentation 91.2%')
cifar10.measure(None, 91.19, 'ELC', url='http://aad.informatik.uni-freiburg.de/papers/15-IJCAI-Extrapolation_of_Learning_Curves.pdf', papername='Speeding up Automatic Hyperparameter Optimization of Deep Neural Networks by Extrapolation of Learning Curves', uncertainty=0.0, venue='IJCAI 2015', notes=u'Based on the "all convolutional" architecture, which reaches 90.92% by itself.')
cifar10.measure(None, 90.78, 'Deep Networks with Internal Selective Attention through Feedback Connections', url='http://papers.nips.cc/paper/5276-deep-networks-with-internal-selective-attention-through-feedback-connections.pdf', papername='Deep Networks with Internal Selective Attention through Feedback Connections', uncertainty=0.0, venue='NIPS 2014', notes='No data augmentation')
cifar10.measure(None, 90.68, 'DropConnect', url='http://cs.nyu.edu/~wanli/dropc/', papername='Regularization of Neural Networks using DropConnect', uncertainty=0.0, venue='ICML 2013', notes='')
cifar10.measure(None, 90.65, 'Maxout Networks', url='http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf', papername='Maxout Networks', uncertainty=0.0, venue='ICML 2013', notes='This result was obtained using both convolution and synthetic translations / horizontal reflections of the training data. Reaches 88.32% when using convolution, but without any synthetic transformations of the training data.')
cifar10.measure(None, 90.61, 'DNN+Probabilistic Maxout', url='http://openreview.net/document/28d9c3ab-fe88-4836-b898-403d207a037c#28d9c3ab-fe88-4836-b898-403d207a037c', papername='Improving Deep Neural Networks with Probabilistic Maxout Units', uncertainty=0.0, venue='ICLR 2014', notes='65% without data augmentation. 61% when using data augmentation.')
cifar10.measure(None, 90.5, 'GP EI', url='http://papers.nips.cc/paper/4522-practical-bayesian-optimization-of-machine-learning-algorithms.pdf', papername='Practical Bayesian Optimization of Machine Learning Algorithms ', uncertainty=0.0, venue='NIPS 2012', notes=u'Reaches 85.02% without data augmentation. With data augmented with horizontal reflections and translations, 90.5% accuracy on test set is achieved.')
cifar10.measure(None, 89.67, 'APAC', url='http://arxiv.org/abs/1505.03229', papername='APAC: Augmented PAttern Classification with Neural Networks', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar10.measure(None, 89.14, 'DCNN+GFE', url='http://www.isip.uni-luebeck.de/fileadmin/uploads/tx_wapublications/hertel_ijcnn_2015.pdf', papername='Deep Convolutional Neural Networks as Generic Feature Extractors', uncertainty=0.0, venue='IJCNN 2015', notes='feature extraction part of convnet is trained on imagenet (external training data), classification part is trained on cifar-10')
cifar10.measure(None, 89.0, 'DCNN', url='http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks', papername='ImageNet Classification with Deep Convolutional Neural Networks', uncertainty=0.0, venue='NIPS 2012', notes='87% accuracy on the unaugmented data.')
cifar10.measure(None, 88.8, 'RReLU', url='http://arxiv.org/pdf/1505.00853.pdf', papername='Empirical Evaluation of Rectified Activations in Convolution Network', uncertainty=0.0, venue='ICML workshop 2015', notes='Using Randomized Leaky ReLU')
cifar10.measure(None, 88.79, 'MCDNN', url='http://www.idsia.ch/~ciresan/data/cvpr2012.pdf', papername='Multi-Column Deep Neural Networks for Image Classification ', uncertainty=0.0, venue='CVPR 2012', notes='Supplemental material, Technical Report')
cifar10.measure(None, 87.65, 'ReNet', url='http://arxiv.org/abs/1505.00393', papername='ReNet: A Recurrent Neural Network Based Alternative to Convolutional Networks', uncertainty=0.0, venue='arXiv 2015', notes='')
cifar10.measure(None, 86.7, 'An Analysis of Unsupervised Pre-training in Light of Recent Advances', url='http://arxiv.org/abs/1412.6597', papername='An Analysis of Unsupervised Pre-training in Light of Recent Advances', uncertainty=0.0, venue='ICLR 2015', notes='Unsupervised pre-training, with supervised fine-tuning. Uses dropout and data-augmentation.')
cifar10.measure(None, 84.87, 'Stochastic Pooling', url='http://arxiv.org/pdf/1301.3557.pdf', papername='Stochastic Pooling for Regularization of Deep Convolutional Neural Networks', uncertainty=0.0, venue='arXiv 2013', notes='')
cifar10.measure(None, 84.4, 'Improving neural networks by preventing co-adaptation of feature detectors', url='http://arxiv.org/pdf/1207.0580.pdf', papername='Improving neural networks by preventing co-adaptation of feature detectors', uncertainty=0.0, venue='arXiv 2012', notes=u'So called "dropout" method.')
cifar10.measure(None, 83.96, 'Discriminative Learning of Sum-Product Networks', url='http://papers.nips.cc/paper/4516-discriminative-learning-of-sum-product-networks', papername='Discriminative Learning of Sum-Product Networks', uncertainty=0.0, venue='NIPS 2012', notes='')
cifar10.measure(None, 82.9, 'Nonnegativity Constraints ', url='http://jmlr.org/proceedings/papers/v32/line14.pdf', papername='Stable and Efficient Representation Learning with Nonnegativity Constraints ', uncertainty=0.0, venue='ICML 2014', notes='Full data, 3-layers + multi-dict. 4 with 3-layers only. 0 with 1-layers only.')
cifar10.measure(None, 82.2, 'Local Transformations', url='http://icml.cc/2012/papers/659.pdf', papername='Learning Invariant Representations with Local Transformations', uncertainty=0.0, venue='ICML 2012', notes='K= 4,000')
cifar10.measure(None, 82.18, 'CKN', url='http://arxiv.org/abs/1406.3332', papername='Convolutional Kernel Networks', uncertainty=0.0, venue='arXiv 2014', notes='No data augmentation.')
cifar10.measure(None, 82.0, 'Discriminative Unsupervised Feature Learning with Convolutional Neural Networks', url='http://papers.nips.cc/paper/5548-discriminative-unsupervised-feature-learning-with-convolutional-neural-networks.pdf', papername='Discriminative Unsupervised Feature Learning with Convolutional Neural Networks', uncertainty=0.0, venue='NIPS 2014', notes='Unsupervised feature learning + linear SVM')
cifar10.measure(None, 80.02, 'Smooth Pooling Regions', url='http://www.d2.mpi-inf.mpg.de/content/learning-smooth-pooling-regions-visual-recognition', papername='Learning Smooth Pooling Regions for Visual Recognition', uncertainty=0.0, venue='BMVC 2013', notes='')
cifar10.measure(None, 80.0, 'Hierarchical Kernel Descriptors', url='http://research.cs.washington.edu/istc/lfb/paper/cvpr11.pdf', papername='Object Recognition with Hierarchical Kernel Descriptors', uncertainty=0.0, venue='CVPR 2011', notes='')
cifar10.measure(None, 79.7, 'Learning with Recursive Perceptual Representations', url='http://papers.nips.cc/paper/4747-learning-with-recursive-perceptual-representations', papername='Learning with Recursive Perceptual Representations', uncertainty=0.0, venue='NIPS 2012', notes='Code size 1600.')
cifar10.measure(None, 79.6, 'An Analysis of Single-Layer Networks in Unsupervised Feature Learning ', url='http://www.stanford.edu/~acoates/papers/coatesleeng_aistats_2011.pdf', papername='An Analysis of Single-Layer Networks in Unsupervised Feature Learning ', uncertainty=0.0, venue='AISTATS 2011', notes='79.6% obtained using K-means over whitened patches, with triangle encoding and 4000 features (clusters).')
cifar10.measure(None, 78.67, 'PCANet', url='http://arxiv.org/abs/1404.3606', papername='PCANet: A Simple Deep Learning Baseline for Image Classification?', uncertainty=0.0, venue='arXiv 2014', notes='No data augmentation. Multiple feature scales combined. 77.14% when using only a single scale.')
cifar10.measure(None, 75.86, 'FLSCNN', url='http://arxiv.org/abs/1503.04596', papername='Enhanced Image Classification With a Fast-Learning Shallow Convolutional Neural Network', uncertainty=0.0, venue='arXiv 2015', notes='No data augmentation')
# Handling 'MNIST' classification_datasets_results.html#4d4e495354
mnist.measure(None, 0.21, 'DropConnect', url='http://cs.nyu.edu/~wanli/dropc/', papername='Regularization of Neural Networks using DropConnect', uncertainty=0.0, venue='ICML 2013', notes='')
mnist.measure(None, 0.23, 'MCDNN', url='http://www.idsia.ch/~ciresan/data/cvpr2012.pdf', papername=u'Multi-column Deep Neural Networks for Image Classification ', uncertainty=0.0, venue='CVPR 2012', notes='')
mnist.measure(None, 0.23, 'APAC', url='http://arxiv.org/abs/1505.03229', papername='APAC: Augmented PAttern Classification with Neural Networks', uncertainty=0.0, venue='arXiv 2015', notes='')
mnist.measure(None, 0.24, 'BNM NiN', url='http://arxiv.org/abs/1511.02583', papername='Batch-normalized Maxout Network in Network', uncertainty=0.0, venue='arXiv 2015', notes='(k=5 maxout pieces in each maxout unit).')
mnist.measure(None, 0.29, 'Tree+Max-Avg pooling', url='http://arxiv.org/abs/1509.08985', papername='Generalizing Pooling Functions in Convolutional Neural Networks: Mixed, Gated, and Tree', uncertainty=0.0, venue='AISTATS 2016', notes='Single model without data augmentation')
mnist.measure(None, 0.31, 'RCNN-96', url='http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/2B_004.pdf', papername='Recurrent Convolutional Neural Network for Object Recognition', uncertainty=0.0, venue='CVPR 2015', notes='')
mnist.measure(None, 0.35, 'MIM', url='http://arxiv.org/abs/1508.00330', papername='On the Importance of Normalisation Layers in Deep Learning with Piecewise Linear Activation Units', uncertainty=0.03, venue='arXiv 2015', notes='')
mnist.measure(None, 0.32, 'Fractional MP', url='http://arxiv.org/abs/1412.6071', papername='Fractional Max-Pooling', uncertainty=0.0, venue='arXiv 2015', notes='Uses 12 passes at test time. Reaches 0.5% when using a single pass at test time.')
mnist.measure(None, 0.33, 'CMsC', url='http://arxiv.org/abs/1511.05635', papername='Competitive Multi-scale Convolution', uncertainty=0.0, venue='arXiv 2015', notes='')
mnist.measure(None, 0.35, 'DBSNN', url='http://arxiv.org/pdf/1003.0358.pdf', papername='Deep Big Simple Neural Nets Excel on Handwritten Digit Recognition', uncertainty=0.0, venue='Neural Computation 2010', notes='6-layer NN 784-2500-2000-1500-1000-500-10 (on GPU), uses elastic distortions')
mnist.measure(None, 0.35, 'C-SVDDNet', url='http://arxiv.org/abs/1412.7259', papername='C-SVDDNet: An Effective Single-Layer Network for Unsupervised Feature Learning', uncertainty=0.0, venue='arXiv 2014', notes='')
mnist.measure(None, 0.37, 'FLSCNN', url='http://arxiv.org/abs/1503.04596', papername='Enhanced Image Classification With a Fast-Learning Shallow Convolutional Neural Network', uncertainty=0.0, venue='arXiv 2015', notes='No data augmentation')
mnist.measure(None, 0.39, 'Energy-Based Sparse Represenation', url='http://papers.nips.cc/paper/3112-efficient-learning-of-sparse-representations-with-an-energy-based-model', papername=u'Efficient Learning of Sparse Representations with an Energy-Based Model', uncertainty=0.0, venue='NIPS 2006', notes='Large conv. net, unsup pretraining, uses elastic distortions')
mnist.measure(None, 0.39, 'CKN', url='http://arxiv.org/abs/1406.3332', papername='Convolutional Kernel Networks', uncertainty=0.0, venue='arXiv 2014', notes='No data augmentation.')
mnist.measure(None, 0.39, 'DSN', url='http://vcl.ucsd.edu/~sxie/2014/09/12/dsn-project/', papername='Deeply-Supervised Nets', uncertainty=0.0, venue='arXiv 2014', notes='')
mnist.measure(None, 0.4, 'Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis', url='http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=D1C7D701BD39935473808DA5A93426C5?doi=10.1.1.160.8494&rep=rep1&type=pdf', papername='Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis', uncertainty=0.0, venue='Document Analysis and Recognition 2003', notes='')
mnist.measure(None, 0.4, 'HOPE', url='http://arxiv.org/pdf/1502.00702.pdf', papername='Hybrid Orthogonal Projection and Estimation (HOPE): A New Framework to Probe and Learn Neural Networks', uncertainty=0.0, venue=' arXiv 2015', notes='')
mnist.measure(None, 0.42, 'MLR DNN', url='http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7258343', papername='Multi-Loss Regularized Deep Neural Network', uncertainty=0.0, venue='CSVT 2015', notes='Based on NiN architecture.')
mnist.measure(None, 0.45, 'Maxout Networks', url='http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf', papername='Maxout Networks', uncertainty=0.0, venue='ICML 2013', notes='Uses convolution. Does not use dataset augmentation.')
mnist.measure(None, 0.45, 'VDN', url='http://people.idsia.ch/~rupesh/very_deep_learning/', papername='Training Very Deep Networks', uncertainty=0.0, venue='NIPS 2015', notes='Best result selected on test set. 0.46% average over multiple trained models.')
mnist.measure(None, 0.45, 'ReNet', url='http://arxiv.org/abs/1505.00393', papername='ReNet: A Recurrent Neural Network Based Alternative to Convolutional Networks', uncertainty=0.0, venue='arXiv 2015', notes='')
mnist.measure(None, 0.46, 'DCNN+GFE', url='http://www.isip.uni-luebeck.de/fileadmin/uploads/tx_wapublications/hertel_ijcnn_2015.pdf', papername='Deep Convolutional Neural Networks as Generic Feature Extractors', uncertainty=0.0, venue='IJCNN 2015', notes='feature extraction part of convnet is trained on imagenet (external training data), classification part is trained on MNIST')
mnist.measure(None, 0.47, 'NiN', url='http://openreview.net/document/9b05a3bb-3a5e-49cb-91f7-0f482af65aea#9b05a3bb-3a5e-49cb-91f7-0f482af65aea', papername='Network in Network', uncertainty=0.0, venue='ICLR 2014', notes='NIN + Dropout The code for NIN available at https://github.com/mavenlin/cuda-convnet')
# skip broken URL
#mnist.measure(None, 0.52, 'COSFIRE', url='http://iwi.eldoc.ub.rug.nl/FILES/root/2013/IEEETPAMIAzzopardi/2013IEEETPAMIAzzopardi.pdf', papername='Trainable COSFIRE filters for keypoint detection and pattern recognition', uncertainty=0.0, venue='PAMI 2013', notes='Source code available.')
mnist.measure(None, 0.53, 'The Best Multi-Stage Architecture', url='http://yann.lecun.com/exdb/publis/pdf/jarrett-iccv-09.pdf', papername='What is the Best Multi-Stage Architecture for Object Recognition?', uncertainty=0.0, venue='ICCV 2009', notes='Large conv. net, unsup pretraining, no distortions')
mnist.measure(None, 0.54, 'Deformation Models', url='http://www.keysers.net/daniel/files/Keysers--Deformation-Models--TPAMI2007.pdf', papername='Deformation Models for Image Recognition', uncertainty=0.0, venue='PAMI 2007', notes='K-NN with non-linear deformation (IDM) (Preprocessing: shiftable edges)')
mnist.measure(None, 0.54, 'Trainable feature extractor', url='http://hal.inria.fr/docs/00/05/75/61/PDF/LauerSuenBlochPR.pdf', papername='A trainable feature extractor for handwritten digit recognition', uncertainty=0.0, venue='Pattern Recognition 2007', notes='Trainable feature extractor + SVMs, uses affine distortions')
mnist.measure(None, 0.56, 'ISVM', url='http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.88.9924&rep=rep1&type=pdf', papername='Training Invariant Support Vector Machines', uncertainty=0.0, venue='Machine Learning 2002', notes='Virtual SVM, deg-9 poly, 2-pixel jittered (Preprocessing: deskewing)')
mnist.measure(None, 0.59, 'Sparse Coding', url='http://www.inb.uni-luebeck.de/publikationen/pdfs/LaBaMa08c.pdf', papername='Simple Methods for High-Performance Digit Recognition Based on Sparse Coding', uncertainty=0.0, venue='TNN 2008', notes='Unsupervised sparse features + SVM, no distortions')
mnist.measure(None, 0.62, 'invariant feature hierarchies', url='http://yann.lecun.com/exdb/publis/pdf/ranzato-cvpr-07.pdf', papername='Unsupervised learning of invariant feature hierarchies with applications to object recognition', uncertainty=0.0, venue='CVPR 2007', notes='Large conv. net, unsup features, no distortions')
mnist.measure(None, 0.62, 'PCANet', url='http://arxiv.org/abs/1404.3606', papername='PCANet: A Simple Deep Learning Baseline for Image Classification?', uncertainty=0.0, venue='arXiv 2014', notes='No data augmentation.')
mnist.measure(None, 0.63, 'Shape contexts', url='http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=B2AAC2BC3824F19757CAC66986D5F3FF?doi=10.1.1.18.8852&rep=rep1&type=pdf', papername='Shape matching and object recognition using shape contexts', uncertainty=0.0, venue='PAMI 2002', notes='K-NN, shape context matching (preprocessing: shape context feature extraction)')
mnist.measure(None, 0.64, 'Receptive Field Learning', url='http://www.icsi.berkeley.edu/pubs/vision/beyondspatial12.pdf', papername='Beyond Spatial Pyramids: Receptive Field Learning for Pooled Image Features', uncertainty=0.0, venue='CVPR 2012', notes='')
mnist.measure(None, 0.68, 'CNN+Gabor Filters', url='http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.6559&rep=rep1&type=pdf', papername='Handwritten Digit Recognition using Convolutional Neural Networks and Gabor Filters', uncertainty=0.0, venue='ICCI 2003', notes='')
mnist.measure(None, 0.69, 'On Optimization Methods for Deep Learning', url='http://ai.stanford.edu/~quocle/LeNgiCoaLahProNg11.pdf', papername='On Optimization Methods for Deep Learning', uncertainty=0.0, venue='ICML 2011', notes='')
mnist.measure(None, 0.71, 'Deep Fried Convnets', url='http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Yang_Deep_Fried_Convnets_ICCV_2015_paper.pdf', papername='Deep Fried Convnets', uncertainty=0.0, venue='ICCV 2015', notes='Uses about 10x fewer parameters than the reference model, which reaches 0.87%.')
mnist.measure(None, 0.75, 'Sparse Activity and Sparse Connectivity in Supervised Learning', url='http://jmlr.org/papers/v14/thom13a.html', papername='Sparse Activity and Sparse Connectivity in Supervised Learning', uncertainty=0.0, venue='JMLR 2013', notes='')
mnist.measure(None, 0.78, 'Explaining and Harnessing Adversarial Examples', url='http://arxiv.org/abs/1412.6572', papername='Explaining and Harnessing Adversarial Examples', uncertainty=0.0, venue='ICLR 2015', notes='permutation invariant network used')
mnist.measure(None, 0.82, 'CDBN', url=None, papername='Convolutional Deep Belief Networks for Scalable Unsupervised Learning of Hierarchical Representations', uncertainty=0.0, venue='ICML 2009', notes='')
mnist.measure(None, 0.84, 'Supervised Translation-Invariant Sparse Coding', url='http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.206.339&rep=rep1&type=pdf', papername='Supervised Translation-Invariant Sparse Coding', uncertainty=0.0, venue='CVPR 2010', notes='Uses sparse coding + svm.')
mnist.measure(None, 0.94, 'Large-Margin kNN', url=None, papername='Large-Margin kNN Classification using a Deep Encoder Network', uncertainty=0.0, venue=' 2009', notes='')
mnist.measure(None, 0.95, 'Deep Boltzmann Machines', url='http://www.utstat.toronto.edu/~rsalakhu/papers/dbm.pdf', papername='Deep Boltzmann Machines', uncertainty=0.0, venue='AISTATS 2009', notes='')
mnist.measure(None, 1.01, 'BinaryConnect', url='http://papers.nips.cc/paper/5647-binaryconnect-training-deep-neural-networks-with-binary-weights-during-propagations.pdf', papername='BinaryConnect: Training Deep Neural Networks with binary weights during propagations', uncertainty=0.0, venue='NIPS 2015', notes='Using 50% dropout')
mnist.measure(None, 1.1, 'StrongNet', url='http://www.alglib.net/articles/tr-20140813-strongnet.pdf', papername='StrongNet: mostly unsupervised image recognition with strong neurons', uncertainty=0.0, venue='technical report on ALGLIB website 2014', notes=u'StrongNet is a neural design which uses two innovations: (a) strong neurons - highly nonlinear neurons with multiple outputs - and (b) a mostly unsupervised, backpropagation-free architecture, with all layers except the last trained in a completely unsupervised setting.')
mnist.measure(None, 1.12, 'DBN', url=None, papername='CS81: Learning words with Deep Belief Networks', uncertainty=0.0, venue=' 2008', notes='')
mnist.measure(None, 1.19, 'CNN', url=None, papername='Convolutional Neural Networks', uncertainty=0.0, venue=' 2003', notes=u'The ConvNN is based on the paper "Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis".')
mnist.measure(None, 1.2, 'Reducing the dimensionality of data with neural networks', url=None, papername='Reducing the dimensionality of data with neural networks', uncertainty=0.0, venue=' 2006', notes='')
mnist.measure(None, 1.4, 'Convolutional Clustering', url='http://arxiv.org/abs/1511.06241', papername='Convolutional Clustering for Unsupervised Learning', uncertainty=0.0, venue='arXiv 2015', notes='2 layers + multi dict.')
mnist.measure(None, 1.5, 'Deep learning via semi-supervised embedding', url=None, papername='Deep learning via semi-supervised embedding', uncertainty=0.0, venue=' 2008', notes='')
# This paper has reasonable results on other things, but one so bad on MNIST that it messes up the graph. So we exclude it:
# mnist.measure(None, 14.53, 'Deep Representation Learning with Target Coding', url='http://personal.ie.cuhk.edu.hk/~ccloy/files/aaai_2015_target_coding.pdf', papername='Deep Representation Learning with Target Coding', uncertainty=0.0, venue='AAAI 2015', notes='')
# Handling 'MSRC-21' semantic_labeling_datasets_results.html#4d5352432d3231
msrc21_pc.measure(None, 80.9, 'Large FC CRF', url='http://ai2-s2-pdfs.s3.amazonaws.com/daba/eb9185990f65f807c95ff4d09057c2bf1cf0.pdf', papername='Large-Scale Semantic Co-Labeling of Image Sets', uncertainty=0.0, venue='WACV 2014', notes='')
msrc21_pc.measure(None, 80.0, 'Harmony Potentials', url='http://link.springer.com/article/10.1007%2Fs11263-011-0449-8', papername='Harmony Potentials - Fusing Local and Global Scale for Semantic Image Segmentation', uncertainty=0.0, venue='IJCV 2012', notes='per-class % / per-pixel %')
msrc21_pc.measure(None, 79.0, 'Describing the Scene as a Whole: Joint Object Detection, Scene Classification and Semantic Segmentation', url='http://ttic.uchicago.edu/~rurtasun/publications/yao_et_al_cvpr12.pdf', papername='Describing the Scene as a Whole: Joint Object Detection, Scene Classification and Semantic Segmentation', uncertainty=0.0, venue='CVPR 2012', notes='')
msrc21_pc.measure(None, 78.2, 'MPP', url='http://mediatum.ub.tum.de/doc/1175516/1175516.pdf', papername='Morphological Proximity Priors: Spatial Relationships for Semantic Segmentation', uncertainty=0.0, venue='TUM-I1222 2013', notes='')
msrc21_pc.measure(None, 78.0, 'FC CRF', url='http://graphics.stanford.edu/projects/densecrf/densecrf.pdf', papername='Efficient Inference in Fully Connected CRFs with Gaussian Edge Potentials', uncertainty=0.0, venue='NIPS 2011', notes='Strong unary used provides 76.6% / 84.0%')
msrc21_pc.measure(None, 77.0, 'HCRF+CO', url='http://research.microsoft.com/en-us/um/people/pkohli/papers/lrkt_eccv2010.pdf', papername='Graph Cut based Inference with Co-occurrence Statistics', uncertainty=0.0, venue='ECCV 2010', notes='')
msrc21_pc.measure(None, 77.0, 'Are Spatial and Global Constraints Really Necessary for Segmentation?', url='http://infoscience.epfl.ch/record/169178/files/lucchi_ICCV11.pdf', papername='Are Spatial and Global Constraints Really Necessary for Segmentation?', uncertainty=0.0, venue='ICCV 2011', notes='Several variants are examined; no single method attains the overall best results, i.e. both best per-class and per-pixel averages simultaneously. Indicated result corresponds to the method that performs best on average ((per-class + per-pixel) / 2). Experiment data available.')
msrc21_pc.measure(None, 76.0, 'Kernelized SSVM/CRF', url='https://infoscience.epfl.ch/record/180188/files/top.pdf', papername='Structured Image Segmentation using Kernelized Features', uncertainty=0.0, venue='ECCV 2012', notes='70 % / 73 % when using only local features (not considering global features)')
msrc21_pc.measure(None, 72.8, 'PMG', url='http://users.cecs.anu.edu.au/~sgould/papers/eccv12-patchGraph.pdf', papername='PatchMatchGraph: Building a Graph of Dense Patch Correspondences for Label Transfer', uncertainty=0.0, venue='ECCV 2012', notes='8% / 63.3% raw PatchMatchGraph accuracy, 72.8% / 79.0% when using Boosted CRF. Code available.')
msrc21_pc.measure(None, 69.0, 'Auto-Context', url='http://pages.ucsd.edu/~ztu/publication/pami_autocontext.pdf', papername='Auto-Context and Its Application to High-Level Vision Tasks and 3D Brain Image Segmentation', uncertainty=0.0, venue='PAMI 2010', notes='')
msrc21_pc.measure(None, 67.0, 'STF', url='http://mi.eng.cam.ac.uk/~cipolla/publications/inproceedings/2008-CVPR-semantic-texton-forests.pdf', papername='Semantic Texton Forests for Image Categorization and Segmentation', uncertainty=0.0, venue='CVPR 2008', notes='')
msrc21_pc.measure(None, 57.0, 'TextonBoost', url='http://research.microsoft.com/pubs/117885/ijcv07a.pdf', papername='TextonBoost for Image Understanding', uncertainty=0.0, venue='IJCV 2009', notes='?? / 69.6% (per-class / per-pixel) for the unaries alone (no CRF on top)')
# Handling 'MSRC-21' semantic_labeling_datasets_results.html#4d5352432d3231
msrc21_pp.measure(None, 86.8, 'Large FC CRF', url='http://ai2-s2-pdfs.s3.amazonaws.com/daba/eb9185990f65f807c95ff4d09057c2bf1cf0.pdf', papername='Large-Scale Semantic Co-Labeling of Image Sets', uncertainty=0.0, venue='WACV 2014', notes='')
msrc21_pp.measure(None, 83.0, 'Harmony Potentials', url='http://link.springer.com/article/10.1007%2Fs11263-011-0449-8', papername='Harmony Potentials - Fusing Local and Global Scale for Semantic Image Segmentation', uncertainty=0.0, venue='IJCV 2012', notes='per-class % / per-pixel %')
msrc21_pp.measure(None, 86.0, 'Describing the Scene as a Whole: Joint Object Detection, Scene Classification and Semantic Segmentation', url='http://ttic.uchicago.edu/~rurtasun/publications/yao_et_al_cvpr12.pdf', papername='Describing the Scene as a Whole: Joint Object Detection, Scene Classification and Semantic Segmentation', uncertainty=0.0, venue='CVPR 2012', notes='')
msrc21_pp.measure(None, 85.0, 'MPP', url='http://mediatum.ub.tum.de/doc/1175516/1175516.pdf', papername='Morphological Proximity Priors: Spatial Relationships for Semantic Segmentation', uncertainty=0.0, venue='TUM-I1222 2013', notes='')
msrc21_pp.measure(None, 86.0, 'FC CRF', url='http://graphics.stanford.edu/projects/densecrf/densecrf.pdf', papername='Efficient Inference in Fully Connected CRFs with Gaussian Edge Potentials', uncertainty=0.0, venue='NIPS 2011', notes='Strong unary used provides 76.6% / 84.0%')
msrc21_pp.measure(None, 87.0, 'HCRF+CO', url='http://research.microsoft.com/en-us/um/people/pkohli/papers/lrkt_eccv2010.pdf', papername='Graph Cut based Inference with Co-occurrence Statistics', uncertainty=0.0, venue='ECCV 2010', notes='')
msrc21_pp.measure(None, 85.0, 'Are Spatial and Global Constraints Really Necessary for Segmentation?', url='http://infoscience.epfl.ch/record/169178/files/lucchi_ICCV11.pdf', papername='Are Spatial and Global Constraints Really Necessary for Segmentation?', uncertainty=0.0, venue='ICCV 2011', notes='Several variants are examined; no single method attains the overall best results, i.e. both best per-class and per-pixel averages simultaneously. Indicated result corresponds to the method that performs best on average ((per-class + per-pixel) / 2). Experiment data available.')
msrc21_pp.measure(None, 82.0, 'Kernelized SSVM/CRF', url='https://infoscience.epfl.ch/record/180188/files/top.pdf', papername='Structured Image Segmentation using Kernelized Features', uncertainty=0.0, venue='ECCV 2012', notes='70 % / 73 % when using only local features (not considering global features)')
msrc21_pp.measure(None, 79.0, 'PatchMatchGraph', url='http://users.cecs.anu.edu.au/~sgould/papers/eccv12-patchGraph.pdf', papername='PatchMatchGraph: Building a Graph of Dense Patch Correspondences for Label Transfer', uncertainty=0.0, venue='ECCV 2012', notes='8% / 63.3% raw PatchMatchGraph accuracy, 72.8% / 79.0% when using Boosted CRF. Code available.')
msrc21_pp.measure(None, 78.0, 'Auto-Context', url='http://pages.ucsd.edu/~ztu/publication/pami_autocontext.pdf', papername='Auto-Context and Its Application to High-Level Vision Tasks and 3D Brain Image Segmentation', uncertainty=0.0, venue='PAMI 2010', notes='')
msrc21_pp.measure(None, 72.0, 'STF', url='http://mi.eng.cam.ac.uk/~cipolla/publications/inproceedings/2008-CVPR-semantic-texton-forests.pdf', papername='Semantic Texton Forests for Image Categorization and Segmentation', uncertainty=0.0, venue='CVPR 2008', notes='')
msrc21_pp.measure(None, 72.0, 'TextonBoost', url='http://research.microsoft.com/pubs/117885/ijcv07a.pdf', papername='TextonBoost for Image Understanding', uncertainty=0.0, venue='IJCV 2009', notes='?? / 69.6% (per-class / per-pixel) for the unaries alone (no CRF on top)')
# Handling 'Pascal VOC 2011 comp3' detection_datasets_results.html#50617363616c20564f43203230313120636f6d7033
# Skipping 40.6 mAP Fisher and VLAD with FLAIR CVPR 2014
# Handling 'Leeds Sport Poses' pose_estimation_datasets_results.html#4c656564732053706f727420506f736573
#69.2 % Strong Appearance and Expressive Spatial Models for Human Pose Estimation ICCV 2013 Starting model reaches 58.1 %, improved local appearances reaches 66.9 %, and 69.2% when using the full model.
#64.3 % Appearance sharing for collective human pose estimation ACCV 2012
#63.3 % Poselet conditioned pictorial structures CVPR 2013
#60.8 % Articulated pose estimation with flexible mixtures-of-parts CVPR 2011
# 55.6% Pictorial structures revisited: People detection and articulated pose estimation CVPR 2009
# Handling 'Pascal VOC 2007 comp3' detection_datasets_results.html#50617363616c20564f43203230303720636f6d7033
# Skipping 22.7 mAP Ensemble of Exemplar-SVMs for Object Detection and Beyond ICCV 2011
# Skipping 27.4 mAP Measuring the objectness of image windows PAMI 2012
# Skipping 28.7 mAP Automatic discovery of meaningful object parts with latent CRFs CVPR 2010
# Skipping 29.0 mAP Object Detection with Discriminatively Trained Part Based Models PAMI 2010
# Skipping 29.6 mAP Latent Hierarchical Structural Learning for Object Detection CVPR 2010
# Skipping 32.4 mAP Deformable Part Models with Individual Part Scaling BMVC 2013
# Skipping 34.3 mAP Histograms of Sparse Codes for Object Detection CVPR 2013
# Skipping 34.3 mAP Boosted local structured HOG-LBP for object localization CVPR 2011
# Skipping 34.7 mAP Discriminatively Trained And-Or Tree Models for Object Detection CVPR 2013
# Skipping 34.7 mAP Incorporating Structural Alternatives and Sharing into Hierarchy for Multiclass Object Recognition and Detection CVPR 2013
# Skipping 34.8 mAP Color Attributes for Object Detection CVPR 2012
# Skipping 35.4 mAP Object Detection with Discriminatively Trained Part Based Models PAMI 2010
# Skipping 36.0 mAP Machine Learning Methods for Visual Object Detection archives-ouvertes 2011
# Skipping 38.7 mAP Detection Evolution with Multi-Order Contextual Co-occurrence CVPR 2013
# Skipping 40.5 mAP Segmentation Driven Object Detection with Fisher Vectors ICCV 2013
# Skipping 41.7 mAP Regionlets for Generic Object Detection ICCV 2013
# Skipping 43.7 mAP Beyond Bounding-Boxes: Learning Object Shape by Model-Driven Grouping ECCV 2012
# Handling 'Pascal VOC 2007 comp4' detection_datasets_results.html#50617363616c20564f43203230303720636f6d7034
# Skipping 59.2 mAP Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition ECCV 2014
# Skipping 58.5 mAP Rich feature hierarchies for accurate object detection and semantic segmentation CVPR 2014
# Skipping 29.0 mAP Multi-Component Models for Object Detection ECCV 2012
# Handling 'Pascal VOC 2010 comp3' detection_datasets_results.html#50617363616c20564f43203230313020636f6d7033
# Skipping 24.98 mAP Learning Collections of Part Models for Object Recognition CVPR 2013
# Skipping 29.4 mAP Discriminatively Trained And-Or Tree Models for Object Detection CVPR 2013
# Skipping 33.4 mAP Object Detection with Discriminatively Trained Part Based Models PAMI 2010
# Skipping 34.1 mAP Segmentation as selective search for object recognition ICCV 2011
# Skipping 35.1 mAP Selective Search for Object Recognition IJCV 2013
# Skipping 36.0 mAP Latent Hierarchical Structural Learning for Object Detection CVPR 2010
# Skipping 36.8 mAP Object Detection by Context and Boosted HOG-LBP ECCV 2010
# Skipping 38.4 mAP Segmentation Driven Object Detection with Fisher Vectors ICCV 2013
# Skipping 39.7 mAP Regionlets for Generic Object Detection ICCV 2013
# Skipping 40.4 mAP Fisher and VLAD with FLAIR CVPR 2014
# Handling 'Pascal VOC 2010 comp4' detection_datasets_results.html#50617363616c20564f43203230313020636f6d7034
# Skipping 53.7 mAP Rich feature hierarchies for accurate object detection and semantic segmentation CVPR 2014
# Skipping 40.4 mAP Bottom-up Segmentation for Top-down Detection CVPR 2013
# Skipping 33.1 mAP Multi-Component Models for Object Detection ECCV 2012
| AI-metrics-master | data/awty.py |
# The file was autogenerated by ../scrapers/wer.py
from datetime import date
from data.acoustics import speech_recognition, swb_hub_500
from scales import *
librispeech_WER_clean = speech_recognition.metric(name="librispeech WER testclean", scale=error_percent, target=5.83, target_source="http://arxiv.org/abs/1512.02595v1")
librispeech_WER_other = speech_recognition.metric(name="librispeech WER testother", scale=error_percent, target=12.69, target_source="http://arxiv.org/abs/1512.02595v1")
librispeech_WER_clean.measure(date(2015, 12, 23), 5.33, '9-layer model w/ 2 layers of 2D-invariant convolution & 7 recurrent layers, w/ 68M parameters trained on 11940h', 'http://arxiv.org/abs/1512.02595v1')
librispeech_WER_clean.measure(date(2015, 8, 23), 4.83, 'HMM-TDNN + iVectors', 'http://speak.clsp.jhu.edu/uploads/publications/papers/1048_pdf.pdf')
librispeech_WER_clean.measure(date(2015, 8, 23), 5.51, 'HMM-DNN + pNorm*', 'http://www.danielpovey.com/files/2015_icassp_librispeech.pdf')
librispeech_WER_clean.measure(date(2015, 8, 23), 8.01, 'HMM-(SAT)GMM', 'http://kaldi-asr.org/')
librispeech_WER_clean.measure(date(2016, 9, 23), 4.28, 'HMM-TDNN trained with MMI + data augmentation (speed) + iVectors + 3 regularizations', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
librispeech_WER_other.measure(date(2015, 12, 23), 13.25, '9-layer model w/ 2 layers of 2D-invariant convolution & 7 recurrent layers, w/ 68M parameters trained on 11940h', 'http://arxiv.org/abs/1512.02595v1')
librispeech_WER_other.measure(date(2015, 8, 23), 12.51, 'TDNN + pNorm + speed up/down speech', 'http://www.danielpovey.com/files/2015_interspeech_augmentation.pdf')
librispeech_WER_other.measure(date(2015, 8, 23), 13.97, 'HMM-DNN + pNorm*', 'http://www.danielpovey.com/files/2015_icassp_librispeech.pdf')
librispeech_WER_other.measure(date(2015, 8, 23), 22.49, 'HMM-(SAT)GMM', 'http://kaldi-asr.org/')
wsj_WER_eval92 = speech_recognition.metric(name="wsj WER eval92", scale=error_percent, target=5.03, target_source="http://arxiv.org/abs/1512.02595v1")
wsj_WER_eval93 = speech_recognition.metric(name="wsj WER eval93", scale=error_percent, target=8.08, target_source="http://arxiv.org/abs/1512.02595v1")
wsj_WER_eval92.measure(date(2014, 8, 23), 5.6, 'CNN over RAW speech (wav)', 'http://infoscience.epfl.ch/record/203464/files/Palaz_Idiap-RR-18-2014.pdf')
wsj_WER_eval92.measure(date(2015, 12, 23), 3.60, '9-layer model w/ 2 layers of 2D-invariant convolution & 7 recurrent layers, w/ 68M parameters', 'http://arxiv.org/abs/1512.02595v1')
wsj_WER_eval92.measure(date(2015, 4, 23), 3.47, 'TC-DNN-BLSTM-DNN', 'http://arxiv.org/pdf/1504.01482v1.pdf')
wsj_WER_eval92.measure(date(2015, 8, 23), 3.63, 'test-set on open vocabulary (i.e. harder), model = HMM-DNN + pNorm*', 'http://www.danielpovey.com/files/2015_icassp_librispeech.pdf')
wsj_WER_eval93.measure(date(2015, 12, 23), 4.98, '9-layer model w/ 2 layers of 2D-invariant convolution & 7 recurrent layers, w/ 68M parameters', 'http://arxiv.org/abs/1512.02595v1')
wsj_WER_eval93.measure(date(2015, 8, 23), 5.66, 'test-set on open vocabulary (i.e. harder), model = HMM-DNN + pNorm*', 'http://www.danielpovey.com/files/2015_icassp_librispeech.pdf')
#swb_hub_500_WER_SWB = speech_recognition.metric(name="swb_hub_500 WER SWB", scale=error_percent)
swb_hub_500_WER_SWB = swb_hub_500
swb_hub_500_WER_fullSWBCH = speech_recognition.metric(name="swb_hub_500 WER fullSWBCH", scale=error_percent)
swb_hub_500_WER_SWB.measure(date(2013, 8, 23), 11.5, 'CNN', 'http://www.cs.toronto.edu/~asamir/papers/icassp13_cnn.pdf')
swb_hub_500_WER_SWB.measure(date(2013, 8, 23), 12.6, 'HMM-DNN +sMBR', 'http://www.danielpovey.com/files/2013_interspeech_dnn.pdf')
swb_hub_500_WER_SWB.measure(date(2014, 12, 23), 12.6, 'CNN + Bi-RNN + CTC (speech to letters), 25.9% WER if trained only on SWB', 'http://arxiv.org/abs/1412.5567')
swb_hub_500_WER_SWB.measure(date(2014, 6, 23), 15, 'DNN + Dropout', 'http://arxiv.org/abs/1406.7806v2')
swb_hub_500_WER_SWB.measure(date(2014, 8, 23), 10.4, 'CNN on MFSC/fbanks + 1 non-conv layer for FMLLR/I-Vectors concatenated in a DNN', 'http://www.mirlab.org/conference_papers/International_Conference/ICASSP%202014/papers/p5609-soltau.pdf')
swb_hub_500_WER_SWB.measure(date(2015, 8, 23), 11, 'HMM-TDNN + iVectors', 'http://speak.clsp.jhu.edu/uploads/publications/papers/1048_pdf.pdf')
swb_hub_500_WER_SWB.measure(date(2015, 8, 23), 12.9, 'HMM-TDNN + pNorm + speed up/down speech', 'http://www.danielpovey.com/files/2015_interspeech_augmentation.pdf')
swb_hub_500_WER_SWB.measure(date(2015, 9, 23), 12.2, 'Deep CNN (10 conv, 4 FC layers), multi-scale feature maps', 'http://arxiv.org/pdf/1509.08967v1.pdf')
swb_hub_500_WER_SWB.measure(date(2016, 6, 23), 6.6, 'RNN + VGG + LSTM acoustic model trained on SWB+Fisher+CH, N-gram + "model M" + NNLM language model', 'http://arxiv.org/pdf/1604.08242v2.pdf')
swb_hub_500_WER_SWB.measure(date(2016, 9, 23), 6.3, 'VGG/Resnet/LACE/BiLSTM acoustic model trained on SWB+Fisher+CH, N-gram + RNNLM language model trained on Switchboard+Fisher+Gigaword+Broadcast', 'http://arxiv.org/pdf/1609.03528v1.pdf')
swb_hub_500_WER_SWB.measure(date(2016, 9, 23), 8.5, 'HMM-BLSTM trained with MMI + data augmentation (speed) + iVectors + 3 regularizations + Fisher', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
swb_hub_500_WER_SWB.measure(date(2016, 9, 23), 9.2, 'HMM-TDNN trained with MMI + data augmentation (speed) + iVectors + 3 regularizations + Fisher (10% / 15.1% respectively trained on SWBD only)', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
swb_hub_500_WER_SWB.measure(date(2017, 3, 23), 5.5, 'ResNet + BiLSTMs acoustic model, with 40d FMLLR + i-Vector inputs, trained on SWB+Fisher+CH, n-gram + model-M + LSTM + Strided (à trous) convs-based LM trained on Switchboard+Fisher+Gigaword+Broadcast', 'https://arxiv.org/pdf/1703.02136.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2013, 8, 23), 18.4, 'HMM-DNN +sMBR', 'http://www.danielpovey.com/files/2013_interspeech_dnn.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2014, 12, 23), 16, 'CNN + Bi-RNN + CTC (speech to letters), 25.9% WER if trained only on SWB', 'http://arxiv.org/abs/1412.5567')
swb_hub_500_WER_fullSWBCH.measure(date(2014, 6, 23), 19.1, 'DNN + Dropout', 'http://arxiv.org/abs/1406.7806v2')
swb_hub_500_WER_fullSWBCH.measure(date(2015, 8, 23), 17.1, 'HMM-TDNN + iVectors', 'http://speak.clsp.jhu.edu/uploads/publications/papers/1048_pdf.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2015, 8, 23), 19.3, 'HMM-TDNN + pNorm + speed up/down speech', 'http://www.danielpovey.com/files/2015_interspeech_augmentation.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2016, 6, 23), 12.2, 'RNN + VGG + LSTM acoustic model trained on SWB+Fisher+CH, N-gram + "model M" + NNLM language model', 'http://arxiv.org/pdf/1604.08242v2.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2016, 9, 23), 11.9, 'VGG/Resnet/LACE/BiLSTM acoustic model trained on SWB+Fisher+CH, N-gram + RNNLM language model trained on Switchboard+Fisher+Gigaword+Broadcast', 'http://arxiv.org/pdf/1609.03528v1.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2016, 9, 23), 13, 'HMM-BLSTM trained with MMI + data augmentation (speed) + iVectors + 3 regularizations + Fisher', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2016, 9, 23), 13.3, 'HMM-TDNN trained with MMI + data augmentation (speed) + iVectors + 3 regularizations + Fisher (10% / 15.1% respectively trained on SWBD only)', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
swb_hub_500_WER_fullSWBCH.measure(date(2017, 3, 23), 10.3, 'ResNet + BiLSTMs acoustic model, with 40d FMLLR + i-Vector inputs, trained on SWB+Fisher+CH, n-gram + model-M + LSTM + Strided (à trous) convs-based LM trained on Switchboard+Fisher+Gigaword+Broadcast', 'https://arxiv.org/pdf/1703.02136.pdf')
fisher_WER = speech_recognition.metric(name="fisher WER", scale=error_percent)
fisher_WER.measure(date(2016, 9, 23), 9.6, 'HMM-BLSTM trained with MMI + data augmentation (speed) + iVectors + 3 regularizations + SWBD', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
fisher_WER.measure(date(2016, 9, 23), 9.8, 'HMM-TDNN trained with MMI + data augmentation (speed) + iVectors + 3 regularizations + SWBD', 'http://www.danielpovey.com/files/2016_interspeech_mmi.pdf')
chime_clean = speech_recognition.metric(name="chime clean", scale=error_percent)
chime_real = speech_recognition.metric(name="chime real", scale=error_percent)
chime_clean.measure(date(2014, 12, 23), 6.30, 'CNN + Bi-RNN + CTC (speech to letters)', 'http://arxiv.org/abs/1412.5567')
chime_clean.measure(date(2015, 12, 23), 3.34, '9-layer model w/ 2 layers of 2D-invariant convolution & 7 recurrent layers, w/ 68M parameters', 'http://arxiv.org/abs/1512.02595v1')
chime_real.measure(date(2014, 12, 23), 67.94, 'CNN + Bi-RNN + CTC (speech to letters)', 'http://arxiv.org/abs/1412.5567')
chime_real.measure(date(2015, 12, 23), 21.79, '9-layer model w/ 2 layers of 2D-invariant convolution & 7 recurrent layers, w/ 68M parameters', 'http://arxiv.org/abs/1512.02595v1')
timit_PER = speech_recognition.metric(name="timit PER", scale=error_percent)
timit_PER.measure(date(2009, 8, 23), 23, '(first, modern) HMM-DBN', 'http://www.cs.toronto.edu/~asamir/papers/NIPS09.pdf')
timit_PER.measure(date(2013, 3, 23), 17.7, 'Bi-LSTM + skip connections w/ CTC', 'http://arxiv.org/abs/1303.5778v1')
timit_PER.measure(date(2014, 8, 23), 16.7, 'CNN in time and frequency + dropout, 17.6% w/o dropout', 'http://www.inf.u-szeged.hu/~tothl/pubs/ICASSP2014.pdf')
timit_PER.measure(date(2015, 6, 23), 17.6, 'Bi-RNN + Attention', 'http://arxiv.org/abs/1506.07503')
timit_PER.measure(date(2015, 9, 23), 16.5, 'Hierarchical maxout CNN + Dropout', 'https://link.springer.com/content/pdf/10.1186%2Fs13636-015-0068-3.pdf')
timit_PER.measure(date(2016, 3, 23), 17.3, 'RNN-CRF on 24(x3) MFSC', 'https://arxiv.org/abs/1603.00223')
wer_metrics=[librispeech_WER_clean, librispeech_WER_other, wsj_WER_eval92, wsj_WER_eval93, swb_hub_500_WER_SWB, swb_hub_500_WER_fullSWBCH, fisher_WER, chime_clean, chime_real, timit_PER]
| AI-metrics-master | data/wer.py |
"Hand-entered acoustic data"
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
"""
http://melodi.ee.washington.edu/s3tp/
* * *
**_Word error rate on Switchboard (specify details): [Month, Year: Score [SWB]: Team]. Compiled by Jack Clark._**
A note about measurement: We're measuring Switchboard (SWB) and Call Home (CH) performance (mostly) from the Hub5'00 dataset, with main scores assessed in terms of word error rate on SWB. We also create
Why do we care: Reflects the improvement of audio processing systems on speech over time.
"""
speech_recognition = Problem(name="Speech Recognition", attributes=["language", "agi"])
swb_hub_500 = speech_recognition.metric(name="Word error rate on Switchboard trained against the Hub5'00 dataset",
scale=error_percent, target=5.9)
swb_hub_500.measure(date(2011,8,31), 16.1, "CD-DNN", "https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/CD-DNN-HMM-SWB-Interspeech2011-Pub.pdf")
swb_hub_500.measure(date(2012,4,27), 18.5, "DNN-HMM", "https://pdfs.semanticscholar.org/ce25/00257fda92338ec0a117bea1dbc0381d7c73.pdf?_ga=1.195375081.452266805.1483390947")
swb_hub_500.measure(date(2013,8,25), 12.9, "DNN MMI", "http://www.danielpovey.com/files/2013_interspeech_dnn.pdf")
swb_hub_500.measure(date(2013,8,25), 12.6, "DNN sMBR", "http://www.danielpovey.com/files/2013_interspeech_dnn.pdf")
swb_hub_500.measure(date(2013,8,25), 12.9, "DNN MPE", "http://www.danielpovey.com/files/2013_interspeech_dnn.pdf")
swb_hub_500.measure(date(2013,8,25), 12.9, "DNN BMMI", "http://www.danielpovey.com/files/2013_interspeech_dnn.pdf")
swb_hub_500.measure(date(2014,6,30), 16, "DNN", "https://arxiv.org/abs/1406.7806v1")
swb_hub_500.measure(date(2014,12,7), 20, "Deep Speech", "https://arxiv.org/abs/1412.5567")
swb_hub_500.measure(date(2014,12,7), 12.6, "Deep Speech + FSH", url="https://arxiv.org/abs/1412.5567") # TODO: why is this also included?
swb_hub_500.measure(date(2015,5,21), 8.0, "IBM 2015", "https://arxiv.org/abs/1505.05899") # TODO: (name check)
swb_hub_500.measure(date(2016,4,27), 6.9, "IBM 2016", "https://arxiv.org/abs/1604.08242v1") # TODO: (name check)
swb_hub_500.measure(date(2017,2,17), 6.9, "RNNLM", "https://arxiv.org/abs/1609.03528") # TODO: (name check)
swb_hub_500.measure(date(2017,2,17), 6.2, "Microsoft 2016", "https://arxiv.org/abs/1609.03528") # TODO: (name check)
swb_hub_500.measure(date(2016,10,17), 6.6, "CNN-LSTM", "https://arxiv.org/abs/1610.05256") # TODO: (name check)
swb_hub_500.measure(None, 5.8, "Microsoft 2016b", "https://arxiv.org/abs/1610.05256") # TODO: (name check)
"""
Author Yann Bayle
E-mail [email protected]
Created 03/08/2017
Updated 04/08/2017
Object Music Information Retrieval tasks
Why do we care: Reflects the improvement of music auto-tagging over time.
"""
# Precision of Instrumentals detection in a music database, for the purpose of building an Instrumentals-only playlist
instrumentals_recognition = Problem(name="Detection of Instrumentals musical tracks", attributes=["language", "agi"])
satin = instrumentals_recognition.metric(name="Precision of Instrumentals detection reached when tested on SATIN (Bayle et al. 2017)", scale=correct_percent, target=99)
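# "Precision" here has its usual meaning: of the tracks a system labels as
# Instrumental, the percentage that really are Instrumentals. A minimal sketch,
# with hypothetical true_positives / false_positives counts:
def instrumental_detection_precision(true_positives, false_positives):
    "Percentage of predicted Instrumentals that are actually Instrumentals."
    return 100.0 * true_positives / (true_positives + false_positives)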
satin.measure(date(2013, 10, 17), 17.3, "Ghosal et al.", "https://link.springer.com/article/10.1186/2193-1801-2-526")
satin.measure(date(2014, 9, 30), 12.5, "SVMBFF", "https://arxiv.org/pdf/1410.0001.pdf")
satin.measure(date(2014, 9, 30), 29.8, "VQMM", "https://arxiv.org/pdf/1410.0001.pdf")
satin.measure(date(2017, 6, 23), 82.5, "Bayle et al.", "https://arxiv.org/abs/1706.07613")
# MAP and MAR for Cover Song Identification are currently being written
# More people are needed to work on genre recognition
| AI-metrics-master | data/acoustics.py |
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
read_stem_papers = Problem("Read a scientific or technical paper, and comprehend its contents", ["language", "world-modelling", "super"])
# Getting some major results from an abstract, tables or conclusion is much easier than understanding the entire paper, its assumptions, robustness, support for its claims, etc
extract_results = Problem("Extract major numerical results or progress claims from a STEM paper", ["language", "world-modelling", "agi"])
read_stem_papers.add_subproblem(extract_results)
extract_results.metric("Automatically find new relevant ML results on arXiv")
extract_results.notes = """
This metric is the ability to automatically update the ipython Notebook you are reading by spotting results in pdfs uploaded to arxiv.org.
Pull requests demonstrating solutions are welcome :)
"""
solve_technical_problems = Problem("Given an arbitrary technical problem, solve it as well as a typical professional in that field", ["language", "world-modelling"])
program_induction = Problem("Writing software from specifications")
solve_technical_problems.add_subproblem(program_induction)
program_induction.metric("Card2Code", url="https://github.com/deepmind/card2code", scale=correct_percent)
vaguely_constrained_technical_problems = Problem("Solve vaguely or under-constrained technical problems")
solve_technical_problems.add_subproblem(vaguely_constrained_technical_problems)
# This subset of technical problems is much easier; here we assume that a human / worldly problem has been reduced to something that can be
# subjected to clear computational evaluation ("is this purported proof of theorem X correct?", "does this circuit perform task Y efficiently?"
# "will this airframe fly with reasonable characteristics?")
solve_constrained_technical_problems = Problem("Solve technical problems with clear constraints (proofs, circuit design, aerofoil design, etc)")
solve_technical_problems.add_subproblem(solve_constrained_technical_problems)
vaguely_constrained_technical_problems.add_subproblem(read_stem_papers)
# Note that this theorem proving problem (learning to prove theorems) is a little different from the pure search
# through proof space that characterises the classic ATP field, though progress there may also be interesting
theorem_proving = Problem("Given examples of proofs, find correct proofs of simple mathematical theorems", ["agi", "math"])
circuit_design = Problem("Given desired circuit characteristics, and many examples, design new circuits to spec", ["agi", "math"])
solve_constrained_technical_problems.add_subproblem(theorem_proving)
theorem_proving.metric("HolStep", url="https://arxiv.org/abs/1703.00426")
solve_constrained_technical_problems.add_subproblem(circuit_design)
# TODO: find well-defined metrics for some of these problems in the literature. Or create some!
# Some relevant papers:
# http://www.ise.bgu.ac.il/faculty/kalech/publications/ijcai13.pdf
# https://www.researchgate.net/publication/2745078_Use_of_Automatically_Defined_Functions_and_ArchitectureAltering_Operations_in_Automated_Circuit_Synthesis_with_Genetic_Programming
# https://link.springer.com/article/10.1007/s10817-014-9301-5
program_induction = Problem("Write computer programs from specifications")
vaguely_constrained_technical_problems.add_subproblem(program_induction)
card2code_mtg_acc = program_induction.metric("Card2Code MTG accuracy", url="https://github.com/deepmind/card2code", scale=correct_percent, target=100, target_label="Bug-free card implementation")
card2code_hs_acc = program_induction.metric("Card2Code Hearthstone accuracy", url="https://github.com/deepmind/card2code", scale=correct_percent, target=100, target_label="Bug-free card implementation")
card2code_mtg_acc.measure(None, 4.8, "LPN", url="https://arxiv.org/abs/1603.06744v1")
card2code_hs_acc.measure(None, 6.1, "LPN", url="https://arxiv.org/abs/1603.06744v1")
card2code_hs_acc.measure(None, 13.6, "Seq2Tree-Unk", url="https://arxiv.org/abs/1704.01696v1", algorithm_src_url="https://arxiv.org/abs/1601.01280v1")
card2code_hs_acc.measure(None, 1.5, "NMT", url="https://arxiv.org/abs/1704.01696v1", algorithm_src_url="https://arxiv.org/abs/1409.0473v1")
#card2code_hs_acc.measure(None, 16.2, "SNM", url="https://arxiv.org/abs/1704.01696v1")
card2code_hs_acc.measure(None, 16.7, "SNM -frontier embed", url="https://arxiv.org/abs/1704.01696v1")
understand_conditional_expressions = Problem("Parse and implement complex conditional expressions")
program_induction.add_subproblem(understand_conditional_expressions)
science_question_answering = Problem("Answering Science Exam Questions", ["science", "qa"])
vaguely_constrained_technical_problems.add_subproblem(science_question_answering)
elementery_ndmc_acc = science_question_answering.metric("Elementery NDMC accuracy", url="", scale=correct_percent,
target=100, target_label="Perfect Score")
| AI-metrics-master | data/stem.py |
# -*- coding: utf-8 -*-
"Hand-entered data about written language problems"
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
modelling_english = Problem("Accurate modelling of human language.", ["language", "agi"])
ptperplexity = modelling_english.metric(name="Penn Treebank (Perplexity when parsing English sentences)", scale=perplexity)
ptperplexity.measure(date(2016,9,26), 70.9, "Pointer Sentinel-LSTM", "https://arxiv.org/pdf/1609.07843v1.pdf")
ptperplexity.measure(date(2016,10,5), 73.4, "Variational LSTM", "https://arxiv.org/pdf/1512.05287v5.pdf")
ptperplexity.measure(date(2013,12,20), 107.5, "Deep RNN", "https://arxiv.org/abs/1312.6026")
ptperplexity.measure(date(2012,4,7), 78.8, "KN5+RNNME ensemble", "http://www.fit.vutbr.cz/~imikolov/rnnlm/google.pdf")
ptperplexity.measure(date(2012,4,7), 125.7, "KN5+cache baseline", "http://www.fit.vutbr.cz/~imikolov/rnnlm/google.pdf")
ptperplexity.measure(date(2012,7,27), 124.7, "RNNLM", "https://www.microsoft.com/en-us/research/wp-content/uploads/2012/07/rnn_ctxt_TR.sav_.pdf")
ptperplexity.measure(date(2012,7,27), 74.1, "RNN-LDA+all", "https://www.microsoft.com/en-us/research/wp-content/uploads/2012/07/rnn_ctxt_TR.sav_.pdf")
ptperplexity.measure(date(2012,7,27), 113.7, "RNN-LDA LM", "https://www.microsoft.com/en-us/research/wp-content/uploads/2012/07/rnn_ctxt_TR.sav_.pdf")
ptperplexity.measure(date(2012,7,27), 92.0, "RNN-LDA LM+KN5+cache", "https://www.microsoft.com/en-us/research/wp-content/uploads/2012/07/rnn_ctxt_TR.sav_.pdf")
ptperplexity.measure(date(2012,7,27), 80.1, "RNN-LDA ensemble", "https://www.microsoft.com/en-us/research/wp-content/uploads/2012/07/rnn_ctxt_TR.sav_.pdf")
ptperplexity.measure(None, 68.7, "RNN Dropout Regularization", "https://arxiv.org/abs/1409.2329v1")
ptperplexity.measure(None, 68.5, "RHN", "https://arxiv.org/pdf/1607.03474v3")
ptperplexity.measure(None, 66, "RHN+WT", "https://arxiv.org/pdf/1607.03474v3")
ptperplexity.measure(None, 71.3, "Variational RHN", "https://arxiv.org/abs/1607.03474")
hp_compression = modelling_english.metric(name="Hutter Prize (bits per character to encode English text)", scale=bits_per_x, target=1.3)
hp_compression.measure(date(2016,10,31), 1.313, "Surprisal-Driven Zoneout",
"https://pdfs.semanticscholar.org/e9bc/83f9ff502bec9cffb750468f76fdfcf5dd05.pdf")
hp_compression.measure(date(2016,10,19), 1.37, "Surprisal-Driven Feedback RNN",
"https://arxiv.org/pdf/1608.06027.pdf")
hp_compression.measure(date(2016,9,27), 1.39, "Hypernetworks", "https://arxiv.org/abs/1609.09106")
hp_compression.measure(date(2016,9,6), 1.32, " Hierarchical Multiscale RNN", "https://arxiv.org/abs/1609.01704")
hp_compression.measure(date(2016,7,12), 1.32, "Recurrent Highway Networks", "https://arxiv.org/abs/1607.03474")
hp_compression.measure(date(2015,7,6), 1.47, "Grid LSTM", "https://arxiv.org/abs/1507.01526")
hp_compression.measure(date(2015,2,15), 1.58, "Gated Feedback RNN", "https://arxiv.org/abs/1502.02367")
# we need to match/double check the release date of the specific version of cmix that got this performance?
# hp_compression.measure(date(2014,4,13), 1.245, "cmix", "http://www.byronknoll.com/cmix.html")
hp_compression.measure(date(2013,8,4), 1.67, "RNN, LSTM", "https://arxiv.org/abs/1308.0850")
hp_compression.measure(date(2011,6,28), 1.60, "RNN", "http://www.cs.utoronto.ca/~ilya/pubs/2011/LANG-RNN.pdf")
lambada = modelling_english.metric("LAMBADA prediction of words in discourse", url="https://arxiv.org/abs/1606.06031",
scale=correct_percent, target=86, target_source="https://arxiv.org/abs/1610.08431v3")
lambada.measure(None, 21.7, "Stanford Reader", url="https://arxiv.org/abs/1610.08431v3", algorithm_src_url="https://arxiv.org/abs/1606.02858")
lambada.measure(None, 32.1, "Modified Stanford", url="https://arxiv.org/abs/1610.08431v3", algorithm_src_url="https://arxiv.org/abs/1606.02858")
lambada.measure(None, 49.0, "GA + feat.", url="https://arxiv.org/abs/1610.08431v3", algorithm_src_url="https://arxiv.org/abs/1606.01549v2")
lambada.measure(None, 44.5, "AS + feat.", url="https://arxiv.org/abs/1610.08431v3", algorithm_src_url="https://arxiv.org/abs/1603.01547")
lambada.measure(None, 51.6, "MAGE (48)", url="https://arxiv.org/abs/1703.02620v1")
turing_test = Problem("Conduct arbitrary sustained, probing conversation", ["agi", "language", "world-modelling", "communication"])
easy_turing_test = Problem("Turing test for casual conversation", ["agi", "language", "world-modelling", "communication"])
turing_test.add_subproblem(easy_turing_test)
loebner = easy_turing_test.metric("The Loebner Prize scored selection answers", url="http://www.aisb.org.uk/events/loebner-prize",
scale=correct_percent, changeable=True, target=100, target_label="Completely plausible answers",
axis_label='Percentage of answers rated plausible\n(each year is a different test)')
# XXX humans probably don't get 100% on the Loebner Prize selection questions; we should ask the organizers to score
# some humans
loebner.notes = """
The Loebner Prize is an actual enactment of the Turing Test. Importantly, judges are instructed to engage in casual, natural
conversation rather than deliberately probing to determine if participants are "intelligent" (Brian Christian, The Most Human Human).
This makes it considerably easier than a probing Turing Test, and it is close to being solved.
However these aren't scores for the full Loebner Turing Test; since 2014 the Loebner prize has scored its entrants by
giving them a corpus of conversation and scoring their answers. We use these numbers because they remove variability
in the behaviour of the judges. Unfortunately, these questions change from year to year (and have to, since
entrants will test with last year's data).
"""
loebner.measure(date(2016,9,17), 90, "Mitsuku 2016", url="http://www.aisb.org.uk/events/loebner-prize#Results16")
loebner.measure(date(2016,9,17), 78.3, "Tutor 2016", url="http://www.aisb.org.uk/events/loebner-prize#Results16")
loebner.measure(date(2016,9,17), 77.5, "Rose 2016", url="http://www.aisb.org.uk/events/loebner-prize#Results16")
loebner.measure(date(2016,9,17), 77.5, "Arckon 2016", url="http://www.aisb.org.uk/events/loebner-prize#Results16")
loebner.measure(date(2016,9,17), 76.7, "Katie 2016", url="http://www.aisb.org.uk/events/loebner-prize#Results16")
loebner.measure(date(2015,9,19), 83.3, "Mitsuku 2015", url="http://www.aisb.org.uk/events/loebner-prize#Results15")
loebner.measure(date(2015,9,19), 80, "Lisa 2015", url="http://www.aisb.org.uk/events/loebner-prize#Results15")
loebner.measure(date(2015,9,19), 76.7, "Izar 2015", url="http://www.aisb.org.uk/events/loebner-prize#Results15")
loebner.measure(date(2015,9,19), 75, "Rose 2015",url="http://www.aisb.org.uk/events/loebner-prize#Results15")
loebner.measure(date(2014,11,15), 89.2, "Rose 2014", url="http://www.aisb.org.uk/events/loebner-prize#contest2014")
loebner.measure(date(2014,11,15), 88.3, "Izar 2014", url="http://www.aisb.org.uk/events/loebner-prize#contest2014")
loebner.measure(date(2014,11,15), 88.3, "Mitsuku 2014", url="http://www.aisb.org.uk/events/loebner-prize#contest2014")
loebner.measure(date(2014,11,15), 81.67, "Uberbot 2014", url="http://www.aisb.org.uk/events/loebner-prize#contest2014")
loebner.measure(date(2014,11,15), 80.83, "Tutor 2014", url="http://www.aisb.org.uk/events/loebner-prize#contest2014")
loebner.measure(date(2014,11,15), 76.7, "The Professor 2014", url="http://www.aisb.org.uk/events/loebner-prize#contest2014")
reading_comprehension = Problem("Language comprehension and question-answering", ["language", "world-modelling", "agi"])
turing_test.add_subproblem(reading_comprehension)
# Overview of Machine Reading Comprehension (MRC) datasets here:
# http://eric-yuan.me/compare-popular-mrc-datasets/
bAbi10k = reading_comprehension.metric("bAbi 20 QA (10k training examples)", url="http://fb.ai/babi", scale=correct_percent, target=99, target_label="Excellent performance")
bAbi1k = reading_comprehension.metric("bAbi 20 QA (1k training examples)", url="http://fb.ai/babi", scale=correct_percent, target=99, target_label="Excellent performance")
bAbi1k.notes = """
A synthetic environment inspired by text adventures and SHRDLU, which enables generation
of ground truths, describing sentences, and inferential questions. Includes:
supporting facts, relations, yes/no questions, counting, lists/sets, negation, indefiniteness,
coreference, conjunction, time, basic deduction and induction, reasoning about position, size,
path finding and motivation.
Table 3 of https://arxiv.org/abs/1502.05698 actually breaks this down into 20 submeasures
but initially we're lumping all of this together.
Originally "solving" bAbI was defined as 95% accuracy (or perhaps 95% accuracy on all submeasures),
but clearly humans and now algorithms are better than that.
TODO: bAbi really needs to be decomposed into semi-supervised and unsupervised variants, and
by amount of training data provided
"""
bAbi10k.measure(date(2015,2,19), 93.3, "MemNN-AM+NG+NL (1k + strong supervision)", "https://arxiv.org/abs/1502.05698v1",
not_directly_comparable=True, long_label=True, offset=(2,5)) # not literally a 10K example, but more comparable to it
#bAbi1k.measure(None, 48.7, "LSTM", "https://arxiv.org/abs/1502.05698v1", algorithm_src_url="http://isle.illinois.edu/sst/meetings/2015/hochreiter-lstm.pdf", min_date=date(1997,11,15))
bAbi1k.measure(date(2015,3,31), 86.1, "MemN2N-PE+LS+RN", "https://arxiv.org/abs/1503.08895")
bAbi10k.measure(date(2015,3,31), 93.4, "MemN2N-PE+LS+RN", "https://arxiv.org/abs/1503.08895")
bAbi1k.measure(date(2015,6,24), 93.6, "DMN", "https://arxiv.org/abs/1506.07285", offset=(3,-2), not_directly_comparable=True) # The paper doesn't say if this is 1k or 10k
bAbi10k.measure(date(2016,1,5), 96.2, "DNC", "https://www.gwern.net/docs/2016-graves.pdf")
bAbi10k.measure(date(2016,9,27), 97.1, "SDNC", "https://arxiv.org/abs/1606.04582v4")
bAbi10k.measure(date(2016,12,12), 99.5, "EntNet", "https://arxiv.org/abs/1612.03969")
bAbi1k.measure(date(2016,12,12), 89.1, "EntNet", "https://arxiv.org/abs/1612.03969")
bAbi10k.measure(date(2016,12,9), 99.7, "QRN", "https://arxiv.org/abs/1606.04582v4", offset=(2,3))
bAbi1k.measure(date(2016,12,9), 90.1, "QRN", "https://arxiv.org/abs/1606.04582v4")
bAbi1k.measure(None, 66.8, "DMN+", "https://arxiv.org/abs/1606.04582v4", algorithm_src_url="https://arxiv.org/abs/1607.00036", replicated="https://github.com/therne/dmn-tensorflow")
bAbi10k.measure(date(2016,6,30), 97.2, "DMN+", "https://arxiv.org/abs/1607.00036")
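# A sketch of the decomposition suggested in the TODO in the notes above, using the same metric()
# API as the rest of this file (the names and 95% targets here are illustrative placeholders
# rather than established benchmarks, so the lines are left commented out):
# bAbi1k_weak = reading_comprehension.metric("bAbi 20 QA (1k examples, weakly supervised)",
#                                            url="http://fb.ai/babi", scale=correct_percent, target=95)
# bAbi1k_strong = reading_comprehension.metric("bAbi 20 QA (1k examples, strong supervision)",
#                                              url="http://fb.ai/babi", scale=correct_percent, target=95)
# bAbi1k_pathfinding = reading_comprehension.metric("bAbi QA19 path finding (1k examples)",
#                                                   url="http://fb.ai/babi", scale=correct_percent, target=95)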
# More papers:
# https://www.aclweb.org/anthology/D/D13/D13-1020.pdf
mctest160 = reading_comprehension.metric("Reading comprehension MCTest-160-all", scale=correct_percent, url="https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/MCTest_EMNLP2013.pdf")
mctest160.measure(date(2013, 10, 1), 69.16, "SW+D+RTE", url="https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/MCTest_EMNLP2013.pdf", papername="MCTest: A Challenge Dataset for the Open-Domain Machine Comprehension of Text")
mctest160.measure(date(2015, 7, 26), 75.27, "Wang-et-al", url="http://arxiv.org/abs/1603.08884")
mctest160.measure(date(2015, 7, 26), 73.27, "Narasimhan-model3", url="https://people.csail.mit.edu/regina/my_papers/MCDR15.pdf", papername="Machine Comprehension with Discourse Relations")
mctest160.measure(date(2016, 3, 29), 74.58, "Parallel-Hierarchical", url="http://arxiv.org/abs/1603.08884")
mctest500 = reading_comprehension.metric("Reading comprehension MCTest-500-all", scale=correct_percent, url="https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/MCTest_EMNLP2013.pdf")
mctest500.measure(date(2013, 10, 1), 63.33, "SW+D+RTE", url="https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/MCTest_EMNLP2013.pdf", papername="MCTest: A Challenge Dataset for the Open-Domain Machine Comprehension of Text")
mctest500.measure(date(2015, 7, 26), 69.94, "Wang-et-al", url="http://arxiv.org/abs/1603.08884")
mctest500.measure(date(2015, 7, 26), 63.75, "Narasimhan-model3", url="https://people.csail.mit.edu/regina/my_papers/MCDR15.pdf", papername="Machine Comprehension with Discourse Relations")
mctest500.measure(date(2015, 7, 26), 67.83, "LSSVM", url="https://pdfs.semanticscholar.org/f26e/088bc4659a9b7fce28b6604d26de779bcf93.pdf", papername="Learning Answer-Entailing Structures for Machine Comprehension")
mctest500.measure(date(2016, 3, 29), 71.00, "Parallel-Hierarchical", url="http://arxiv.org/abs/1603.08884")
cbtest_ne = reading_comprehension.metric("bAbi Children's Book comprehension CBtest NE", url="http://fb.ai/babi", scale=correct_percent, target=81.6, target_source="https://arxiv.org/abs/1511.02301")
cbtest_cn = reading_comprehension.metric("bAbi Children's Book comprehension CBtest CN", url="http://fb.ai/babi", scale=correct_percent, target=81.6, target_source="https://arxiv.org/abs/1511.02301")
cnn = reading_comprehension.metric("CNN Comprehension test", url="https://github.com/deepmind/rc-data/", scale=correct_percent)
daily_mail = reading_comprehension.metric("Daily Mail Comprehension test", url="https://github.com/deepmind/rc-data/", scale=correct_percent)
cnn.measure(date(2015, 6, 10), 63.0, "Attentive reader", url="https://arxiv.org/abs/1506.03340")
cnn.measure(date(2015, 6, 10), 63.8, "Impatient reader", url="https://arxiv.org/abs/1506.03340")
daily_mail.measure(date(2015, 6, 10), 69.0, "Attentive reader", url="https://arxiv.org/abs/1506.03340")
daily_mail.measure(date(2015, 6, 10), 68.0, "Impatient reader", url="https://arxiv.org/abs/1506.03340")
cnn.measure(date(2016, 6, 7), 75.7, "AIA", url="https://arxiv.org/abs/1606.02245v1")
cbtest_ne.measure(date(2016, 6, 7), 72.0, "AIA", url="https://arxiv.org/abs/1606.02245v1")
cbtest_cn.measure(date(2016, 6, 7), 71.0, "AIA", url="https://arxiv.org/abs/1606.02245v1")
cnn.measure(date(2016, 11, 9), 76.1, "AIA", url="https://arxiv.org/abs/1606.02245v4")
cnn.measure(date(2016, 6, 7), 74.0, "EpiReader", url="https://arxiv.org/abs/1606.02270")
cbtest_ne.measure(date(2016, 6, 7), 69.7, "EpiReader", url="https://arxiv.org/abs/1606.02270")
cbtest_cn.measure(date(2016, 6, 7), 67.4, "EpiReader", url="https://arxiv.org/abs/1606.02270")
cbtest_cn.measure(date(2016, 6, 5), 69.4, "GA reader", url="https://arxiv.org/abs/1606.01549v1")
cbtest_ne.measure(date(2016, 6, 5), 71.9, "GA reader", url="https://arxiv.org/abs/1606.01549v1")
cnn.measure(date(2016, 6, 5), 77.4, "GA reader", url="https://arxiv.org/abs/1606.01549v1")
daily_mail.measure(date(2016, 6, 5), 78.1, "GA reader", url="https://arxiv.org/abs/1606.01549v1")
cnn.measure(None, 77.9, "GA update L(w)", url="https://arxiv.org/abs/1606.01549v2")
daily_mail.measure(None, 80.9, "GA update L(w)", url="https://arxiv.org/abs/1606.01549v2")
cbtest_ne.measure(None, 74.9, "GA +feature, fix L(w)", url="https://arxiv.org/abs/1606.01549v2")
cbtest_cn.measure(None, 70.7, "GA +feature, fix L(w)", url="https://arxiv.org/abs/1606.01549v2")
# Neural semantic encoders invented in https://arxiv.org/abs/1607.04315v1 and retrospectively applied to CBTest by other authors
cbtest_ne.measure(date(2016, 12, 1), 73.2, "NSE", url="https://arxiv.org/abs/1606.01549v2", algorithm_src_url="https://arxiv.org/abs/1607.04315", min_date=date(2016,7,4))
cbtest_cn.measure(date(2016, 12, 1), 71.9, "NSE", url="https://arxiv.org/abs/1606.01549v2", algorithm_src_url="https://arxiv.org/abs/1607.04315", min_date=date(2016,7,4))
cnn.measure(date(2016, 8, 4), 74.4, "AoA reader", url="https://arxiv.org/pdf/1607.04423")
cbtest_ne.measure(date(2016, 8, 4), 72.0, "AoA reader", url="https://arxiv.org/pdf/1607.04423")
cbtest_cn.measure(date(2016, 8, 4), 69.4, "AoA reader", url="https://arxiv.org/pdf/1607.04423")
cnn.measure(date(2016, 8, 8), 77.6, "Attentive+relabeling+ensemble", url="https://arxiv.org/abs/1606.02858")
daily_mail.measure(date(2016, 8, 8), 79.2, "Attentive+relabeling+ensemble", url="https://arxiv.org/abs/1606.02858")
cnn.measure(None, 75.4, "AS reader (avg)", url="https://arxiv.org/abs/1603.01547v1")
cnn.measure(None, 74.8, "AS reader (greedy)", url="https://arxiv.org/abs/1603.01547v1")
daily_mail.measure(None, 77.1, "AS reader (avg)", url="https://arxiv.org/abs/1603.01547v1")
daily_mail.measure(None, 77.7, "AS reader (greedy)", url="https://arxiv.org/abs/1603.01547v1")
cbtest_ne.measure(None, 70.6, "AS reader (avg)", url="https://arxiv.org/abs/1603.01547v1")
cbtest_ne.measure(None, 71.0, "AS reader (greedy)", url="https://arxiv.org/abs/1603.01547v1")
cbtest_cn.measure(None, 68.9, "AS reader (avg)", url="https://arxiv.org/abs/1603.01547v1")
cbtest_cn.measure(None, 67.5, "AS reader (greedy)", url="https://arxiv.org/abs/1603.01547v1")
squad_em = reading_comprehension.metric("Stanford Question Answering Dataset EM test", url="https://stanford-qa.com/")
squad_f1 = reading_comprehension.metric("Stanford Question Answering Dataset F1 test", url="https://stanford-qa.com/")
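# The two SQuAD metrics above differ as follows: EM (exact match) scores an answer 1 only if it
# matches a gold answer string exactly, while F1 gives partial credit for token overlap. A
# simplified sketch (the official evaluation script also lowercases and strips punctuation and
# articles before comparing):
#
#     def token_f1(prediction, truth):
#         pred, gold = prediction.lower().split(), truth.lower().split()
#         common = sum(min(pred.count(t), gold.count(t)) for t in set(pred))
#         if common == 0:
#             return 0.0
#         precision = common / float(len(pred))
#         recall = common / float(len(gold))
#         return 2 * precision * recall / (precision + recall)
#
# e.g. prediction "the Broncos" against gold answer "Denver Broncos" scores EM = 0 but F1 = 0.5
# under this simplified scheme.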
squad_em.measure(date(2017, 3, 8), 76.922, "r-net (ensemble)", url="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf")
squad_f1.measure(date(2017, 3, 8), 84.006, "r-net (ensemble)", url="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf")
squad_em.measure(date(2017, 3, 8), 74.614, "r-net (single model)", url="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf")
squad_f1.measure(date(2017, 3, 8), 82.458, "r-net (single model)", url="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf")
squad_em.measure(date(2017, 5, 8), 73.754, "Mnemonic reader (ensemble)", url="https://arxiv.org/pdf/1705.02798.pdf")
squad_f1.measure(date(2017, 5, 8), 81.863, "Mnemonic reader (ensemble)", url="https://arxiv.org/pdf/1705.02798.pdf")
squad_em.measure(date(2017, 4, 20), 73.723, "SEDT+BiDAF (ensemble)", url="https://arxiv.org/pdf/1703.00572.pdf")
squad_f1.measure(date(2017, 4, 20), 81.53, "SEDT+BiDAF (ensemble)", url="https://arxiv.org/pdf/1703.00572.pdf")
squad_em.measure(date(2017, 2, 24), 73.744, "BiDAF (ensemble)", url="https://arxiv.org/abs/1611.01603")
squad_f1.measure(date(2017, 2, 24), 81.525, "BiDAF (ensemble)", url="https://arxiv.org/abs/1611.01603")
squad_em.measure(date(2017, 5,31), 73.01, "jNet (ensemble)",url="https://arxiv.org/abs/1703.04617", min_date=date(2017,5,1))
squad_f1.measure(date(2017, 5,31), 81.517, "jNet (ensemble)", url="https://arxiv.org/abs/1703.04617", min_date=date(2017,5,1))
squad_em.measure(date(2016, 12, 13), 73.765, "MPM (ensemble)", url="https://arxiv.org/abs/1612.04211")
squad_f1.measure(date(2016, 12, 13), 81.257, "MPM (ensemble)", url="https://arxiv.org/abs/1612.04211")
squad_em.measure(date(2016, 11, 4), 66.233, "Dynamic Coattention Networks (single model)", url="https://arxiv.org/pdf/1611.01604v1")
squad_f1.measure(date(2016, 11, 4), 75.896, "Dynamic Coattention Networks (single model)", url="https://arxiv.org/pdf/1611.01604v1")
squad_em.measure(date(2016, 11, 4), 71.625, "Dynamic Coattention Networks (ensemble)", url="https://arxiv.org/pdf/1611.01604v1")
squad_f1.measure(date(2016, 11, 4), 80.383, "Dynamic Coattention Networks (ensemble)", url="https://arxiv.org/pdf/1611.01604v1")
squad_em.measure(date(2017, 5,31), 70.607, "jNet (single model)", url="https://arxiv.org/abs/1703.04617", min_date=date(2017,5,1))
squad_f1.measure(date(2017, 5,31), 79.456, "jNet (single model)", url="https://arxiv.org/abs/1703.04617", min_date=date(2017,5,1))
squad_em.measure(date(2017, 4, 24), 70.639, "Ruminating Reader (single model)", url="https://arxiv.org/pdf/1704.07415.pdf")
squad_f1.measure(date(2017, 4, 24), 79.821, "Ruminating Reader (single model)", url="https://arxiv.org/pdf/1704.07415.pdf")
squad_em.measure(date(2017, 3, 31), 70.733, "Document Reader (single model)", url="https://arxiv.org/abs/1704.00051")
squad_f1.measure(date(2017, 3, 31), 79.353, "Document Reader (single model)", url="https://arxiv.org/abs/1704.00051")
squad_em.measure(date(2017, 5, 8), 69.863, "Mnemonic reader (single model)", url="https://arxiv.org/pdf/1705.02798.pdf")
squad_f1.measure(date(2017, 5, 8), 79.207, "Mnemonic reader (single model)", url="https://arxiv.org/pdf/1705.02798.pdf")
squad_em.measure(date(2016, 12, 29), 70.849, "FastQAExt", url="https://arxiv.org/abs/1703.04816")
squad_f1.measure(date(2016, 12, 29), 78.857, "FastQAExt", url="https://arxiv.org/abs/1703.04816")
squad_em.measure(date(2016, 12, 13), 70.387, "MPM (single model)", url="https://arxiv.org/abs/1612.04211")
squad_f1.measure(date(2016, 12, 13), 78.784, "MPM (single model)", url="https://arxiv.org/abs/1612.04211")
squad_em.measure(date(2017, 5, 31), 70.849, "RaSoR (single model)", url="https://arxiv.org/abs/1611.01436", min_date=date(2017,5,1))
squad_f1.measure(date(2017, 5, 31), 78.741, "RaSoR (single model)", url="https://arxiv.org/abs/1611.01436", min_date=date(2017,5,1))
squad_em.measure(date(2017, 4, 20), 68.478, "SEDT+BiDAF (single model)", url="https://arxiv.org/pdf/1703.00572.pdf")
squad_f1.measure(date(2017, 4, 20), 77.971, "SEDT+BiDAF (single model)", url="https://arxiv.org/pdf/1703.00572.pdf")
squad_em.measure(date(2016, 11, 29), 68.478, "BiDAF (single model)", url="https://arxiv.org/abs/1611.01603")
squad_f1.measure(date(2016, 11, 29), 77.971, "BiDAF (single model)", url="https://arxiv.org/abs/1611.01603")
squad_em.measure(date(2016, 12, 29), 68.436, "FastQA", url="https://arxiv.org/abs/1703.04816")
squad_f1.measure(date(2016, 12, 29), 77.07, "FastQA", url="https://arxiv.org/abs/1703.04816")
squad_em.measure(date(2016, 11, 7), 67.901, "Match-LSTM+Ans-Ptr", url="https://arxiv.org/pdf/1608.07905v2")
squad_f1.measure(date(2016, 11, 7), 77.022, "Match-LSTM+Ans-Ptr", url="https://arxiv.org/pdf/1608.07905v2")
translation = Problem("Translation between human languages", ["agi", "language"])
en_fr_bleu = translation.metric("news-test-2014 En-Fr BLEU", url="http://aclweb.org/anthology/P/P02/P02-1040.pdf", scale=bleu_score, target_label="Identical to professional human translations", target=50)
en_de_bleu = translation.metric("news-test-2014 En-De BLEU", url="http://aclweb.org/anthology/P/P02/P02-1040.pdf", scale=bleu_score, target_label="Identical to professional human translations", target=50)
en_ro_bleu = translation.metric("news-test-2016 En-Ro BLEU", url="http://www.statmt.org/wmt16/book.pdf", scale=bleu_score, target_label="Identical to professional human translations", target=50)
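# BLEU (the bleu_score scale above) combines modified n-gram precision against reference
# translations with a brevity penalty, and is reported here on the usual 0-100 scale. A hedged
# sketch of computing it with NLTK (WMT results use their own tokenisation and scoring scripts,
# so this only illustrates the scale; it won't reproduce the numbers below):
#
#     from nltk.translate.bleu_score import corpus_bleu
#     references = [[["the", "cat", "sat", "on", "the", "mat"]]]        # per-hypothesis list of reference token lists
#     hypotheses = [["the", "cat", "sat", "on", "the", "mat", "today"]]
#     print(corpus_bleu(references, hypotheses) * 100)                  # ~81 for this toy pair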
en_fr_bleu.measure(None, 37, "PBMT", url="http://www.anthology.aclweb.org/W/W14/W14-33.pdf", papername=u"Edinburgh’s phrase-based machine translation systems for WMT-14", venue="WMT 2014")
en_de_bleu.measure(None, 20.7, "PBMT", url="http://www.anthology.aclweb.org/W/W14/W14-33.pdf", papername=u"Edinburgh’s phrase-based machine translation systems for WMT-14", venue="WMT 2014")
en_fr_bleu.measure(date(2014, 9, 1), 36.15, "RNN-search50*", url="https://arxiv.org/abs/1409.0473")
en_fr_bleu.measure(date(2014, 10, 30), 37.5, "LSTM6 + PosUnk", url="https://arxiv.org/abs/1410.8206")
# XXX need a better way of indicating that LSTM is old.... we don't want the axes running
# all the way back to 1997; maybe we can use ellipses?
en_fr_bleu.measure(None, 34.81, "LSTM", "https://arxiv.org/abs/1409.3215v1", algorithm_src_url="http://www.bioinf.jku.at/publications/older/2604.pdf")#, min_date=date(2010,1,1))
en_fr_bleu.measure(None, 36.5, "SMT+LSTM5", "https://arxiv.org/abs/1409.3215v1")
en_fr_bleu.measure(date(2016, 9, 26), 39.92, "GNMT+RL", url="https://arxiv.org/abs/1609.08144")
en_de_bleu.measure(date(2016, 9, 26), 26.30, "GNMT+RL", url="https://arxiv.org/abs/1609.08144")
# Lots of this data is coming via https://arxiv.org/abs/1609.08144
en_fr_bleu.measure(date(2016, 7, 23), 39.2, "Deep-Att + PosUnk", url="https://arxiv.org/abs/1606.04199")
en_de_bleu.measure(date(2016, 7, 23), 20.7, "Deep-Att", url="https://arxiv.org/abs/1606.04199")
en_fr_bleu.measure(date(2017, 1, 23), 40.56, "MoE 2048", url="https://arxiv.org/pdf/1701.06538")
en_de_bleu.measure(date(2017, 1, 23), 26.03, "MoE 2048", url="https://arxiv.org/pdf/1701.06538")
en_fr_bleu.measure(None, 41.29, "ConvS2S ensemble", url="https://arxiv.org/abs/1705.03122v2")
en_de_bleu.measure(None, 26.36, "ConvS2S ensemble", url="https://arxiv.org/abs/1705.03122v2")
en_de_bleu.measure(date(2016, 7, 14), 17.93, "NSE-NSE", url="https://arxiv.org/abs/1607.04315v1")
en_ro_bleu.measure(date(2016, 7, 11), 28.9, "GRU BPE90k", papername="The QT21/HimL Combined Machine Translation System", url="http://www.statmt.org/wmt16/pdf/W16-2320.pdf")
en_ro_bleu.measure(None, 29.88, "ConvS2S BPE40k", url="https://arxiv.org/abs/1705.03122v2")
# XXX add more languages
| AI-metrics-master | data/language.py |
"Hand-entered data about performance of generative models"
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
"""
Generative models of CIFAR-10 Natural Images [Year: bits-per-subpixel, method]. Compiled by Durk Kingma.
Why we care:
(1) The compression=prediction=understanding=intelligence view (see Hutter prize, etc.). (Note that perplexity, log-likelihood, and #bits are all equivalent measurements.)
(2) Learning a generative model is a prominent auxiliary task towards semi-supervised learning. Current SOTA semi-supervised classification results utilize generative models.
(3) You're finding patterns in the data that let you compress it more efficiently. Ultimate pattern recognition benchmark, because you're trying to find the patterns in all the data.
"""
image_generation = Problem("Drawing pictures", ["vision", "agi"])
# note: this section is not on scene generation, but making the distinction seemed like a good idea.
scene_generation = Problem("Be able to generate complex scenes, e.g. a baboon receiving their degree at convocation.", ["vision", "world-modelling", "agi"])
scene_generation.add_subproblem(image_generation)
# NOTE: scale, and target need to be checked
image_generation_metric = image_generation.metric("Generative models of CIFAR-10 images", scale=bits_per_x, axis_label="Model entropy (bits per subpixel)")
image_generation_metric.measure(date(2014,10,30), 4.48, "NICE", "https://arxiv.org/abs/1410.8516")
image_generation_metric.measure(date(2015,2,16), 4.13, "DRAW", "https://arxiv.org/abs/1502.04623")
image_generation_metric.measure(date(2016,5,27), 3.49, "Real NVP", "https://arxiv.org/abs/1605.08803")
image_generation_metric.measure(date(2016,6,15), 3.11, "VAE with IAF", "https://papers.nips.cc/paper/6581-improved-variational-inference-with-inverse-autoregressive-flow")
image_generation_metric.measure(date(2016,5,27), 3.0, "PixelRNN", "https://arxiv.org/abs/1605.08803")
image_generation_metric.measure(date(2016,11,4), 2.92, "PixelCNN++","https://openreview.net/forum?id=BJrFC6ceg", replicated="https://github.com/openai/pixel-cnn")
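# The docstring at the top of this file treats #bits, log-likelihood and perplexity as
# interchangeable; concretely, the numbers above are negative log-likelihoods expressed in bits
# per colour channel value ("subpixel"). An illustrative converter, assuming 32x32 RGB CIFAR-10
# images (3072 subpixels); it isn't used elsewhere in this file:
import math
def nats_per_image_to_bits_per_subpixel(nats, num_subpixels=32 * 32 * 3):
    """Convert a per-image negative log-likelihood in nats to bits per subpixel."""
    return nats / (num_subpixels * math.log(2))
# e.g. PixelCNN++'s 2.92 bits per subpixel corresponds to roughly 2.92 * 3072 * ln(2) ~= 6218 nats per image.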
| AI-metrics-master | data/generative.py |
from taxonomy import Problem
from scales import *
import datetime
date = datetime.date
computer_games = Problem("Play real-time computer & video games", ["world-modelling", "realtime-games", "agi", "language"])
games_requiring_novel_language = Problem("Games that require inventing novel language, forms of speech, or communication")
games_requiring_speech = Problem("Games that require both understanding and speaking a language")
games_requiring_speech.metric("Starcraft")
games_requiring_language_comprehension = Problem("Games that require language comprehension", ["agi", "language"])
computer_games.add_subproblem(games_requiring_novel_language)
games_requiring_novel_language.add_subproblem(games_requiring_speech)
games_requiring_speech.add_subproblem(games_requiring_language_comprehension)
# Atari 2600 Games: Breakout, Enduro, Pong, Q*Bert, Seaquest, S. Invaders. Each game has its own metric.
# We previously used data hand-compiled by Yomna Nasser and Miles Brundage; this is
# now mostly obsolete, and the data is scraped in scrapers/atari.py
simple_games = Problem("Simple video games", ["world-modelling", "realtime-games", "agi"])
computer_games.add_subproblem(simple_games)
# Alien
alien_metric = simple_games.metric("Atari 2600 Alien", scale=atari_linear, target=6875, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# alien_metric.measure(date(2015, 2, 26), 3069, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# alien_metric.measure(date(2015,11,20), 1620, "DQN","https://arxiv.org/abs/1511.06581v1")
# alien_metric.measure(date(2015,11,20), 3747.7, "DDQN","https://arxiv.org/abs/1511.06581v1")
# alien_metric.measure(date(2015,11,20), 4461.4, "Duel","https://arxiv.org/abs/1511.06581v1")
# Amidar
amidar_metric = simple_games.metric("Atari 2600 Amidar", scale=atari_linear, target=1676, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# amidar_metric.measure(date(2015, 2, 26), 739.5, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# amidar_metric.measure(date(2015,11,20), 978, "DQN","https://arxiv.org/abs/1511.06581v1")
# amidar_metric.measure(date(2015,11,20), 1793.3, "DDQN","https://arxiv.org/abs/1511.06581v1")
# amidar_metric.measure(date(2015,11,20), 2354.5, "Duel","https://arxiv.org/abs/1511.06581v1")
# Assault
assault_metric = simple_games.metric("Atari 2600 Assault", scale=atari_linear, target=1496, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# assault_metric.measure(date(2015, 2, 26), 3359, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# assault_metric.measure(date(2015,11,20), 4280.4, "DQN","https://arxiv.org/abs/1511.06581v1")
# assault_metric.measure(date(2015,11,20), 5393.2, "DDQN","https://arxiv.org/abs/1511.06581v1")
# assault_metric.measure(date(2015,11,20), 4621.0, "Duel","https://arxiv.org/abs/1511.06581v1")
# Asterix
asterix_metric = simple_games.metric("Atari 2600 Asterix", scale=atari_linear, target=8503, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# asterix_metric.measure(date(2015, 2, 26), 6012, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# asterix_metric.measure(date(2015,11,20), 4359, "DQN","https://arxiv.org/abs/1511.06581v1")
# asterix_metric.measure(date(2015,11,20), 17356, "DDQN","https://arxiv.org/abs/1511.06581v1")
# asterix_metric.measure(date(2015,11,20), 28188, "Duel","https://arxiv.org/abs/1511.06581v1")
# Asteroids
asteroids_metric = simple_games.metric("Atari 2600 Asteroids", scale=atari_linear, target=13157, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# asteroids_metric.measure(date(2015, 2, 26), 1629, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# asteroids_metric.measure(date(2015,11,20), 1364.5, "DQN","https://arxiv.org/abs/1511.06581v1")
# asteroids_metric.measure(date(2015,11,20), 734.7, "DDQN","https://arxiv.org/abs/1511.06581v1")
# asteroids_metric.measure(date(2015,11,20), 2837.7, "Duel","https://arxiv.org/abs/1511.06581v1")
# Atlantis
atlantis_metric = simple_games.metric("Atari 2600 Atlantis", scale=atari_linear, target=29028, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# atlantis_metric.measure(date(2015, 2, 26), 85641, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# atlantis_metric.measure(date(2015,11,20), 279987, "DQN","https://arxiv.org/abs/1511.06581v1")
# atlantis_metric.measure(date(2015,11,20), 106056, "DDQN","https://arxiv.org/abs/1511.06581v1")
# atlantis_metric.measure(date(2015,11,20), 382572, "Duel","https://arxiv.org/abs/1511.06581v1")
# Bank Heist
bank_heist_metric = simple_games.metric("Atari 2600 Bank Heist", scale=atari_linear, target=734.4, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# bank_heist_metric.measure(date(2015, 2, 26), 429.7, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# bank_heist_metric.measure(date(2015,11,20), 455, "DQN","https://arxiv.org/abs/1511.06581v1")
# bank_heist_metric.measure(date(2015,11,20), 1030.6, "DDQN","https://arxiv.org/abs/1511.06581v1")
# bank_heist_metric.measure(date(2015,11,20), 1611.9, "Duel","https://arxiv.org/abs/1511.06581v1")
# Battle Zone
battle_zone_metric = simple_games.metric("Atari 2600 Battle Zone", scale=atari_linear, target=37800, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# battle_zone_metric.measure(date(2015, 2, 26), 26300, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# battle_zone_metric.measure(date(2015,11,20), 29900, "DQN","https://arxiv.org/abs/1511.06581v1")
# battle_zone_metric.measure(date(2015,11,20), 31700, "DDQN","https://arxiv.org/abs/1511.06581v1")
# battle_zone_metric.measure(date(2015,11,20), 37150, "Duel","https://arxiv.org/abs/1511.06581v1")
# Beam Rider
beam_rider_metric = simple_games.metric("Atari 2600 Beam Rider", scale=atari_linear, target=7456, target_source="https://arxiv.org/pdf/1312.5602.pdf")
# beam_rider_metric.measure(date(2013,12,19), 4092, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# beam_rider_metric.measure(date(2015,11,20), 8627, "DQN","https://arxiv.org/abs/1511.06581v1")
# beam_rider_metric.measure(date(2015,11,20), 13772, "DDQN","https://arxiv.org/abs/1511.06581v1")
# beam_rider_metric.measure(date(2015,11,20), 12164, "Duel","https://arxiv.org/abs/1511.06581v1")
# Berzerk
berzerk_metric = simple_games.metric("Atari 2600 Berzerk", scale=atari_linear, target=2630.4, target_source="https://arxiv.org/abs/1511.06581v1")
# berzerk_metric.measure(date(2015,11,20), 585, "DQN","https://arxiv.org/abs/1511.06581v1")
# berzerk_metric.measure(date(2015,11,20), 1225, "DDQN","https://arxiv.org/abs/1511.06581v1")
# berzerk_metric.measure(date(2015,11,20), 1472, "Duel","https://arxiv.org/abs/1511.06581v1")
# Bowling
bowling_metric = simple_games.metric("Atari 2600 Bowling", scale=atari_linear, target=154.8, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# bowling_metric.measure(date(2015, 2, 26), 42.8, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# bowling_metric.measure(date(2015,11,20), 50.4, "DQN","https://arxiv.org/abs/1511.06581v1")
# bowling_metric.measure(date(2015,11,20), 68.1, "DDQN","https://arxiv.org/abs/1511.06581v1")
# bowling_metric.measure(date(2015,11,20), 65.5, "Duel","https://arxiv.org/abs/1511.06581v1")
# Boxing
boxing_metric = simple_games.metric("Atari 2600 Boxing", scale=atari_linear, target=4.3, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# boxing_metric.measure(date(2015, 2, 26), 71.8, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# boxing_metric.measure(date(2015,11,20), 88, "DQN","https://arxiv.org/abs/1511.06581v1")
# boxing_metric.measure(date(2015,11,20), 91.6, "DDQN","https://arxiv.org/abs/1511.06581v1")
# boxing_metric.measure(date(2015,11,20), 99.4, "Duel","https://arxiv.org/abs/1511.06581v1")
# Breakout
breakout_metric = simple_games.metric("Atari 2600 Breakout", scale=atari_linear, target=31.8, target_source="https://pdfs.semanticscholar.org/340f/48901f72278f6bf78a04ee5b01df208cc508.pdf")
# breakout_metric.measure(date(2013,12,19), 225, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# breakout_metric.measure(date(2015,2,26), 401.2, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# breakout_metric.measure(date(2015,10,22), 375, "DoubleDQN", "https://pdfs.semanticscholar.org/3b97/32bb07dc99bde5e1f9f75251c6ea5039373e.pdf")
# breakout_metric.measure(date(2015,11,20), 385.5, "DQN", "https://arxiv.org/abs/1511.06581v1")
# breakout_metric.measure(date(2015,11,20), 418.5, "DDQN", "https://arxiv.org/abs/1511.06581v1")
# breakout_metric.measure(date(2015,11,20), 345.3, "Duel", "https://arxiv.org/abs/1511.06581v1")
# breakout_metric.measure(date(2016,6,16), 766.8, "A3C LSTM", "https://arxiv.org/pdf/1602.01783.pdf")
# Centipede
centipede_metric = simple_games.metric("Atari 2600 Centipede", scale=atari_linear, target=11963, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# centipede_metric.measure(date(2015, 2, 26), 8309, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# centipede_metric.measure(date(2015,11,20), 4657, "DQN","https://arxiv.org/abs/1511.06581v1")
# centipede_metric.measure(date(2015,11,20), 5409, "DDQN","https://arxiv.org/abs/1511.06581v1")
# centipede_metric.measure(date(2015,11,20), 7561, "Duel","https://arxiv.org/abs/1511.06581v1")
# Chopper Command
chopper_command_metric = simple_games.metric("Atari 2600 Chopper Command", scale=atari_linear, target=9882, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# chopper_command_metric.measure(date(2015, 2, 26), 6687, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# chopper_command_metric.measure(date(2015,11,20), 6126, "DQN","https://arxiv.org/abs/1511.06581v1")
# chopper_command_metric.measure(date(2015,11,20), 5809, "DDQN","https://arxiv.org/abs/1511.06581v1")
# chopper_command_metric.measure(date(2015,11,20), 11215, "Duel","https://arxiv.org/abs/1511.06581v1")
# Crazy Climber
crazy_climber_metric = simple_games.metric("Atari 2600 Crazy Climber", scale=atari_linear, target=35411, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# crazy_climber_metric.measure(date(2015, 2, 26), 114103, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# crazy_climber_metric.measure(date(2015,11,20), 110763, "DQN","https://arxiv.org/abs/1511.06581v1")
# crazy_climber_metric.measure(date(2015,11,20), 117282, "DDQN","https://arxiv.org/abs/1511.06581v1")
# crazy_climber_metric.measure(date(2015,11,20), 143570, "Duel","https://arxiv.org/abs/1511.06581v1")
# Demon Attack
demon_attack_metric = simple_games.metric("Atari 2600 Demon Attack", scale=atari_linear, target=3401, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# demon_attack_metric.measure(date(2015, 2, 26), 9711, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# demon_attack_metric.measure(date(2015,11,20), 12149, "DQN","https://arxiv.org/abs/1511.06581v1")
# demon_attack_metric.measure(date(2015,11,20), 58044, "DDQN","https://arxiv.org/abs/1511.06581v1")
# demon_attack_metric.measure(date(2015,11,20), 60813, "Duel","https://arxiv.org/abs/1511.06581v1")
# Double Dunk
# TODO: investigate alternate scale
double_dunk_metric = simple_games.metric("Atari 2600 Double Dunk", scale=atari_linear, target=-15.5, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# double_dunk_metric.measure(date(2015, 2, 26), -18.1, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# double_dunk_metric.measure(date(2015,11,20), -6.6, "DQN","https://arxiv.org/abs/1511.06581v1")
# double_dunk_metric.measure(date(2015,11,20), -5.5, "DDQN","https://arxiv.org/abs/1511.06581v1")
# double_dunk_metric.measure(date(2015,11,20), 0.1, "Duel","https://arxiv.org/abs/1511.06581v1")
# Enduro
enduro_metric = simple_games.metric("Atari 2600 Enduro", scale=atari_linear, target=309.6, target_source="https://pdfs.semanticscholar.org/340f/48901f72278f6bf78a04ee5b01df208cc508.pdf")
# enduro_metric.measure(date(2013,12,19), 661, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# enduro_metric.measure(date(2015,2,26), 301, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# enduro_metric.measure(date(2015,10,22), 319, "DoubleDQN", "https://pdfs.semanticscholar.org/3b97/32bb07dc99bde5e1f9f75251c6ea5039373e.pdf")
# enduro_metric.measure(date(2015,11,20), 729.0, "DQN", "https://arxiv.org/abs/1511.06581v1")
# enduro_metric.measure(date(2015,11,20), 1211.8, "DDQN", "https://arxiv.org/abs/1511.06581v1")
# enduro_metric.measure(date(2015,11,20), 2258.2, "Duel", "https://arxiv.org/abs/1511.06581v1")
# enduro_metric.measure(date(2016,6,16), 82.5, "A3C LSTM", "https://arxiv.org/pdf/1602.01783.pdf")
# Fishing Derby
fishing_derby_metric = simple_games.metric("Atari 2600 Fishing Derby", scale=atari_linear, target=5.5, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# fishing_derby_metric.measure(date(2015, 2, 26), -0.8, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# fishing_derby_metric.measure(date(2015,11,20), -4.9, "DQN","https://arxiv.org/abs/1511.06581v1")
# fishing_derby_metric.measure(date(2015,11,20), 15.5, "DDQN","https://arxiv.org/abs/1511.06581v1")
# fishing_derby_metric.measure(date(2015,11,20), 46.4, "Duel","https://arxiv.org/abs/1511.06581v1")
# Freeway
freeway_metric = simple_games.metric("Atari 2600 Freeway", scale=atari_linear, target=29.6, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# freeway_metric.measure(date(2015, 2, 26), 30.3, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# freeway_metric.measure(date(2015,11,20), 30.8, "DQN","https://arxiv.org/abs/1511.06581v1")
# freeway_metric.measure(date(2015,11,20), 33.3, "DDQN","https://arxiv.org/abs/1511.06581v1")
# freeway_metric.measure(date(2015,11,20), 0, "Duel","https://arxiv.org/abs/1511.06581v1")
# Frostbite
frostbite_metric = simple_games.metric("Atari 2600 Frostbite", scale=atari_linear, target=4355, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# frostbite_metric.measure(date(2015, 2, 26), 328.3, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# frostbite_metric.measure(date(2015,11,20), 797.7, "DQN","https://arxiv.org/abs/1511.06581v1")
# frostbite_metric.measure(date(2015,11,20), 1683.3, "DDQN","https://arxiv.org/abs/1511.06581v1")
# frostbite_metric.measure(date(2015,11,20), 4672, "Duel","https://arxiv.org/abs/1511.06581v1")
# Gopher
gopher_metric = simple_games.metric("Atari 2600 Gopher", scale=atari_linear, target=2321, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# gopher_metric.measure(date(2015, 2, 26), 8520, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# gopher_metric.measure(date(2015,11,20), 8777, "DQN","https://arxiv.org/abs/1511.06581v1")
# gopher_metric.measure(date(2015,11,20), 14840, "DDQN","https://arxiv.org/abs/1511.06581v1")
# gopher_metric.measure(date(2015,11,20), 15718, "Duel","https://arxiv.org/abs/1511.06581v1")
# Gravitar
gravitar_metric = simple_games.metric("Atari 2600 Gravitar", scale=atari_linear, target=2672, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# gravitar_metric.measure(date(2015, 2, 26), 306.7, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# gravitar_metric.measure(date(2015,11,20), 473, "DQN","https://arxiv.org/abs/1511.06581v1")
# gravitar_metric.measure(date(2015,11,20), 412, "DDQN","https://arxiv.org/abs/1511.06581v1")
# gravitar_metric.measure(date(2015,11,20), 588, "Duel","https://arxiv.org/abs/1511.06581v1")
# H.E.R.O.
hero_metric = simple_games.metric("Atari 2600 HERO", scale=atari_linear, target=25763, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# hero_metric.measure(date(2015, 2, 26), 19950, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# hero_metric.measure(date(2015,11,20), 20437, "DQN","https://arxiv.org/abs/1511.06581v1")
# hero_metric.measure(date(2015,11,20), 20130, "DDQN","https://arxiv.org/abs/1511.06581v1")
# hero_metric.measure(date(2015,11,20), 20818, "Duel","https://arxiv.org/abs/1511.06581v1")
# Ice Hockey
ice_hockey_metric = simple_games.metric("Atari 2600 Ice Hockey", scale=atari_linear, target=0.9, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# ice_hockey_metric.measure(date(2015, 2, 26), -1.6, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# James Bond
james_bond_metric = simple_games.metric("Atari 2600 James Bond", scale=atari_linear, target=406.7, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# james_bond_metric.measure(date(2015, 2, 26), 576.7, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Kangaroo
kangaroo_metric = simple_games.metric("Atari 2600 Kangaroo", scale=atari_linear, target=3035, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# kangaroo_metric.measure(date(2015, 2, 26), 6740, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Krull
krull_metric = simple_games.metric("Atari 2600 Krull", scale=atari_linear, target=2395, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# krull_metric.measure(date(2015, 2, 26), 3805, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Kung-Fu Master
kung_fu_master_metric = simple_games.metric("Atari 2600 Kung-Fu Master", scale=atari_linear, target=22736, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# kung_fu_master_metric.measure(date(2015, 2, 26), 23270, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Montezuma's Revenge
montezumas_revenge_metric = simple_games.metric("Atari 2600 Montezuma's Revenge", scale=atari_linear, target=4367, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# montezumas_revenge_metric.measure(date(2015, 2, 26), 0, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Ms. Pacman
ms_pacman_metric = simple_games.metric("Atari 2600 Ms. Pacman", scale=atari_linear, target=15693, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# ms_pacman_metric.measure(date(2015, 2, 26), 2311, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Name This Game
name_this_game_metric = simple_games.metric("Atari 2600 Name This Game", scale=atari_linear, target=4076, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# name_this_game_metric.measure(date(2015, 2, 26), 7257, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Pong
pong_metric = simple_games.metric("Atari 2600 Pong", scale=atari_linear, target=9.3, target_source="https://pdfs.semanticscholar.org/340f/48901f72278f6bf78a04ee5b01df208cc508.pdf")
# pong_metric.measure(date(2013,12,19), 21, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# pong_metric.measure(date(2015,2,26), 18.9, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# pong_metric.measure(date(2015,10,22), 21, "DoubleDQN", "https://pdfs.semanticscholar.org/3b97/32bb07dc99bde5e1f9f75251c6ea5039373e.pdf")
# pong_metric.measure(date(2015,11,20), 21, "DQN", "https://arxiv.org/abs/1511.06581v1")
# pong_metric.measure(date(2015,11,20), 20.9, "DDQN", "https://arxiv.org/abs/1511.06581v1")
# pong_metric.measure(date(2015,11,20), 19.5, "Duel", "https://arxiv.org/abs/1511.06581v1")
# pong_metric.measure(date(2016,6,16), 10.7, "A3C LSTM", "https://arxiv.org/pdf/1602.01783.pdf")
# Private Eye
private_eye_metric = simple_games.metric("Atari 2600 Private Eye", scale=atari_linear, target=69571, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# private_eye_metric.measure(date(2015, 2, 26), 1788, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Q*Bert
q_bert_metric = simple_games.metric("Atari 2600 Q*Bert", scale=atari_linear, target=13455, target_source="https://pdfs.semanticscholar.org/340f/48901f72278f6bf78a04ee5b01df208cc508.pdf")
# q_bert_metric.measure(date(2013,12,19), 4500, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# q_bert_metric.measure(date(2015,2,26), 10596, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# q_bert_metric.measure(date(2015,10,22), 14875, "DoubleDQN", "https://pdfs.semanticscholar.org/3b97/32bb07dc99bde5e1f9f75251c6ea5039373e.pdf")
# q_bert_metric.measure(date(2015,11,20), 13117.3, "DQN", "https://arxiv.org/abs/1511.06581v1")
# q_bert_metric.measure(date(2015,11,20), 15088.5, "DDQN", "https://arxiv.org/abs/1511.06581v1")
# q_bert_metric.measure(date(2015,11,20), 19220, "Duel", "https://arxiv.org/abs/1511.06581v1")
# q_bert_metric.measure(date(2016,6,16), 21307, "A3C LSTM", "https://arxiv.org/pdf/1602.01783.pdf")
# River Raid
river_raid_metric = simple_games.metric("Atari 2600 River Raid", scale=atari_linear, target=13513, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# river_raid_metric.measure(date(2015, 2, 26), 8316, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Road Runner
road_runner_metric = simple_games.metric("Atari 2600 Road Runner", scale=atari_linear, target=7845, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# road_runner_metric.measure(date(2015, 2, 26), 18257, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Robotank
robotank_metric = simple_games.metric("Atari 2600 Robotank", scale=atari_linear, target=11.9, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# robotank_metric.measure(date(2015, 2, 26), 51.6, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Seaquest
seaquest_metric = simple_games.metric("Atari 2600 Seaquest", scale=atari_linear, target=20182, target_source="https://pdfs.semanticscholar.org/340f/48901f72278f6bf78a04ee5b01df208cc508.pdf")
# seaquest_metric.measure(date(2013,12,19), 1740, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# seaquest_metric.measure(date(2015,2,26), 5286, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# seaquest_metric.measure(date(2015,10,22), 7995, "DoubleDQN", "https://arxiv.org/abs/1509.06461")
# seaquest_metric.measure(date(2015,11,20), 5860.6, "DQN", "https://arxiv.org/abs/1511.06581v1")
# seaquest_metric.measure(date(2015,11,20), 16452.7, "DDQN", "https://arxiv.org/abs/1511.06581v1")
# seaquest_metric.measure(date(2015,11,20), 50254.2, "Duel", "https://arxiv.org/abs/1511.06581v1")
# seaquest_metric.measure(date(2016,6,16), 1326.1, "A3C LSTM", "https://arxiv.org/pdf/1602.01783.pdf")
# Space Invaders
space_invaders_metric = simple_games.metric("Atari 2600 Space Invaders", scale=atari_linear, target=1652, target_source="https://pdfs.semanticscholar.org/340f/48901f72278f6bf78a04ee5b01df208cc508.pdf")
# space_invaders_metric.measure(date(2013,12,19), 1075, "DQN", "https://arxiv.org/pdf/1312.5602.pdf")
# space_invaders_metric.measure(date(2015,2,26), 1976, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# space_invaders_metric.measure(date(2015,10,22), 3154, "DoubleDQN", "https://arxiv.org/abs/1509.06461")
# space_invaders_metric.measure(date(2015,11,20), 1692.3, "DQN", "https://arxiv.org/abs/1511.06581v1")
# space_invaders_metric.measure(date(2015,11,20), 2525.5, "DDQN", "https://arxiv.org/abs/1511.06581v1")
# space_invaders_metric.measure(date(2015,11,20), 6427.3, "Duel", "https://arxiv.org/abs/1511.06581v1")
# space_invaders_metric.measure(date(2016,6,16), 23846, "A3C LSTM", "https://arxiv.org/pdf/1602.01783.pdf")
# Star Gunner
star_gunner_metric = simple_games.metric("Atari 2600 Star Gunner", scale=atari_linear, target=10250, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# star_gunner_metric.measure(date(2015, 2, 26), 57997, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Tennis
# TODO: negative linear scale?
tennis_metric = simple_games.metric("Atari 2600 Tennis", scale=atari_linear, target=-8.9, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# tennis_metric.measure(date(2015, 2, 26), -2.5, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Time Pilot
time_pilot_metric = simple_games.metric("Atari 2600 Time Pilot", scale=atari_linear, target=5925, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# time_pilot_metric.measure(date(2015, 2, 26), 5947, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Tutankham
tutankham_metric = simple_games.metric("Atari 2600 Tutankham", scale=atari_linear, target=167.6, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# tutankham_metric.measure(date(2015, 2, 26), 186.7, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Up and Down
up_and_down_metric = simple_games.metric("Atari 2600 Up and Down", scale=atari_linear, target=9082, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# up_and_down_metric.measure(date(2015, 2, 26), 8456, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Venture
venture_metric = simple_games.metric("Atari 2600 Venture", scale=atari_linear, target=1188, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# venture_metric.measure(date(2015, 2, 26), 3800, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Video Pinball
video_pinball_metric = simple_games.metric("Atari 2600 Video Pinball", scale=atari_linear, target=17298, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# video_pinball_metric.measure(date(2015, 2, 26), 42684, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Wizard of Wor
wizard_of_wor_metric = simple_games.metric("Atari 2600 Wizard of Wor", scale=atari_linear, target=4757, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# wizard_of_wor_metric.measure(date(2015, 2, 26), 3393, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Zaxxon
zaxxon_metric = simple_games.metric("Atari 2600 Zaxxon", scale=atari_linear, target=9173, target_source="https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# zaxxon_metric.measure(date(2015, 2, 26), 4977, "DQN", "https://www.semanticscholar.org/paper/Human-level-control-through-deep-reinforcement-Mnih-Kavukcuoglu/340f48901f72278f6bf78a04ee5b01df208cc508")
# Results from the original DQN paper
beam_rider_metric.measure(None, 5184, "DQN best", "https://arxiv.org/abs/1312.5602")
breakout_metric.measure(None, 225, "DQN best", "https://arxiv.org/abs/1312.5602")
enduro_metric.measure(None, 661, "DQN best", "https://arxiv.org/abs/1312.5602")
pong_metric.measure(None, 21, "DQN best", "https://arxiv.org/abs/1312.5602")
q_bert_metric.measure(None, 4500, "DQN best", "https://arxiv.org/abs/1312.5602")
seaquest_metric.measure(None, 1740, "DQN best", "https://arxiv.org/abs/1312.5602")
space_invaders_metric.measure(None, 1075, "DQN best", "https://arxiv.org/abs/1312.5602")
| AI-metrics-master | data/video_games.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import BertTokenizer, BertModel, BertForPreTraining
from transformers import AdamW, get_linear_schedule_with_warmup
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
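# Rough usage sketch for the BERTDataset class defined below (corpus path, pretrained model name
# and hyperparameters here are placeholders; the script itself takes these as argparse flags):
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = BERTDataset("corpus.txt", tokenizer, seq_len=128, on_memory=True)
#   train_loader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=32)
#   for input_ids, input_mask, segment_ids, lm_label_ids, is_next in train_loader:
#       pass  # each batch comes from convert_example_to_features and feeds BertForPreTraining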
class BERTDataset(Dataset):
def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
self.vocab = tokenizer.vocab
self.tokenizer = tokenizer
self.seq_len = seq_len
self.on_memory = on_memory
self.corpus_lines = corpus_lines # number of non-empty lines in input corpus
self.corpus_path = corpus_path
self.encoding = encoding
self.current_doc = 0 # to avoid random sentence from same doc
# for loading samples directly from file
self.sample_counter = 0 # used to keep track of full epochs on file
self.line_buffer = None # keep second sentence of a pair in memory and use as first sentence in next pair
# for loading samples in memory
self.current_random_doc = 0
self.num_docs = 0
self.sample_to_doc = [] # map sample index to doc and line
# load samples into memory
if on_memory:
self.all_docs = []
doc = []
self.corpus_lines = 0
with open(corpus_path, "r", encoding=encoding) as f:
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
line = line.strip()
if line == "":
self.all_docs.append(doc)
doc = []
#remove last added sample because there won't be a subsequent line anymore in the doc
self.sample_to_doc.pop()
else:
#store as one sample
sample = {"doc_id": len(self.all_docs),
"line": len(doc)}
self.sample_to_doc.append(sample)
doc.append(line)
self.corpus_lines = self.corpus_lines + 1
# if last row in file is not empty
if self.all_docs[-1] != doc:
self.all_docs.append(doc)
self.sample_to_doc.pop()
self.num_docs = len(self.all_docs)
# load samples later lazily from disk
else:
if self.corpus_lines is None:
with open(corpus_path, "r", encoding=encoding) as f:
self.corpus_lines = 0
for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
if line.strip() == "":
self.num_docs += 1
else:
self.corpus_lines += 1
# if doc does not end with empty line
if line.strip() != "":
self.num_docs += 1
self.file = open(corpus_path, "r", encoding=encoding)
self.random_file = open(corpus_path, "r", encoding=encoding)
def __len__(self):
# last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
return self.corpus_lines - self.num_docs - 1
def __getitem__(self, item):
cur_id = self.sample_counter
self.sample_counter += 1
if not self.on_memory:
# after one epoch we start again from beginning of file
if cur_id != 0 and (cur_id % len(self) == 0):
self.file.close()
self.file = open(self.corpus_path, "r", encoding=self.encoding)
t1, t2, is_next_label = self.random_sent(item)
# tokenize
tokens_a = self.tokenizer.tokenize(t1)
tokens_b = self.tokenizer.tokenize(t2)
# combine to one sample
cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label)
# transform sample to features
cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)
cur_tensors = (torch.tensor(cur_features.input_ids),
torch.tensor(cur_features.input_mask),
torch.tensor(cur_features.segment_ids),
torch.tensor(cur_features.lm_label_ids),
torch.tensor(cur_features.is_next))
return cur_tensors
def random_sent(self, index):
"""
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
"""
t1, t2 = self.get_corpus_line(index)
if random.random() > 0.5:
label = 0
else:
t2 = self.get_random_line()
label = 1
assert len(t1) > 0
assert len(t2) > 0
return t1, t2, label
def get_corpus_line(self, item):
"""
Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
:param item: int, index of sample.
:return: (str, str), two subsequent sentences from corpus
"""
t1 = ""
t2 = ""
assert item < self.corpus_lines
if self.on_memory:
sample = self.sample_to_doc[item]
t1 = self.all_docs[sample["doc_id"]][sample["line"]]
t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
# used later to avoid random nextSentence from same doc
self.current_doc = sample["doc_id"]
return t1, t2
else:
if self.line_buffer is None:
# read first non-empty line of file
while t1 == "" :
t1 = next(self.file).strip()
t2 = next(self.file).strip()
else:
# use t2 from previous iteration as new t1
t1 = self.line_buffer
t2 = next(self.file).strip()
# skip empty rows that are used for separating documents and keep track of current doc id
while t2 == "" or t1 == "":
t1 = next(self.file).strip()
t2 = next(self.file).strip()
self.current_doc = self.current_doc+1
self.line_buffer = t2
assert t1 != ""
assert t2 != ""
return t1, t2
def get_random_line(self):
"""
Get random line from another document for nextSentence task.
:return: str, content of one line
"""
# Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document we're processing.
for _ in range(10):
if self.on_memory:
rand_doc_idx = random.randint(0, len(self.all_docs)-1)
rand_doc = self.all_docs[rand_doc_idx]
line = rand_doc[random.randrange(len(rand_doc))]
else:
rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
#pick random line
for _ in range(rand_index):
line = self.get_next_line()
#check if our picked random line is really from another doc like we want it to be
if self.current_random_doc != self.current_doc:
break
return line
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
tokens_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
tokens_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
            is_next: (Optional) int. Next-sentence label: 0 if tokens_b follows
                tokens_a in the corpus, 1 if tokens_b was sampled from another doc.
            lm_labels: (Optional) masked language model labels for the example.
"""
self.guid = guid
self.tokens_a = tokens_a
self.tokens_b = tokens_b
self.is_next = is_next # nextSentence
self.lm_labels = lm_labels # masked words for language model
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.is_next = is_next
self.lm_label_ids = lm_label_ids
def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = "[MASK]"
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
output_label.append(tokenizer.vocab[token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.vocab["[UNK]"])
logger.warning("Cannot find token '{}' in vocab. Using [UNK] insetad".format(token))
else:
# no masking token (will be ignored by loss function later)
output_label.append(-100)
return tokens, output_label
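# Illustrative sketch (not part of the original script): for tokens
# ["the", "dog", "barks"], random_word might return tokens like
# ["the", "[MASK]", "barks"] with output_label [-100, <vocab id of "dog">, -100];
# roughly 15% of positions are selected, and of those ~80% become "[MASK]",
# ~10% a random vocab token, and ~10% are left unchanged.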
def convert_example_to_features(example, max_seq_length, tokenizer):
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokenizer: Tokenizer
:return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.tokens_a
tokens_b = example.tokens_b
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
tokens_a, t1_label = random_word(tokens_a, tokenizer)
tokens_b, t2_label = random_word(tokens_b, tokenizer)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = ([-100] + t1_label + [-100] + t2_label + [-100])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
assert len(tokens_b) > 0
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-100)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
if example.guid < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("LM label: %s " % (lm_label_ids))
logger.info("Is next sentence label: %s " % (example.is_next))
features = InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_next=example.is_next)
return features
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_corpus",
default=None,
type=str,
required=True,
help="The input train corpus.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--on_memory",
action='store_true',
help="Whether to load train samples into memory or use disk")
parser.add_argument("--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir) and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
#train_examples = None
num_train_optimization_steps = None
if args.do_train:
print("Loading Train Dataset", args.train_corpus)
train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length,
corpus_lines=None, on_memory=args.on_memory)
num_train_optimization_steps = int(
len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
model = BertForPreTraining.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.do_train:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps)
global_step = 0
if args.do_train:
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
#TODO: check if this works with current data generator from disk that relies on next(file)
# (it doesn't return item back by index)
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
outputs = model(input_ids, segment_ids, input_mask, labels=lm_label_ids, next_sentence_label=is_next)
loss = outputs[0]
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
global_step += 1
# Save a trained model
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
logger.info("** ** * Saving fine - tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
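# Illustrative example: with max_length=4, tokens_a=["a", "b", "c"] and
# tokens_b=["d", "e", "f"] are truncated in place to ["a", "b"] and ["d", "e"],
# always popping from whichever sequence is currently longer.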
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
if __name__ == "__main__":
main()
| commonsense-kg-completion-master | lm_finetuning/simple_lm_finetuning.py |
__author__ = "chaitanya" # partially borrowed from implemenation of ConvE
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_normal_
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
class DistMult(nn.Module):
def __init__(self, num_entities, num_relations, args):
super(DistMult, self).__init__()
self.no_cuda = args.no_cuda
self.w_relation = torch.nn.Embedding(num_relations, args.embedding_dim, padding_idx=0)
self.inp_drop = torch.nn.Dropout(args.input_dropout)
def init(self):
xavier_normal_(self.w_relation.weight.data)
def forward(self, embedding, e1, rel):
batch_size = e1.shape[0]
e1_embedded = embedding[e1].squeeze()
rel_embedded = self.w_relation(rel).squeeze()
e1_embedded = self.inp_drop(e1_embedded)
rel_embedded = self.inp_drop(rel_embedded)
score = torch.mm(e1_embedded * rel_embedded, embedding.t())
        score = torch.sigmoid(score)
return score
class ConvE(nn.Module):
def __init__(self, num_entities, num_relations, args):
super(ConvE, self).__init__()
self.w_relation = torch.nn.Embedding(num_relations, args.n_hidden, padding_idx=0)
self.inp_drop = torch.nn.Dropout(args.input_dropout)
self.hidden_drop = torch.nn.Dropout(args.dropout)
self.feature_map_drop = torch.nn.Dropout2d(args.feature_map_dropout)
self.conv1 = torch.nn.Conv2d(1, 32, (3, 3), 1, 0, bias=args.use_bias)
self.bn0 = torch.nn.BatchNorm2d(1)
self.bn1 = torch.nn.BatchNorm2d(32)
self.bn2 = torch.nn.BatchNorm1d(args.n_hidden)
self.register_parameter('b', Parameter(torch.zeros(num_entities)))
self.fc = torch.nn.Linear(10368, args.n_hidden)
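        # 10368 = 32 channels * 18 * 18: the flattened feature map after a 3x3
        # convolution (no padding) over the stacked 20 x 20 input below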
def init(self):
xavier_normal_(self.w_relation.weight.data)
def forward(self, embedding, e1, rel):
batch_size = e1.shape[0]
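        # reshape each embedding into a 1-channel 10 x 20 "image" for the
        # 2D convolution (assumes the node/relation embedding dimension is 200)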
e1_embedded = embedding[e1].view(-1, 1, 10, 20)
rel_embedded = self.w_relation(rel).view(-1, 1, 10, 20)
stacked_inputs = torch.cat([e1_embedded, rel_embedded], 2)
stacked_inputs = self.bn0(stacked_inputs)
x = self.inp_drop(stacked_inputs)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
x = x.view(batch_size, -1)
x = self.fc(x)
x = self.hidden_drop(x)
x = self.bn2(x)
x = F.relu(x)
x = torch.mm(x, embedding.t())
# x = torch.mm(x, self.emb_e.weight.transpose(1,0))
# x += self.b.expand_as(x)
x += self.b.expand_as(x)
pred = torch.sigmoid(x)
return pred
class ConvKB(nn.Module):
"""
Difference from ConvE: loss function is different, convolve over all e's at once
"""
def __init__(self, num_entities, num_relations, args):
super(ConvKB, self).__init__()
self.w_relation = torch.nn.Embedding(num_relations, args.n_hidden, padding_idx=0)
self.inp_drop = torch.nn.Dropout(args.input_dropout)
self.hidden_drop = torch.nn.Dropout(args.dropout)
self.feature_map_drop = torch.nn.Dropout(args.feature_map_dropout)
self.loss = torch.nn.BCELoss()
# 1D convolutions
self.conv1 = torch.nn.Conv1d(3, 50, 3, bias=args.use_bias)
self.bn0 = torch.nn.BatchNorm1d(1)
self.bn1 = torch.nn.BatchNorm1d(50)
self.bn2 = torch.nn.BatchNorm1d(1)
self.fc = torch.nn.Linear(24900, 1)
print(num_entities, num_relations)
def init(self):
xavier_normal_(self.w_relation.weight.data)
def forward(self, embedding, triplets):
e1 = triplets[:, 0]
e2 = triplets[:, 2]
rel = triplets[:, 1]
batch_size = len(triplets)
e1_embedded = embedding[e1]
e2_embedded = embedding[e2]
rel_embedded = self.w_relation(rel)
stacked_inputs = torch.stack([e1_embedded, rel_embedded, e2_embedded])
# stacked_inputs = self.bn0(stacked_inputs)
x = self.inp_drop(stacked_inputs)
x = self.conv1(x.transpose(0, 1))
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
x = x.view(batch_size, -1)
x = self.fc(x)
x = self.hidden_drop(x)
x = self.bn2(x)
x = F.relu(x)
# x = torch.mm(x, self.emb_e.weight.transpose(1,0))
# x += self.b.expand_as(x)
pred = torch.sigmoid(x)
return pred.squeeze(1)
def evaluate(self, embedding, e1, rel):
batch_size = e1.shape[0]
e1_embedded = embedding[e1].view(-1, 1, 10, 20)
rel_embedded = self.w_relation(rel).view(-1, 1, 10, 20)
stacked_inputs = torch.cat([e1_embedded, rel_embedded], 2)
stacked_inputs = self.bn0(stacked_inputs)
x = self.inp_drop(stacked_inputs)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
x = x.view(batch_size, -1)
x = self.fc(x)
x = self.hidden_drop(x)
x = self.bn2(x)
x = F.relu(x)
x = torch.mm(x, embedding.t())
# x = torch.mm(x, self.emb_e.weight.transpose(1,0))
# x += self.b.expand_as(x)
x += self.b
pred = torch.sigmoid(x)
return pred
class ConvTransE(nn.Module):
def __init__(self, num_entities, num_relations, args):
"""
Difference from ConvE: no reshaping after stacking e_1 and e_r
"""
super(ConvTransE, self).__init__()
bert_dims = 1024
self.no_cuda = args.no_cuda
if args.bert_concat or args.tying:
emb_dim = args.embedding_dim + bert_dims
elif args.bert_mlp:
emb_dim = 600
else:
emb_dim = args.embedding_dim
if args.gcn_type == "MultiHeadGATLayer":
num_heads = 8
emb_dim = args.embedding_dim * num_heads + bert_dims
self.embedding_dim = emb_dim
self.w_relation = torch.nn.Embedding(num_relations, emb_dim, padding_idx=0)
self.inp_drop = torch.nn.Dropout(args.input_dropout)
self.hidden_drop = torch.nn.Dropout(args.dropout)
self.feature_map_drop = torch.nn.Dropout(args.feature_map_dropout)
kernel_size = 5
self.channels = 200
self.conv1 = nn.Conv1d(2, self.channels, kernel_size, stride=1, padding= int(math.floor(kernel_size/2)))
        # kernel size is odd, so padding = math.floor(kernel_size/2) keeps the output width equal to emb_dim
self.bn0 = torch.nn.BatchNorm1d(2)
self.bn1 = torch.nn.BatchNorm1d(self.channels)
self.bn2 = torch.nn.BatchNorm1d(emb_dim)
self.fc = torch.nn.Linear(self.channels * emb_dim, emb_dim)
self.loss = torch.nn.BCELoss()
self.cur_embedding = None
def init(self):
xavier_normal_(self.w_relation.weight.data)
def forward(self, e1, rel, target):
embedding = self.cur_embedding
if not self.no_cuda:
embedding = embedding.to(torch.cuda.current_device())
batch_size = e1.shape[0]
e1 = e1.unsqueeze(1)
rel = rel.unsqueeze(1)
e1_embedded = embedding[e1]
rel_embedded = self.w_relation(rel)
stacked_inputs = torch.cat([e1_embedded, rel_embedded], 1)
stacked_inputs = self.bn0(stacked_inputs)
x = self.inp_drop(stacked_inputs)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
x = x.view(batch_size, -1)
x = self.fc(x)
x = self.hidden_drop(x)
x = self.bn2(x)
x = F.relu(x)
x = torch.mm(x, embedding.t())
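        # score the transformed (e1, rel) representation against every node
        # embedding at once; pred has shape (batch_size, num_nodes)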
pred = torch.sigmoid(x)
if target is None:
return pred
else:
return self.loss(pred, target)
| commonsense-kg-completion-master | src/decoder.py |
from src import reader_utils
import os
import sys
data_path = "data/atomic/"
filename = sys.argv[1]
with open(os.path.join(data_path, filename)) as f:
data = f.readlines()
edge_dict = {}
for inst in data:
inst = inst.strip()
if inst:
inst = inst.split('\t')
src, rel, tgt = inst
src = reader_utils.preprocess_atomic_sentence(src).replace("-", " ")
tgt = reader_utils.preprocess_atomic_sentence(tgt).replace("-", " ")
if src and tgt:
if (src, rel) in edge_dict:
edge_dict[(src, rel)].add(tgt)
else:
edge_dict[(src, rel)] = set([tgt])
out_lines = []
for k, v in edge_dict.items():
if len(v) > 1 and "none" in v:
edge_dict[k].remove("none")
out_lines.append([k[0] + "\t" + k[1] + "\t" + e2 + "\n" for e2 in edge_dict[k]])
out_lines = [line for sublist in out_lines for line in sublist]
with open(os.path.join(data_path, filename.replace(".txt", ".preprocessed.txt")), 'w') as f:
f.writelines(out_lines)
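# Note: input lines are expected to be tab-separated "subject<TAB>relation<TAB>target"
# triples; a "none" target is dropped only when the same (subject, relation) pair
# also has at least one other target.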
| commonsense-kg-completion-master | src/preprocess_atomic.py |
import numpy as np
import torch
import math
import json
import logging
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#######################################################################
# Utility functions for evaluation
#######################################################################
def sort_and_rank(score, target):
sorted, indices = torch.sort(score, dim=1, descending=True)
indices = torch.nonzero(indices == target.view(-1, 1))
indices = indices[:, 1].view(-1)
return indices
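# Illustrative example: for score = [[0.1, 0.9, 0.5]] and target = [2], the
# descending order of indices is [1, 2, 0], so the returned (0-based) rank is 1.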
def get_filtered_ranks(score, target, batch_a, batch_r, e1_to_multi_e2, perturbed="subj"):
filtered_scores = score.clone().detach()
for i, t in enumerate(target):
filter_ents = e1_to_multi_e2[(batch_a[i].item(), batch_r[i].item())]
# these filters contain ALL labels
target_value = filtered_scores[i][t].clone()
# zero all known cases => corresponds to the filtered setting
filtered_scores[i][filter_ents] = 0.0
assert t in filter_ents
        # write back the saved values
filtered_scores[i][t] = target_value
return sort_and_rank(filtered_scores, target)
def perturb_and_get_rank(model, embedding, w, a, r, b, e1_to_multi_e2,
num_entity, batch_size=128, perturbed="subj"):
"""
Perturb one element in the triplets
"""
num_triples = len(a)
n_batch = math.ceil(num_triples / batch_size)
gold_scores = []
ranks = []
filtered_ranks = []
for idx in range(n_batch):
print("batch {} / {}".format(idx, n_batch), end="\r")
batch_start = idx * batch_size
batch_end = min(num_triples, (idx + 1) * batch_size)
batch_a = a[batch_start: batch_end]
batch_r = r[batch_start: batch_end]
emb_ar = embedding[batch_a] * w[batch_r]
emb_ar = emb_ar.transpose(0, 1).unsqueeze(2) # size: D x E x 1
emb_c = embedding.transpose(0, 1).unsqueeze(1) # size: D x 1 x V
# out-prod and reduce sum
out_prod = torch.bmm(emb_ar, emb_c) # size D x E x V
score = torch.sum(out_prod, dim=0) # size E x V
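        # this is a DistMult-style score sum_d e_a[d] * w_r[d] * e_b[d], computed
        # for every candidate entity b in a single batched matrix product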
score = torch.sigmoid(score)
target = b[batch_start: batch_end]
gold_score = torch.FloatTensor([score[i][idx] for i, idx in enumerate(target)])
ranks.append(sort_and_rank(score, target))
gold_scores.append(gold_score)
filtered_ranks.append(get_filtered_ranks(score, target, batch_a, batch_r, e1_to_multi_e2, perturbed))
return torch.cat(ranks), torch.cat(filtered_ranks), torch.cat(gold_scores)
def perturb_and_get_rank_conv(model, embedding, w, a, r, b, e1_to_multi_e2,
num_entity, batch_size=128, perturbed="subj"):
"""
Perturb one element in the triplets for a convolution-based decoder
"""
num_triples = len(a)
n_batch = math.ceil(num_triples / batch_size)
gold_scores = []
ranks = []
filtered_ranks = []
for idx in range(n_batch):
print("batch {} / {}".format(idx, n_batch), end="\r")
batch_start = idx * batch_size
batch_end = min(num_triples, (idx + 1) * batch_size)
batch_a = a[batch_start: batch_end]
batch_r = r[batch_start: batch_end]
with torch.no_grad():
score = model.calc_score(batch_a, batch_r)
target = b[batch_start: batch_end]
gold_score = torch.FloatTensor([score[i][idx] for i, idx in enumerate(target)])
ranks.append(sort_and_rank(score, target))
gold_scores.append(gold_score)
filtered_ranks.append(get_filtered_ranks(score, target, batch_a, batch_r, e1_to_multi_e2, perturbed))
return torch.cat(ranks), torch.cat(filtered_ranks), torch.cat(gold_scores)
def ranking_and_hits(test_graph, model, test_triplets, e1_to_multi_e2, network, fusion="graph-only",
sim_relations=False, write_results=False, debug=False, epoch=None):
print(model)
s = test_triplets[:, 0]
r = test_triplets[:, 1]
o = test_triplets[:, 2]
if fusion == "sum":
gembedding = model.evaluate(test_graph)
init_embedding = model.rgcn.layers[0].embedding.weight
with torch.no_grad():
embedding = gembedding + init_embedding
elif fusion == "init":
embedding = model.rgcn.layers[0].embedding.weight
elif fusion == "graph-only":
embedding = model.evaluate(test_graph, epoch)
if sim_relations:
rel_offset = model.num_rels - 1
else:
rel_offset = model.num_rels
#model.decoder.module.cur_embedding = embedding
model.decoder.cur_embedding = embedding
hits_left = []
hits_right = []
hits = []
ranks = []
ranks_left = []
ranks_right = []
scores = []
node_mrr = {}
for i in range(10):
hits_left.append([])
hits_right.append([])
hits.append([])
batch_size = 128
if debug:
end = min(5000, len(test_triplets))
else:
end = len(test_triplets)
# for i in range(0, len(test_triplets), batch_size):
for i in range(0, end, batch_size):
e1 = s[i: i + batch_size]
e2 = o[i: i + batch_size]
rel = r[i: i + batch_size]
rel_reverse = rel + rel_offset
cur_batch_size = len(e1)
e2_multi1 = [torch.LongTensor(e1_to_multi_e2[(e.cpu().item(), r.cpu().item())]) for e, r in zip(e1, rel)]
e2_multi2 = [torch.LongTensor(e1_to_multi_e2[(e.cpu().item(), r.cpu().item())]) for e, r in
zip(e2, rel_reverse)]
with torch.no_grad():
pred1 = model.calc_score(e1, rel)
pred2 = model.calc_score(e2, rel_reverse)
pred1, pred2 = pred1.data, pred2.data
scores.append(pred1)
e1, e2 = e1.data, e2.data
for j in range(0, cur_batch_size):
# these filters contain ALL labels
filter1 = e2_multi1[j].long()
filter2 = e2_multi2[j].long()
# save the prediction that is relevant
target_value1 = pred1[j, e2[j].item()].item()
target_value2 = pred2[j, e1[j].item()].item()
            # zero all known cases (these are not interesting)
# this corresponds to the filtered setting
pred1[j][filter1] = 0.0
pred2[j][filter2] = 0.0
# EXP: also remove self-connections
pred1[j][e1[j].item()] = 0.0
pred2[j][e2[j].item()] = 0.0
            # write back the saved values
pred1[j][e2[j]] = target_value1
pred2[j][e1[j]] = target_value2
# sort and rank
max_values, argsort1 = torch.sort(pred1, 1, descending=True)
max_values, argsort2 = torch.sort(pred2, 1, descending=True)
for j in range(0, cur_batch_size):
# find the rank of the target entities
# rank1 = np.where(argsort1[i]==e2[i].item())[0][0]
# rank2 = np.where(argsort2[i]==e1[i].item())[0][0]
rank1 = (argsort1[j] == e2[j]).nonzero().cpu().item()
rank2 = (argsort2[j] == e1[j]).nonzero().cpu().item()
# rank+1, since the lowest rank is rank 1 not rank 0
ranks.append(rank1 + 1)
ranks_left.append(rank1 + 1)
ranks.append(rank2 + 1)
ranks_right.append(rank2 + 1)
node1 = network.graph.nodes[e1[j].cpu().item()]
node2 = network.graph.nodes[e2[j].cpu().item()]
if node1 not in node_mrr:
node_mrr[node1] = []
if node2 not in node_mrr:
node_mrr[node2] = []
node_mrr[node1].append(rank1)
node_mrr[node2].append(rank2)
for hits_level in range(0, 10):
if rank1 <= hits_level:
hits[hits_level].append(1.0)
hits_left[hits_level].append(1.0)
else:
hits[hits_level].append(0.0)
hits_left[hits_level].append(0.0)
if rank2 <= hits_level:
hits[hits_level].append(1.0)
hits_right[hits_level].append(1.0)
else:
hits[hits_level].append(0.0)
hits_right[hits_level].append(0.0)
for k in range(0, 10):
logging.info('Hits left @{0}: {1}'.format(k + 1, np.mean(hits_left[k])))
logging.info('Hits right @{0}: {1}'.format(k + 1, np.mean(hits_right[k])))
logging.info('Hits @{0}: {1}'.format(k + 1, np.mean(hits[k])))
logging.info('Mean rank left: {0}'.format(np.mean(ranks_left)))
logging.info('Mean rank right: {0}'.format(np.mean(ranks_right)))
logging.info('Mean rank: {0}'.format(np.mean(ranks)))
logging.info('Mean reciprocal rank left: {0}'.format(np.mean(1. / np.array(ranks_left))))
logging.info('Mean reciprocal rank right: {0}'.format(np.mean(1. / np.array(ranks_right))))
logging.info('Mean reciprocal rank: {0}'.format(np.mean(1. / np.array(ranks))))
if write_results:
write_topk_tuples(torch.cat(scores, dim=0).cpu().numpy(), test_triplets, network)
# plot_degree_mrr(node_mrr)
return np.mean(1. / np.array(ranks))
def plot_degree_mrr(node_ranks):
degree_rank = {}
for node, rank in node_ranks.items():
node_degree = node.get_degree()
if node_degree not in degree_rank:
degree_rank[node_degree] = []
degree_rank[node_degree].append(sum(rank) / len(rank))
degrees = []
ranks = []
for k in sorted(degree_rank.keys()):
if k < 20:
# degrees.append(k)
# ranks.append(sum(degree_rank[k])/len(degree_rank[k]))
for rank in degree_rank[k]:
if rank < 100:
degrees.append(k)
ranks.append(rank)
fig, ax = plt.subplots()
ax.scatter(degrees, ranks, marker='.')
ax.set(xlabel="degree", ylabel="mean ranks")
ax.grid()
fig.savefig("comet_cn_degree_ranks.png")
def write_topk_tuples(scores, input_prefs, network, k=50):
out_lines = []
argsort = [np.argsort(-1 * np.array(score)) for score in np.array(scores)]
for i, sorted_scores in enumerate(argsort):
pref = input_prefs[i]
e1 = pref[0].cpu().item()
rel = pref[1].cpu().item()
e2 = pref[2].cpu().item()
cur_point = {}
cur_point['gold_triple'] = {}
cur_point['gold_triple']['e1'] = network.graph.nodes[e1].name
cur_point['gold_triple']['e2'] = network.graph.nodes[e2].name
cur_point['gold_triple']['relation'] = network.graph.relations[rel].name
topk_indices = sorted_scores[:k]
topk_tuples = [network.graph.nodes[elem] for elem in topk_indices]
# if golds[i] in topk_tuples:
# topk_indices = argsort[i][:k+1]
# topk_tuples = [input_batch[i][elem] for elem in topk_indices if input_batch[i][elem]!=golds[i]]
cur_point['candidates'] = []
for j, node in enumerate(topk_tuples):
tup = {}
tup['e1'] = network.graph.nodes[e1].name
tup['e2'] = node.name
tup['relation'] = network.graph.relations[rel].name
tup['score'] = str(scores[i][topk_indices[j]])
cur_point['candidates'].append(tup)
out_lines.append(cur_point)
with open("topk_candidates.jsonl", 'w') as f:
for entry in out_lines:
json.dump(entry, f)
f.write("\n")
| commonsense-kg-completion-master | src/evaluation_utils.py |
__author__ = "chaitanya"
from collections import defaultdict
class Graph:
def __init__(self, directed=True):
self.relations = defaultdict()
self.nodes = defaultdict()
self.node2id = {}
self.relation2id = {}
self.edges = {}
self.edgeCount = 0
self.directed = directed
#self.add_node("UNK-NODE")
#self.add_relation("UNK-REL")
def add_edge(self, node1, node2, rel, label, weight, uri=None):
"""
:param node1: source node
:param node2: target node
:param rel: relation
:param label: relation
:param weight: weight of edge from [0.0, 1.0]
:param uri: uri of edge
:return: Edge object
"""
new_edge = Edge(node1, node2, rel, label, weight, uri)
if node2 in self.edges[node1]:
self.edges[node1][node2].append(new_edge)
else:
self.edges[node1][node2] = [new_edge]
# node1.neighbors.add(node2)
node2.neighbors.add(node1)
self.edgeCount += 1
if (self.edgeCount + 1) % 10000 == 0:
print("Number of edges: %d" % self.edgeCount, end="\r")
return new_edge
def add_node(self, name):
"""
        :param name: name of the node to create
        :return: integer id assigned to the new node
"""
new_node = Node(name, len(self.nodes))
self.nodes[len(self.nodes)] = new_node
self.node2id[new_node.name] = len(self.nodes) - 1
self.edges[new_node] = {}
return self.node2id[new_node.name]
def add_relation(self, name):
"""
        :param name: name of the relation to create
        :return: integer id assigned to the new relation
"""
new_relation = Relation(name, len(self.relations))
self.relations[len(self.relations)] = new_relation
self.relation2id[new_relation.name] = len(self.relations) - 1
return self.relation2id[new_relation.name]
def find_node(self, name):
"""
        :param name: node name to look up
        :return: node id if the node exists, -1 otherwise
"""
if name in self.node2id:
return self.node2id[name]
else:
return -1
def find_relation(self, name):
"""
        :param name: relation name to look up
        :return: relation id if the relation exists, -1 otherwise
"""
if name in self.relation2id:
return self.relation2id[name]
else:
return -1
def is_connected(self, node1, node2):
"""
        :param node1: source node
        :param node2: target node
        :return: True if at least one edge connects node1 to node2
"""
if node1 in self.edges:
if node2 in self.edges[node1]:
return True
return False
def node_exists(self, node):
"""
:param node: node to check
:return: Boolean value
"""
if node in self.nodes.values():
return True
return False
def find_all_connections(self, relation):
"""
:param relation:
:return: list of all edges representing this relation
"""
relevant_edges = []
        for edge in self.iter_edges():
if edge.relation == relation:
relevant_edges.append(edge)
return relevant_edges
def iter_nodes(self):
return list(self.nodes.values())
def iter_relations(self):
return list(self.relations.values())
def iter_edges(self):
for node in self.edges:
for edge_list in self.edges[node].values():
for edge in edge_list:
yield edge
def __str__(self):
        return "\n".join(str(node) for node in self.nodes.values())
class Node:
def __init__(self, name, id, lang='en'):
self.name = name
self.id = id
self.lang = lang
self.neighbors = set([])
def get_neighbors(self):
"""
        :return: set of neighboring nodes
"""
return self.neighbors
def get_degree(self):
"""
        :return: degree of the node (number of neighbors)
"""
return len(self.neighbors)
def __str__(self):
out = ("Node #%d : %s" % (self.id, self.name))
return out
class Relation:
def __init__(self, name, id):
self.name = name
self.id = id
class Edge:
def __init__(self, node1, node2, relation, label, weight, uri):
self.src = node1
self.tgt = node2
self.relation = relation
self.label = label
self.weight = weight
self.uri = uri
def __str__(self):
out = ("%s: %s --> %s" % (self.relation.name, self.src.name, self.tgt.name))
return out
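# Minimal usage sketch (illustrative only, not executed anywhere in this module):
#   g = Graph()
#   a, b = g.add_node("person"), g.add_node("smile")
#   r = g.add_relation("CapableOf")
#   g.add_edge(g.nodes[a], g.nodes[b], g.relations[r], "CapableOf", 1.0)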
| commonsense-kg-completion-master | src/graph.py |
# Main script for experimenting with training on a subgraph
from collections import Counter
import argparse
import numpy as np
import sys
import os
import json
import time
import random
import torch
import torch.nn as nn
from model import LinkPredictor
from reader import AtomicTSVReader, ConceptNetTSVReader, FB15kReader
import utils
import reader_utils
import evaluation_utils
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def set_seeds(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def load_data(dataset, reader_cls, data_dir, sim_relations):
train_network = reader_cls(dataset)
dev_network = reader_cls(dataset)
test_network = reader_cls(dataset)
train_network.read_network(data_dir=data_dir, split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k * v for k, v in degree_counter.items()]) / sum([v for k, v in degree_counter.items()])
print("Average Degree: ", avg_degree)
dev_network.read_network(data_dir=data_dir, split="valid", train_network=train_network)
test_network.read_network(data_dir=data_dir, split="test", train_network=train_network)
word_vocab = train_network.graph.node2id
# Add sim nodes
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return train_data, valid_data, test_data, valid_labels, test_labels, train_network
def get_model_name(args):
name = '_subgraph_model_state.pth'
name = "_" + args.gcn_type + "_" + args.decoder + name
if args.sim_relations:
name = "_sim_relations" + name
if args.sim_sim:
name = "_sim-sim" + name
if args.bert_concat:
name = "_bert_concat" + name
if args.bert_mlp:
name = "_bert_mlp" + name
if args.tying:
name = "_tying" + name
if args.bert_sum:
name = "_bert_sum" + name
if args.input_layer == "bert":
name = "_inp-bert" + name
model_state_file = args.dataset + name
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
model_state_file = os.path.join(args.output_dir, model_state_file)
return model_state_file
def main(args):
set_seeds(args.seed)
# load graph data
if args.dataset == "FB15K-237":
dataset_cls = FB15kReader
data_dir = "data/FB15k-237/"
elif args.dataset == "atomic":
dataset_cls = AtomicTSVReader
data_dir = "data/atomic/"
elif args.dataset == "conceptnet":
dataset_cls = ConceptNetTSVReader
data_dir = "data/ConceptNet/"
else:
raise ValueError("Invalid option for dataset.")
# Store entity-wise dicts for filtered metrics
train_data, valid_data, test_data, valid_labels, test_labels, train_network = load_data(args.dataset,
dataset_cls,
data_dir,
args.sim_relations)
num_nodes = len(train_network.graph.nodes)
num_rels = len(train_network.graph.relations)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
# for filtered ranking
all_e1_to_multi_e2, all_e2_to_multi_e1 = reader_utils.create_entity_dicts(all_tuples, num_rels, args.sim_relations)
# for training
train_e1_to_multi_e2, train_e2_to_multi_e1 = reader_utils.create_entity_dicts(train_data.tolist(), num_rels,
args.sim_relations)
# the below dicts `include` sim relations
sim_train_e1_to_multi_e2, sim_train_e2_to_multi_e1 = reader_utils.create_entity_dicts(train_data.tolist(), num_rels)
# check cuda
use_cuda = torch.cuda.is_available()
if use_cuda and not args.no_cuda:
torch.cuda.set_device(args.gpu)
cpu_decoding = args.cpu_decoding
# atomic graph is much larger, so we perform evaluation on cpu
cpu_eval = True if args.dataset == "atomic" else False
# create model
model = LinkPredictor(num_nodes,
num_rels,
args,
use_cuda=use_cuda)
# build graph
graph_train_data = train_data
test_graph, test_rel, test_norm = utils.build_test_graph(num_nodes, num_rels, graph_train_data)
test_deg = test_graph.in_degrees(range(test_graph.number_of_nodes())).float().view(-1, 1)
test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)
test_rel = torch.from_numpy(test_rel).view(-1, 1)
test_norm = torch.from_numpy(test_norm).view(-1, 1)
# transfer graph data to gpu
if use_cuda and not args.no_cuda and not cpu_decoding:
test_node_id = test_node_id.cuda()
test_norm = test_norm.cuda()
test_rel = test_rel.cuda()
# validation and testing triplets
valid_data = torch.LongTensor(valid_data)
test_data = torch.LongTensor(test_data)
if use_cuda and not args.no_cuda and not cpu_eval:
valid_data = valid_data.cuda()
test_data = test_data.cuda()
test_graph.ndata.update({'id': test_node_id, 'norm': test_norm})
test_graph.edata['type'] = test_rel
if use_cuda and not args.no_cuda:
# model = nn.DataParallel(model, device_ids=[0,1])
model = model.cuda()
model_state_file = get_model_name(args)
# writer = SummaryWriter("runs/" + model_state_file.replace(".pth",".log"))
# check if only evaluation needs to be performed
if args.eval_only:
if args.load_model:
model_state_file = args.load_model
else:
print("Please provide model path for evaluation (--load_model)")
sys.exit(0)
checkpoint = torch.load(model_state_file)
if use_cuda and not args.no_cuda and cpu_eval:
model.cpu()
test_graph.ndata['id'] = test_graph.ndata['id'].cpu()
test_graph.ndata['norm'] = test_graph.ndata['norm'].cpu()
test_graph.edata['type'] = test_graph.edata['type'].cpu()
model.decoder.no_cuda = True
model.eval()
model.load_state_dict(checkpoint['state_dict'])
print(model)
print("================DEV=================")
mrr = evaluation_utils.ranking_and_hits(test_graph, model, valid_data, all_e1_to_multi_e2, train_network,
fusion="graph-only", sim_relations=args.sim_relations,
write_results=args.write_results, debug=args.debug)
print("================TEST================")
mrr = evaluation_utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, train_network,
fusion="graph-only", sim_relations=args.sim_relations, debug=args.debug)
sys.exit(0)
if os.path.isfile(model_state_file):
print(model_state_file)
overwrite = input('Model already exists. Overwrite? Y = yes, N = no\n')
if overwrite.lower() == 'n':
print("Quitting")
sys.exit(0)
elif overwrite.lower() != 'y':
raise ValueError("Invalid Option")
# build adj list and calculate degrees for sampling
adj_list, degrees, sparse_adj_matrix, rel = utils.get_adj_and_degrees(num_nodes, num_rels, train_data)
# remove sim edges from sampling_edge_ids (we sample from the original graph and then densify it)
if args.sim_relations:
sim_edge_ids = np.where(graph_train_data[:, 1] == num_rels - 1)[0]
sampling_edge_ids = np.delete(np.arange(len(graph_train_data)), sim_edge_ids)
else:
sampling_edge_ids = None
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
forward_time = []
backward_time = []
# training loop
print("Starting training...")
epoch = 0
best_mrr = 0
while True:
model.train()
epoch += 1
cur_train_data = graph_train_data[:]
# build dgl graph
g, node_id, edge_type, node_norm, data, labels = \
utils.generate_sampled_graph_and_labels(
cur_train_data, args.graph_batch_size,
num_rels, adj_list, degrees, args.negative_sample, args.sim_sim, args.sim_relations,
sim_train_e1_to_multi_e2, sampling_edge_ids)
node_id_copy = np.copy(node_id)
node_dict = {v: k for k, v in dict(enumerate(node_id_copy)).items()}
# set node/edge feature
node_id = torch.from_numpy(node_id).view(-1, 1)
edge_type = torch.from_numpy(edge_type)
node_norm = torch.from_numpy(node_norm).view(-1, 1)
if use_cuda and not args.no_cuda:
node_id = node_id.cuda()
edge_type, node_norm = edge_type.cuda(), node_norm.cuda()
g.ndata.update({'id': node_id, 'norm': node_norm})
g.edata['type'] = edge_type
batch_size = args.decoder_batch_size
e1_keys = list(train_e1_to_multi_e2.keys())
sub_e1_keys = {}
# Add inverse edges to training samples
src, dst = np.concatenate((cur_train_data[:, 0], cur_train_data[:, 2])), \
np.concatenate((cur_train_data[:, 2], cur_train_data[:, 0]))
rel = cur_train_data[:, 1]
rel = np.concatenate((rel, rel + num_rels))
cur_train_data = np.stack((src, rel, dst)).transpose()
# The loop below constructs a dict for the decoding step
# with the key (src, rel) and the value as the list of nodes present in the original graph
# where the source and target nodes both belong to the list of sampled nodes in subgraph
for e in cur_train_data:
rel = e[1]
# Don't use sim relations for decoding
if args.sim_relations:
if rel == num_rels - 1 or rel == (num_rels * 2) - 1:
continue
elif rel >= num_rels:
rel -= 1
if e[0] in node_id_copy and e[2] in node_id_copy:
subgraph_src_idx = node_dict[e[0]]
subgraph_tgt_idx = node_dict[e[2]]
if (subgraph_src_idx, rel) not in sub_e1_keys:
sub_e1_keys[(subgraph_src_idx, rel)] = [subgraph_tgt_idx]
else:
sub_e1_keys[(subgraph_src_idx, rel)].append(subgraph_tgt_idx)
key_list = list(sub_e1_keys.keys())
random.shuffle(key_list)
cum_loss = 0.0
for i in range(0, len(key_list), batch_size):
optimizer.zero_grad()
# compute graph embeddings
graph_embeddings = model.get_graph_embeddings(g, epoch)
#model.decoder.module.cur_embedding = graph_embeddings
model.decoder.cur_embedding = graph_embeddings
batch = key_list[i: i + batch_size]
# Don't train with batches of size 1 and always set batch_size > 1 since batch norm
# fails with batch_size=1
if len(batch) == 1:
continue
e1 = torch.LongTensor([elem[0] for elem in batch])
rel = torch.LongTensor([elem[1] for elem in batch])
# e2 -> list of target nodes in subgraph
e2 = [sub_e1_keys[(k[0], k[1])] for k in batch]
batch_len = len(batch)
if use_cuda and not args.no_cuda and not cpu_decoding:
target = torch.cuda.FloatTensor(batch_len, node_id_copy.shape[0]).fill_(0)
e1 = e1.cuda()
rel = rel.cuda()
else:
target = torch.zeros((batch_len, node_id_copy.shape[0]))
# construct target tensor
for j, inst in enumerate(e2):
target[j, inst] = 1.0
# perform label smoothing
target = ((1.0 - args.label_smoothing_epsilon) * target) + (1.0 / target.size(1))
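            # e.g. (illustrative) with epsilon=0.1 and 3000 nodes in the sampled
            # subgraph, gold entries become ~0.9003 and all other entries ~0.0003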
if cpu_decoding:
graph_embeddings = graph_embeddings.cpu()
model.decoder.cpu()
model.decoder.no_cuda = True
t0 = time.time()
loss = model.get_score(e1, rel, target, graph_embeddings)
loss = torch.mean(loss)
cum_loss += loss.cpu().item()
t1 = time.time()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm) # clip gradients
optimizer.step()
t2 = time.time()
forward_time.append(t1 - t0)
backward_time.append(t2 - t1)
del graph_embeddings, target, batch, loss, e1, rel, e2
# the below make training very slow
# gc.collect()
# torch.cuda.empty_cache()
print("Epoch {:04d} | Loss {:.4f} | Best MRR {:.4f} | Forward {:.4f}s | Backward {:.4f}s".
format(epoch, cum_loss, best_mrr, forward_time[-1], backward_time[-1]))
# writer.add_scalar('data/loss', cum_loss , epoch)
# Save model every 100 epochs
# if epoch + 1 % 100==0:
# print("saving current model..")
# torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
# model_state_file)
# validation
if epoch % args.evaluate_every == 0:
# perform validation on CPU when full graph is too large
if use_cuda and not args.no_cuda and cpu_eval:
model.cpu()
test_graph.ndata['id'] = test_graph.ndata['id'].cpu()
test_graph.ndata['norm'] = test_graph.ndata['norm'].cpu()
test_graph.edata['type'] = test_graph.edata['type'].cpu()
model.decoder.no_cuda = True
model.eval()
print("start eval")
print("===========DEV============")
mrr = evaluation_utils.ranking_and_hits(test_graph, model, valid_data, all_e1_to_multi_e2,
train_network, fusion="graph-only", sim_relations=args.sim_relations,
debug=args.debug, epoch=epoch)
# writer.add_scalar('data/mrr', mrr, epoch)
# save best model
# torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
# model_state_file)
if mrr < best_mrr:
if epoch >= args.n_epochs:
break
else:
best_mrr = mrr
print("[saving best model so far]")
torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
model_state_file)
metrics = {"best_mrr": best_mrr,
"cum_loss": cum_loss
}
with open(os.path.join(args.output_dir, 'metrics.json'), 'w') as f:
f.write(json.dumps(metrics))
# transfer graph back to gpu device
if use_cuda and not args.no_cuda and cpu_eval:
model.cuda()
test_graph.ndata['id'] = test_graph.ndata['id'].cuda()
test_graph.ndata['norm'] = test_graph.ndata['norm'].cuda()
test_graph.edata['type'] = test_graph.edata['type'].cuda()
model.decoder.no_cuda = False
print("training done")
print("Mean forward time: {:4f}s".format(np.mean(forward_time)))
print("Mean Backward time: {:4f}s".format(np.mean(backward_time)))
# writer.export_scalars_to_json("./all_scalars.json")
# writer.close()
print("\nStart testing")
# use best model checkpoint
checkpoint = torch.load(model_state_file)
model.eval()
model.load_state_dict(checkpoint['state_dict'])
print("Using best epoch: {}".format(checkpoint['epoch']))
evaluation_utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, train_network, fusion="graph-only",
sim_relations=args.sim_relations)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Options for Commonsense Knowledge Base Completion')
# General
parser.add_argument("-d", "--dataset", type=str, required=True,
help="dataset to use")
parser.add_argument("--sim_relations", action='store_true', default=False,
help="add similarity edges when constructing graph")
parser.add_argument("--sim_sim", action='store_true', default=False,
help="add sim-sim edges to graph")
parser.add_argument("--load_model", type=str, default=None, help="Path to model file")
parser.add_argument("--decoder", type=str, default='ConvTransE', help="decoder used to compute scores")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of minimum training epochs")
parser.add_argument("--evaluate-every", type=int, default=10,
help="perform evaluation every n epochs")
parser.add_argument("--output_dir", type=str, required=False, default="saved_models",
help="output directory to store metrics and model file")
parser.add_argument("--bert_concat", action='store_true', default=False,
help="concat bert embeddings before decoder layer")
parser.add_argument("--bert_sum", action='store_true', default=False,
help="sum bert embeddings before decoder layer")
parser.add_argument("--bert_mlp", action='store_true', default=False,
help="use mlp after concatenated bert+gcn embeddings before decoder layer")
parser.add_argument("--tying", action='store_true', default=False,
help="tie input bert layer to gcn with concatenated tensor before decoding")
parser.add_argument("--cpu_decoding", action='store_true', default=False,
help="perform decoding on cpu")
parser.add_argument("--eval_only", action='store_true', default=False,
help="only evaluate using an existing model")
parser.add_argument("--write_results", action='store_true', default=False,
help="write top-k candidate tuples for evaluation set to file")
parser.add_argument("--eval-batch-size", type=int, default=500,
help="batch size when evaluating")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--no_cuda", action='store_true', default=False,
help="prevents using cuda")
parser.add_argument("--seed", type=int, default=42,
help="random seed value")
parser.add_argument("--debug", action='store_true', default=False,
help="use fewer eval instances in debugging mode")
# GCN
parser.add_argument("--init_embedding_dim", type=int, default=200,
help="embedding dimension of input to GCN")
parser.add_argument("--input_layer", type=str, default="lookup",
help="initialization layer for GCN")
parser.add_argument("--n-bases", type=int, default=100,
help="number of weight blocks for each relation (for RGCN)")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("--gcn_type", type=str, default="WGCNAttentionLayer",
help="type of GCN to be used (class name)")
# Miscellaneous Hyperparameters
parser.add_argument("--lr", type=float, default=1e-4,
help="learning rate")
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout probability")
parser.add_argument("--input_dropout", type=float, default=0.2,
help="input dropout")
parser.add_argument("--feature_map_dropout", type=float, default=0.2,
help="feature map dropout")
parser.add_argument("--label_smoothing_epsilon", type=float, default=0.1,
help="epsilon for performing label smoothing")
parser.add_argument("--embedding_dim", type=int, default=200,
help="output embedding dimension of GCN")
parser.add_argument("--n-hidden", type=int, default=200,
help="number of hidden units")
parser.add_argument("--use_bias", action='store_true', default=False,
help="use bias")
parser.add_argument("--regularization", type=float, default=0.1,
help="regularization weight")
parser.add_argument("--grad-norm", type=float, default=1.0,
help="norm to clip gradient to")
parser.add_argument("--graph-batch-size", type=int, default=30000,
help="number of edges to sample in each iteration")
parser.add_argument("--negative-sample", type=int, default=0,
help="number of negative samples per positive sample")
parser.add_argument("--decoder_batch_size", type=int, default=128,
help="batch size for decoder")
parser.add_argument("--layer_norm", action='store_true', default=False,
help="use layer normalization on embeddings fed to decoder")
args = parser.parse_args()
print(args)
try:
main(args)
except KeyboardInterrupt:
print('Interrupted')
# writer.export_scalars_to_json("./all_scalars.json")
# writer.close()
| commonsense-kg-completion-master | src/run_kbc_subgraph.py |
__author__ = "chaitanya"
import torch
from torch.nn.init import xavier_normal_
import torch.nn as nn
import torch.nn.functional as F
from bert_feature_extractor import BertLayer
from decoder import DistMult, ConvE, ConvTransE, ConvKB
import layers
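# Encoder-decoder link prediction model: a GCN encoder over the training graph plus a
# convolutional decoder (ConvE / ConvTransE / ConvKB / DistMult) that scores
# (entity, relation, entity) triples. BERT node features can optionally be fused with the
# GCN output by concatenation, summation, an MLP, or input tying.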
class LinkPredictor(nn.Module):
def __init__(self, num_nodes, num_rels, args, use_cuda=False):
super(LinkPredictor, self).__init__()
self.rgcn = GCN(num_nodes, num_rels * 2, args, use_cuda)
# Use 2 * num_rels to account for inverse relations
if args.sim_relations:
decoder_rels = (num_rels - 1) * 2
else:
decoder_rels = num_rels * 2
# initialize decoder
if args.decoder == "ConvE":
self.decoder = ConvE(num_nodes, decoder_rels, args)
elif args.decoder == "ConvTransE":
self.decoder = ConvTransE(num_nodes, decoder_rels, args)
elif args.decoder == "ConvKB":
self.decoder = ConvKB(num_nodes, decoder_rels, args)
else:
self.decoder = DistMult(num_nodes, decoder_rels, args)
#self.decoder = nn.DataParallel(self.decoder)
#self.decoder.module.init()
self.decoder.init()
# Model-wide arguments
self.num_rels = num_rels
self.num_nodes = num_nodes
self.use_cuda = use_cuda
self.reg_param = args.regularization
self.input_layer = args.input_layer
self.bert_concat = args.bert_concat
self.bert_sum = args.bert_sum
self.bert_mlp = args.bert_mlp
self.tying = args.tying
self.layer_norm = args.layer_norm
self.bert_dim = 1024
if self.bert_concat:
self.bert_concat_layer = EmbeddingLayer(num_nodes, self.bert_dim, args.dataset, init_bert=True)
if self.bert_sum:
self.bert_concat_layer = EmbeddingLayer(num_nodes, self.bert_dim, args.dataset, init_bert=True)
self.beta = 0.5
if self.bert_mlp:
self.bert_concat_layer = EmbeddingLayer(num_nodes, self.bert_dim, args.dataset, init_bert=True)
self.bmlp = nn.Linear(self.bert_dim + 200, 600)
if self.layer_norm:
self.bert_norm = nn.LayerNorm(self.bert_dim)
self.gcn_norm = nn.LayerNorm(args.embedding_dim)
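    # Gradually expose the BERT features: for the first `epoch_cutoff` epochs only a random
    # subset of feature dimensions (growing linearly with the epoch) is kept; afterwards the
    # full embedding is used.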
def mask_by_schedule(self, tensor, epoch, epoch_cutoff=100):
if epoch < epoch_cutoff:
cuda_check = tensor.is_cuda
if cuda_check:
mask = torch.zeros((tensor.size(0), tensor.size(1)), device='cuda')
else:
mask = torch.zeros((tensor.size(0), tensor.size(1)))
k = int((epoch / epoch_cutoff) * tensor.size(1))
perm = torch.randperm(tensor.size(1))
indices = perm[:k]
mask[:, indices] = 1
return tensor * mask
else:
return tensor
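    # Produce node embeddings for the decoder. Depending on the flags, the GCN output is
    # concatenated with BERT embeddings (bert_concat), passed with them through an MLP
    # (bert_mlp), added to them with a scaling factor (bert_sum), concatenated with the
    # input lookup table (tying), or returned as-is.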
def forward(self, g, epoch=None):
if self.bert_concat:
bert_embs = self.bert_concat_layer.embedding(g.ndata['id'].squeeze(1))
gcn_embs = self.rgcn.forward(g)
if self.layer_norm:
bert_embs = self.bert_norm(bert_embs)
gcn_embs = self.gcn_norm(gcn_embs)
if epoch is not None:
bert_embs = self.mask_by_schedule(bert_embs, epoch)
# Fisher Test for checking importance of graph embeddings:
# rand_indices = torch.randperm(gcn_embs.size(0))
# gcn_embs = gcn_embs[rand_indices, :]
# gcn_embs = torch.zeros((gcn_embs.size(0), gcn_embs.size(1)), device='cuda')
out_embs = torch.cat([bert_embs, gcn_embs], dim=1)
return out_embs
elif self.bert_mlp:
bert_embs = self.bert_concat_layer.embedding(g.ndata['id'].squeeze(1))
gcn_embs = self.rgcn.forward(g)
full_embs = torch.cat([bert_embs, gcn_embs], dim=1)
full_embs_transf = self.bmlp(full_embs)
return full_embs_transf
elif self.bert_sum:
bert_embs = self.bert_concat_layer.embedding(g.ndata['id'].squeeze(1))
gcn_embs = self.rgcn.forward(g)
if self.layer_norm:
bert_embs = self.bert_norm(bert_embs)
gcn_embs = self.gcn_norm(gcn_embs)
return bert_embs + self.beta * gcn_embs
elif self.tying:
init_embs = self.rgcn.layers[0].embedding.weight
gcn_embs = self.rgcn.forward(g)
full_embs = torch.cat([init_embs, gcn_embs], dim=1)
return full_embs
else:
gcn_embs = self.rgcn.forward(g)
return gcn_embs
def evaluate(self, g, epoch=None):
# get embedding and relation weight without grad
with torch.no_grad():
embedding = self.forward(g, epoch)
return embedding
def regularization_loss(self, embedding):
        # the decoder is not wrapped in nn.DataParallel above, so access w_relation directly
        dec_weight = self.decoder.w_relation.weight.pow(2)
if self.rgcn.num_hidden_layers > 0 and self.bert_concat:
return torch.mean(embedding[:, -self.rgcn.layers[-1].out_feat:].pow(2)) + torch.mean(dec_weight)
else:
return torch.mean(embedding.pow(2)) + torch.mean(dec_weight)
def calc_score(self, e1, rel, target=None):
return self.decoder(e1, rel, target)
def get_graph_embeddings(self, g, epoch=None):
embedding = self.forward(g, epoch)
return embedding
def get_score(self, e1, rel, target, embedding):
decoder_score = self.calc_score(e1, rel, target)
# predict_loss = F.binary_cross_entropy(decoder_score, target)
predict_loss = decoder_score
if self.reg_param != 0.0:
reg_loss = self.regularization_loss(embedding)
return predict_loss + self.reg_param * reg_loss
else:
return predict_loss
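# Node embedding lookup table, optionally initialized from precomputed BERT node features
# (via BertLayer.forward_as_init) instead of Xavier initialization.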
class EmbeddingLayer(nn.Module):
def __init__(self, num_nodes, h_dim, dataset=None, init_bert=None):
super(EmbeddingLayer, self).__init__()
self.embedding = torch.nn.Embedding(num_nodes, h_dim, padding_idx=0)
if not init_bert:
xavier_normal_(self.embedding.weight.data)
else:
self.init_with_bert(num_nodes, dataset)
def forward(self, g):
node_id = g.ndata['id'].squeeze(1)
g.ndata['h'] = self.embedding(node_id)
def init_with_bert(self, num_nodes, dataset):
bert_model = BertLayer(dataset)
bert_weights = bert_model.forward_as_init(num_nodes)
self.embedding.load_state_dict({'weight': bert_weights})
# self.embedding.weight.requires_grad = False
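# Generic GCN skeleton: builds an optional input layer, `n_layers` hidden propagation
# layers, and an optional output layer; subclasses decide the concrete layer types.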
class BaseGCN(nn.Module):
def __init__(self, num_nodes, num_rels, args, use_cuda=False):
super(BaseGCN, self).__init__()
self.num_nodes = num_nodes
self.num_rels = num_rels
self.use_cuda = use_cuda
self.input_dim = args.init_embedding_dim
self.embedding_dim = args.embedding_dim
self.h_dim = args.n_hidden
self.out_dim = args.n_hidden
self.num_bases = args.n_bases
self.num_hidden_layers = args.n_layers
self.dropout = args.dropout
self.input_layer = args.input_layer
self.gcn_type = args.gcn_type
self.bias = args.use_bias
# create gcn layers
self.build_model(args.dataset)
def build_model(self, dataset):
self.layers = nn.ModuleList()
# i2h
i2h = self.build_input_layer(dataset, self.input_layer)
if i2h is not None:
self.layers.append(i2h)
# h2h
for idx in range(self.num_hidden_layers):
h2h = self.build_hidden_layer(idx)
self.layers.append(h2h)
# h2o
h2o = self.build_output_layer()
if h2o is not None:
self.layers.append(h2o)
# initialize feature for each node
def create_features(self):
return None
    def build_input_layer(self, dataset, input_layer="lookup"):
        return None
    def build_hidden_layer(self, idx):
        raise NotImplementedError
    def build_output_layer(self):
        return None
def forward(self, g):
for layer in self.layers:
layer(g)
return g.ndata.pop('h')
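# Concrete GCN: the input layer is either a lookup table or BERT-initialized embeddings,
# and each hidden layer is instantiated by class name from the `layers` module
# (e.g. WGCNAttentionLayer).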
class GCN(BaseGCN):
def build_input_layer(self, dataset, input_layer="lookup"):
if input_layer == "lookup":
return EmbeddingLayer(self.num_nodes, self.input_dim)
elif input_layer == "bilstm":
# Node representations from BiLSTM
self.node_embs = nn.Embedding(len(self.word_vocab), self.embedding_dim, padding_idx=self.word_vocab["PAD"])
if self.pretrained_embs:
self.init_embs()
self.lstm = nn.LSTM(num_layers=self.n_layers, input_size=self.embedding_dim, hidden_size=self.hidden_dim,
bidirectional=True, dropout=self.dropout, batch_first=True)
if self.max_pooling:
self.pooler = nn.AdaptiveMaxPool1d(1)
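            # NOTE: this BiLSTM branch does not return a layer and relies on attributes
            # (word_vocab, pretrained_embs, hidden_dim, n_layers, max_pooling) that are not
            # set in this class; it appears to be unused/incomplete.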
elif input_layer == "bert":
return EmbeddingLayer(self.num_nodes, self.input_dim, dataset, input_layer)
def build_hidden_layer(self, idx):
#act = F.relu if idx < self.num_hidden_layers - 1 else None
        act = torch.tanh if idx < self.num_hidden_layers else None
self_loop = True
if idx == 0:
input_dim = self.input_dim
output_dim = self.h_dim
elif idx == self.num_hidden_layers-1:
input_dim = self.h_dim
output_dim = self.embedding_dim
else:
input_dim = self.h_dim
output_dim = self.h_dim
if self.gcn_type == "MultiHeadGATLayer":
if idx != 0:
input_dim = input_dim * 8
if self.gcn_type == "WGCNAttentionSAGELayer":
# output_dim = input_dim * 2
self_loop = False
#if idx != 0:
# input_dim *= 2
cls = getattr(layers, self.gcn_type)
return cls(input_dim, output_dim, self.num_rels, self.num_bases, self.bias,
activation=act, self_loop=self_loop, dropout=self.dropout)
| commonsense-kg-completion-master | src/model.py |
__author__ = "chaitanya"
import logging as logger
from graph import Graph
import csv
import json
import os
import pandas as pd
import random
from sklearn.metrics.pairwise import cosine_similarity
from bert_feature_extractor import BertLayer
import numpy as np
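# Readers build an in-memory Graph from the raw dataset files and share helpers for
# printing summaries, sampling negative examples, and adding BERT-similarity edges.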
class Reader:
def print_summary(self):
print("\n\nGraph Summary")
print("\nNodes: %d" % len(self.graph.nodes))
print("Edges: %d" % self.graph.edgeCount)
print("Relations: %d" % len(self.graph.relation2id))
density = self.graph.edgeCount / (len(self.graph.nodes) * (len(self.graph.nodes) - 1))
print("Density: %f" % density)
print("\n******************* Sample Edges *******************")
for i, edge in enumerate(self.graph.iter_edges()):
print(edge)
if (i+1) % 10 == 0:
break
print("***************** ***************** *****************\n")
def gen_negative_examples(self, tgt_size=None, sampling_type="random"):
print("Generating negative examples..")
existing_edges = list(self.graph.iter_edges())
existing_nodes = list(self.graph.iter_nodes())
existing_relations = list(self.graph.iter_relations())
if tgt_size:
selected_edges = random.sample(existing_edges, tgt_size)
else:
selected_edges = existing_edges
# Generate 3 negative samples per example
idx = 0
for i, edge in enumerate(selected_edges):
src, rel, tgt = edge.src, edge.relation, edge.tgt
rand_nodes = []
while len(rand_nodes) != 2:
sample = random.sample(existing_nodes, 1)
                if sample[0] not in [src, tgt]:
rand_nodes.append(sample[0])
found = False
while not found:
sample = random.sample(existing_relations, 1)
                if sample[0] != rel:
rand_rel = sample
found = True
self.add_example(src.name, rand_nodes[0].name, rel.name, 1.0, 0)
self.add_example(rand_nodes[1].name, tgt.name, rel.name, 1.0, 0)
self.add_example(src.name, tgt.name, rand_rel[0].name, 1.0, 0)
idx += 3
print("Added %d negative examples using %s sampling" %(idx, sampling_type))
def add_sim_edges_bert(self):
if self.dataset == "conceptnet":
threshold = 0.95
elif self.dataset == "atomic":
threshold = 0.98
sim_counter = 0
bert_model = BertLayer(self.dataset)
node_list = self.graph.iter_nodes()
vecs = bert_model.forward(node_list)
vecs = vecs.cpu().numpy()
# vecs = np.vstack(vecs)
print("Computed embeddings.")
batch_size = 1000
out_sims = []
for row_i in range(0, int(vecs.shape[0] / batch_size) + 1):
start = row_i * batch_size
end = min([(row_i + 1) * batch_size, vecs.shape[0]])
if end <= start:
break
rows = vecs[start: end]
sim = cosine_similarity(rows, vecs) # rows is O(1) size
# 2 nodes with unknown text can have perfect similarity
sim[sim == 1.0] = 0
sim[sim < threshold] = 0
for i in range(rows.shape[0]):
indices = np.nonzero(sim[i])[0]
for index in indices:
if index!=i+start:
self.add_example(node_list[i+start].name, node_list[index].name, "sim", 1.0)
out_sims.append((node_list[i+start].name, node_list[index].name))
#self.add_example(node_list[index], node_list[i+start], "sim", 1.0)
sim_counter += 1
# with open("bert_atomic_sims.txt", 'w') as f:
# f.writelines([s[0] + "\t" + s[1] + "\n" for s in out_sims])
print("Added %d sim edges" % sim_counter)
class ConceptNetTSVReader(Reader):
def __init__(self, dataset):
logger.info("Reading ConceptNet")
self.dataset = dataset
self.graph = Graph()
self.rel2id = {}
def read_network(self, data_dir, split="train", train_network=None):
if split == "train":
data_path = os.path.join(data_dir, "train.txt")
elif split == "valid":
data_path = os.path.join(data_dir, "valid.txt")
elif split == "test":
data_path = os.path.join(data_dir, "test.txt")
with open(data_path) as f:
data = f.readlines()
if split == "test":
data = data[:1200]
for inst in data:
inst = inst.strip()
if inst:
inst = inst.split('\t')
rel, src, tgt = inst
weight = 1.0
src = src.lower()
tgt = tgt.lower()
if split != "train":
self.add_example(src, tgt, rel, float(weight), int(weight), train_network)
else:
self.add_example(src, tgt, rel, float(weight))
self.rel2id = self.graph.relation2id
def add_example(self, src, tgt, relation, weight, label=1, train_network=None):
src_id = self.graph.find_node(src)
if src_id == -1:
src_id = self.graph.add_node(src)
tgt_id = self.graph.find_node(tgt)
if tgt_id == -1:
tgt_id = self.graph.add_node(tgt)
relation_id = self.graph.find_relation(relation)
if relation_id == -1:
relation_id = self.graph.add_relation(relation)
edge = self.graph.add_edge(self.graph.nodes[src_id],
self.graph.nodes[tgt_id],
self.graph.relations[relation_id],
label,
weight)
# add nodes/relations from evaluation graphs to training graph too
if train_network is not None and label == 1:
src_id = train_network.graph.find_node(src)
if src_id == -1:
src_id = train_network.graph.add_node(src)
tgt_id = train_network.graph.find_node(tgt)
if tgt_id == -1:
tgt_id = train_network.graph.add_node(tgt)
relation_id = train_network.graph.find_relation(relation)
if relation_id == -1:
relation_id = train_network.graph.add_relation(relation)
return edge
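# Illustrative usage sketch for the TSV readers (paths follow the training scripts):
#   train = ConceptNetTSVReader("conceptnet")
#   train.read_network(data_dir="data/conceptnet/", split="train")
#   train.print_summary()
# AtomicReader below parses the original ATOMIC CSV release (v4_atomic_all.csv).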
class AtomicReader(Reader):
def __init__(self):
logger.info("Reading ATOMIC corpus")
self.graph = Graph()
self.rel2id = {}
def read_network(self, data_path, split="trn"):
df = pd.read_csv(os.path.join(data_path, "atomic/v4_atomic_all.csv"), index_col=0)
df.iloc[:, :9] = df.iloc[:, :9].apply(lambda col: col.apply(json.loads))
for rel in df.columns[:9]:
self.rel2id[rel] = len(self.rel2id)
for index, row in df[df['split'] == split].iterrows():
event = row.name
for rel in self.rel2id:
if row[rel] or row[rel] == ["none"]:
for inst in row[rel]:
self.add_example(event, inst, rel)
def add_example(self, src, tgt, rel, label=1):
start_id = self.graph.find_node(src)
if start_id == -1:
start_id = self.graph.add_node(src)
end_id = self.graph.find_node(tgt)
if end_id == -1:
end_id = self.graph.add_node(tgt)
rel_id = self.graph.find_relation(rel)
if rel_id == -1:
rel_id = self.graph.add_relation(rel)
self.graph.add_edge(self.graph.nodes[start_id],
self.graph.nodes[end_id],
self.graph.relations[rel_id],
label,
1.0)
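# Reads preprocessed ATOMIC triples from TSV files (head <TAB> relation <TAB> tail).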
class AtomicTSVReader(Reader):
def __init__(self, dataset):
logger.info("Reading ATOMIC corpus in TSV format")
self.dataset = dataset
self.graph = Graph()
self.rel2id = {}
def read_network(self, data_dir, split="train", train_network=None):
data_path = data_dir
filename = split + ".preprocessed.txt"
#filename = split + ".txt"
with open(os.path.join(data_path, filename)) as f:
data = f.readlines()
for inst in data:
inst = inst.strip()
if inst:
inst = inst.split('\t')
if len(inst) == 3:
src, rel, tgt = inst
#src = reader_utils.preprocess_atomic_sentence(src).replace("-", " ")
#tgt = reader_utils.preprocess_atomic_sentence(tgt).replace("-", " ")
if split != "train":
self.add_example(src, tgt, rel, train_network=train_network)
else:
self.add_example(src, tgt, rel)
def add_example(self, src, tgt, relation, weight=1.0, label=1, train_network=None):
src_id = self.graph.find_node(src)
if src_id == -1:
src_id = self.graph.add_node(src)
tgt_id = self.graph.find_node(tgt)
if tgt_id == -1:
tgt_id = self.graph.add_node(tgt)
relation_id = self.graph.find_relation(relation)
if relation_id == -1:
relation_id = self.graph.add_relation(relation)
edge = self.graph.add_edge(self.graph.nodes[src_id],
self.graph.nodes[tgt_id],
self.graph.relations[relation_id],
label,
weight)
# add nodes/relations from evaluation graphs to training graph too
if train_network is not None and label == 1:
src_id = train_network.graph.find_node(src)
if src_id == -1:
src_id = train_network.graph.add_node(src)
tgt_id = train_network.graph.find_node(tgt)
if tgt_id == -1:
tgt_id = train_network.graph.add_node(tgt)
relation_id = train_network.graph.find_relation(relation)
if relation_id == -1:
relation_id = train_network.graph.add_relation(relation)
return edge
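# Reads FB15k-237 splits; for training, only a `keep_fraction` percentage of edges is
# loaded (e.g. train100p.txt) to support graph-sparsity experiments.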
class FB15kReader(Reader):
def __init__(self, dataset):
logger.info("Reading FB15K-237..")
self.graph = Graph()
self.rel2id = {}
def read_network(self, data_dir, keep_fraction=100, split="train", train_network=None):
data_path = data_dir
if split == "train":
filename = split + str(keep_fraction) + "p.txt"
else:
filename = split + ".txt"
with open(os.path.join(data_path, filename)) as f:
data = f.readlines()
for inst in data:
inst = inst.strip()
if inst:
inst = inst.split('\t')
src, rel, tgt = inst
src = src.lower()
tgt = tgt.lower()
if split != "train":
self.add_example(src, tgt, rel, train_network=train_network)
else:
self.add_example(src, tgt, rel)
def add_example(self, src, tgt, relation, weight=1.0, label=1, train_network=None):
src_id = self.graph.find_node(src)
if src_id == -1:
src_id = self.graph.add_node(src)
tgt_id = self.graph.find_node(tgt)
if tgt_id == -1:
tgt_id = self.graph.add_node(tgt)
relation_id = self.graph.find_relation(relation)
if relation_id == -1:
relation_id = self.graph.add_relation(relation)
edge = self.graph.add_edge(self.graph.nodes[src_id],
self.graph.nodes[tgt_id],
self.graph.relations[relation_id],
label,
weight)
# add nodes/relations from evaluation graphs to training graph too
if train_network is not None and label == 1:
src_id = train_network.graph.find_node(src)
if src_id == -1:
src_id = train_network.graph.add_node(src)
tgt_id = train_network.graph.find_node(tgt)
if tgt_id == -1:
tgt_id = train_network.graph.add_node(tgt)
relation_id = train_network.graph.find_relation(relation)
if relation_id == -1:
relation_id = train_network.graph.add_relation(relation)
return edge
| commonsense-kg-completion-master | src/reader.py |
# Main script for experimenting with training on full training graph in an epoch
import argparse
import numpy as np
np.random.seed(42)
import sys
import os
import json
import time
import collections
import torch
torch.manual_seed(42)
torch.cuda.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_num_threads(16)
import torch.nn as nn
import random
random.seed(42)
from collections import Counter
#from dgl.contrib.data import load_data
from model import LinkPredict
from reader import AtomicTSVReader, ConceptNetTSVReader, ConceptNetFullReader, FB15kReader
import reader_utils
from src import utils
from dgl.contrib.data import load_data
#from graphviz import Digraph
#from torchviz import make_dot
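# Each load_* helper below returns: number of nodes, train/valid/test triple arrays,
# number of relations, valid/test labels, and the training-graph reader object.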
def load_atomic_data(dataset, sim_relations):
train_network = AtomicTSVReader(dataset)
dev_network = AtomicTSVReader(dataset)
test_network = AtomicTSVReader(dataset)
#train_network.read_network(data_dir="data/atomic-original/", split="train")
train_network.read_network(data_dir="data/atomic/", split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
print("Average Degree: ", avg_degree)
#dev_network.read_network(data_dir="data/atomic-original/", split="valid", train_network=train_network)
#test_network.read_network(data_dir="data/atomic-original/", split="test", train_network=train_network)
dev_network.read_network(data_dir="data/atomic/", split="valid", train_network=train_network)
test_network.read_network(data_dir="data/atomic/", split="test", train_network=train_network)
word_vocab = train_network.graph.node2id
#node_names = []
    #for node in train_network.graph.iter_nodes():
    #    node_names.append(node.name + "\n")
    #with open("atomic_node_names.txt", 'w') as f:
    #    f.writelines([reader_utils.preprocess_atomic_sentence(n.replace("-", " ")) for n in node_names])
#import sys; sys.exit(0)
# Add sim nodes
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return len(train_network.graph.nodes), train_data, valid_data, test_data, len(train_network.graph.relations), valid_labels, test_labels, train_network
def load_fb15k_data(dataset, sim_relations, keep_fraction=100):
train_network = FB15kReader(dataset)
dev_network = FB15kReader(dataset)
test_network = FB15kReader(dataset)
train_network.read_network(data_dir="data/FB15k-237/", keep_fraction=keep_fraction, split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
print("Average Degree: ", avg_degree)
dev_network.read_network(data_dir="data/FB15k-237/", split="valid", train_network=train_network)
test_network.read_network(data_dir="data/FB15k-237/", split="test", train_network=train_network)
word_vocab = train_network.graph.node2id
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return len(train_network.graph.nodes), train_data, valid_data, test_data, len(train_network.graph.relations), valid_labels, test_labels, train_network
def load_cn_data(dataset, sim_relations, eval_accuracy=False):
train_network = ConceptNetTSVReader(dataset)
dev1_network = ConceptNetTSVReader(dataset)
dev2_network = ConceptNetTSVReader(dataset)
test_network = ConceptNetTSVReader(dataset)
positive_only = not eval_accuracy
train_network.read_network(data_dir="data/conceptnet/", split="train")
train_network.print_summary()
#node_list = train_network.graph.iter_nodes()
#node_degrees = [node.get_degree() for node in node_list]
#degree_counter = Counter(node_degrees)
#avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
#print("Average Degree: ", avg_degree)
dev1_network.read_network(data_dir="data/conceptnet/", split="valid1", train_network=train_network, positive_only=positive_only)
dev2_network.read_network(data_dir="data/conceptnet/", split="valid2", train_network=train_network, positive_only=positive_only)
test_network.read_network(data_dir="data/conceptnet/", split="test", train_network=train_network, positive_only=positive_only)
# Add sim nodes
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
#word_vocab, word_freqs = reader_utils.create_vocab(train_network)
word_vocab = train_network.graph.node2id
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid1_data, valid1_labels = reader_utils.prepare_batch_dgl(word_vocab, dev1_network, train_network)
valid2_data, valid2_labels = reader_utils.prepare_batch_dgl(word_vocab, dev2_network, train_network)
return len(train_network.graph.nodes), train_data, valid1_data, test_data, len(train_network.graph.relations), valid1_labels, test_labels, train_network
def load_cn_full_data(dataset, sim_relations):
train_network = ConceptNetFullReader(dataset)
dev_network = ConceptNetFullReader(dataset)
test_network = ConceptNetFullReader(dataset)
train_network.read_network(data_dir="/data/conceptnet-5.6/", split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
print("Average Degree: ", avg_degree)
dev_network.read_network(data_dir="/data/conceptnet-5.6/", split="valid", train_network=train_network)
test_network.read_network(data_dir="/data/conceptnet-5.6/", split="test", train_network=train_network)
#node_names = []
#for node in train_network.graph.iter_nodes():
# node_names.append(node.name)
#with open("cn-full_node_names.txt", 'w') as f:
# f.writelines([n.split("/")[-2].replace("_", " ")+"\n" for n in node_names if n not in string.punctuation and not n.isdigit()])
#import sys; sys.exit(0)
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
#word_vocab, word_freqs = reader_utils.create_vocab(train_network)
word_vocab = train_network.graph.node2id
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return len(train_network.graph.nodes), train_data, valid_data, test_data, len(train_network.graph.relations), valid_labels, test_labels, train_network
def main(args):
#visualize_graph_degrees()
# load graph data
if args.dataset == "FB15K-237":
data = load_data(args.dataset)
num_nodes = data.num_nodes
train_data = data.train
valid_data = data.valid
test_data = data.test
num_rels = data.num_rels
train_network = None
# Deletion experiment
delete_fraction = args.delete_fraction
delete_indices = random.sample(range(len(train_data)), int(delete_fraction * len(train_data)))
train_data = np.array([tup for i, tup in enumerate(train_data) if i not in delete_indices])
selected_nodes = train_data[:,0].tolist() + train_data[:,2].tolist()
num_nodes = len(set(selected_nodes))
print(len(train_data))
print(num_nodes)
# Store entity-wise dicts for filtered metrics
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
# print("Graph Density: %f" % (len(train_data) / (num_nodes * (num_nodes - 1))))
elif args.dataset == "atomic":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_atomic_data(args.dataset, args.sim_relations)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
elif args.dataset == "conceptnet":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_cn_data(args.dataset, args.sim_relations, args.eval_accuracy)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
elif args.dataset == "conceptnet-5.6":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_cn_full_data(args.dataset, args.sim_relations)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
elif args.dataset == "FB15k-237":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_fb15k_data(args.dataset, args.sim_relations, keep_fraction=100-int(args.delete_fraction))
#delete_fraction = args.delete_fraction
#delete_indices = random.sample(range(len(train_data)), int(delete_fraction * len(train_data)))
#train_data = np.array([tup for i, tup in enumerate(train_data) if i not in delete_indices])
#selected_nodes = train_data[:,0].tolist() + train_data[:,2].tolist()
#num_nodes = len(set(selected_nodes))
#print("Remaining edges: ", len(train_data))
#print("Remaining nodes: " , num_nodes)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
else:
raise ValueError("Invalid Option for Dataset")
# for filtered ranking
all_e1_to_multi_e2, all_e2_to_multi_e1 = reader_utils.create_entity_dicts(all_tuples, num_rels, args.sim_relations)
# for training
train_e1_to_multi_e2, train_e2_to_multi_e1 = reader_utils.create_entity_dicts(train_data.tolist(), num_rels, args.sim_relations)
# check cuda
use_cuda = torch.cuda.is_available()
#use_cuda = False
if use_cuda and not args.no_cuda:
torch.cuda.set_device(args.gpu)
cpu_decoding = args.cpu_decoding
# create model
model = LinkPredict(num_nodes,
num_rels,
args,
use_cuda=use_cuda)
# validation and testing triplets
valid_data = torch.LongTensor(valid_data)
test_data = torch.LongTensor(test_data)
    # evaluation is done on CPU only for atomic, whose full graph is too large for GPU memory
cpu_eval = True if args.dataset == "atomic" else False
if use_cuda and not args.no_cuda and not cpu_eval:
valid_data = valid_data.cuda()
test_data = test_data.cuda()
# build test graph
if args.sim_sim and args.sim_relations:
graph_train_data = utils.sim_sim_connect(train_data, train_data, num_rels)
else:
graph_train_data = train_data
test_graph, test_rel, test_norm = utils.build_test_graph(
num_nodes, num_rels, graph_train_data)
test_deg = test_graph.in_degrees(
range(test_graph.number_of_nodes())).float().view(-1,1)
test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)
test_rel = torch.from_numpy(test_rel).view(-1, 1)
test_norm = torch.from_numpy(test_norm).view(-1, 1)
if use_cuda and not args.no_cuda:
test_node_id = test_node_id.cuda()
test_norm = test_norm.cuda()
test_rel = test_rel.cuda()
test_graph.ndata.update({'id': test_node_id, 'norm': test_norm})
test_graph.edata['type'] = test_rel
if use_cuda and not args.no_cuda:
#model = nn.DataParallel(model, device_ids=[0,1])
model = model.cuda()
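    # encode the enabled options (decoder, GCN type, BERT fusion, etc.) into the checkpoint filename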
name = '_model_state.pth'
name = "_" + args.model + "_" + args.gcn_type + "_" + args.decoder + name
if args.sim_relations:
name = "_sim_relations" + name
if args.sim_sim:
name = "_sim-sim" + name
if args.bert_concat:
name = "_bert_concat" + name
if args.bert_mlp:
name = "_bert_mlp" + name
if args.tying:
name = "_tying" + name
if args.bert_sum:
name = "_bert_sum" + name
if args.input_layer == "bert":
name = "_inp-bert" + name
if args.delete_fraction:
keep = 100 - int(args.delete_fraction)
name = "_" + str(keep) + name
#name = str(datetime.now().time()).split(".")[0] + name
model_state_file = args.dataset + name
model_state_file = os.path.join(args.output_dir, model_state_file)
#writer = SummaryWriter("runs/" + model_state_file.replace(".pth",".log"))
if args.eval_only:
if args.model_name:
model_state_file=args.model_name
checkpoint = torch.load(model_state_file)
#if use_cuda:
# model.cpu() # test on CPU
model.eval()
model.load_state_dict(checkpoint['state_dict'])
#model.rgcn.layers[-1].device = torch.device("cpu")
print(model)
if use_cuda and not args.no_cuda and cpu_eval:
model.cpu()
test_graph.ndata['id'] = test_graph.ndata['id'].cpu()
test_graph.ndata['norm'] = test_graph.ndata['norm'].cpu()
test_graph.edata['type'] = test_graph.edata['type'].cpu()
model.decoder.no_cuda = True
if args.dataset != "atomic" and args.dataset != "conceptnet":
valid_labels = None
test_labels = None
else:
valid_labels = torch.LongTensor(valid_labels)
test_labels = torch.LongTensor(test_labels)
if args.eval_accuracy:
threshold = utils.evaluate_accuracy(test_graph, model, valid_data, num_nodes, labels=valid_labels, network=train_network,
eval_bz=args.eval_batch_size)
utils.evaluate_accuracy(test_graph, model, test_data, num_nodes, labels=test_labels, network=train_network, threshold=threshold,
eval_bz=args.eval_batch_size)
else:
print("===========DEV============")
mrr = utils.ranking_and_hits(test_graph, model, valid_data, all_e1_to_multi_e2,
train_network, comb="graph", sim_relations=args.sim_relations,
write_results=args.write_results, debug=args.debug)
print("===========TEST============")
mrr = utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2,
train_network, comb="graph", sim_relations=args.sim_relations, debug=args.debug)
if args.dataset=="atomic":
mrr = utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, train_network, comb="graph", sim_relations=args.sim_relations, no_nones=True)
#mrr = utils.evaluate(test_graph, model, valid_data, all_e1_to_multi_e2, num_nodes, valid_labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
#mrr = utils.evaluate(test_graph, model, test_data, all_e1_to_multi_e2, num_nodes, test_labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
sys.exit(0)
# build adj list and calculate degrees for sampling
# adj_list, degrees, sparse_adj_matrix, rel = utils.get_adj_and_degrees(num_nodes, num_rels, train_data)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
#optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
if os.path.isfile(model_state_file):
print(model_state_file)
overwrite = input('Model already exists. Overwrite? Y = yes, N = no or C=continue training\n')
if overwrite.lower() == 'n':
print("Quitting")
sys.exit(0)
elif overwrite.lower() == 'c':
print("Loading model")
checkpoint = torch.load(model_state_file)
model.load_state_dict(checkpoint['state_dict'])
print("Continuing training")
elif overwrite.lower() != 'y':
raise ValueError("Invalid Option")
forward_time = []
backward_time = []
# training loop
print("Starting training...")
epoch = 0
best_mrr = 0
while True:
model.train()
epoch += 1
g = test_graph
#data = graph_train_data
#data = torch.from_numpy(data)
#labels = None
#if use_cuda and not args.no_cuda:
# data = data.cuda()
batch_size = args.decoder_batch_size
e1_keys = list(train_e1_to_multi_e2.keys())
random.shuffle(e1_keys)
cum_loss = 0.0
start_time = time.time()
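        # Each step recomputes node embeddings on the full training graph, then scores a
        # batch of (e1, rel) queries against all nodes with a multi-label target vector.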
for i in range(0, len(e1_keys), batch_size):
optimizer.zero_grad()
graph_embeddings = model.get_graph_embeddings(g, epoch)
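            # NOTE: the .module access assumes LinkPredict wraps its decoder in nn.DataParallel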
model.decoder.module.cur_embedding = graph_embeddings
batch = e1_keys[i : i + batch_size]
e1 = torch.LongTensor([elem[0] for elem in batch])
rel = torch.LongTensor([elem[1] for elem in batch])
e2 = [train_e1_to_multi_e2[elem] for elem in batch]
if use_cuda and not args.no_cuda and not cpu_decoding:
target = torch.cuda.FloatTensor(len(batch), num_nodes).fill_(0)
e1 = e1.cuda()
rel = rel.cuda()
else:
target = torch.zeros((len(batch), num_nodes))
for j, inst in enumerate(e2):
target[j, inst] = 1.0
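            # label smoothing: scale the 0/1 targets by (1 - epsilon) and add a uniform
            # 1/num_nodes term to every entry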
target = ((1.0-args.label_smoothing_epsilon)*target) + (1.0/target.size(1))
#if use_cuda and not args.no_cuda and not cpu_decoding:
# target = target.cuda()
if cpu_decoding:
graph_embeddings = graph_embeddings.cpu()
model.decoder.cpu()
model.decoder.no_cuda = True
t0 = time.time()
loss = model.get_score(e1, rel, target, graph_embeddings)
loss = torch.mean(loss)
cum_loss += loss.cpu().item()
t1 = time.time()
#loss.backward(retain_graph=True)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm) # clip gradients
optimizer.step()
t2 = time.time()
forward_time.append(t1 - t0)
backward_time.append(t2 - t1)
#torch.cuda.empty_cache()
del graph_embeddings, target, batch, loss, e1, rel, e2
#gc.collect()
print("Epoch {:04d} | Loss {:.4f} | Best MRR {:.4f} | Forward {:.4f}s | Backward {:.4f}s".
format(epoch, cum_loss, best_mrr, forward_time[-1], backward_time[-1]))
print("Total Time:", time.time() - start_time)
#writer.add_scalar('data/loss', cum_loss , epoch)
# Save model every 100 epochs
#if epoch+1%100==0:
# print("saving current model..")
# torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
# model_state_file)
# validation
if epoch % args.evaluate_every == 0:
# perform validation on CPU because full graph is too large
if use_cuda and not args.no_cuda and cpu_eval:
model.cpu()
test_graph.ndata['id'] = test_graph.ndata['id'].cpu()
test_graph.ndata['norm'] = test_graph.ndata['norm'].cpu()
test_graph.edata['type'] = test_graph.edata['type'].cpu()
model.decoder.no_cuda = True
model.eval()
print("start eval")
print("===========DEV============")
mrr = utils.ranking_and_hits(test_graph, model, valid_data, all_e1_to_multi_e2, train_network, comb="graph", sim_relations=args.sim_relations, debug=True)
print("===========TEST===========")
test_mrr = utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, train_network, comb="graph", sim_relations=args.sim_relations, debug=True)
#mrr = utils.evaluate(test_graph, model, valid_data, e1_to_multi_e2, num_nodes, labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
#writer.add_scalar('data/mrr', mrr, epoch)
#mrr = utils.evaluate(test_graph, model, test_data, num_nodes, labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
# save best model
# torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
# model_state_file)
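            # early stopping on dev MRR: checkpoint whenever it improves, and stop once it
            # fails to improve after the minimum number of epochs (n_epochs) has been reached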
if mrr < best_mrr:
if epoch >= args.n_epochs:
break
else:
best_mrr = mrr
torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
model_state_file)
metrics = {"best_mrr": best_mrr,
"cum_loss": cum_loss
}
with open(os.path.join(args.output_dir, 'metrics.json'), 'w') as f:
f.write(json.dumps(metrics))
if use_cuda and not args.no_cuda:
model.cuda()
test_graph.ndata['id'] = test_graph.ndata['id'].cuda()
test_graph.ndata['norm'] = test_graph.ndata['norm'].cuda()
test_graph.edata['type'] = test_graph.edata['type'].cuda()
model.decoder.no_cuda = False
print("training done")
print("Mean forward time: {:4f}s".format(np.mean(forward_time)))
print("Mean Backward time: {:4f}s".format(np.mean(backward_time)))
#writer.export_scalars_to_json("./all_scalars.json")
#writer.close()
print("\nstart testing")
# use best model checkpoint
checkpoint = torch.load(model_state_file)
#if use_cuda:
# model.cpu() # test on CPU
model.eval()
model.load_state_dict(checkpoint['state_dict'])
print("Using best epoch: {}".format(checkpoint['epoch']))
mrr = utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, train_network, comb="graph", sim_relations=args.sim_relations)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Machine Commonsense Completion')
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout probability")
parser.add_argument("--input_dropout", type=float, default=0.2,
help="input dropout")
parser.add_argument("--feature_map_dropout", type=float, default=0.2,
help="feature map dropout")
parser.add_argument("--label_smoothing_epsilon", type=float, default=0.1,
help="epsilon for performing label smoothing over target")
parser.add_argument("--init_embedding_dim", type=int, default=200,
help="init embedding dimension")
parser.add_argument("--embedding_dim", type=int, default=200,
help="embedding dimension")
parser.add_argument("--n-hidden", type=int, default=200,
help="number of hidden units")
parser.add_argument("--use_bias", action='store_true', default=False,
help="use bias")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--no_cuda", action='store_true', default=False,
help="prevents using cuda")
parser.add_argument("--sim_relations", action='store_true', default=False,
help="add sim edges to graph")
parser.add_argument("--sim_sim", action='store_true', default=False,
help="add sim-sim edges to graph")
parser.add_argument("--model", type=str, default='RGCN',
help="model architecture")
parser.add_argument("--decoder", type=str, default='ConvTransE',
help="decoder used to compute scores")
parser.add_argument("--lr", type=float, default=1e-4,
help="learning rate")
parser.add_argument("--input_layer", type=str, default="lookup",
help="initialization layer for rgcn")
parser.add_argument("--n-bases", type=int, default=100,
help="number of weight blocks for each relation")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of minimum training epochs")
parser.add_argument("-d", "--dataset", type=str, required=True,
help="dataset to use")
parser.add_argument("--eval_only", action='store_true', default=False,
help="only evaluate using an existing model")
parser.add_argument("--write_results", action='store_true', default=False,
help="write topk candidate tuples for evaluation set to file")
parser.add_argument("--eval_accuracy", action='store_true', default=False,
help="evaluate binary classification accuracy")
parser.add_argument("--eval-batch-size", type=int, default=500,
help="batch size when evaluating")
parser.add_argument("--regularization", type=float, default=0.05,
help="regularization weight")
parser.add_argument("--grad-norm", type=float, default=1.0,
help="norm to clip gradient to")
parser.add_argument("--graph-batch-size", type=int, default=30000,
help="number of edges to sample in each iteration")
parser.add_argument("--graph-split-size", type=float, default=0.5,
help="portion of edges used as positive sample")
parser.add_argument("--negative-sample", type=int, default=10,
help="number of negative samples per positive sample")
parser.add_argument("--evaluate-every", type=int, default=10,
help="perform evaluation every n epochs")
parser.add_argument("--decoder_batch_size", type=int, default=128,
help="batch size for decoder")
parser.add_argument("--model_name", type=str, required=False, default=None,
help="model to load")
parser.add_argument("--output_dir", type=str, required=False, default="saved_models",
help="output directory to store metrics and model file")
parser.add_argument("--bert_concat", action='store_true', default=False,
help="concat bert embeddings before decoder layer")
parser.add_argument("--bert_sum", action='store_true', default=False,
help="sum bert embeddings before decoder layer")
parser.add_argument("--bert_mlp", action='store_true', default=False,
help="use mlp after concatenated bert+gcn embeddings before decoder layer")
parser.add_argument("--tying", action='store_true', default=False,
help="tie input bert layer to gcn with concatenated tensor before decoding")
parser.add_argument("--cpu_decoding", action='store_true', default=False,
help="perform decoding on cpu")
parser.add_argument("--pretrained_gcn", action='store_true', default=False,
help="use mlp after concatenated bert+gcn embeddings before decoder layer")
parser.add_argument("--multi_step", action='store_true', default=False,
help="multi-step classification between nones and remaining entities, for atomic")
parser.add_argument("--layer_norm", action='store_true', default=False,
help="use layer normalization on embeddings fed to decoder")
parser.add_argument("--gcn_type", type=str, default="WGCNAttentionLayer",
help="type of GCN to be used")
parser.add_argument("--delete_fraction", type=float, default=0.0,
help="fraction of edges to delete")
parser.add_argument("--use_triplet_loss", action='store_true', default=False,
help="use triplet loss")
parser.add_argument("--debug", action='store_true', default=False,
help="use fewer eval instances in debugging mode")
args = parser.parse_args()
print(args)
try:
main(args)
except KeyboardInterrupt:
print('Interrupted')
#writer.export_scalars_to_json("./all_scalars.json")
#writer.close()
| commonsense-kg-completion-master | src/run_kbc_fullgraph_beaker.py |
import numpy as np
import torch
import dgl
import random
import itertools
from scipy.sparse import coo_matrix
torch.set_printoptions(profile="full")
def get_adj_and_degrees(num_nodes, num_rels, triplets):
""" Get adjacency list and degrees of the graph
"""
col = []
row = []
rel = []
adj_list = [[] for _ in range(num_nodes)]
for i, triplet in enumerate(triplets):
adj_list[triplet[0]].append([i, triplet[2]])
adj_list[triplet[2]].append([i, triplet[0]])
row.append(triplet[0])
col.append(triplet[2])
rel.append(triplet[1])
row.append(triplet[2])
col.append(triplet[0])
rel.append(triplet[1] + num_rels)
sparse_adj_matrix = coo_matrix((np.ones(len(triplets)*2), (row, col)), shape=(num_nodes, num_nodes))
degrees = np.array([len(a) for a in adj_list])
adj_list = [np.array(a) for a in adj_list]
return adj_list, degrees, sparse_adj_matrix, rel
def sample_edge_neighborhood(adj_list, degrees, n_triplets, sample_size, sample=True, sampling_edge_ids=None):
""" Edge neighborhood sampling to reduce training graph size
"""
if sample:
edges = np.zeros((sample_size), dtype=np.int32)
# initialize
sample_counts = np.array([d for d in degrees])
picked = np.array([False for _ in range(n_triplets)])
seen = np.array([False for _ in degrees])
i = 0
while i != sample_size:
weights = sample_counts * seen
if np.sum(weights) == 0:
weights = np.ones_like(weights)
weights[np.where(sample_counts == 0)] = 0
probabilities = weights / np.sum(weights)
chosen_vertex = np.random.choice(np.arange(degrees.shape[0]),
p=probabilities)
chosen_adj_list = adj_list[chosen_vertex]
seen[chosen_vertex] = True
chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))
chosen_edge = chosen_adj_list[chosen_edge]
edge_number = chosen_edge[0]
while picked[edge_number]:
chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))
chosen_edge = chosen_adj_list[chosen_edge]
edge_number = chosen_edge[0]
edges[i] = edge_number
other_vertex = chosen_edge[1]
picked[edge_number] = True
sample_counts[chosen_vertex] -= 1
sample_counts[other_vertex] -= 1
seen[other_vertex] = True
i += 1
else:
if sampling_edge_ids is None:
random_edges = random.sample(range(n_triplets), sample_size)
else:
random_edges = np.random.choice(sampling_edge_ids, sample_size, replace=False)
edges = np.array(random_edges)
return edges
def generate_sampled_graph_and_labels(triplets, sample_size,
num_rels, adj_list, degrees,
negative_rate, sim_sim=False, add_sim_relations=False,
sim_train_e1_to_multi_e2=None, sampling_edge_ids=None):
"""Get training graph and signals
First perform edge neighborhood sampling on graph, then perform negative
sampling to generate negative samples
"""
# perform edge neighbor sampling
edges = sample_edge_neighborhood(adj_list, degrees, len(triplets),
sample_size, sample=False, sampling_edge_ids=sampling_edge_ids)
edges = triplets[edges]
# add sim edges
if add_sim_relations:
edges = densify_subgraph(edges, num_rels, sim_train_e1_to_multi_e2)
# connect neighbors of nodes connected by sim edges (not used)
if sim_sim:
edges = sim_sim_connect(edges, triplets, num_rels)
src, rel, dst = edges.transpose()
# relabel nodes to have consecutive node ids
# uniq_v : sorted unique nodes in subsampled graph (original node ids)
uniq_v, edges = np.unique((src, dst), return_inverse=True)
# node ids now lie in range(0, number of unique nodes in subsampled graph)
src, dst = np.reshape(edges, (2, -1))
# relabeled_edges = np.stack((src, rel, dst)).transpose()
# Add inverse edges to training samples
src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
rel = np.concatenate((rel, rel+num_rels))
relabeled_edges = np.stack((src, rel, dst)).transpose()
# negative sampling
if negative_rate == 0:
samples = relabeled_edges
labels = np.ones(len(samples))
else:
samples, labels = negative_sampling(relabeled_edges, len(uniq_v),
negative_rate)
# build DGL graph
print("# sampled nodes: {}".format(len(uniq_v)))
print("# sampled edges: {}".format(len(src)))
g, rel, norm = build_graph_from_triplets(len(uniq_v), num_rels,
(src, rel, dst))
return g, uniq_v, rel, norm, samples, labels
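# Add "sim" edges between node pairs of the sampled subgraph (taken from its non-sim edges)
# that the precomputed similarity dictionary marks as similar.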
def densify_subgraph(edges, num_rels, sim_train_e1_to_multi_e2):
sim_edges = []
no_sim_indices = np.where(edges[:, 1] != num_rels-1)[0]
no_sim_edges = edges[no_sim_indices]
unique, edges = np.unique((no_sim_edges[:, 0], no_sim_edges[:, 2]), return_inverse=True)
for pair in itertools.combinations(unique, 2):
if (pair[0], num_rels-1) in sim_train_e1_to_multi_e2:
if pair[1] in sim_train_e1_to_multi_e2[(pair[0], num_rels-1)]:
sim_edges.append(np.array([pair[0], num_rels-1, pair[1]]))
return np.concatenate((no_sim_edges, np.array(sim_edges)))
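# Per-node normalization constants 1 / in-degree (0 for isolated nodes), stored later as the
# graph's 'norm' node feature.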
def comp_deg_norm(g):
in_deg = g.in_degrees(range(g.number_of_nodes())).float().numpy()
norm = 1.0 / in_deg
norm[np.isinf(norm)] = 0
return norm
def build_graph_from_triplets(num_nodes, num_rels, triplets):
"""
Create a DGL graph.
"""
g = dgl.DGLGraph()
g.add_nodes(num_nodes)
src, rel, dst = triplets
g.add_edges(src, dst)
norm = comp_deg_norm(g)
print("# nodes: {}, # edges: {}".format(num_nodes, len(src)))
return g, rel, norm
def build_test_graph(num_nodes, num_rels, edges):
src, rel, dst = edges.transpose()
print("Test graph:")
return build_graph_from_triplets(num_nodes, num_rels, (src, rel, dst))
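# For each positive triple, generate `negative_rate` corrupted copies by replacing either the
# head or the tail (each with probability 1/2) with a random entity; positives are labelled 1
# and negatives 0.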
def negative_sampling(pos_samples, num_entity, negative_rate):
size_of_batch = len(pos_samples)
num_to_generate = size_of_batch * negative_rate
neg_samples = np.tile(pos_samples, (negative_rate, 1))
labels = np.zeros(size_of_batch * (negative_rate + 1), dtype=np.float32)
labels[: size_of_batch] = 1
# TODO: pick negative samples only with same relations
values = np.random.randint(num_entity, size=num_to_generate)
#values = np.random.choice(tot_entities, size=num_to_generate, replace=False)
choices = np.random.uniform(size=num_to_generate)
subj = choices > 0.5
obj = choices <= 0.5
neg_samples[subj, 0] = values[subj]
neg_samples[obj, 2] = values[obj]
#for i, sample in enumerate(neg_samples):
# if any(np.array_equal(sample, x) for x in pos_samples):
# labels[i+size_of_batch] = 1
return np.concatenate((pos_samples, neg_samples)), labels
def sim_sim_connect(pos_samples, all_triplets, num_rels):
"""
connect neighbors of node with sim edge type to a candidate node
"""
# filter sim relations
sample_ids = np.where(pos_samples[:, 1] == num_rels-1)[0]
sampled_sim_edges = pos_samples[sample_ids]
addl_samples = []
for edge in sampled_sim_edges:
src, rel, tgt = edge
# find all neighboring edges of tgt in large graph
neighbors = np.where(all_triplets[:, 0] == tgt)[0]
no_sim_neighbors = np.where(all_triplets[neighbors][:, 1] != num_rels-1)[0]
new_edges = np.copy(all_triplets[neighbors][no_sim_neighbors])
new_edges[:, 0] = src
addl_samples.append(new_edges)
if len(addl_samples) == 0:
return pos_samples
else:
addl_samples = np.concatenate(addl_samples)
final_samples = np.concatenate((pos_samples, addl_samples))
unique_samples = np.unique(final_samples, axis=0)
print("Adding %d sim-sim edges" % (unique_samples.shape[0] - pos_samples.shape[0]))
return unique_samples
| commonsense-kg-completion-master | src/utils.py |
# Main script for experimenting with training on full training graph in an epoch
import argparse
import numpy as np
import sys
import os
import time
import torch
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
import torch.nn as nn
import random
random.seed(42)
from collections import Counter
from dgl.contrib.data import load_data
from model import LinkPredict
from reader import AtomicTSVReader, ConceptNetTSVReader, ConceptNetFullReader, FB15kReader
from tensorboardX import SummaryWriter
import reader_utils
from src import utils
def load_atomic_data(dataset, sim_relations):
train_network = AtomicTSVReader(dataset)
dev_network = AtomicTSVReader(dataset)
test_network = AtomicTSVReader(dataset)
train_network.read_network(data_dir="/home/chaitanyam/.dgl/Atomic/", split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
print("Average Degree: ", avg_degree)
dev_network.read_network(data_dir="/home/chaitanyam/.dgl/Atomic/", split="valid", train_network=train_network)
test_network.read_network(data_dir="/home/chaitanyam/.dgl/Atomic/", split="test", train_network=train_network)
word_vocab = train_network.graph.node2id
#node_names = []
#for node in train_network.graph.iter_nodes():
# node_names.append(node.name + "\n")
#with open("atomic_node_names.txt", 'w') as f:
# f.writelines([reader_utils.preprocess_atomic_sentence(n.replace("-", " ")) for n in node_names])
#import sys; sys.exit(0)
# Add sim nodes
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return len(train_network.graph.nodes), train_data, valid_data, test_data, len(train_network.graph.relations), valid_labels, test_labels, train_network
def load_fb15k_data(dataset, sim_relations):
train_network = FB15kReader(dataset)
dev_network = FB15kReader(dataset)
test_network = FB15kReader(dataset)
train_network.read_network(data_dir="/net/efs.mosaic/chaitanyam/ConvE/data/FB15k-237/", split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
print("Average Degree: ", avg_degree)
dev_network.read_network(data_dir="/net/efs.mosaic/chaitanyam/ConvE/data/FB15k-237/", split="valid", train_network=train_network)
test_network.read_network(data_dir="/net/efs.mosaic/chaitanyam/ConvE/data/FB15k-237/", split="test", train_network=train_network)
word_vocab = train_network.graph.node2id
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return len(train_network.graph.nodes), train_data, valid_data, test_data, len(train_network.graph.relations), valid_labels, test_labels, train_network
def load_cn_data(dataset, sim_relations, eval_accuracy=False):
train_network = ConceptNetTSVReader(dataset)
dev1_network = ConceptNetTSVReader(dataset)
dev2_network = ConceptNetTSVReader(dataset)
test_network = ConceptNetTSVReader(dataset)
positive_only = not eval_accuracy
train_network.read_network(data_dir="data/", split="train")
train_network.print_summary()
#node_list = train_network.graph.iter_nodes()
#node_degrees = [node.get_degree() for node in node_list]
#degree_counter = Counter(node_degrees)
#avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
#print("Average Degree: ", avg_degree)
dev1_network.read_network(data_dir="data/", split="valid1", train_network=train_network, positive_only=positive_only)
dev2_network.read_network(data_dir="data/", split="valid2", train_network=train_network, positive_only=positive_only)
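    # NOTE: this variant reuses the valid2 split as the test set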
test_network.read_network(data_dir="data/", split="valid2", train_network=train_network, positive_only=positive_only)
# Add sim nodes
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
#word_vocab, word_freqs = reader_utils.create_vocab(train_network)
word_vocab = train_network.graph.node2id
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid1_data, valid1_labels = reader_utils.prepare_batch_dgl(word_vocab, dev1_network, train_network)
valid2_data, valid2_labels = reader_utils.prepare_batch_dgl(word_vocab, dev2_network, train_network)
return len(train_network.graph.nodes), train_data, valid1_data, test_data, len(train_network.graph.relations), valid1_labels, test_labels, train_network
def load_cn_full_data(dataset, sim_relations):
train_network = ConceptNetFullReader(dataset)
dev_network = ConceptNetFullReader(dataset)
test_network = ConceptNetFullReader(dataset)
train_network.read_network(data_dir="/net/efs.mosaic/chaitanyam/ConvE/data/", split="train")
train_network.print_summary()
node_list = train_network.graph.iter_nodes()
node_degrees = [node.get_degree() for node in node_list]
degree_counter = Counter(node_degrees)
avg_degree = sum([k*v for k,v in degree_counter.items()]) / sum([v for k,v in degree_counter.items()])
print("Average Degree: ", avg_degree)
dev_network.read_network(data_dir="/net/efs.mosaic/chaitanyam/ConvE/data/", split="valid", train_network=train_network)
test_network.read_network(data_dir="/net/efs.mosaic/chaitanyam/ConvE/data/", split="test", train_network=train_network)
#node_names = []
#for node in train_network.graph.iter_nodes():
# node_names.append(node.name)
#with open("cn-full_node_names.txt", 'w') as f:
# f.writelines([n.split("/")[-2].replace("_", " ")+"\n" for n in node_names if n not in string.punctuation and not n.isdigit()])
#import sys; sys.exit(0)
if sim_relations:
print("Adding sim edges..")
train_network.add_sim_edges_bert()
#word_vocab, word_freqs = reader_utils.create_vocab(train_network)
word_vocab = train_network.graph.node2id
train_data, _ = reader_utils.prepare_batch_dgl(word_vocab, train_network, train_network)
test_data, test_labels = reader_utils.prepare_batch_dgl(word_vocab, test_network, train_network)
valid_data, valid_labels = reader_utils.prepare_batch_dgl(word_vocab, dev_network, train_network)
return len(train_network.graph.nodes), train_data, valid_data, test_data, len(train_network.graph.relations), valid_labels, test_labels, train_network
def main(args):
# load graph data
if args.dataset == "FB15K-237":
data = load_data(args.dataset)
num_nodes = data.num_nodes
train_data = data.train
valid_data = data.valid
test_data = data.test
num_rels = data.num_rels
train_network = None
# Deletion experiment
# delete_fraction = args.delete_fraction
# delete_indices = random.sample(range(len(train_data)), int(delete_fraction * len(train_data)))
# train_data = np.array([tup for i, tup in enumerate(train_data) if i not in delete_indices])
# selected_nodes = train_data[:,0].tolist() + train_data[:,2].tolist()
# num_nodes = len(set(selected_nodes))
# Store entity-wise dicts for filtered metrics
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
# print("Graph Density: %f" % (len(train_data) / (num_nodes * (num_nodes - 1))))
elif args.dataset == "atomic":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_atomic_data(args.dataset, args.sim_relations)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
elif args.dataset == "conceptnet":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_cn_data(args.dataset, args.sim_relations, args.eval_accuracy)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
elif args.dataset == "conceptnet-5.6":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_cn_full_data(args.dataset, args.sim_relations)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
elif args.dataset == "FB15k-237":
num_nodes, train_data, valid_data, test_data, num_rels, valid_labels, test_labels, train_network = load_fb15k_data(args.dataset, args.sim_relations)
all_tuples = train_data.tolist() + valid_data.tolist() + test_data.tolist()
else:
raise ValueError("Invalid Option for Dataset")
# for filtered ranking
all_e1_to_multi_e2, all_e2_to_multi_e1 = reader_utils.create_entity_dicts(all_tuples, num_rels, args.sim_relations)
# for training
train_e1_to_multi_e2, train_e2_to_multi_e1 = reader_utils.create_entity_dicts(train_data.tolist(), num_rels, args.sim_relations)
# check cuda
use_cuda = torch.cuda.is_available()
#use_cuda = False
if use_cuda and not args.no_cuda:
torch.cuda.set_device(args.gpu)
# create model
model = LinkPredict(train_network,
num_nodes,
num_rels,
args,
use_cuda=use_cuda)
# validation and testing triplets
valid_data = torch.LongTensor(valid_data)
test_data = torch.LongTensor(test_data)
if use_cuda and not args.no_cuda:
valid_data = valid_data.cuda()
test_data = test_data.cuda()
# build test graph
if args.sim_sim and args.sim_relations:
graph_train_data = utils.sim_sim_connect(train_data, train_data, num_rels)
else:
graph_train_data = train_data
test_graph, test_rel, test_norm = utils.build_test_graph(
num_nodes, num_rels, graph_train_data)
test_deg = test_graph.in_degrees(
range(test_graph.number_of_nodes())).float().view(-1,1)
test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)
test_rel = torch.from_numpy(test_rel).view(-1, 1)
test_norm = torch.from_numpy(test_norm).view(-1, 1)
if use_cuda and not args.no_cuda:
test_node_id = test_node_id.cuda()
test_norm = test_norm.cuda()
test_rel = test_rel.cuda()
test_graph.ndata.update({'id': test_node_id, 'norm': test_norm})
# Add bert embedding
test_graph.edata['type'] = test_rel
if use_cuda and not args.no_cuda:
model.cuda()
name = '_standard_model_state.pth'
name = "_" + args.model + "_" + args.decoder + name
if args.sim_relations:
name = "_sim_relations" + name
if args.sim_sim:
name = "_sim-sim" + name
if args.bert_trainable:
name = '_bert_trainable_model_state.pth'
if args.bert:
name = '_bert_model_state.pth'
if args.input_layer == "bert":
name = "_inp-bert" + name
#name = str(datetime.now().time()).split(".")[0] + name
model_state_file = args.dataset + name
writer = SummaryWriter("runs/" + model_state_file.replace(".pth",".log"))
if args.eval_only:
if args.model_name:
model_state_file=args.model_name
checkpoint = torch.load(model_state_file)
#if use_cuda:
# model.cpu() # test on CPU
model.eval()
model.load_state_dict(checkpoint['state_dict'])
#model.rgcn.layers[-1].device = torch.device("cpu")
print(model)
if args.dataset != "atomic" and args.dataset != "conceptnet":
valid_labels = None
test_labels = None
else:
valid_labels = torch.LongTensor(valid_labels)
test_labels = torch.LongTensor(test_labels)
if args.eval_accuracy:
threshold = utils.evaluate_accuracy(test_graph, model, valid_data, num_nodes, labels=valid_labels, network=train_network,
eval_bz=args.eval_batch_size)
utils.evaluate_accuracy(test_graph, model, test_data, num_nodes, labels=test_labels, network=train_network, threshold=threshold,
eval_bz=args.eval_batch_size)
else:
mrr = utils.ranking_and_hits(test_graph, model, valid_data, all_e1_to_multi_e2, valid_labels, train_network, comb="graph", sim_relations=args.sim_relations)
mrr = utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, test_labels, train_network, comb="graph", sim_relations=args.sim_relations)
#mrr = utils.evaluate(test_graph, model, valid_data, all_e1_to_multi_e2, num_nodes, valid_labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
#mrr = utils.evaluate(test_graph, model, test_data, all_e1_to_multi_e2, num_nodes, test_labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
sys.exit(0)
# build adj list and calculate degrees for sampling
adj_list, degrees, sparse_adj_matrix, rel = utils.get_adj_and_degrees(num_nodes, num_rels, train_data)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
if os.path.isfile(model_state_file):
print(model_state_file)
overwrite = input('Model already exists. Overwrite? Y = yes, N = no\n')
if overwrite.lower() == 'n':
print("Quitting")
sys.exit(0)
elif overwrite.lower() != 'y':
raise ValueError("Invalid Option")
forward_time = []
backward_time = []
# training loop
print("Starting training...")
epoch = 0
best_mrr = 0
while True:
model.train()
epoch += 1
g = test_graph
data = graph_train_data
data = torch.from_numpy(data)
labels = None
if use_cuda and not args.no_cuda:
data = data.cuda()
batch_size = 128
e1_keys = list(train_e1_to_multi_e2.keys())
random.shuffle(e1_keys)
cum_loss = 0.0
for i in range(0, len(e1_keys), batch_size):
graph_embeddings = model.get_graph_embeddings(g, data, labels, train_network)
optimizer.zero_grad()
batch = e1_keys[i : i + batch_size]
e1 = [elem[0] for elem in batch]
rel = [elem[1] for elem in batch]
e2 = [train_e1_to_multi_e2[elem] for elem in batch]
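            # Build a 1-to-N multi-label target over all nodes for this batch of
            # (e1, rel) queries: gold tails get weight (1 - epsilon) and a uniform
            # 1/num_nodes is added to every entry (label smoothing).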
target = torch.zeros((len(batch), num_nodes))
for j, inst in enumerate(e2):
target[j, inst] = 1.0
target = ((1.0-args.label_smoothing_epsilon)*target) + (1.0/target.size(1))
if use_cuda and not args.no_cuda:
target = target.cuda()
t0 = time.time()
loss = model.get_score(batch, target, graph_embeddings, train_network)
cum_loss += loss.cpu().item()
t1 = time.time()
loss.backward(retain_graph=True)
#loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm) # clip gradients
optimizer.step()
t2 = time.time()
forward_time.append(t1 - t0)
backward_time.append(t2 - t1)
print("Epoch {:04d} | Loss {:.4f} | Best MRR {:.4f} | Forward {:.4f}s | Backward {:.4f}s".
format(epoch, cum_loss, best_mrr, forward_time[-1], backward_time[-1]))
        writer.add_scalar('data/loss', cum_loss, epoch)
        # Save model every 100 epochs
        if (epoch + 1) % 100 == 0:
print("saving current model..")
torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
model_state_file)
# validation
if epoch % args.evaluate_every == 0:
# perform validation on CPU because full graph is too large
#if use_cuda:
# model.cpu()
model.eval()
#model.rgcn.layers[0].device = torch.device("cpu")
#model.rgcn.layers[-1].device = torch.device("cpu")
print("start eval")
labels = len(valid_data) * [1]
labels = torch.LongTensor(labels)
if use_cuda and not args.no_cuda:
labels = labels.cuda()
mrr = utils.ranking_and_hits(test_graph, model, valid_data, all_e1_to_multi_e2, labels, train_network, comb="graph", sim_relations=args.sim_relations)
#mrr = utils.evaluate(test_graph, model, valid_data, e1_to_multi_e2, num_nodes, labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
writer.add_scalar('data/mrr', mrr, epoch)
metrics = {"best_mrr": best_mrr,
"cum_loss": cum_loss
}
            os.makedirs("/output/", exist_ok=True)
            with open(os.path.join("/output/", 'metrics.json'), 'w') as f:
f.write(json.dumps(metrics))
#mrr = utils.evaluate(test_graph, model, test_data, num_nodes, labels, train_network,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
# save best model
# torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
# model_state_file)
if mrr < best_mrr:
if epoch >= args.n_epochs:
break
else:
best_mrr = mrr
torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
model_state_file)
if use_cuda and not args.no_cuda:
model.cuda()
#model.rgcn.layers[-1].device = torch.device("cuda")
#model.rgcn.layers[0].device = torch.device("cuda")
print("training done")
print("Mean forward time: {:4f}s".format(np.mean(forward_time)))
print("Mean Backward time: {:4f}s".format(np.mean(backward_time)))
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
print("\nstart testing")
# use best model checkpoint
checkpoint = torch.load(model_state_file)
#if use_cuda:
# model.cpu() # test on CPU
model.eval()
model.load_state_dict(checkpoint['state_dict'])
print("Using best epoch: {}".format(checkpoint['epoch']))
labels = len(test_data) * [1]
mrr = utils.ranking_and_hits(test_graph, model, test_data, all_e1_to_multi_e2, labels, train_network, comb="graph", sim_relations=args.sim_relations)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Machine Commonsense Completion')
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout probability")
parser.add_argument("--input_dropout", type=float, default=0.2,
help="input dropout")
parser.add_argument("--feature_map_dropout", type=float, default=0.2,
help="feature map dropout")
parser.add_argument("--label_smoothing_epsilon", type=float, default=0.1,
help="epsilon for performing label smoothing over target")
parser.add_argument("--init_embedding_dim", type=int, default=200,
help="init embedding dimension")
parser.add_argument("--embedding_dim", type=int, default=200,
help="embedding dimension")
parser.add_argument("--n-hidden", type=int, default=200,
help="number of hidden units")
parser.add_argument("--use_bias", action='store_true', default=True,
help="use bias")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--no_cuda", action='store_true', default=False,
help="prevents using cuda")
parser.add_argument("--sim_relations", action='store_true', default=False,
help="add sim edges to graph")
parser.add_argument("--sim_sim", action='store_true', default=False,
help="add sim-sim edges to graph")
parser.add_argument("--model", type=str, default='RGCN', help="model architecture")
parser.add_argument("--decoder", type=str, default='DistMult', help="decoder used to compute scores")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--input_layer", type=str, default="lookup",
help="initialization layer for rgcn")
parser.add_argument("--n-bases", type=int, default=100,
help="number of weight blocks for each relation")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of minimum training epochs")
parser.add_argument("-d", "--dataset", type=str, required=True,
help="dataset to use")
parser.add_argument("--eval_only", action='store_true', default=False,
help="only evaluate using an existing model")
parser.add_argument("--eval_accuracy", action='store_true', default=False,
help="evaluate binary classification accuracy")
parser.add_argument("--eval-batch-size", type=int, default=500,
help="batch size when evaluating")
parser.add_argument("--regularization", type=float, default=0.01,
help="regularization weight")
parser.add_argument("--grad-norm", type=float, default=1.0,
help="norm to clip gradient to")
parser.add_argument("--graph-batch-size", type=int, default=30000,
help="number of edges to sample in each iteration")
parser.add_argument("--graph-split-size", type=float, default=0.5,
help="portion of edges used as positive sample")
parser.add_argument("--negative-sample", type=int, default=10,
help="number of negative samples per positive sample")
parser.add_argument("--evaluate-every", type=int, default=100,
help="perform evaluation every n epochs")
parser.add_argument("--model_name", type=str, required=False, default=None,
help="model to load")
parser.add_argument("--bert", action='store_true', default=False,
help="use bert")
parser.add_argument("--bert_trainable", action='store_true', default=False,
help="finetune bert further")
args = parser.parse_args()
print(args)
try:
main(args)
except KeyboardInterrupt:
print('Interrupted')
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
| commonsense-kg-completion-master | src/run_kbc_fullgraph.py |
__author__ = "chaitanya" # Adapted from DGL official examples
import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel
import numpy as np
import dgl.function as fn
from torch.nn.init import xavier_normal_, xavier_uniform_
from torch.nn import functional as F, Parameter
class RGCNLayer(nn.Module):
def __init__(self, in_feat, out_feat, bias=None, activation=None,
self_loop=False, dropout=0.0):
super(RGCNLayer, self).__init__()
self.bias = bias
self.activation = activation
self.self_loop = self_loop
if self.bias:
self.bias_weight = nn.Parameter(torch.Tensor(out_feat))
# Following bias initialization used in ConvTransE
stdv = 1. / np.sqrt(out_feat)
self.bias_weight.data.uniform_(-stdv, stdv)
#nn.init.xavier_uniform_(self.bias,
# gain=nn.init.calculate_gain('relu'))
# weight for self loop
if self.self_loop:
if isinstance(self, MultiHeadGATLayer):
self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat * 8))
else:
self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat))
self.loop_rel = nn.Parameter(torch.Tensor(1))
nn.init.xavier_uniform_(self.loop_weight,
gain=nn.init.calculate_gain('relu'))
if dropout:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
# define how propagation is done in subclass
def propagate(self, g):
raise NotImplementedError
def forward(self, g):
#loop_message = g.ndata['h'] * self.loop_weight
#if self.dropout is not None:
# loop_message = self.dropout(loop_message)
if self.self_loop:
loop_message = torch.mm(g.ndata['h'], self.loop_weight)
#g.ndata['h_mm'] = torch.mm(g.ndata['h'], self.weight)
#g.edata['alpha'] = self.weight_rel(g.edata['type'])
self.propagate(g)
# additional processing
# apply bias and activation
node_repr = g.ndata['h']
if self.bias:
node_repr = node_repr + self.bias_weight
if self.self_loop:
node_repr = node_repr + loop_message
# Apply batch normalization
if not isinstance(self, MultiHeadGATLayer) and not isinstance(self, GATLayer):
node_repr = self.bn(node_repr)
if self.activation:
node_repr = self.activation(node_repr)
if self.dropout is not None:
node_repr = self.dropout(node_repr)
g.ndata['h'] = node_repr
class RGCNBasisLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,
activation=None, is_input_layer=False):
super(RGCNBasisLayer, self).__init__(in_feat, out_feat, bias, activation)
self.in_feat = in_feat
self.out_feat = out_feat
self.num_rels = num_rels
self.num_bases = num_bases
self.is_input_layer = is_input_layer
if self.num_bases <= 0 or self.num_bases > self.num_rels:
self.num_bases = self.num_rels
# add basis weights
self.weight = nn.Parameter(torch.Tensor(self.num_bases, self.in_feat, self.out_feat))
if self.num_bases < self.num_rels:
# linear combination coefficients
self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))
nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
if self.num_bases < self.num_rels:
nn.init.xavier_uniform_(self.w_comp,
gain=nn.init.calculate_gain('relu'))
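    # Basis decomposition: when num_bases < num_rels, each relation's weight matrix
    # is a learned linear combination (w_comp) of num_bases shared basis matrices,
    # which keeps the parameter count independent of the number of relations.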
def propagate(self, g):
if self.num_bases < self.num_rels:
# generate all weights from bases
weight = self.weight.view(self.num_bases,
self.in_feat * self.out_feat)
weight = torch.matmul(self.w_comp, weight).view(
self.num_rels, self.in_feat, self.out_feat)
else:
weight = self.weight
if self.is_input_layer:
def msg_func(edges):
# for input layer, matrix multiply can be converted to be
# an embedding lookup using source node id
embed = weight.view(-1, self.out_feat)
index = edges.data['type'] * self.in_feat + edges.src['id']
return {'msg': embed.index_select(0, index) * edges.data['norm']}
else:
def msg_func(edges):
w = weight.index_select(0, edges.data['type'].squeeze())
msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()
msg = msg * edges.data['norm']
return {'msg': msg}
g.update_all(msg_func, fn.sum(msg='msg', out='h'), None)
class RGCNBlockLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=None,
activation=None, self_loop=False, dropout=0.0):
super(RGCNBlockLayer, self).__init__(in_feat, out_feat, bias,
activation, self_loop=self_loop,
dropout=dropout)
self.num_rels = num_rels
self.num_bases = num_bases
assert self.num_bases > 0
self.out_feat = out_feat
self.submat_in = in_feat // self.num_bases
self.submat_out = out_feat // self.num_bases
self.bn = torch.nn.BatchNorm1d(self.out_feat)
# assuming in_feat and out_feat are both divisible by num_bases
self.weight = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases * self.submat_in * self.submat_out))
nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
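    # Block-diagonal decomposition: each relation's transform is stored as num_bases
    # blocks of size (submat_in x submat_out); msg_func applies the blocks to the
    # source features with a batched matrix multiplication.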
def msg_func(self, edges):
"""
Compute messages only from source node features
"""
weight = self.weight.index_select(0, edges.data['type'].squeeze()).view(
-1, self.submat_in, self.submat_out)
node = edges.src['h'].view(-1, 1, self.submat_in)
msg = torch.bmm(node, weight).view(-1, self.out_feat)
return {'msg': msg}
def propagate(self, g):
g.update_all(self.msg_func, fn.sum(msg='msg', out='h'), self.apply_func)
def apply_func(self, nodes):
return {'h': nodes.data['h'] * nodes.data['norm']}
class RGCNBlockAttentionLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=None,
activation=None, self_loop=False, dropout=0.0, bert=False, bert_trainable=False):
super(RGCNBlockAttentionLayer, self).__init__(in_feat, out_feat, bias,
activation, self_loop=self_loop,
dropout=dropout)
self.num_rels = num_rels
self.num_bases = num_bases
assert self.num_bases > 0
self.out_feat = out_feat
self.submat_in = in_feat // self.num_bases
self.submat_out = out_feat // self.num_bases
self.bn = torch.nn.BatchNorm1d(self.out_feat)
# assuming in_feat and out_feat are both divisible by num_bases
self.weight = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases * self.submat_in * self.submat_out))
nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
def msg_func(self, edges):
"""
Compute messages only from source node features
"""
weight = self.weight.index_select(0, edges.data['type'].squeeze()).view(
-1, self.submat_in, self.submat_out)
node = edges.src['h'].view(-1, 1, self.submat_in)
msg = torch.bmm(node, weight).view(-1, self.out_feat)
return {'msg': msg, 'node_id': edges.src['id']}
    def compute_attn_weights(self):
pass
def attn_reduce(self, nodes):
# TODO: Weigh neighbors by attention on top of BERT feature representations
pass
def propagate(self, g):
g.update_all(self.msg_func, self.attn_reduce, self.apply_func)
def apply_func(self, nodes):
return {'h': nodes.data['h'] * nodes.data['norm']}
class WGCNLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=True,
activation=None, self_loop=False, dropout=0.2):
super(WGCNLayer, self).__init__(in_feat, out_feat, bias,
activation, self_loop=self_loop,
dropout=dropout)
self.num_rels = num_rels
self.in_feat = in_feat
self.out_feat = out_feat
self.weight = Parameter(torch.FloatTensor(self.in_feat, self.out_feat))
self.weight_rel = torch.nn.Embedding(self.num_rels, 1, padding_idx=0)
self.bn = torch.nn.BatchNorm1d(self.out_feat)
xavier_normal_(self.weight.data)
#stdv = 1. / np.sqrt(self.weight.size(1))
#self.weight.data.uniform_(-stdv, stdv)
def msg_func(self, edges):
"""
Compute messages only from source node features
"""
edge_types = edges.data['type'].squeeze()
alpha = self.weight_rel(edge_types)
node = torch.mm(edges.src['h'], self.weight)
msg = alpha.expand_as(node) * node
return {'msg': msg}
def propagate(self, g):
#g.update_all(fn.src_mul_edge(src='h_mm', edge='alpha', out='msg'),
# fn.sum(msg='msg', out='h'),
# apply_node_func=lambda nodes: {'h': nodes.data['h'] * nodes.data['norm']})
g.update_all(self.msg_func, fn.sum(msg='msg', out='h'), self.apply_func)
def apply_func(self, nodes):
return {'h': nodes.data['h'] * nodes.data['norm']}
class WGCNAttentionLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=True,
activation=None, self_loop=False, dropout=0.2):
super(WGCNAttentionLayer, self).__init__(in_feat, out_feat, bias, activation,
self_loop=self_loop, dropout=dropout)
self.num_rels = num_rels
self.in_feat = in_feat
self.out_feat = out_feat
self.weight = Parameter(torch.FloatTensor(self.in_feat, self.out_feat))
self.weight_rel = torch.nn.Embedding(self.num_rels, 1, padding_idx=0)
self.bn = torch.nn.BatchNorm1d(self.out_feat)
xavier_normal_(self.weight.data)
#stdv = 1. / np.sqrt(self.weight.size(1))
#self.weight.data.uniform_(-stdv, stdv)
def msg_func(self, edges):
"""
Compute messages only from source node features
"""
edge_types = edges.data['type'].squeeze()
alpha = self.weight_rel(edge_types)
node = torch.mm(edges.src['h'], self.weight)
msg = alpha.expand_as(node) * node
return {'msg': msg}
def attn_reduce(self, nodes):
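        """
        Attention-weighted aggregation: score each incoming message by its dot
        product with the destination node's current features, softmax the scores
        over the mailbox, and sum the attention-weighted messages.
        """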
attn_vector = torch.bmm(nodes.mailbox['msg'], nodes.data['h'].unsqueeze(2))
attn_probs = torch.softmax(attn_vector, dim=1)
attn_weighted_msgs = nodes.mailbox['msg'] * attn_probs.expand_as(nodes.mailbox['msg'])
attn_sum = torch.sum(attn_weighted_msgs, dim=1)
return {'h': attn_sum}
def propagate(self, g):
#g.update_all(fn.src_mul_edge(src='h_mm', edge='alpha', out='msg'),
# fn.sum(msg='msg', out='h'),
# apply_node_func=lambda nodes: {'h': nodes.data['h'] * nodes.data['norm']})
g.update_all(self.msg_func, self.attn_reduce, self.apply_func)
def apply_func(self, nodes):
return {'h': nodes.data['h'] * nodes.data['norm']}
class WGCNAttentionSAGELayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=True,
activation=None, self_loop=False, dropout=0.2):
super(WGCNAttentionSAGELayer, self).__init__(in_feat, out_feat, bias,
activation, self_loop=self_loop,
dropout=dropout)
self.num_rels = num_rels
self.in_feat = in_feat
self.out_feat = out_feat
self.weight = Parameter(torch.FloatTensor(self.in_feat*2, self.out_feat))
self.weight_rel = torch.nn.Embedding(self.num_rels, 1, padding_idx=0)
self.bn = torch.nn.BatchNorm1d(self.out_feat)
xavier_normal_(self.weight.data)
#stdv = 1. / np.sqrt(self.weight.size(1))
#self.weight.data.uniform_(-stdv, stdv)
def msg_func(self, edges):
"""
Compute messages only from source node features
"""
edge_types = edges.data['type'].squeeze()
alpha = self.weight_rel(edge_types)
#node = torch.mm(edges.src['h'], self.weight)
msg = alpha.expand_as(edges.src['h']) * edges.src['h']
return {'msg': msg}
    def compute_attn_weights(self):
pass
def attn_reduce(self, nodes):
mean_msg = torch.mean(nodes.mailbox['msg'], dim=1)
aggreg_msg = torch.cat((nodes.data['h'], mean_msg), dim=1)
node_repr = torch.mm(aggreg_msg, self.weight)
return {'h': node_repr}
def propagate(self, g):
#g.update_all(fn.src_mul_edge(src='h_mm', edge='alpha', out='msg'),
# fn.sum(msg='msg', out='h'),
# apply_node_func=lambda nodes: {'h': nodes.data['h'] * nodes.data['norm']})
g.update_all(self.msg_func, self.attn_reduce, self.apply_func)
def apply_func(self, nodes):
return {'h': nodes.data['h'] * nodes.data['norm']}
class GATLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=True,
activation=None, self_loop=False, dropout=0.2):
super(GATLayer, self).__init__(in_feat, out_feat, bias,
activation, self_loop=self_loop, dropout=dropout)
self.in_feat = in_feat
self.out_feat = out_feat
self.weight = nn.Linear(in_feat, out_feat, bias=False)
self.weight_rel = torch.nn.Embedding(num_rels, 1, padding_idx=0)
self.attn_fc = nn.Linear(2 * out_feat, 1, bias=False)
def edge_attention(self, edges):
# edge UDF for equation (2)
z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
a = self.attn_fc(z2)
edge_types = edges.data['type'].squeeze()
rel_alpha = self.weight_rel(edge_types)
return {'e': F.leaky_relu(a), 'rel_alpha': rel_alpha}
def message_func(self, edges):
# message UDF for equation (3) & (4)
return {'z': edges.src['z'], 'e': edges.data['e'], 'rel_alpha': edges.data['rel_alpha']}
def reduce_func(self, nodes):
# reduce UDF for equation (3) & (4)
# equation (3)
alpha = F.softmax(nodes.mailbox['e'], dim=1)
rel_alpha = nodes.mailbox['rel_alpha']
# equation (4)
h = torch.sum(rel_alpha * alpha * nodes.mailbox['z'], dim=1)
return {'h': h}
def propagate(self, g):
# equation (1)
h = g.ndata['h']
z = self.weight(h)
g.ndata['z'] = z
# equation (2)
g.apply_edges(self.edge_attention)
# equation (3) & (4)
g.update_all(self.message_func, self.reduce_func)
#return self.g.ndata.pop('h')
class GATSubLayer(nn.Module):
def __init__(self, in_feat, out_feat, num_rels):
super(GATSubLayer, self).__init__()
self.in_feat = in_feat
self.out_feat = out_feat
self.weight = nn.Linear(in_feat, out_feat, bias=False)
self.weight_rel = torch.nn.Embedding(num_rels, 1, padding_idx=0)
self.attn_fc = nn.Linear(2 * out_feat, 1, bias=False)
def edge_attention(self, edges):
# edge UDF for equation (2)
z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
edge_types = edges.data['type'].squeeze()
rel_alpha = self.weight_rel(edge_types)
a = self.attn_fc(z2)
return {'e': F.leaky_relu(a), 'rel_alpha': rel_alpha}
def message_func(self, edges):
# message UDF for equation (3) & (4)
return {'z': edges.src['z'], 'e': edges.data['e'], 'rel_alpha': edges.data['rel_alpha']}
def reduce_func(self, nodes):
# reduce UDF for equation (3) & (4)
# equation (3)
alpha = F.softmax(nodes.mailbox['e'], dim=1)
rel_alpha = nodes.mailbox['rel_alpha']
# equation (4)
h = torch.sum(rel_alpha * alpha * nodes.mailbox['z'], dim=1)
return {'head-out': h}
def propagate(self, g):
# equation (1)
h = g.ndata['h']
z = self.weight(h)
g.ndata['z'] = z
# equation (2)
g.apply_edges(self.edge_attention)
# equation (3) & (4)
g.update_all(self.message_func, self.reduce_func)
return g.ndata.pop('head-out')
class MultiHeadGATLayer(RGCNLayer):
def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=True,
activation=None, self_loop=False, dropout=0.2):
super(MultiHeadGATLayer, self).__init__(in_feat, out_feat, bias,
activation, self_loop=self_loop, dropout=dropout)
self.heads = nn.ModuleList()
for i in range(8):
self.heads.append(GATSubLayer(in_feat, out_feat, num_rels))
self.merge = "cat"
self.out_feat = out_feat
self.in_feat = in_feat
def propagate(self, g):
head_outs = [attn_head.propagate(g) for attn_head in self.heads]
if self.merge == 'cat':
# concat on the output feature dimension (dim=1)
g.ndata['h'] = torch.cat(head_outs, dim=1)
else:
# merge using average
            g.ndata['h'] = torch.mean(torch.stack(head_outs), dim=0)
return g.ndata['h']
| commonsense-kg-completion-master | src/layers.py |
__author__ = "chaitanya"
import torch
import numpy as np
import string
def create_word_vocab(network):
word_vocab = {}
word_freqs = {}
word_vocab["PAD"] = len(word_vocab)
for node in network.graph.iter_nodes():
for word in node.name.split():
word = word.lower()
if word not in word_vocab:
word_vocab[word] = len(word_vocab)
if word not in word_freqs:
word_freqs[word] = 1
else:
word_freqs[word] += 1
word_vocab["UNK"] = len(word_vocab)
return word_vocab, word_freqs
def create_vocab(network):
word_vocab = {}
word_freqs = {}
for node in network.graph.iter_nodes():
if node.name not in word_vocab:
word_vocab[node.name] = len(word_vocab)
if node.name not in word_freqs:
word_freqs[node.name] = 1
else:
word_freqs[node.name] += 1
word_vocab["UNK"] = len(word_vocab)
return word_vocab, word_freqs
def get_vocab_idx(vocab, token):
if token not in vocab:
return vocab["UNK"]
else:
return vocab[token]
def map_to_ids(vocab, seq):
return [get_vocab_idx(vocab, word) for word in seq]
def get_relation_id(rel_name, train_network):
rel_id = train_network.graph.find_relation(rel_name)
if rel_id == -1:
return len(train_network.rel2id)
else:
return rel_id
def prepare_batch_nodes(vocab, batch):
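    """
    Map each tokenized node name to vocab ids, sort the batch by length
    (descending), and pad every name to the longest one; returns the padded
    id matrix and the true lengths as LongTensors.
    """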
lens = [len(name) for name in batch]
max_len = max(lens)
sorted_batch = [x for _, x in sorted(zip(lens, batch), reverse=True, key=lambda x: x[0])]
all_lens = []
word_ids_batch = []
for node in sorted_batch:
word_ids = map_to_ids(vocab, node)
padding_length = max_len - len(word_ids)
all_lens.append(len(word_ids))
word_ids += [get_vocab_idx(vocab, "PAD")] * padding_length
word_ids_batch.append(word_ids)
return torch.LongTensor(word_ids_batch), torch.LongTensor(all_lens)
def prepare_batch_dgl(vocab, test_network, train_network):
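    """
    Convert every edge of test_network into an [src_id, rel_id, tgt_id] triple
    using the node vocab and the training graph's relation ids, and collect the
    corresponding edge labels.
    """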
all_edges = []
all_labels = []
for edge in test_network.graph.iter_edges():
src_id = get_vocab_idx(vocab, edge.src.name)
tgt_id = get_vocab_idx(vocab, edge.tgt.name)
rel_id = get_relation_id(edge.relation.name, train_network)
all_edges.append(np.array([src_id, rel_id, tgt_id]))
all_labels.append(edge.label)
return np.array(all_edges), all_labels
def create_entity_dicts(all_tuples, num_rels, sim_relations=False):
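    """
    Build (entity, relation) -> [entities] lookups for 1-to-N scoring.
    e1_to_multi_e2 maps (head, rel) to all tails and (tail, rel + rel_offset) to
    all heads (the inverse direction); e2_to_multi_e1 stores only the inverse
    direction. Inverse relations are offset by num_rels (minus one when
    sim_relations is used, since sim edges are skipped and get no inverse).
    """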
e1_to_multi_e2 = {}
e2_to_multi_e1 = {}
for tup in all_tuples:
e1, rel, e2 = tup
# No need to use sim edges for decoding
if rel == num_rels-1 and sim_relations:
continue
rel_offset = num_rels
if sim_relations:
rel_offset -= 1
if (e1, rel) in e1_to_multi_e2:
e1_to_multi_e2[(e1, rel)].append(e2)
else:
e1_to_multi_e2[(e1, rel)] = [e2]
if (e2, rel+rel_offset) in e1_to_multi_e2:
e1_to_multi_e2[(e2, rel+rel_offset)].append(e1)
else:
e1_to_multi_e2[(e2, rel+rel_offset)] = [e1]
if (e2, rel+rel_offset) in e2_to_multi_e1:
e2_to_multi_e1[(e2, rel+rel_offset)].append(e1)
else:
e2_to_multi_e1[(e2, rel+rel_offset)] = [e1]
return e1_to_multi_e2, e2_to_multi_e1
def preprocess_atomic_sentence(sent):
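    """
    Normalize an ATOMIC event string: lowercase, strip most punctuation and digits,
    canonicalize the PersonX/PersonY placeholders, and replace them with proper
    names, e.g. "PersonX pays PersonY a compliment" -> "John pays Tom a compliment".
    """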
puncts = list(string.punctuation)
puncts.remove('-')
sent = [c for c in sent.lower() if c not in puncts or c == "'"]
sent = ''.join([c for c in sent if not c.isdigit()])
sent = sent.replace("person x", "personx").replace(" x's", " personx's").replace(" x ", " personx ")
if sent[:2] == "x " or sent[:2] == "x'":
sent = sent.replace("x ", "personx ", 1).replace("x'", "personx'")
if sent[-3:] == " x\n":
sent = sent.replace(" x\n", "personx\n")
sent = sent.replace("person y", "persony").replace(" y's", " persony's").replace(" y ", " persony ")
if sent[:2] == "y " or sent[:2] == "y'":
sent = sent.replace("y ", "persony ", 1).replace("y'", "persony'")
if sent[-3:] == " y\n":
sent = sent.replace(" y\n", "persony\n")
return sent.replace("personx", "John").replace("persony", "Tom")
| commonsense-kg-completion-master | src/reader_utils.py |
__author__ = "chaitanya" # Adapted from HuggingFace implementation
from transformers import BertTokenizer, BertModel, BertForMaskedLM
import os
import re
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
import numpy as np
"""
Feature Extractor for BERT
"""
class InputExample(object):
"""A single training/test example for simple sequence classification with BERT."""
def __init__(self, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
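# Standard BERT sequence-pair truncation helper used by convert_examples_to_features below;
# it trims the longer of the two token lists one token at a time until the pair fits.
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place so the total length is at most max_length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()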
def convert_examples_to_features(examples, max_seq_length, tokenizer, label_list=None):
"""Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if example.label:
label_id = label_map[example.label]
else:
label_id = None
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def camel_case_split(identifier):
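    """Split a CamelCase relation name into lowercased words, e.g. 'HasSubevent' -> 'has subevent'."""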
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return (" ".join([m.group(0) for m in matches])).lower()
def convert_edges_to_examples(edges, labels, network):
examples = []
for i, edge in enumerate(edges):
edge = edge.cpu().numpy()
text_a = network.graph.nodes[edge[0]].name + " " + camel_case_split(network.graph.relations[edge[1]].name) + " " + network.graph.nodes[edge[2]].name
label = labels[i].cpu().item()
examples.append(
InputExample(text_a=text_a, text_b=None, label=label))
return examples
def convert_nodes_to_examples(node_list):
examples = []
for node in node_list:
text_a = node.name
examples.append(
InputExample(text_a=text_a))
return examples
class BertLayer(nn.Module):
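    """
    Provides BERT [CLS]-token embeddings for graph nodes. Embeddings are computed
    with a language-model-finetuned BERT loaded from disk and cached to a .pt file;
    if the cache already exists it is loaded instead of recomputed.
    """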
def __init__(self, dataset):
super(BertLayer, self).__init__()
bert_model = "bert-large-uncased"
self.dataset = dataset
if self.dataset == "conceptnet":
output_dir = "bert_model_embeddings/nodes-lm-conceptnet/"
elif self.dataset == "atomic":
output_dir = "bert_model_embeddings/nodes-lm-atomic/"
self.filename = os.path.join(output_dir, self.dataset + "_bert_embeddings.pt")
print(self.filename)
if os.path.isfile(self.filename):
self.exists = True
return
self.exists = False
self.max_seq_length = 32
self.eval_batch_size = 128
self.tokenizer = BertTokenizer.from_pretrained(bert_model, do_lower_case=False)
output_model_file = os.path.join(output_dir, "lm_pytorch_model.bin")
print("Loading model from %s" % output_dir)
self.bert_model = torch.load(output_model_file, map_location='cpu')
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.bert_model.to(self.device)
# Make BERT parameters non-trainable
# bert_params = list(self.bert_model.parameters())
# for param in bert_params:
# param.requires_grad = False
def forward(self, node_list):
#
if self.exists:
print("Loading BERT embeddings from disk..")
return torch.load(self.filename)
print("Computing BERT embeddings..")
self.bert_model.eval()
eval_examples = convert_nodes_to_examples(node_list)
eval_features = convert_examples_to_features(
eval_examples, max_seq_length=self.max_seq_length, tokenizer=self.tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.eval_batch_size)
sequence_outputs = []
idx = 0
for input_ids, input_mask, segment_ids in eval_dataloader:
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
sequence_output, _ = self.bert_model.bert(input_ids, segment_ids, input_mask, output_all_encoded_layers=False)
sequence_outputs.append(sequence_output[:, 0])
if len(sequence_outputs) == 800:
self.save_to_disk(torch.cat(sequence_outputs, dim=0), idx)
sequence_outputs = []
idx += 1
self.save_to_disk(torch.cat(sequence_outputs, dim=0), idx)
return torch.cat(sequence_outputs, dim=0)
def forward_as_init(self, num_nodes, network=None):
if self.exists:
print("Loading BERT embeddings from disk..")
return torch.load(self.filename)
node_ids = np.arange(num_nodes)
node_list = [network.graph.nodes[idx] for idx in node_ids]
print("Computing BERT embeddings..")
self.bert_model.eval()
eval_examples = convert_nodes_to_examples(node_list)
eval_features = convert_examples_to_features(
eval_examples, max_seq_length=self.max_seq_length, tokenizer=self.tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.eval_batch_size)
sequence_outputs = []
for input_ids, input_mask, segment_ids in eval_dataloader:
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
with torch.no_grad():
sequence_output, _ = self.bert_model.bert(input_ids, segment_ids, input_mask,
output_all_encoded_layers=False)
sequence_outputs.append(sequence_output[:, 0])
return torch.cat(sequence_outputs, dim=0)
def save_to_disk(self, tensor, idx):
torch.save(tensor, self.dataset + str(idx) + "_bert_embeddings.pt")
| commonsense-kg-completion-master | src/bert_feature_extractor.py |
import pandas as pd
import numpy as np
#----------------------------------------------------------
# get stats for ACL Demo track submission
#----------------------------------------------------------
df1 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/ACCoRD/1-sentence-annotations copy.csv")
df2 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/ACCoRD/2-sentence-annotations copy.csv")
df_all = pd.concat([df1, df2])
# save df with only positive rows
rslt_df = df_all[df_all['is_relational']==1]
#----------------------------------------------------------
print("\n----BASIC STATS-----")
# get number of positive and negative examples in ACCoRD
print("Number of rows = %d" % len(df_all))
print("Number of extractions with a positive label = %d" % len(df_all[df_all['is_relational']==1]))
print("Number of extractions with a negative label = %d" % len(df_all[df_all['is_relational']==0]))
print("Number of extractions with is_relational==nan = %d" % len(df_all[pd.isna(df_all['is_relational'])]))
print("Number of formatted statements = %d" % len(df_all[pd.notna(df_all['formatted_statement'])]))
print("--> Number of formatted statements made from an extraction = %d" % len(df_all[pd.notna(df_all['formatted_statement']) & (df_all['is_relational']==1)]))
print("--> Number of additional formatted statements made from an extraction = %d" % len(df_all[pd.notna(df_all['formatted_statement']) & pd.isna(df_all['is_relational'])]))
# get number of instances without a differentia
num_without_differentia = 0
for index, row in rslt_df.iterrows():
if pd.notna(row['formatted_statement']):
if row['formatted_statement'].endswith(row['concept_b']):
num_without_differentia+=1
print("Number of formatted sentences with no differentia (end with the annotated concept B) = %d" % num_without_differentia)
#----------------------------------------------------------
print("\n----DESCRIPTION + CONCEPT B STATS-----")
group_count_conceptb = rslt_df.groupby('concept_a')['concept_b'].nunique().reset_index(name='num_unique_concept_bs')
group_count_descriptions = rslt_df.groupby(by=["concept_a"]).size().reset_index(name='counts')
# merge dfs on concept_a column
merged = pd.merge(group_count_conceptb, group_count_descriptions, on="concept_a")
print(np.average(merged['counts']))
print(merged['counts'].value_counts())
merged.to_csv("test.csv")
#----------------------------------------------------------
print("\n----ERRORS-----")
# ERRORS
print("rows that don't have a formatted statement but are also not marked 0 for is_relational\n")
print(df_all[pd.isna(df_all['formatted_statement']) & pd.isna(df_all['is_relational'])])
print(df_all[pd.isna(df_all['formatted_statement']) & (df_all['is_relational']==1)]) | ACCoRD-main | corpus/accord-stats.py |
from re import A
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.stats import inter_rater as irr
import numpy as np
from statistics import mean
df_expertise_ratings = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/expertise-ratings.csv")
df_median_expertise = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/median-expertise-ratings.csv")
df_description_preferences = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/description-preferences.csv")
df_error_analysis = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/completed-error-analysis.csv")
error_analysis_concepts = set(df_error_analysis['forecite_concept'].to_list())
# remove _expertise to isolate concept
concepts = list(map(lambda x: x.replace('_expertise',''), df_median_expertise['concept_expertise']))
df_expertise_ratings['concept_expertise'] = list(map(lambda x: x.replace('_expertise',''), df_expertise_ratings['concept_expertise']))
df_median_expertise['concept_expertise'] = list(map(lambda x: x.replace('_expertise',''), df_median_expertise['concept_expertise']))
# rename concept columns
df_median_expertise = df_median_expertise.rename(columns={"concept_expertise": "concept"})
df_expertise_ratings = df_expertise_ratings.rename(columns={"concept_expertise": "concept"})
print(df_expertise_ratings.keys())
print(df_description_preferences.keys())
# merge expertise ratings and description preferences dfs
df_expertise_preferences = pd.merge(df_expertise_ratings, df_description_preferences, on=["concept", "email"])
print(df_expertise_preferences.keys())
res = df_expertise_preferences[['email', 'concept', 'expertise', 'description', 'preference_rating']]
res.to_csv("merged-expertise-description-preferences.csv")
#--------------------------------------------------------
# Do people with more expertise tend to agree with each
# other more than they do with those with less expertise?
#--------------------------------------------------------
# function to select and format description preferences for each segment
# and calculate Fleiss' kappa
def calculateKappaPerSegment(df_concept_description_preferences, segmentEmails):
# select rows of the description preferences df for this concept where emailIDs are in the segment list
df_concept_description_preferences_segment = df_concept_description_preferences.loc[df_concept_description_preferences['email'].isin(segmentEmails)]
# select relevant columns from each df
df_concept_description_preferences_segment = df_concept_description_preferences_segment[['email', 'description', 'preference_rating']]
# convert categorical preference ratings to numeric
df_concept_description_preferences_segment['preference_rating'] = pd.factorize(df_concept_description_preferences_segment['preference_rating'])[0]
# pivot df to get columns = emails, rows = descriptions
np_concept_description_preferences_segment = df_concept_description_preferences_segment.pivot(index='description', columns='email', values='preference_rating').to_numpy()
# calculate kappa for segment
kappa = irr.fleiss_kappa(irr.aggregate_raters(np_concept_description_preferences_segment)[0], method='fleiss')
return kappa
# #--------------------------------------------------------
# # store each kappa calculation for all concepts
# above_median_concept_kappas = []
# below_median_concept_kappas = []
# full_concept_kappas = []
# count = 0
# n_runs = 5
# # for each concept
# for concept in concepts:
# # select rows for this concept
# df_concept_description_preferences = df_description_preferences[df_description_preferences['concept']==concept]
# all_emails = list(set(df_concept_description_preferences['email'].to_list()))
# # # get median expertise for this concept
# # median = df_median_expertise.loc[df_median_expertise['concept_expertise'] == concept, 'median.expertise'].iloc[0]
# # global median
# median = 4
# # # global average
# # median = 3.35
# # select rows with expertise rating > median
# above_median_segment = df_expertise_ratings.loc[(df_expertise_ratings['concept'] == concept) & (df_expertise_ratings['expertise'] > median)]
# # select rows with expertise rating < median
# below_median_segment = df_expertise_ratings.loc[(df_expertise_ratings['concept'] == concept) & (df_expertise_ratings['expertise'] < median)]
# # select rows with expertise rating < median
# median_segment = df_expertise_ratings.loc[(df_expertise_ratings['concept'] == concept) & (df_expertise_ratings['expertise'] == median)]
# above_median_kappas = []
# below_median_kappas = []
# for i in range(n_runs):
# # randomly + equally assign participants with median expertise to above-median segment and below-median segment
# part_50 = median_segment.sample(frac = 0.5, random_state=i)
# # Creating dataframe with rest of the 50% values
# rest_part_50 = median_segment.drop(part_50.index)
# # concatenate portion of median segment to each above-median and below-median segment
# # select emailID column
# above_median_segment_emails = pd.concat([above_median_segment, part_50])['email'].to_list()
# below_median_segment_emails = pd.concat([below_median_segment, rest_part_50])['email'].to_list()
# print(len(above_median_segment_emails), len(below_median_segment_emails))
# above_median_kappa = calculateKappaPerSegment(df_concept_description_preferences, above_median_segment_emails)
# below_median_kappa = calculateKappaPerSegment(df_concept_description_preferences, below_median_segment_emails)
# full_kappa = calculateKappaPerSegment(df_concept_description_preferences, all_emails)
# above_median_kappas.append(above_median_kappa)
# below_median_kappas.append(below_median_kappa)
# # average all the kappas for this concept
# avg_above_median_kappa = mean(above_median_kappas)
# avg_below_median_kappa = mean(below_median_kappas)
# # append to list for all concepts
# above_median_concept_kappas.append(avg_above_median_kappa)
# below_median_concept_kappas.append(avg_below_median_kappa)
# full_concept_kappas.append(full_kappa)
# print(concept, avg_below_median_kappa > full_kappa)
# if (avg_above_median_kappa > full_kappa):
# count+=1
# # print(avg_above_median_kappa)
# # print(avg_below_median_kappa)
# # print(full_kappa)
# print(count)
# # format data for seaborn plot
# newlist = [x for x in above_median_concept_kappas if np.isnan(x) == False]
# print(sum(newlist)/len(newlist))
# print(sum(below_median_concept_kappas)/len(below_median_concept_kappas))
# print(sum(full_concept_kappas)/len(full_concept_kappas))
# kappas_long = above_median_concept_kappas + below_median_concept_kappas + full_concept_kappas
# conditions_long = np.repeat(['above', 'below', 'full'], 20)
# concepts_long = concepts + concepts + concepts
# # combine lists into a df to plot
# df_res_long = pd.DataFrame(list(zip(concepts_long, kappas_long, conditions_long)),
# columns=['concept','kappa', 'condition'])
# df_res_long.to_csv("per-expertise-segment-kappas.csv")
# # # plot
# # sns.catplot(x="concept", y="kappa", hue="condition", kind="bar", data=df_res_long, palette="ch:.25")
# # plt.xticks(rotation='vertical')
# # plt.savefig("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/expertise-segment-kappas-barplot-global-average.png", bbox_inches="tight")
#--------------------------------------------------------
# how often below-median users chose descriptions with error, compared to above-median ones?
# This would help disentangle whether the agreement difference is due to novice users just making error, or something else.
n_runs = 5
for concept in error_analysis_concepts:
# select rows for this concept
df_concept_description_preferences = df_description_preferences[df_description_preferences['concept']==concept]
all_emails = list(set(df_concept_description_preferences['email'].to_list()))
print(df_concept_description_preferences)
# global median
median = 4
# select rows with expertise rating > median
above_median_segment = df_expertise_ratings.loc[(df_expertise_ratings['concept'] == concept) & (df_expertise_ratings['expertise'] > median)]
# select rows with expertise rating < median
below_median_segment = df_expertise_ratings.loc[(df_expertise_ratings['concept'] == concept) & (df_expertise_ratings['expertise'] < median)]
# select rows with expertise rating < median
median_segment = df_expertise_ratings.loc[(df_expertise_ratings['concept'] == concept) & (df_expertise_ratings['expertise'] == median)]
above_median_kappas = []
below_median_kappas = []
for i in range(n_runs):
# randomly + equally assign participants with median expertise to above-median segment and below-median segment
part_50 = median_segment.sample(frac = 0.5, random_state=i)
# Creating dataframe with rest of the 50% values
rest_part_50 = median_segment.drop(part_50.index)
# concatenate portion of median segment to each above-median and below-median segment
# select emailID column
above_median_segment_emails = pd.concat([above_median_segment, part_50])['email'].to_list()
below_median_segment_emails = pd.concat([below_median_segment, rest_part_50])['email'].to_list()
print(len(above_median_segment_emails), len(below_median_segment_emails))
| ACCoRD-main | system/user-study/analyze-study-responses.py |
from os import rename
import pandas as pd
import re
def renameDfColumns(version):
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/study-responses-%s.csv" % version)
# iterate over remaining columns and rename
for label, content in df.iteritems():
if label == "Timestamp":
df.rename(columns={label:"timestamp"},inplace=True)
if label == "Email Address":
df.rename(columns={label:"email"},inplace=True)
if label == "I consider myself proficient in the following areas of computer science:":
df.rename(columns={label:"proficiency_areas"},inplace=True)
if label == "How many years of experience do you have in NLP?":
df.rename(columns={label:"years_experience"},inplace=True)
if label == "Have you published a peer-reviewed academic paper in NLP?":
df.rename(columns={label:"has_published"},inplace=True)
if label == "What is the highest level of education you've completed (in computer science)?":
df.rename(columns={label:"highest_edu"},inplace=True)
# handle label names with concepts
if "[" in label:
result = re.findall('\[.*?\]',label)
if len(result) == 1:
concept = result[0].lower().strip("[]").split()
concept = "".join(concept)
elif len(result) == 2:
concept = result[0].lower().strip("[]").split()
concept = "".join(concept)
question = result[1].strip("[]")
# append question type
if "How well do you know this concept?" in label:
question = "expertise"
if "Imagine that you’re reading a paper" in label:
question = "set_preference"
final_label = concept + "_" + question
df.rename(columns={label:final_label},inplace=True)
# handle free response question
if "Please describe how you evaluated/determined your preference for the above sets of descriptions." in label:
df.rename(columns={label:"set_description_free_text"},inplace=True)
if "Can you explain to me why you preferred certain descriptions over others?" in label:
df.rename(columns={label:"individual_description_free_text"},inplace=True)
if "Here, we asked you to rate each description individually." in label:
df.rename(columns={label:"indivudal_vs_set_free_text"},inplace=True)
print(df.keys())
return df
def mapResponses(df, version):
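    """
    Map the displayed set labels (Set A/B/C) to their underlying conditions for the
    given counterbalanced survey version, tag each row with that version, and
    normalize the per-description preference responses to want/dont_want/no_preference.
    """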
if version == "versionA":
df = df.replace('Set A', 'extractions_our_ranking')
df = df.replace('Set B', 'generations_intuitive_ranking')
df = df.replace('Set C', 'generations_our_ranking')
elif version == "versionB":
df = df.replace('Set A', 'generations_our_ranking')
df = df.replace('Set B', 'extractions_our_ranking')
df = df.replace('Set C', 'generations_intuitive_ranking')
elif version == "versionC":
df = df.replace('Set A', 'generations_intuitive_ranking')
df = df.replace('Set B', 'generations_our_ranking')
df = df.replace('Set C', 'extractions_our_ranking')
# add column for version to each df
df['version'] = [version] * len(df)
# map description preference responses
df = df.replace("👎 I would NOT want to see this description of the concept", "dont_want")
df = df.replace("👍 I would want to see this description of the concept", "want")
df = df.replace("⚫ No preference/opinion", "no_preference")
return df
dfA = renameDfColumns('versionA')
dfB = renameDfColumns('versionB')
dfC = renameDfColumns('versionC')
dfA = mapResponses(dfA, 'versionA')
dfB = mapResponses(dfB, 'versionB')
dfC = mapResponses(dfC, 'versionC')
df_all = pd.concat([dfA, dfB, dfC]).reset_index()
print(df_all)
df_all.to_csv("/net/nfs2.s2-research/soniam/concept-rel/user-study/demo-evaluation/study-responses-all.csv") | ACCoRD-main | system/user-study/aggregate-study-responses.py |
import os
import json
import pandas as pd
import csv
# USE THIS SCRIPT ON SERVER 1 TO ACCESS LOCAL COPY OF S2ORC
# filter papers using metadata values
def getValidPaperIDs(batch_id):
all_valid_ids = []
count = 0
with open(f'/disk2/s2orc/20200705v1/full/metadata//metadata_{batch_id}.jsonl') as f_meta:
for line in f_meta:
metadata_dict = json.loads(line)
paper_id = metadata_dict['paper_id']
# suppose we only care about CS papers and papers with pdf parses available
if (metadata_dict['mag_field_of_study'] == None) or ('Computer Science' not in metadata_dict['mag_field_of_study']):
continue
if not metadata_dict['has_pdf_parse']:
continue
if not metadata_dict['has_pdf_parsed_body_text']:
continue
all_valid_ids.append(paper_id)
count +=1
print("number of valid paper IDs = % d" % count)
return all_valid_ids
# copy full parses of papers that meet our criteria
def copyValidPaperIDs(all_valid_ids, batch_id):
count = 0
for paper_id in all_valid_ids:
count += 1
directory = paper_id[:len(paper_id)-4]
file_name = paper_id[-4:]
if not os.path.exists("./metadata-%s/%s.json" % (batch_id, paper_id)):
os.system("cp /disk2/s2orc/20200705v1/expanded/%s/%s.json ./metadata-%s/%s.json" % (directory, file_name, batch_id, paper_id))
if count % 100 == 0:
print("copied %d papers from metadata %d" % (count, batch_id))
# get desired sections of full pdf parses
def getPaperSections(batch_id):
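    """
    Collect abstract, introduction, related/previous-work, background, and motivation
    paragraphs from the copied pdf parses for this metadata batch, returning one
    record (paper_id, subject, text) per matching paragraph.
    """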
all_text = []
for filename in os.listdir("./metadata-%d/" % batch_id):
path = "./metadata-%d/%s" % (batch_id, filename)
if os.path.exists(path) and os.path.getsize(path) > 0:
with open(path) as f_pdf:
# dict of metadata keys
pdf_parse_dict = json.loads(f_pdf.read())
subject = pdf_parse_dict['mag_field_of_study']
paper_id = pdf_parse_dict['paper_id']
# dict of pdf parse related keys
pdf_parse_dict = pdf_parse_dict['pdf_parse']
# (2) pull out fields we need from the pdf parse, including abstract & text
paragraphs = pdf_parse_dict['abstract'] + pdf_parse_dict['body_text']
# (3) loop over paragraphs
for paragraph in paragraphs:
section_heading = paragraph['section'].lower()
sections_of_interest = []
# sections we care about:
AIR = ['abstract', 'introduction', 'previous', 'related work', 'related literature', 'background', 'motivation']
                    for i in AIR:
                        if i in section_heading:
                            sections_of_interest.append(paragraph)
                            break  # one keyword match is enough; avoids appending the same paragraph twice
# (4) loop over each section in this paragraph
for section in sections_of_interest:
all_text.append({
'paper_id': paper_id,
'subject': subject,
'text': paragraph['text'].encode('ascii', 'ignore').decode('ascii')
})
return all_text
def processText(all_text, output_file):
df = pd.DataFrame.from_dict(all_text)
print("total dataset = %s entries" % len(df))
print("number of unique papers = %d" % len(pd.unique(df['paper_id'])))
print("removing missing values...")
df = df.dropna() # remove missing values
print("total dataset = %s entries" % len(df))
print("number of unique papers = %d" % len(pd.unique(df['paper_id'])))
print("removing duplicates...")
df = df.drop_duplicates(subset=['text'])
print("total dataset = %s entries" % len(df))
print("number of unique papers = %d" % len(pd.unique(df['paper_id'])))
df.to_csv(output_file, index=False)
# main
for batch_id in range(30, 31):
# make a directory for the papers from this batch_id
os.system("mkdir metadata-%d" % batch_id)
all_valid_ids = getValidPaperIDs(batch_id)
copyValidPaperIDs(all_valid_ids, batch_id)
all_text = getPaperSections(batch_id)
processText(all_text, "./text-batch-id-%d.csv" % batch_id)
| ACCoRD-main | system/resources/s2orc/get-cs-papers.py |
import os
import json
# feel free to wrap this into a larger loop for batches 0~99
BATCH_ID = 0
# create a lookup for the pdf parse based on paper ID
paper_id_to_pdf_parse = {}
with open('./pdfparse.jsonl') as f_pdf:
for line in f_pdf:
pdf_parse_dict = json.loads(line)
paper_id_to_pdf_parse[pdf_parse_dict['paper_id']] = pdf_parse_dict
# filter papers using metadata values
all_text = []
count = 0
with open('./metadata.jsonl') as f_meta:
for line in f_meta:
metadata_dict = json.loads(line)
paper_id = metadata_dict['paper_id']
# print("Currently viewing S2ORC paper: "+ paper_id)
# suppose we only care about CS papers
if (metadata_dict['mag_field_of_study'] == None) or ('Computer Science' not in metadata_dict['mag_field_of_study']):
continue
# get citation context (paragraphs)!
if paper_id in paper_id_to_pdf_parse:
print("Currently viewing S2ORC paper: "+ paper_id)
print(metadata_dict['mag_field_of_study'])
print("pdf parse is available")
print("")
# (1) get the full pdf parse from the previously computed lookup dict
pdf_parse = paper_id_to_pdf_parse[paper_id]
# (2) pull out fields we need from the pdf parse, including bibliography & text
paragraphs = pdf_parse['abstract'] + pdf_parse['body_text']
# (3) loop over paragraphs
for paragraph in paragraphs:
# sections we care about:
AIR = ['abstract', 'Abstract', 'INTRODUCTION', 'Introduction', 'RELATED WORK', 'Related Work']
sections_of_interest = []
if paragraph['section'] in AIR:
sections_of_interest.append(paragraph)
# (4) loop over each section in this paragraph
for section in sections_of_interest:
all_text.append({
'paper_id': paper_id,
'subject': metadata_dict['mag_field_of_study'],
'text': paragraph['text'].encode('ascii', 'ignore').decode('ascii')
})
import csv
keys = all_text[0].keys()
with open('cleaned_text.csv', 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_text) | ACCoRD-main | system/resources/s2orc/create-dataset.py |
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import transformers
import matplotlib.pyplot as plt
from transformers import *
import matplotlib.ticker as mticker
import spacy
import ast
import re
import random
def processPhrase(phrase):
if len(phrase.text.split()) > 1:
p = phrase.text.split()
if p[0] in all_stopwords: # if first word is a stop word
phrase_no_stopwords = " ".join(p[1:])
elif p[-1] in all_stopwords: # if last word is a stop word
phrase_no_stopwords = " ".join(p[:-1])
else: # if neither is a stop word
phrase_no_stopwords = phrase.text
# if there is only one word in this phrase, just append the text version of it
else: phrase_no_stopwords = phrase.text
return phrase_no_stopwords
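# Example (illustrative): for a noun chunk like "the transformer architecture", the
# leading stop word "the" is stripped and processPhrase returns "transformer architecture";
# single-token chunks are returned unchanged.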
def main(sentence, separator, forecite_concept):
doc = nlp(sentence)
fc_phrase = ""
for phrase in doc.noun_chunks:
tokens = [t for t in phrase]
phrase_key = " ".join([t.lemma_ for t in tokens if not (t.is_stop)])
if phrase_key == forecite_concept:
fc_phrase = processPhrase(phrase)
break
# get starting index of this phrase in the sentence
# get an iterable object containing the start and end indices of each occurrence of pattern in string
    matches = re.finditer(re.escape(fc_phrase), sentence)  # escape the phrase so any regex metacharacters in it are matched literally
# get a list containing only the start indices.
matches_positions = [match.start() for match in matches]
# if there are matches
if len(matches_positions) > 0:
start = matches_positions[0]
txt = sentence[:start] + separator + " " + sentence[start:start+len(fc_phrase)] + " " + separator + sentence[start+len(fc_phrase):]
else:
txt = "concept not found"
return txt
#----------------------------------------------------------
# set up sentencizer
nlp = spacy.load("en_core_web_md")
tokenizer = nlp.tokenizer
nlp.add_pipe("sentencizer")
all_stopwords = nlp.Defaults.stop_words
separator = "§"
# # ex 1
# sentence = "neural networks with long short-term memory (lstm), which have emerged as effective and scalable model for several learning problems related to sequential data (e.g., handwriting recognition, speech recognition, human activity recognition and traffic prediction), and it does not suffer from the effect of the vanishing or exploding gradient problem as simple recurrent networks do [1] ."
# forecite_concept = "long short - term memory"
# print(main(sentence, separator, forecite_concept))
# # ex 2
# sentence = "person reidentification is a task of recognizing person based on appearance matching under different camera views."
# forecite_concept = "different camera view"
# print(main(sentence, separator, forecite_concept))
# # ex 3
# sentence = "according to google, the tpu can compute neural networks up to 30x faster and up to 80x more power efficient than cpu's or gpu's performing similar applications [6] . the tpu excels because its hardware processing flow is specifically adapted to the inference problem it solves."
# forecite_concept = "tpu"
# print(main(sentence, separator, forecite_concept))
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/union-test-from-heddex-results.csv")
demarcated_sentences = []
for index, row in df.iterrows():
demarcated_sentence = main(row['sentence'], separator, row['forecite_concept'])
print(row['sentence'])
print(demarcated_sentence)
print()
demarcated_sentences.append(demarcated_sentence)
df['sentence'] = demarcated_sentences
print(df)
df.to_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/union-test-from-heddex-results-%s.csv" % separator)
| ACCoRD-main | system/resources/forecite/locate-forecite-concepts.py |
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import collections
import ast
# import seaborn as sns
#----------------------------------------------------------
# get count of concepts in output
#----------------------------------------------------------
# count number of each concept in my extracted sentences
def getConceptOccurrences(df):
all_concepts = []
for index, row in df.iterrows():
        concepts = list(ast.literal_eval(row['forecite_concepts']))  # literal_eval is safer than eval for parsing the stored set strings
all_concepts.extend(concepts)
occurrences = collections.Counter(all_concepts)
return occurrences
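# Illustrative output: if one row's 'forecite_concepts' column holds {'bert'} and another's
# holds {'bert', 'lstm'}, getConceptOccurrences returns Counter({'bert': 2, 'lstm': 1}).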
# plot counts of concepts present in my extracted sentences
def makePlottableDF(occurrences):
# format data
scores = []
counts = []
concepts = []
for k in occurrences.keys():
concepts.append(k)
scores.append(concept_dict[k])
counts.append(occurrences[k])
df_output = pd.DataFrame(list(zip(concepts, scores, counts)),
columns =['concept', 'score', 'count'])
return df_output
def makePlot(occurrences, filepath):
# make plottable df
df = makePlottableDF(occurrences)
# plot scores vs. counts for each concept
plot = sns.scatterplot(data=df, x="score", y="count")
for index, row in df.iterrows():
plot.text(x=row['score']+0.1, y=row['count']+0.1, s=row['concept'], fontsize=6)
plot.figure.savefig(filepath)
#-----------------------------
# unpickle concept dictionary
with open('forecite_concept_dict.pickle', 'rb') as handle:
concept_dict = pickle.load(handle)
#-----------------------------
# data for noun phrase (np) level concepts
df_np = pd.read_csv("./score-threshold-1-0/single-sentence-noun-phrases.csv")
# data for word-level substrings (ss) of noun phrases
df_ss = pd.read_csv("./score-threshold-1-0/single-sentence-all-substrings.csv")
ss_occurrences = getConceptOccurrences(df_ss)
np_occurrences = getConceptOccurrences(df_np)
# print("plotting noun phrase level counts vs. scores...")
# makePlot(np_occurrences, "./score-threshold-1-0/noun-phrase-level-concepts-scatter.png")
# print("done")
# print("plotting substring level counts vs. scores...")
# makePlot(ss_occurrences, "./score-threshold-1-0/substring-level-concepts-scatter.png")
# print("done")
#-----------------------------
# get sentence lengths
df = pd.read_csv("./score-threshold-1-0/single-sentence-min1-noun-phrase.csv")
text = df['sentence']
# plot sentence lengths
seq_len = [len(i.split()) for i in text]
pd.Series(seq_len).hist(bins = 30)
plt.savefig('single-sentence-min1-np-lengths.png')
count = 0
# remove sentences with length < 10
for index, row in df.iterrows():
if len(row['sentence'].split()) < 10:
print(row['sentence'])
print(row['forecite_concepts'])
count += 1
df = df.drop(index)
print(df)
df.to_csv("./score-threshold-1-0/single-sentence-min1-noun-phrase.csv")
| ACCoRD-main | system/resources/forecite/get-forecite-stats.py |
import os
import json
import pandas as pd
import pickle
concept_dict = {}
# for files in the data directory
count = 1
for filename in os.listdir("/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/noun-phrase-scores-top-papers/"):
print("getting concepts for file %d" % count)
if filename.endswith(".json"):
# open the json file
with open("/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/noun-phrase-scores-top-papers/%s" % filename) as f:
# iterate over lines in this file
for line in f:
data = json.loads(line)
concept = data['phrase']
score = data['score']
n = data['n']
paper_id = data['corpus_paper_id']
concept_dict[concept] = (score, n, paper_id)
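                # e.g. concept_dict['bert'] -> (score, n, corpus_paper_id), something like (1.87, 2431, 52967399)
                # (the numbers here are illustrative, not actual ForeCite values)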
count += 1
with open('./forecite_concept_score_count_dict.pickle', 'wb') as handle:
pickle.dump(concept_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("pickled concept dictionary")
# with open('concept-scores-counts.json', 'w') as fp:
# json.dump(concept_dict, fp) | ACCoRD-main | system/resources/forecite/get-forecite-concepts.py |
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import spacy
import re
from collections import Counter
import random
import sys
#----------------------------------------------------------
# process ForeCite concepts
#----------------------------------------------------------
# unpickle concept dictionary
with open('forecite_concept_dict.pickle', 'rb') as handle:
concept_dict = pickle.load(handle)
print("unpickled forecite concept dictionary")
# print(concept_dict["k - near neighbor"]) # found => lemmatization is correct
# print(concept_dict["k-nearest neighbor"]) # not found
# # plot histogram of scores
# n, bins, patches = plt.hist(concept_dict.values(), 10, density=True, facecolor='g', alpha=0.75)
# print(len(set(concept_dict.values())))
# print(max(concept_dict.values()))
# print(min(concept_dict.values()))
# plt.xlabel('ForeCite score')
# plt.ylabel('Count')
# plt.savefig('scores-histogram.png')
# get concepts with score above a certain threshold
score_threshold = 1.0
concepts_above_threshold = [k for k,v in concept_dict.items() if float(v) >= score_threshold]
concept_set = set(concepts_above_threshold)
print("%d concepts with score above %2f" % (len(concept_set), score_threshold))
# max_length = 0
# max_word = ""
# lens = []
# for c in concepts_above_threshold:
# c = c.split()
# lens.append(len(c))
# if len(c) > max_length:
# max_length = len(c)
# max_word = c
# print("max length of concepts = %d" % (max_length))
# print(max_word)
# print()
# plt.hist(lens)
# plt.savefig("concept-length-hist.png")
#----------------------------------------------------------
# helper functions
#----------------------------------------------------------
def getPhraseSubstrings(doc):
# get spacy noun chunks for each sentence
phrase_keys = []
for phrase in doc.noun_chunks:
# iterate over tokens and get all word substrings
for i in range(len(phrase)):
for j in range(i, len(phrase)+1):
tokens = [t for t in phrase[i:j]]
lemmatized = [t.lemma_ for t in tokens if not (t.is_stop)] # lemmatize
phrase_key = " ".join(lemmatized)
phrase_keys.append(phrase_key)
return phrase_keys
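# Illustrative example: for a noun chunk like "a deep neural network", the lemmatized,
# stop-word-filtered substring keys include "deep", "neural", "network", "deep neural",
# "neural network" and "deep neural network" (plus empty strings for spans consisting
# entirely of stop words).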
def getPhrases(doc):
phrase_keys = []
for phrase in doc.noun_chunks:
tokens = [t for t in phrase]
phrase_key = " ".join([t.lemma_ for t in tokens if not (t.is_stop)])
phrase_keys.append(phrase_key)
return phrase_keys
#----------------------------------------------------------
# get sentences with two or more concepts
df_text_0 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/s2orc/text-batch-id-0.csv")
df_text_1 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/s2orc/text-batch-id-1.csv")
df_text_2 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/s2orc/text-batch-id-2.csv")
df_text_3 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/s2orc/text-batch-id-3.csv")
df_text_4 = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/s2orc/text-batch-id-4.csv")
df = pd.concat([df_text_0, df_text_1, df_text_2, df_text_3, df_text_4])  # include batch 0, loaded above, so the count below really covers all batches
print("total rows across all batches = %d" % len(df))
# set up sentencizer
nlp = spacy.load("en_core_web_md")
tokenizer = nlp.tokenizer
nlp.add_pipe("sentencizer")
# test_id = 27044448
# df_paper = df.loc[df['paper_id'] == test_id]
#----------------------------------------------------------
# 2 sentence: only one sentence has a forecite concept
#----------------------------------------------------------
# # get paper ids from final 1- and 2-sentence annotations and don't include those in this dataset
# df1 = pd.read_csv("../../annotations-round2/single-sentence-final-§.csv")
# df2 = pd.read_csv("../../annotations-round2/2-sentence-final-§.csv")
# df1_paper_ids = set(df1['paper_id'])
# df2_paper_ids = set(df2['paper_id'])
# all_annotated_paper_ids = df1_paper_ids | df2_paper_ids
def twoSentenceOneConcept(batch_id):
# iterate over rows in df (paragraph level)
num_sents = 0
paper_ids = []
sentences = []
sentences_cased = []
concepts = []
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/s2orc/text-batch-id-%d.csv" % batch_id)
print("%d rows in batch %d" % (len(df), batch_id))
for index, row in df.iterrows():
# progress update
if index % 100 == 0: print('...processing row %d' % (index))
# # if this paper_id doesn't already have an annotation
# if row['paper_id'] not in all_annotated_paper_ids:
# check that row is not na
if pd.notna(row['text']):
doc = nlp(row['text'])
sents = list(doc.sents)
# iterate over sentences
for s in range(1, len(sents)-1):
num_sents += 1
count = 0
previous_sent = sents[s-1].text.lower()
current_sent = sents[s].text.lower()
next_sent = sents[s+1].text.lower()
previous_sent_cased = sents[s-1].text
current_sent_cased = sents[s].text
next_sent_cased = sents[s+1].text
# if the context sentences are too short to be good, just continue
if (len(previous_sent) < 5) or (len(next_sent) < 5):
continue
else:
# # get spacy noun chunks
# curr_sent_concepts = set(getPhrases(nlp(current_sent)))
# # get intersection of sentence concepts and all concepts
# curr_intersection = concept_set.intersection(curr_sent_concepts)
# generate random number to pick whether prev or next sentence gets appended as context
num = random.randint(1, 2)
if num == 1:
concat = previous_sent + " " + current_sent
concat_cased = previous_sent_cased + " " + current_sent_cased
if num == 2:
concat = current_sent + " " + next_sent
concat_cased = current_sent_cased + " " + next_sent_cased
# get spacy noun chunks
concat_sent_concepts = set(getPhrases(nlp(concat)))
# get intersection of sentence concepts and all concepts
concat_intersection = concept_set.intersection(concat_sent_concepts)
if len(concat_intersection) > 0:
paper_ids.append(row['paper_id'])
sentences.append(concat)
sentences_cased.append(concat_cased)
concepts.append(concat_intersection)
# if na skip over this row
else: continue
print("%d sentences in %d rows" % (num_sents, index))
# make final output df
df_output = pd.DataFrame(list(zip(paper_ids, sentences, sentences_cased, concepts)),
columns =['paper_id', 'sentence', 'sentence_original_case', 'forecite_concepts'])
print(df_output)
df_output.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-batch-%d.csv" % batch_id)
id = int(sys.argv[1])
twoSentenceOneConcept(id)
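# Assumed usage: the batch id is passed as the only command-line argument, e.g.
#   python get-sentences-topn-concepts.py 7
# which writes all-2sentence-1concept-rows-batch-7.csv to the output directory above.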
# # make final output df for cased version-
# df_output = pd.DataFrame(list(zip(paper_ids, sentences_cased, concepts)),
# columns =['paper_id', 'sentence', 'forecite_concepts'])
# print(df_output)
# df_output.to_csv("./score-threshold-1-0/2-sentence-only-one-sent-has-fc-concept-originalcase.csv")
# #----------------------------------------------------------
# # 3 sentence: noun phrase level, middle sentence has forecite concept
# #----------------------------------------------------------
# # get paper ids from final 1- and 2-sentence annotations and don't include those in this dataset
# df1 = pd.read_csv("../../annotations-round2/single-sentence-final-§.csv")
# df2 = pd.read_csv("../../annotations-round2/2-sentence-final-§.csv")
# df1_paper_ids = set(df1['paper_id'])
# df2_paper_ids = set(df2['paper_id'])
# all_annotated_paper_ids = df1_paper_ids | df2_paper_ids
# for index, row in df.iterrows():
# # progress update
# if index % 100 == 0: print('...processing row %d' % (index))
# # if this paper_id doesn't already have an annotation
# if row['paper_id'] not in all_annotated_paper_ids:
# # check that row is not na
# if pd.notna(row['text']):
# doc = nlp(row['text'])
# sents = list(doc.sents)
# # iterate over sentences
# for s in range(1, len(sents)-1):
# num_sents += 1
# count = 0
# previous_sent = sents[s-1].text.lower()
# current_sent = sents[s].text.lower()
# next_sent = sents[s+1].text.lower()
# # if the context sentences are too short to be good, just continue
# if (len(previous_sent) < 5) or (len(next_sent) < 5):
# continue
# else:
# # get spacy noun chunks
# curr_sent_concepts = set(getPhrases(nlp(current_sent)))
# # get intersection of sentence concepts and all concepts
# curr_intersection = concept_set.intersection(curr_sent_concepts)
# concat = previous_sent + " " + current_sent + " " + next_sent
# if len(curr_intersection) > 0:
# paper_ids.append(row['paper_id'])
# sentences.append(concat)
# concepts.append(curr_intersection)
# # if na skip over this row
# else: continue
# print("%d sentences in %d rows" % (num_sents, index))
# # make final output df
# df_output = pd.DataFrame(list(zip(paper_ids, sentences, concepts)),
# columns =['paper_id', 'sentence', 'forecite_concepts'])
# print(df_output)
# df_output.to_csv("./score-threshold-1-0/3-sentence-min1-noun-phrase.csv")
#----------------------------------------------------------
# 2 continguous sentences: noun phrase level and substring level
#----------------------------------------------------------
# sentence1_concepts = []
# sentence2_concepts = []
# for index, row in df.iterrows():
# # progress update
# if index % 100 == 0:
# print('...processing row %d' % (index))
# # check that row is not na
# if pd.notna(row['text']):
# doc = nlp(row['text'])
# sents = list(doc.sents)
# # iterate over sentences
# for s in range(len(sents)-1):
# num_sents += 1
# count = 0
# current_sent = sents[s].text.lower()
# next_sent = sents[s+1].text.lower()
# current_sent_cased = sents[s].text
# next_sent_cased = sents[s+1].text
# # Option 1: get spacy noun chunks
# curr_sent_concepts = set(getPhrases(nlp(current_sent)))
# next_sent_concepts = set(getPhrases(nlp(next_sent)))
# # Option 2: get word-level substrings for spacy noun chunks
# # curr_sent_concepts = set(getPhraseSubstrings(nlp(current_sent)))
# # next_sent_concepts = set(getPhraseSubstrings(nlp(next_sent)))
# # get intersection of sentence concepts and all concepts
# curr_intersection = concept_set.intersection(curr_sent_concepts)
# next_intersection = concept_set.intersection(next_sent_concepts)
# # if row['paper_id'] == 12205351:
# # print(curr_intersection)
# # print(next_intersection)
# concat = current_sent_cased + " " + next_sent_cased
# if len(curr_intersection) > 0 and len(next_intersection) > 0:
# paper_ids.append(row['paper_id'])
# sentences.append(concat)
# sentence1_concepts.append(curr_intersection)
# sentence2_concepts.append(next_intersection)
# # if na skip over this row
# else: continue
# print("%d sentences in %d rows" % (num_sents, index))
# # make final output df
# df_output = pd.DataFrame(list(zip(paper_ids, sentences, sentence1_concepts, sentence2_concepts)),
# columns =['paper_id', 'sentences', 'sentence1_concepts', 'sentence2_concepts'])
# print(df_output)
# df_output.to_csv("./score-threshold-1-0/2-contiguous-sentences-noun-phrases-only-originalcase.csv")
#----------------------------------------------------------
# single sentence: noun phrase level and substring level
#----------------------------------------------------------
# count = 0
# num_1concept = 0
# for index, row in df.iterrows():
# # progress update
# if index % 100 == 0:
# print('...processing row %d' % (index))
# # check that row is not na
# if pd.notna(row['text']):
# doc = nlp(row['text'])
# # iterate over sentences
# for sent in doc.sents:
# num_sents += 1
# # count = 0
# text = sent.text.lower()
# # get noun phrase level concepts
# phrase_keys = getPhrases(nlp(text))
# # # get all word-level substrings concepts
# # phrase_keys = getPhraseSubstrings(nlp(text))
# # make set out of phrases for this sentence
# sentence_concept_set = set(phrase_keys)
# # get intersection of sentence concepts and all concepts
# intersection = concept_set.intersection(sentence_concept_set)
# # get all single sentences with 1+ noun phrase
# if len(intersection) > 0:
# count+=1
# if len(intersection) == 1:
# num_1concept += 1
# paper_ids.append(row['paper_id'])
# concepts.append(intersection)
# sentences.append(sent.text)
# # if na skip over this row
# else: continue
# print(count)
# print(num_1concept)
# print("%d sentences in %d rows" % (num_sents, index))
# # make final output df
# df_output = pd.DataFrame(list(zip(paper_ids, sentences, concepts)),
# columns =['paper_id', 'sentence', 'forecite_concepts'])
# print(df_output)
# df_output.to_csv("./score-threshold-1-0/single-sentence-min1-noun-phrase-originalcase.csv")
| ACCoRD-main | system/resources/forecite/get-sentences-topn-concepts.py |
import pickle
import pandas as pd
from operator import itemgetter
import numpy as np
import openai
from rouge_score import rouge_scorer
import spacy
import re
import os
#--------------------------------------------------------
openai.api_key = "sk-NzQrkRfqE5lnJPubH7faej1ZcDuz0s40qCkTTeFt"
pd.set_option('display.max_colwidth', None)
#--------------------------------------------------------
# unpickle concept dictionary
with open('/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/forecite_concept_dict.pickle', 'rb') as handle:
concept_dict = pickle.load(handle)
print("...unpickled forecite concept dictionary")
print(len(concept_dict))
# Initialize N
N = 100000
# N largest values in dictionary
# Using sorted() + itemgetter() + items()
res = dict(sorted(concept_dict.items(), key = itemgetter(1), reverse = True)[:N])
# printing result
# print("The top N value pairs are " + str(res))
concept_list = list(res.keys())
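# Illustrative result: `res` keeps the N highest-scoring concepts as {concept: score},
# e.g. {'bert': 5.2, 'word2vec': 4.9, ...} (scores made up), and `concept_list` is just
# those concept strings in descending score order.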
#--------------------------------------------------------
# CONCEPT LIST
#--------------------------------------------------------
selected_nlp_concepts = ['adversarial training', 'beam search', 'bert', 'elmo', 'gpt', 'glove', 'word2vec', 'resnet', 'domain shift', 'ulmfit', 'newsqa', 'squad', 'imagenet', 'lstm', 'roberta', 'variational autoencoder', 'dropout', 'fasttext', 'hierarchical softmax', 'distant supervision']
#--------------------------------------------------------
df_nlp_concepts = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/nlp_concepts.csv")
nlp_concept_scores = []
for index, row in df_nlp_concepts.iterrows():
# get forecite score for each nlp concept
nlp_concept_scores.append(concept_dict[row['concept']])
df_nlp_concepts['score'] = nlp_concept_scores
df_nlp_concepts = df_nlp_concepts[~df_nlp_concepts.concept.isin(['bert model', 'pre - train bert model', 'pre - train bert', 'moses', 'cho et al', 'dagan', 'yarowsky', 'hochreiter', 'turney', 'ney', 'och', 'grosz', 'steedman', 'well translation'])]
# # top 150 nlp concepts by score
# top150_nlp_concepts = df_nlp_concepts.sort_values(by='score', ascending=False)[:150]['concept'].tolist()
# top 150 nlp concepts by num_papers
top150_nlp_concepts = df_nlp_concepts.sort_values(by='num_papers', ascending=False)[:150]['concept'].tolist()
#--------------------------------------------------------
#--------------------------------------------------------
relations = ['compare', 'isa']
version = "v1-4class"
concept_set = "nlp-concepts"
df_all = pd.DataFrame()
for concept in top150_nlp_concepts[:20]:
our_extractions_top6 = []
our_generations_top6 = []
df_concept = pd.DataFrame()
for relation in relations:
print(concept, relation)
# load data
df_concept_relation = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/%s/gpt-formatted-statements-all-s2orc-%s-%s.csv" % (version, concept_set, concept, relation))
df_concept_relation = df_concept_relation.loc[:, ~df_concept_relation.columns.str.contains('^Unnamed')]
df_concept_relation = df_concept_relation.dropna()
if len(df_concept_relation) != 0:
# heuristic #1: remove rows with "our" in the text because we don't want references to things that can't be resolved
            df_concept_relation = df_concept_relation[~df_concept_relation['gpt_generation'].str.contains(r"\bour\b")]  # word-boundary match so words like "four" are not filtered out
# heuristic #2: remove rows with "et al" in the concept b because we don't want authors names as the concept b
df_concept_relation = df_concept_relation[~df_concept_relation['concept_b'].str.contains("et al")]
# heuristic #3: remove rows where the conceptA occurs more than once in the generation because we don't want to explain the concept in terms of itself
for index, row in df_concept_relation.iterrows():
if row['gpt_generation'].count(concept) > 1:
df_concept_relation = df_concept_relation.drop(index)
#--------------------------------------------------------
# append this concept-relation specific df to a df for the concept so we can select top 6 by multilabel pred score later
df_concept = df_concept.append(df_concept_relation)
basic_ranking_top6 = df_concept_relation.sort_values(by=['max_multilabel_pred_score'])['gpt_generation'][:6]
#--------------------------------------------------------
# select rows that have conceptB == forecite concept
df_concept_relation = df_concept_relation[df_concept_relation['is_conceptb_forecite']==1]
# count prevalence of concept Bs
df_sorted_by_conceptB_counts = df_concept_relation.groupby(['concept_b'])['concept_b'].count().to_frame(name='count').sort_values(['count'], ascending=False).apply(lambda x: x)
# get frequency of concept Bs
df_sorted_by_conceptB_frequency = df_concept_relation['concept_b'].value_counts(normalize=True).to_frame(name='concept_b_frequency').sort_values(['concept_b_frequency'], ascending=False).apply(lambda x: x)
df_sorted_by_conceptB_frequency = df_sorted_by_conceptB_frequency.reset_index().rename(columns={'index':'concept_b'})
# print(df_sorted_by_conceptB_frequency)
# group by conceptB --> select the rows with the max multilabel pred score
df_concept_relation_groupby_conceptB_maxpredscore = df_concept_relation.loc[df_concept_relation.groupby("concept_b")['%s_pred_score' % relation].idxmax()]
# print(df_concept_relation_groupby_conceptB_maxpredscore)
# merge with concept_b frequencies df on concept_b
result = pd.merge(df_sorted_by_conceptB_frequency, df_concept_relation_groupby_conceptB_maxpredscore, on="concept_b")
# #save all ranked-filtered descriptions
# result.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s-%s.csv" % (version, concept_set, concept, relation))
# result.to_json("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s-%s.json" % (version, concept_set, concept, relation), orient="records")
#--------------------------------------------------------
# save top 3
if len(result) < 3:
top = len(result)
else:
top = 3
result = result[:top]
print(result)
# result.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s-%s-top.csv" % (version, concept_set, concept, relation))
# result.to_json("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s-%s-top.json" % (version, concept_set, concept, relation), orient="records")
df_all = df_all.append(result)
#--------------------------------------------------------
# get top 3 descriptions for this concept-relation pair
our_generations_top3 = result['gpt_generation'][:top]
our_extractions_top3 = result['original_sentence'][:top]
# add it to the set of top descriptions for this concept
our_generations_top6.extend(our_generations_top3)
our_extractions_top6.extend(our_extractions_top3)
#--------------------------------------------------------
for i in range(len(our_generations_top6)):
print("extraction: %s" % our_extractions_top6[i])
print("generation: %s" % our_generations_top6[i])
print("")
# # FOR USER STUDY
# # sort the df that includes the rows for all the relations by the max_multilabel_pred_score and select the top 6 generations
# df_concept = df_concept.sort_values(by=['max_multilabel_pred_score'])['gpt_generation'][:6]
# df_survey = pd.DataFrame(list(zip(our_extractions_top6, basic_ranking_top6, our_generations_top6)), columns =['Set A', 'Set B', 'Set C'])
# df_survey.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/top5-sets/%s.csv" % concept)
# df_all.reset_index(drop=True, inplace=True)
# print(df_all)
# fn = "top-150-nlp-concepts"
# df_all.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s.csv" % (version, concept_set, fn))
# df_all.to_json("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s.json" % (version, concept_set, fn), orient="records")
| ACCoRD-main | system/code/selection/rank-concept-gpt-formatted-statements.py |
import pandas as pd
import random
#------------------------------------------------------------------------------------------------
# filter output from top 150 NLP concepts to only those concepts with 3+ descriptions
# format demo data to only include [concept B] [elaboration]
# INPUT = ../ranked-filtered/nlp-concepts/top-150-nlp-concepts.csv
# OUTPUT = ../demo-data/top-nlp-concepts-with-gt-2-descriptions.csv and "".json
# or = ../error-analysis-data/top-nlp-concepts-with-gt-2-descriptions.csv and "".json
#------------------------------------------------------------------------------------------------
data_version = 'demo-data'
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/ranked-filtered/nlp-concepts/top-150-nlp-concepts.csv")
print(df.keys())
# # select rows for error analysis spreadsheet
# df = df[['paper_id','forecite_concept', 'relation', 'concept_b', 'original_sentence', 'gpt_generation']]
# make paper_id into link
df['paper_id'] = 'api.semanticscholar.org/CorpusId:' + df['paper_id'].astype(str)
print(df)
# select rows where forecite_concept has 3+ rows (value_counts > 2)
v = df['forecite_concept'].value_counts()
df = df[df['forecite_concept'].isin(v.index[v.gt(2)])]
print("number of forecite concepts with at least 3 entries: %d" % len(df['forecite_concept'].value_counts().to_list()))
print(df['forecite_concept'].value_counts())
# remove forecite concept from gpt generation for demo
df['gpt_generation_demo'] = df.apply(lambda L: L['gpt_generation'].replace(L['forecite_concept'] + " ", ''), axis=1)
df['gpt_generation_demo'] = df.apply(lambda L: L['gpt_generation_demo'].replace('is a ', ''), axis=1)
df['gpt_generation_demo'] = df.apply(lambda L: L['gpt_generation_demo'].replace('is like ', ''), axis=1)
print(df['gpt_generation_demo'])
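# Illustrative transformation: with forecite_concept "bert", a generation such as
# "bert is a language model that conditions on both left and right context" becomes
# "language model that conditions on both left and right context" once the concept
# and the "is a " / "is like " prefixes are stripped.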
ids = df['forecite_concept'].unique()
random.Random(42).shuffle(ids)
df = df.set_index('forecite_concept').loc[ids].reset_index()
print(df)
# save filtered data for demo and error analysis
df.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/%s/top-nlp-concepts-with-gt-2-descriptions.csv" % data_version)
df.to_json("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/%s/top-nlp-concepts-with-gt-2-descriptions.json" % data_version, orient="records") | ACCoRD-main | system/code/selection/filter-format-demo-data.py |
import pandas as pd
import ast
import numpy as np
experiment = "best-params-all-s2orc"
df_preds = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/scibert-multilabel-classification/run-best-model/predictions/scibert-weightedBCE-cls/%s/seed=1-epochs=10-lr=0.000020-bs=32-%s.csv" % (experiment, experiment))
df_train_val = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/union-1sentence-both2sentence/union-multilabel-data-§.csv")
# we don't want to consider the paper_ids that were in the training and val sets
paper_ids_to_exclude = df_train_val['paper_id']
df_test = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-demarcated.csv")
# df_test_undemarcated = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows.csv")
# get paper ids and forecite_concepts from test data used in the best scibert-multilabel run
paper_ids = df_test['paper_id']
forecite_concepts = df_test['forecite_concept']
lowercase_sentences = df_test['original_sentence']
# add these paper_ids and forecite_concepts as a column to the predictions df
df_preds['paper_id'] = paper_ids
df_preds['forecite_concept'] = forecite_concepts
df_preds['sentence_lowercase'] = lowercase_sentences
# print(len(df_preds))
# # remove paper_ids in train/val set from predictions
# df_preds = df_preds[~df_preds['paper_id'].isin(paper_ids_to_exclude)]
# print(len(df_preds))
# convert string predictions to list
demarcated_sentences = df_preds['sentence'].tolist()
paper_ids = df_preds['paper_id'].tolist()
all_preds = []
for index, row in df_preds.iterrows():
temp = row['scibert_pred'].strip("[]").split()
preds = []
for t in temp:
preds.append(float(t))
all_preds.append(preds)
all_preds = np.array(all_preds)
def getSortedPredictions(all_preds, column_to_sort, category):
# sort on column_to_sort of all predictions and then reverse the array to get max -> min
argsort_indices = np.argsort(all_preds[:,column_to_sort])[::-1]
sorted_demarcated_sentences = np.array(demarcated_sentences)[argsort_indices]
sorted_lowercase_sentences = np.array(lowercase_sentences)[argsort_indices]
sorted_paper_ids = np.array(paper_ids)[argsort_indices]
sorted_forecite_concepts = np.array(forecite_concepts)[argsort_indices]
sorted_preds = all_preds[argsort_indices]
# top20_sentences = sorted_sentences[:20]
# top20_preds = sorted_preds[:20]
df_output = pd.DataFrame(list(zip(sorted_paper_ids, sorted_lowercase_sentences, sorted_demarcated_sentences, sorted_forecite_concepts, sorted_preds)),
columns =['%s_paper_ids' % category, '%s_lowercase_sentences' % category, '%s_demarcated_sentences' % category, '%s_forecite_concepts' % category, '%s_preds' % category])
print(df_output)
df_output.to_csv("/net/nfs2.s2-research/soniam/concept-rel/scibert-multilabel-classification/run-best-model/predictions/scibert-weightedBCE-cls/%s/sorted-predictions-seed=1-epochs=10-lr=0.000020-bs=32-%s.csv" % (experiment, category))
return sorted_paper_ids, sorted_lowercase_sentences, sorted_demarcated_sentences, sorted_forecite_concepts, sorted_preds
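# Illustrative: np.argsort([0.2, 0.9, 0.5])[::-1] -> array([1, 2, 0]), so after reindexing,
# the row with the highest prediction score for the chosen relation column comes first.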
# ['compare' 'is-a' 'part-of' 'used-for']
# sorted_compare_paper_ids, sorted_compare_lowercase_sentences, sorted_compare_demarcated_sentences, sorted_compare_forecite_concepts, sorted_compare_preds = getSortedPredictions(all_preds, 0, 'compare') # compare = column 0
# sorted_partof_paper_ids, sorted_partof_lowercase_sentences, sorted_partof_demarcated_sentences, sorted_partof_forecite_concepts, sorted_partof_preds = getSortedPredictions(all_preds, 2, 'partof') # part-of = column 2
# sorted_usedfor_paper_ids, sorted_usedfor_lowercase_sentences, sorted_usedfor_demarcated_sentences, sorted_usedfor_forecite_concepts, sorted_usedfor_preds = getSortedPredictions(all_preds, 3, 'usedfor') # used-for = column 3
# getSortedPredictions(all_preds, 0, 'compare') # compare = column 0
getSortedPredictions(all_preds, 2, 'partof') # part-of = column 2
getSortedPredictions(all_preds, 3, 'usedfor') # used-for = column 3
# df_output = pd.DataFrame(list(zip(sorted_compare_paper_ids, sorted_compare_sentences, sorted_compare_preds, sorted_partof_paper_ids, sorted_partof_sentences, sorted_partof_preds, sorted_usedfor_paper_ids, sorted_usedfor_sentences, sorted_usedfor_preds)),
# columns =['compare_paper_id', 'compare_sentence', 'compare_pred', 'partof_paper_id', 'partof_sentence', 'partof_pred', 'usedfor_paper_id', 'usedfor_sentence', 'usedfor_pred'])
# print(df_output)
# df_output.to_csv("/net/nfs2.s2-research/soniam/concept-rel/scibert-multilabel-classification/run-best-model/predictions/scibert-weightedBCE-cls/%s/sorted-predictions-seed=1-epochs=1-lr=0.000020-bs=32-%s.csv" % (experiment, experiment))
| ACCoRD-main | system/code/extraction/rank-multilabel-predictions.py |
# Hyperparameter search for scibert model
# Code borrowed from:
# https://colab.research.google.com/drive/14Ea4lIzsn5EFvPpYKtWStXEByT9qmbkj?usp=sharing#scrollTo=qAYbKDu4UR6M
# https://github.com/pnageshkar/NLP/blob/master/Medium/Multi_label_Classification_BERT_Lightning.ipynb
# https://pytorch-lightning.readthedocs.io/en/latest/starter/introduction_guide.html
# https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html
#----------------------------------------------------------
import pandas as pd
import numpy as np
import random
import torchmetrics
import os.path
from os import path
from tqdm.auto import tqdm
import ipdb
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import transformers
from transformers import *
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy, f1, auroc
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, multilabel_confusion_matrix
from sklearn.model_selection import KFold, GroupShuffleSplit
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
from matplotlib import rc
import json
#----------------------------------------------------------
LABEL = 'is_relational'
# convert raw text into list of tokens using tokenizer
MODEL_NAME = 'scibert'
LOSS_NAME = 'softf1'
EMBEDDING_TYPE = 'cls'
MODEL_PATH = 'allenai/scibert_scivocab_uncased'
NUM_TOKENS = 512
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
#----------------------------------------------------------
# wrap tokenization process in a PyTorch Dataset, along with converting the labels to tensors
class RelationalDataset(Dataset):
def __init__(self, data: pd.DataFrame, tokenizer: tokenizer, max_token_len: int = 128):
self.tokenizer = tokenizer
self.data = data
self.max_token_len = max_token_len
def __len__(self):
return len(self.data)
def __getitem__(self, index: int):
data_row = self.data.iloc[index]
sentence = data_row.sentence
paper_id = data_row.paper_id
labels = data_row[LABEL]
encoding = self.tokenizer.encode_plus(
sentence,
add_special_tokens=True,
max_length=self.max_token_len,
return_token_type_ids=False,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_tensors='pt',
)
input_ids = encoding['input_ids'].flatten()
attention_mask = encoding['attention_mask'].flatten()
return {
'paper_id': paper_id,
'sentence': sentence,
'input_ids': input_ids,
'attention_mask': attention_mask,
'labels': torch.tensor(labels, dtype=torch.long)
}
#----------------------------------------------------------
# wrap custom dataset into LightningDataModule
class RelationalDataModule(pl.LightningDataModule):
def __init__(self, train_df, val_df, tokenizer, batch_size, max_token_len):
super().__init__()
self.batch_size = batch_size
self.train_df = train_df
self.val_df = val_df
self.tokenizer = tokenizer
self.max_token_len = max_token_len
def setup(self, stage=None):
self.train_dataset = RelationalDataset(self.train_df, self.tokenizer, self.max_token_len)
self.val_dataset = RelationalDataset(self.val_df, self.tokenizer, self.max_token_len)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=2)
def test_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=2)
#----------------------------------------------------------
class RelationalClassifier(pl.LightningModule):
def __init__(self, n_classes: int, model_seed=None, num_epochs=None, n_training_steps=None, n_warmup_steps=None, learning_rate=None, batch_size=None):
super().__init__()
self.bert = AutoModel.from_pretrained(MODEL_PATH, return_dict=False)
self.save_hyperparameters()
self.model_seed = model_seed
self.num_epochs = num_epochs
self.n_training_steps = n_training_steps
self.n_warmup_steps = n_warmup_steps
self.learning_rate = learning_rate
self.batch_size = batch_size
self.tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.linear1 = nn.Linear(1536,768)
self.linear2 = nn.Linear(768,2)
self.relu = nn.ReLU()
# # Normal binary cross entropy
# self.loss = nn.NLLLoss()
# self.softmax = nn.LogSoftmax(dim=1)
# Custom loss: soft F1 loss
self.loss = SoftF1Loss()
self.softmax = nn.Softmax(dim=1)
self.train_acc = torchmetrics.Accuracy()
self.val_acc = torchmetrics.Accuracy()
def forward(self, input_ids, attention_mask, labels=None):
# pass the inputs to the model
last_hs, cls_hs = self.bert(input_ids, attention_mask=attention_mask)
# # option 1 (maxpool): max pooling over last_hs (doesn't work because there are 0s and negatives)
# output = last_hs.max(dim=1)[0]
# option 2 (cls): using cls embedding
output = cls_hs
# # option 3 (entities): some pooling of all the tokens in the span + cls embedding
# span_mask = input_ids==7253
# span_mask = span_mask.cumsum(dim=1)
# span_mask = torch.where(span_mask==1, span_mask, 0) # this mask has an extra 1 for the first entity marker
# marker_mask = ~(input_ids==7253) # so make marker mask to identify entity demarcators
# final_mask = marker_mask*span_mask # multiply to get rid of first entity marker in span mask
# span = last_hs*final_mask.unsqueeze(dim=2) # get weights in last_hs for this span by sending all non-span tokens to 0
# span = torch.sum(span, dim=1) # [32, 70, 768] --> [32, 768]
# num_tokens = torch.sum(final_mask, dim=1) # [32, 70] --> [32]
# mean_pooled_span = torch.div(span, num_tokens.unsqueeze(dim=1)) # get average embedding by dividing sum of token embeddings by num_tokens
# # markers = last_hs*marker_mask.unsqueeze(dim=2) # get entity markers
# output = torch.cat((cls_hs, mean_pooled_span), dim=1) # concatenate cls embedding and mean pooled ent embeddings to get [32, 1536] embedding
# if not all(x>0 for x in num_tokens):
# ipdb.set_trace()
# # for cls + pooled entity embedding
# output = self.linear1(output)
# output = self.relu(output)
# output = self.linear2(output)
# for cls embedding
output = self.linear2(output)
output = self.relu(output)
output = self.softmax(output)
loss = 0
if labels is not None:
loss = self.loss(output, labels)
return loss, output
def training_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, outputs = self(input_ids, attention_mask, labels)
self.log("train_loss", loss, prog_bar=True, logger=True)
# log step metric
self.log('train_acc', self.train_acc(outputs.argmax(dim=1), labels), on_step=True, on_epoch=False)
return {"loss": loss, "predictions": outputs, "labels": labels}
def validation_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, outputs = self(input_ids, attention_mask, labels)
self.log("val_loss", loss, prog_bar=True, logger=True)
# log step metric
self.log('val_acc', self.val_acc(outputs.argmax(dim=1), labels), on_step=True, on_epoch=True)
return {"loss": loss, "predictions": outputs, "labels": labels}
def test_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, outputs = self(input_ids, attention_mask, labels)
self.log("test_loss", loss, prog_bar=True, logger=True)
return loss
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.add_scalar('loss',avg_loss, self.current_epoch)
# log epoch metric
# self.logger.experiment.add_scalar('train_acc_epoch', self.train_acc.compute(), self.current_epoch)
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.add_scalar('loss',avg_loss, self.current_epoch)
# log epoch metric
# self.logger.experiment.add_scalar('val_acc_epoch', self.val_acc.compute(), self.current_epoch)
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.learning_rate)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.n_warmup_steps, num_training_steps=self.n_training_steps)
return dict(
optimizer=optimizer,
lr_scheduler=dict(
scheduler=scheduler,
interval='step'
)
)
#----------------------------------------------------------
# custom loss function for SoftF1Loss
# https://gist.github.com/SuperShinyEyes/dcc68a08ff8b615442e3bc6a9b55a354
class SoftF1Loss(nn.Module):
def __init__(self, epsilon=1e-7):
super().__init__()
self.epsilon = epsilon
def forward(self, y_pred, y_true):
assert y_true.ndim == 1
assert y_pred.ndim == 1 or y_pred.ndim == 2
# ipdb.set_trace()
if y_pred.ndim == 2:
y_pred = y_pred[:,1]
tp = (y_true * y_pred).sum(dim=0).to(torch.float32)
tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).to(torch.float32)
fp = ((1 - y_true) * y_pred).sum(dim=0).to(torch.float32)
fn = (y_true * (1 - y_pred)).sum(dim=0).to(torch.float32)
precision = tp / (tp + fp + self.epsilon)
recall = tp / (tp + fn + self.epsilon)
f1 = 2* (precision*recall) / (precision + recall + self.epsilon)
f1 = f1.clamp(min=self.epsilon, max=1-self.epsilon)
return 1 - f1.mean()
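# A minimal sanity-check sketch of how SoftF1Loss is consumed (illustrative only, never
# called in this script). It assumes two-column softmax outputs where column 1 is the
# positive-class probability, matching how the loss is used by RelationalClassifier above.
def _soft_f1_example():
    loss_fn = SoftF1Loss()
    y_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # softmax-style outputs
    y_true = torch.tensor([1, 0, 1])                             # binary gold labels
    # loss is 1 - soft F1, so it shrinks toward 0 as predictions agree with the labels
    return loss_fn(y_pred, y_true)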
#----------------------------------------------------------
def runModel(train_df, val_df, test_df, num_epochs, learning_rate, batch_size, setting):
# create instance of data module
data_module = RelationalDataModule(train_df, val_df, tokenizer, batch_size=batch_size, max_token_len=NUM_TOKENS)
# set up data module
data_module.setup()
# create an instance of our model
# to use the scheduler, we need to calculate the number of training and warm-up steps.
# The number of training steps per epoch is equal to number of training examples / batch size.
# The number of total training steps is training steps per epoch * number of epochs:
steps_per_epoch=len(train_df) // batch_size
total_training_steps = steps_per_epoch * num_epochs
warmup_steps = total_training_steps // 5 # use a fifth of the training steps for a warm-up
print(warmup_steps, total_training_steps)
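    # Worked example with the values used at the bottom of this script (illustrative):
    # len(train_df) = 3410 and batch_size = 16 give steps_per_epoch = 3410 // 16 = 213,
    # so with num_epochs = 10, total_training_steps = 2130 and warmup_steps = 2130 // 5 = 426.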
model = RelationalClassifier(
n_classes=len(LABEL),
# model_seed=model_seed,
num_epochs=num_epochs,
n_warmup_steps=warmup_steps,
n_training_steps=total_training_steps,
learning_rate=learning_rate,
batch_size=batch_size
)
# set output directories for checkpoints and logger to model name + loss type + embedding type
output_dir = "%s-%s-%s/%s" % (MODEL_NAME, LOSS_NAME, EMBEDDING_TYPE, setting)
# set output filename to hyperparam combo
output_file = "epochs=%d-lr=%f-bs=%d-%s" % (num_epochs, learning_rate, batch_size, setting)
print(output_dir)
print(output_file)
checkpoint_callback = ModelCheckpoint(
dirpath="checkpoints/%s" % output_dir,
filename=output_file,
save_top_k=1,
verbose=True,
monitor="val_loss",
mode="min"
)
logger = TensorBoardLogger("lightning_logs/%s" % output_dir, name=output_file)
trainer = pl.Trainer(
logger=logger,
callbacks=[checkpoint_callback],
max_epochs=num_epochs,
gpus=1,
progress_bar_refresh_rate=20
)
trainer.fit(model, data_module)
print("fit model")
trainer.test()
# load best version of the model according to val loss
# https://github.com/pykale/pykale/pull/149#discussion_r638841687
trained_model = RelationalClassifier.load_from_checkpoint(
trainer.checkpoint_callback.best_model_path,
n_classes=len(LABEL)
)
# put model into eval mode
trained_model.eval()
trained_model.freeze()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
trained_model = trained_model.to(device)
# get predictions
dataset = RelationalDataset(test_df, tokenizer, max_token_len=NUM_TOKENS)
predictions = []
labels = []
sentences = []
paper_ids = []
for item in tqdm(dataset):
_, prediction = trained_model(
item["input_ids"].unsqueeze(dim=0).to(device),
item["attention_mask"].unsqueeze(dim=0).to(device)
)
paper_ids.append(item["paper_id"])
sentences.append(item["sentence"])
predictions.append(prediction.flatten())
labels.append(item["labels"].int())
predictions = torch.stack(predictions).detach().cpu().numpy()
predictions = predictions[:,1]
# predictions = np.argmax(predictions, axis = 1)
labels = torch.stack(labels).detach().cpu().numpy()
# output preds
df_preds = pd.DataFrame(list(zip(paper_ids, sentences, predictions)),
columns =['paper_id', 'sentence', 'scibert_pred'])
df_preds.to_csv("./predictions/%s/%s-pred-score.csv" % (output_dir, output_file))
# print(classification_report(labels, predictions))
# report = classification_report(labels, predictions, output_dict=True)
# return report
#----------------------------------------------------------
# # data to split into train and validation sets
# cv_df = pd.read_csv("../annotations-round2/1-sentence-final-§-cv.csv")
# # drop columns with null values
# cv_df = cv_df[cv_df['is_relational'].notna()]
# # define groups for sklearn GroupShuffleSplit
# groups = cv_df['paper_id']
# output_dir = "%s-%s-%s/learning-curve-all-sentences/" % (MODEL_NAME, LOSS_NAME, EMBEDDING_TYPE)
# # for this combo of parameters and this train_size
# for train_size in train_sizes:
# # define split
# gss = GroupShuffleSplit(n_splits=2, train_size=train_size, test_size=0.1, random_state=42)
# # reset results
# results = {}
# # go through all model seeds
# for model_seed in range(1,6):
# # set model seed
# pl.seed_everything(model_seed)
# cv_fold = 0 # count number of folds
# # split into 5 partitions of train/val with fixed cv seed
# for train_idx, test_idx in gss.split(X=cv_df, groups=groups):
# cv_fold += 1
# train_df = cv_df.iloc[train_idx]
# val_df = cv_df.iloc[test_idx]
# this_combo = "seed=%d-fold=%d-epochs=%d-lr=%f-bs=%d-trainsize=%.2f" % (model_seed, cv_fold, num_epochs, learning_rate, batch_size, train_size)
# print(this_combo)
# print(len(train_df))
# print(len(val_df))
# # if there was already a classification report made for this combo,
# # don't run model again for it
# if path.exists("./classification_reports/%s/%s.json" % (output_dir, this_combo)):
# print("found path")
# # if this combo does NOT have a classification report, run model, save report
# else:
# print("path not found, running model...")
# report = runModel(train_df, val_df, model_seed, cv_fold, num_epochs, learning_rate, batch_size, train_size)
# results[this_combo] = report
# with open('./classification_reports/%s/%s.json' % (output_dir, this_combo), 'w') as outfile:
# json.dump(results, outfile)
# # DELETE checkpoint regardless
# print("deleting checkpoint for combo %s..." % this_combo)
# if path.exists("./checkpoints/%s/%s.ckpt" % (output_dir, this_combo)):
# os.remove("./checkpoints/%s/%s.ckpt" % (output_dir, this_combo))
#----------------------------------------------------------
# data to split into train and validation sets
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/union-1sentence-both2sentence/union-binary-label-data-§.csv")
df = df[df['is_relational'].notna()] # drop columns with null values
train_df = df[:3410]
val_df = df[3410:]  # everything after the training rows (df[:3410] covers rows 0-3409)
#------------------------
test_df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-demarcated.csv")
# test_df = test_df[:100]
test_df['is_relational'] = [0] * len(test_df)  # placeholder labels so RelationalDataset can be constructed; only the model's predictions are used
#------------------------
# get paper_ids of s2orc sentences that were used in data augmentation and do not use those
df_compare = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/augmented-data/supplementary-compare-sentences.csv")
df_partof = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/augmented-data/supplementary-partof-sentences.csv")
df_usedfor = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/augmented-data/supplementary-usedfor-sentences.csv")
# remove augmentation sentence paper_ids from test set
augmentation_ids_to_exclude = df_compare['paper_ids'].tolist() + df_partof['paper_ids'].tolist() + df_usedfor['paper_ids'].tolist()
test_df = test_df[~test_df['paper_id'].isin(augmentation_ids_to_exclude)]
# remove training/val set ids from test set
train_val_ids_to_exclude = df['paper_id']
test_df = test_df[~test_df['paper_id'].isin(train_val_ids_to_exclude)]
#------------------------
# best params
num_epochs = 10
learning_rate = 1e-5
batch_size = 16
setting = 'all-s2orc'
results = {}
output_dir = "%s-%s-%s/%s" % (MODEL_NAME, LOSS_NAME, EMBEDDING_TYPE, setting)
this_combo = "epochs=%d-lr=%f-bs=%d-%s" % (num_epochs, learning_rate, batch_size, setting)
pl.seed_everything(42)
print("running model...")
runModel(train_df, val_df, test_df, num_epochs, learning_rate, batch_size, setting)
# DELETE checkpoint regardless
print("deleting checkpoint for combo %s..." % this_combo)
if path.exists("./checkpoints/%s/%s.ckpt" % (output_dir, this_combo)):
os.remove("./checkpoints/%s/%s.ckpt" % (output_dir, this_combo)) | ACCoRD-main | system/code/extraction/run-scibert-binary-classifier.py |
# Hyperparameter search for scibert model
# Code borrowed from:
# https://colab.research.google.com/drive/14Ea4lIzsn5EFvPpYKtWStXEByT9qmbkj?usp=sharing#scrollTo=qAYbKDu4UR6M
# https://github.com/pnageshkar/NLP/blob/master/Medium/Multi_label_Classification_BERT_Lightning.ipynb
# https://pytorch-lightning.readthedocs.io/en/latest/starter/introduction_guide.html
# https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html
#----------------------------------------------------------
import pandas as pd
import numpy as np
import random
import torchmetrics
import os.path
from os import path
from tqdm.auto import tqdm
import ipdb
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import transformers
from transformers import AutoTokenizer, AutoModel, AdamW, get_linear_schedule_with_warmup, PreTrainedTokenizerBase
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy, f1, auroc
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, multilabel_confusion_matrix, precision_recall_curve, average_precision_score
from sklearn.model_selection import KFold, GroupShuffleSplit
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
from matplotlib import rc
import json
import ast
#----------------------------------------------------------
LABEL = 'binarized_relations'
# convert raw text into list of tokens using tokenizer
MODEL_NAME = 'scibert'
LOSS_NAME = 'weightedBCE'
EMBEDDING_TYPE = 'cls'
MODEL_PATH = 'allenai/scibert_scivocab_uncased'
NUM_TOKENS = 512
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
#----------------------------------------------------------
# wrap tokenization process in a PyTorch Dataset, along with converting the labels to tensors
class RelationalDataset(Dataset):
    def __init__(self, data: pd.DataFrame, tokenizer: PreTrainedTokenizerBase, max_token_len: int = 128):
self.tokenizer = tokenizer
self.data = data
self.max_token_len = max_token_len
def __len__(self):
return len(self.data)
def __getitem__(self, index: int):
data_row = self.data.iloc[index]
sentence = data_row.sentence
labels = list(ast.literal_eval(data_row[LABEL]))
encoding = self.tokenizer.encode_plus(
sentence,
add_special_tokens=True,
max_length=self.max_token_len,
return_token_type_ids=False,
padding="max_length",
truncation=True,
return_attention_mask=True,
return_tensors='pt',
)
input_ids = encoding['input_ids'].flatten()
attention_mask = encoding['attention_mask'].flatten()
return {
'sentence': sentence,
'input_ids': input_ids ,
'attention_mask': attention_mask,
'labels': torch.tensor(labels, dtype= torch.long)
}
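# Illustrative only (not executed by the pipeline): a single dataset item is a dict of
# padded/truncated tensors ready for the encoder, e.g.
#   item = RelationalDataset(train_df, tokenizer, max_token_len=NUM_TOKENS)[0]
#   item['input_ids'].shape       # torch.Size([NUM_TOKENS])
#   item['attention_mask'].shape  # torch.Size([NUM_TOKENS])
#   item['labels']                # multi-hot tensor parsed from the LABEL column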
#----------------------------------------------------------
# wrap custom dataset into LightningDataModule
class RelationalDataModule(pl.LightningDataModule):
def __init__(self, train_df, val_df, tokenizer, batch_size, max_token_len):
super().__init__()
self.batch_size = batch_size
self.train_df = train_df
self.val_df = val_df
self.tokenizer = tokenizer
self.max_token_len = max_token_len
def setup(self, stage=None):
self.train_dataset = RelationalDataset(self.train_df, self.tokenizer, self.max_token_len)
self.val_dataset = RelationalDataset(self.val_df, self.tokenizer, self.max_token_len)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=2)
def test_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=2)
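# Illustrative usage sketch (values are placeholders): the data module wires the two
# dataframes into Lightning-managed DataLoaders, e.g.
#   dm = RelationalDataModule(train_df, val_df, tokenizer, batch_size=16, max_token_len=NUM_TOKENS)
#   dm.setup()
#   batch = next(iter(dm.train_dataloader()))  # keys: sentence, input_ids, attention_mask, labels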
#----------------------------------------------------------
class RelationalClassifier(pl.LightningModule):
def __init__(self, n_classes: int, model_seed=None, num_epochs=None, n_training_steps=None, n_warmup_steps=None, learning_rate=None, batch_size=None):
super().__init__()
self.bert = AutoModel.from_pretrained(MODEL_PATH, return_dict=False)
self.save_hyperparameters()
self.model_seed = model_seed
self.num_epochs = num_epochs
self.n_training_steps = n_training_steps
self.n_warmup_steps = n_warmup_steps
self.learning_rate = learning_rate
self.batch_size = batch_size
        self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes)  # one output logit per relation label
self.relu = nn.ReLU()
# Normal binary cross entropy (from single label classification)
# self.loss = nn.NLLLoss()
# self.softmax = nn.LogSoftmax(dim=1)
# Binary cross entropy (multilabel classification)
# Since the output is multi-label (multiple tags associated with a question), we may tend to use a Sigmoid activation function for the final output
# and a Binary Cross-Entropy loss function. However, the Pytorch documentation recommends using the BCEWithLogitsLoss () function which combines a
# Sigmoid layer and the BCELoss in one single class instead of having a plain Sigmoid followed by a BCELoss.
# self.loss = nn.BCEWithLogitsLoss()
        self.loss = nn.BCEWithLogitsLoss(pos_weight=torch.full((1, n_classes), 2.0))
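        # Sketch of the equivalence mentioned above (illustrative, not executed):
        #   nn.BCELoss()(torch.sigmoid(logits), targets) == nn.BCEWithLogitsLoss()(logits, targets)
        # up to numerical precision, but the fused version is more numerically stable.
        # pos_weight > 1 up-weights the positive term of every label, compensating for
        # labels that are mostly 0 in the multi-label annotations.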
# # Custom loss: soft F1 loss
# self.loss = SoftF1Loss()
# self.softmax = nn.Softmax(dim=1)
# self.train_acc = torchmetrics.Accuracy()
# self.val_acc = torchmetrics.Accuracy()
def forward(self, input_ids, attention_mask, labels=None):
# pass the inputs to the model
last_hs, cls_hs = self.bert(input_ids, attention_mask=attention_mask)
# output = last_hs.max(dim=1)[0] # max pooling
output = self.classifier(cls_hs)
loss = 0
if labels is not None:
loss = self.loss(output, labels.float())
return loss, output
def training_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, outputs = self(input_ids, attention_mask, labels)
self.log("train_loss", loss, prog_bar=True, logger=True)
# log step metric
# self.log('train_acc', self.train_acc(outputs.argmax(dim=1), labels), on_step=True, on_epoch=False)
return {"loss": loss, "predictions": outputs, "labels": labels}
def validation_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, outputs = self(input_ids, attention_mask, labels)
self.log("val_loss", loss, prog_bar=True, logger=True)
# log step metric
# self.log('val_acc', self.val_acc(outputs.argmax(dim=1), labels), on_step=True, on_epoch=True)
return {"loss": loss, "predictions": outputs, "labels": labels}
def test_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, outputs = self(input_ids, attention_mask, labels)
self.log("test_loss", loss, prog_bar=True, logger=True)
return loss
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.add_scalar('loss',avg_loss, self.current_epoch)
# log epoch metric
# self.logger.experiment.add_scalar('train_acc_epoch', self.train_acc.compute(), self.current_epoch)
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.add_scalar('loss',avg_loss, self.current_epoch)
# log epoch metric
# self.logger.experiment.add_scalar('val_acc_epoch', self.val_acc.compute(), self.current_epoch)
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.learning_rate)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.n_warmup_steps, num_training_steps=self.n_training_steps)
return dict(
optimizer=optimizer,
lr_scheduler=dict(
scheduler=scheduler,
interval='step'
)
)
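# (illustrative) shape of the linear warmup schedule configured above: the learning rate
# ramps from 0 to `learning_rate` over the first `n_warmup_steps`, then decays linearly
# back to 0 at `n_training_steps`; roughly
#   lr(step) = learning_rate * step / n_warmup_steps                                            if step < n_warmup_steps
#            = learning_rate * (n_training_steps - step) / (n_training_steps - n_warmup_steps)  otherwise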
#----------------------------------------------------------
# custom loss function for SoftF1Loss
# https://gist.github.com/SuperShinyEyes/dcc68a08ff8b615442e3bc6a9b55a354
class SoftF1Loss(nn.Module):
def __init__(self, epsilon=1e-7):
super().__init__()
self.epsilon = epsilon
def forward(self, y_pred, y_true):
assert y_true.ndim == 1
assert y_pred.ndim == 1 or y_pred.ndim == 2
# ipdb.set_trace()
if y_pred.ndim == 2:
y_pred = y_pred[:,1]
tp = (y_true * y_pred).sum(dim=0).to(torch.float32)
tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).to(torch.float32)
fp = ((1 - y_true) * y_pred).sum(dim=0).to(torch.float32)
fn = (y_true * (1 - y_pred)).sum(dim=0).to(torch.float32)
precision = tp / (tp + fp + self.epsilon)
recall = tp / (tp + fn + self.epsilon)
f1 = 2* (precision*recall) / (precision + recall + self.epsilon)
f1 = f1.clamp(min=self.epsilon, max=1-self.epsilon)
return 1 - f1.mean()
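# Illustrative usage of the (currently unused) soft-F1 loss, with hypothetical tensors:
#   loss_fn = SoftF1Loss()
#   y_pred = torch.sigmoid(logits)[:, 1]  # 1-D probabilities for the positive class
#   y_true = labels.float()               # 1-D binary ground truth
#   loss = loss_fn(y_pred, y_true)        # 1 - soft F1, differentiable w.r.t. y_pred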
#----------------------------------------------------------
# Plot precision-recall curves
def plotPrecisionRecallCurve(Y_test, y_score, output_dir, output_file):
n_classes = Y_test.shape[1]
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(Y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(Y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(Y_test, y_score,
average="micro")
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
.format(average_precision["micro"]))
#----------------------------------------------------------
# # Plot the micro-averaged Precision-Recall curve¶
# plt.figure()
# plt.step(recall['micro'], precision['micro'], where='post')
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.ylim([0.0, 1.05])
# plt.xlim([0.0, 1.0])
# plt.title(
# 'Average precision score, micro-averaged over all classes: AP={0:0.2f}'
# .format(average_precision["micro"]))
# plt.savefig("micro-averaged-precision-recall-curve.png")
#----------------------------------------------------------
# Plot Precision-Recall curve for each class and iso-f1 curves
from itertools import cycle
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title(output_file)
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
plt.savefig("./precision-recall-curves/%s/%s.png" % (output_dir,output_file))
#----------------------------------------------------------
def runModel(train_df, val_df, test_df, model_seed, num_epochs, learning_rate, batch_size):
# create instance of data module
data_module = RelationalDataModule(train_df, val_df, tokenizer, batch_size=batch_size, max_token_len=NUM_TOKENS)
# set up data module
data_module.setup()
# create an instance of our model
# to use the scheduler, we need to calculate the number of training and warm-up steps.
# The number of training steps per epoch is equal to number of training examples / batch size.
# The number of total training steps is training steps per epoch * number of epochs:
steps_per_epoch=len(train_df) // batch_size
total_training_steps = steps_per_epoch * num_epochs
warmup_steps = total_training_steps // 5 # use a fifth of the training steps for a warm-up
print(warmup_steps, total_training_steps)
model = RelationalClassifier(
        n_classes=len(ast.literal_eval(train_df[LABEL].iloc[0])),  # number of relation labels per example, parsed from the data (len(LABEL) was just the length of the column name)
model_seed=model_seed,
num_epochs=num_epochs,
n_warmup_steps=warmup_steps,
n_training_steps=total_training_steps,
learning_rate=learning_rate,
batch_size=batch_size
)
# set output directories for checkpoints and logger to model name + loss type + embedding type
output_dir = "%s-%s-%s/%s" % (MODEL_NAME, LOSS_NAME, EMBEDDING_TYPE, experiment)
# set output filename to hyperparam combo
output_file = "seed=%d-epochs=%d-lr=%f-bs=%d-%s" % (model_seed, num_epochs, learning_rate, batch_size, experiment)
print(output_dir)
print(output_file)
checkpoint_callback = ModelCheckpoint(
dirpath="checkpoints/%s" % output_dir,
filename=output_file,
save_top_k=1,
verbose=True,
monitor="val_loss",
mode="min"
)
logger = TensorBoardLogger("lightning_logs/%s" % output_dir, name=output_file)
trainer = pl.Trainer(
logger=logger,
callbacks=[checkpoint_callback],
max_epochs=num_epochs,
gpus=1,
progress_bar_refresh_rate=20,
track_grad_norm=2
)
trainer.fit(model, data_module)
print("fit model")
trainer.test()
# load best version of the model according to val loss
# https://github.com/pykale/pykale/pull/149#discussion_r638841687
trained_model = RelationalClassifier.load_from_checkpoint(
trainer.checkpoint_callback.best_model_path,
        n_classes=len(ast.literal_eval(train_df[LABEL].iloc[0]))
)
# put model into eval mode
trained_model.eval()
trained_model.freeze()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
trained_model = trained_model.to(device)
# get predictions
dataset = RelationalDataset(test_df, tokenizer, max_token_len=NUM_TOKENS)
predictions = []
labels = []
sentences = []
for item in tqdm(dataset):
_, prediction = trained_model(
item["input_ids"].unsqueeze(dim=0).to(device),
item["attention_mask"].unsqueeze(dim=0).to(device)
)
sentences.append(item["sentence"])
predictions.append(prediction.flatten())
# labels.append(item["labels"].int())
predictions = torch.stack(predictions).detach().cpu().numpy()
# upper, lower = 1, 0
# predictions = np.where(predictions > 0.5, upper, lower)
# labels = torch.stack(labels).detach().cpu().numpy()
# output preds
df_preds = pd.DataFrame(list(zip(sentences, predictions)),
columns =['sentence', 'scibert_pred'])
df_preds.to_csv("./predictions/%s/%s.csv" % (output_dir, output_file))
# print(classification_report(labels, predictions, zero_division=0))
# report = classification_report(labels, predictions, output_dict=True, zero_division=0)
# return report
#----------------------------------------------------------
# Train set: use ALL annotations (both cv and held out test sets since we're running on the 170k new sentences)
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/union-1sentence-both2sentence/union-multilabel-data-§-quality-control-with-contrast-class.csv")
train_df = df[:3510]
val_df = df[3510:]  # start at 3510 so no annotated row is dropped between the two splits
print(len(train_df))
print(len(val_df))
# Test set: all 2sentence-1concepts rows from the 6 batches of s2orc metadata
test_df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-demarcated.csv")
# test_df = test_df[:100]
test_df['binarized_relations'] = ["[0, 0, 0]"] * len(test_df)
print(len(test_df))
# run best model and test on held-out test set
# best params
num_epochs = 10
learning_rate = 5e-5
batch_size = 32
model_seed = 42
pl.seed_everything(model_seed)
results = {}
experiment = "best-params-all-s2orc"
output_dir = "%s-%s-%s/%s" % (MODEL_NAME, LOSS_NAME, EMBEDDING_TYPE, experiment)
this_combo = "seed=%d-epochs=%d-lr=%f-bs=%d-%s" % (model_seed, num_epochs, learning_rate, batch_size, experiment)
# run model
print("running model...")
runModel(train_df, val_df, test_df, model_seed, num_epochs, learning_rate, batch_size)
# DELETE checkpoint regardless
print("deleting checkpoint for combo %s..." % this_combo)
if path.exists("./checkpoints/%s/%s.ckpt" % (output_dir, this_combo)):
os.remove("./checkpoints/%s/%s.ckpt" % (output_dir, this_combo))
| ACCoRD-main | system/code/extraction/run-scibert-multilabel-classifier.py |
import pickle
import pandas as pd
from operator import itemgetter
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from os.path import exists
#--------------------------------------------------------
# unpickle concept dictionary
with open('/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/forecite_concept_dict.pickle', 'rb') as handle:
concept_dict = pickle.load(handle)
print("...unpickled forecite concept dictionary")
# Initialize N
N = 50
# N largest values in dictionary
# Using sorted() + itemgetter() + items()
res = dict(sorted(concept_dict.items(), key = itemgetter(1), reverse = True)[:N])
# printing result
# print("The top N value pairs are " + str(res))
concept_list = list(res.keys())
# print(concept_list)
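# (illustrative) sorted(...items(), key=itemgetter(1), reverse=True)[:N] orders the
# concept->score pairs by score; e.g. {'a': 3, 'b': 9} with N=1 yields [('b', 9)], so
# `res` holds the N highest-scoring ForeCite concepts and `concept_list` their names.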
#--------------------------------------------------------
df_nlp_concepts = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/nlp_concepts.csv")
nlp_concept_scores = []
for index, row in df_nlp_concepts.iterrows():
# get forecite score for each nlp concept
nlp_concept_scores.append(concept_dict[row['concept']])
df_nlp_concepts['score'] = nlp_concept_scores
df_nlp_concepts = df_nlp_concepts[~df_nlp_concepts.concept.isin(['bert model', 'pre - train bert model', 'pre - train bert', 'moses', 'cho et al', 'dagan', 'yarowsky', 'hochreiter', 'turney', 'ney', 'och', 'grosz', 'steedman', 'well translation'])]
# # top 150 nlp concepts by score
# top150_nlp_concepts = df_nlp_concepts.sort_values(by='score', ascending=False)[:150]['concept'].tolist()
# top 150 nlp concepts by num_papers
top150_nlp_concepts = df_nlp_concepts.sort_values(by='num_papers', ascending=False)[:150]['concept'].tolist()
print(top150_nlp_concepts)
selected_nlp_concepts = ['adversarial training', 'beam search', 'bert', 'elmo', 'gpt', 'glove', 'word2vec', 'resnet', 'domain shift', 'ulmfit', 'newsqa', 'squad', 'random forest', 'imagenet', 'lstm', 'roberta', 'variational autoencoder', 'dropout', 'fasttext', 'hierarchical softmax', 'distant supervision']
relations = ['compare','isa']
#--------------------------------------------------------
# 1) PLOT NUMBER OF DESCRIPTIONS BEFORE RANKING FILTERING
#--------------------------------------------------------
description_counts = []
relation_counts = []
concept_counts = []
isa_counts = []
compare_counts = []
version = "v1-4class/nlp-concepts"
for concept in top150_nlp_concepts:
df_concept = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/top-n-forecite-concept-source-sentences/%s/scibert-multilabel-binary-predictions-all-s2orc-with-forecite-%s.csv" % (version, concept))
df_concept = df_concept.loc[:, ~df_concept.columns.str.contains('^Unnamed')]
if len(df_concept) == 0:
continue
for index, relation in enumerate(relations):
df_concept_relation = df_concept[df_concept['%s_pred_score' % relation]>0]
# add to df to plot
concept_counts.append(concept)
relation_counts.append(relation)
description_counts.append(len(df_concept_relation))
if relation == "compare":
compare_counts.append(len(df_concept_relation))
elif relation == "isa":
isa_counts.append(len(df_concept_relation))
print(sum(isa_counts)/len(isa_counts))
print(sum(compare_counts)/len(compare_counts))
# keep the top N concepts, ranked by number of descriptions, for plotting
num_concepts = 50
df_counts = pd.DataFrame(list(zip(concept_counts, relation_counts, description_counts)), columns =['concept', 'relation', 'num_descriptions'])
df_counts_isa = df_counts[df_counts['relation']=="isa"].sort_values(by='num_descriptions', ascending=False)[:num_concepts]
df_counts_compare = df_counts[df_counts['relation']=="compare"].sort_values(by='num_descriptions', ascending=False)[:num_concepts]
# # plot compare
# sns.catplot(data=df_counts_compare, x="concept", y="num_descriptions", col="relation", kind="bar", height=5, aspect=3)
# plt.xticks(rotation='vertical')
# plt.savefig("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/plots/description-counts-compare.pdf", bbox_inches="tight")
# # plot isa
# sns.catplot(data=df_counts_isa, x="concept", y="num_descriptions", col="relation", kind="bar", height=5, aspect=3)
# plt.xticks(rotation='vertical')
# plt.savefig("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/plots/description-counts-isa.pdf", bbox_inches="tight")
#--------------------------------------------------------
# GET STATS FOR NUMBER OF CONCEPT Bs per CONCEPT A
#--------------------------------------------------------
version = "v1-4class"
concept_set = "nlp-concepts"
relations = ['compare', 'isa']
concepta_conceptb_counts = []
relation_conceptb_counts = []
conceptb_counts = []
isa_counts = []
compare_counts = []
for concept in top150_nlp_concepts:
for index, relation in enumerate(relations):
file_path = "/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/ranked-filtered/%s/%s-%s.csv" % (version, concept_set, concept, relation)
if exists(file_path):
df_ranked_filtered_concept_relation = pd.read_csv(file_path)
else:
continue
# skip if file doesn't have any descriptions
if len(df_ranked_filtered_concept_relation) == 0:
continue
concepta_conceptb_counts.append(concept)
relation_conceptb_counts.append(relation)
conceptb_counts.append(len(df_ranked_filtered_concept_relation))
if relation == "compare":
compare_counts.append(len(df_ranked_filtered_concept_relation))
elif relation == "isa":
isa_counts.append(len(df_ranked_filtered_concept_relation))
print(sum(isa_counts)/len(isa_counts))
print(sum(compare_counts)/len(compare_counts))
num_concepts = 150
df_counts = pd.DataFrame(list(zip(concepta_conceptb_counts, relation_conceptb_counts, conceptb_counts)), columns =['concept', 'relation', 'num_concept_bs'])
df_counts_isa = df_counts[df_counts['relation']=="isa"].sort_values(by='num_concept_bs', ascending=False)[:num_concepts]
df_counts_compare = df_counts[df_counts['relation']=="compare"].sort_values(by='num_concept_bs', ascending=False)[:num_concepts]
# plot histograms of the number of concept Bs per concept for the is-a and compare relations
fig, axs = plt.subplots(1, 2, figsize=(4, 3))
plt.setp(axs, ylim=(0,41))
sns.histplot(data=df_counts_isa, x="num_concept_bs", color="darkcyan", ax=axs[0])
sns.histplot(data=df_counts_compare, x="num_concept_bs", color="darkorange", ax=axs[1])
# sns.catplot(x="num_concept_bs", col="relation", data=df_counts, kind="count", height=4, aspect=.7);
# sns.histplot(data=df_counts, x="num_concept_bs", hue="relation")
plt.savefig("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/v1-4class/plots/conceptb-counts-histogram.pdf", bbox_inches="tight") | ACCoRD-main | system/code/generation/analyze-description-diversity.py |
import pickle
import pandas as pd
from operator import itemgetter
import numpy as np
import openai
from rouge_score import rouge_scorer
import spacy
import re
import seaborn as sns
import matplotlib.pyplot as plt
#--------------------------------------------------------
openai.api_key = ""
# 4class: ['compare' 'is-a' 'part-of' 'used-for']
# 3class: ['compare', contrast, 'is-a']
sum_len = 0
selected_nlp_concepts = ['adversarial training', 'beam search', 'bert', 'elmo', 'gpt', 'glove', 'word2vec', 'resnet', 'domain shift', 'ulmfit', 'newsqa', 'squad', 'random forest', 'imagenet', 'lstm', 'roberta', 'variational autoencoder', 'dropout', 'fasttext', 'hierarchical softmax', 'distant supervision']
relations = ['compare','isa']
#--------------------------------------------------------
# unpickle concept dictionary
with open('/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/forecite_concept_dict.pickle', 'rb') as handle:
concept_dict = pickle.load(handle)
print("...unpickled forecite concept dictionary")
# Initialize N
N = 50
# N largest values in dictionary
# Using sorted() + itemgetter() + items()
res = dict(sorted(concept_dict.items(), key = itemgetter(1), reverse = True)[:N])
# printing result
# print("The top N value pairs are " + str(res))
concept_list = list(res.keys())
# print(concept_list)
#--------------------------------------------------------
df_nlp_concepts = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/nlp_concepts.csv")
nlp_concept_scores = []
for index, row in df_nlp_concepts.iterrows():
# get forecite score for each nlp concept
nlp_concept_scores.append(concept_dict[row['concept']])
df_nlp_concepts['score'] = nlp_concept_scores
df_nlp_concepts = df_nlp_concepts[~df_nlp_concepts.concept.isin(['bert model', 'pre - train bert model', 'pre - train bert', 'moses', 'cho et al', 'dagan', 'yarowsky', 'hochreiter', 'turney', 'ney', 'och', 'grosz', 'steedman', 'well translation'])]
# # top 150 nlp concepts by score
# top150_nlp_concepts = df_nlp_concepts.sort_values(by='score', ascending=False)[:150]['concept'].tolist()
# top 150 nlp concepts by num_papers
top150_nlp_concepts = df_nlp_concepts.sort_values(by='num_papers', ascending=False)[:150]['concept'].tolist()
print(top150_nlp_concepts)
#--------------------------------------------------------
#--------------------------------------------------------
# set up spacy
nlp = spacy.load("en_core_web_md")
tokenizer = nlp.tokenizer
nlp.add_pipe("sentencizer")
all_stopwords = nlp.Defaults.stop_words
def getConceptB(generation, concept):
# get concept B by selecting the second noun chunk
doc = nlp(generation)
noun_chunks = list(doc.noun_chunks)
# if the generation is not valid: make conceptb an empty string
if (len(noun_chunks) <= 1) or (generation == " variable HYDRA_FULL_ERROR=1 for a complete stack tra"):
print("concept b not found")
concept_b = ""
# conceptbs.append("")
# if the generation is valid and "used-for":
elif re.search('<ENT> is used (to|for)', generation):
find_used = re.search('used (to|for)', generation)
end = find_used.span()[1] # get index of end of "is"
concept_b = generation[end+1:]
# conceptbs.append(generation[end+1:]) # index into generation with these indices, removing leading and trailing spaces
# if the generation is valid and "compare":
elif re.search('is like(.*)in that they are both', generation):
find_is = re.search('is like', generation)
find_that = re.search('in that they', generation)
end = find_is.span()[1] # get index of end of "is"
start = find_that.span()[0] # get index of beginning of "that"
concept_b = generation[end+1:start-1]
# conceptbs.append(generation[end+1:start-1]) # index into generation with these indices, removing leading and trailing spaces
# if the generation is valid and "compare":
elif re.search('is an alternative (to|for)(.*)that', generation):
find_is = re.search('is an alternative (to|for)', generation)
find_that = re.search('that', generation)
end = find_is.span()[1] # get index of end of "is"
start = find_that.span()[0] # get index of beginning of "that"
concept_b = generation[end+1:start-1]
# conceptbs.append(generation[end+1:start-1]) # index into generation with these indices, removing leading and trailing spaces
# if the generation is valid and "used-for":
elif re.search('is (a|an)(.*)that', generation):
find_is = re.search('is (a|an)', generation)
find_that = re.search('that', generation)
end = find_is.span()[1] # get index of end of "is"
start = find_that.span()[0] # get index of beginning of "that"
concept_b = generation[end+1:start-1]
# conceptbs.append(generation[end+1:start-1]) # index into generation with these indices, removing leading and trailing spaces
# all other relation types
else:
concept_b = ""
# iterate through noun chunks to find first valid one
for chunk in noun_chunks:
if ("ENT" in chunk.text) or ("type" in chunk.text) or (concept in chunk.text):
continue
else:
concept_b = chunk
break
# check if concept_b is a string, because it has no .text parameter
if isinstance(concept_b, str):
print(concept_b)
    else:
        # if the first word of this noun phrase is "a", "an", or "the", drop that leading article
        if (concept_b[0].text == "a") or (concept_b[0].text == "an") or (concept_b[0].text == "the"):
concept_b = concept_b[1:]
# even if the concept didn't have an article or anything, save it's .text version
concept_b = concept_b.text
# remove any punctuation or special characters from final value appended
# concept_b = re.sub('[^A-Za-z0-9- ]+', '', concept_b)
return concept_b
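# Illustrative behaviour of getConceptB (hypothetical strings, not drawn from the data):
#   getConceptB("<ENT> is a word embedding that captures co-occurrence statistics", "word2vec")
# matches the "is (a|an) ... that" template and returns roughly "word embedding"; when no
# template matches, the first noun chunk that is not the <ENT> marker or the concept itself
# is used, and an empty string is returned if nothing valid is found.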
#--------------------------------------------------------
def getGPTOutput(j, best_sentences, best_formatted_statements, df_supplementary_sentences, relation, generations, conceptbs, is_conceptb_forecite, conceptb_forecite_score, rouge1, rouge2, rougeL, scorer, relations):
# print("GENERATIONS FOR %s CLASS" % relation)
prompt = "Describe the provided concept in terms of another concept in the text. \n\n"
# prompt examples
for i in range(len(best_sentences)):
text = "Text: " + best_sentences[i] + "\n"
concept_description = "Concept description: " + best_formatted_statements[i] + "\n"
separator = "###" + "\n"
prompt += text + concept_description + separator
prompt_examples = prompt
# supplementary sentence we want a generation for
row = df_supplementary_sentences.iloc[j]
sentence = row['original_sentence']
concept = row['forecite_concept']
# recover the original case of the sentence from the undemarcated dataframe by searching for the lowercase sentence
# sentence_original_case = df_all_rows_original_case.loc[df_all_rows_original_case['sentence'] == sentence]['sentence_original_case'].item()
prompt += "Text: " + sentence + "\n"
prompt += "Concept description: " + concept
if relation == "compare":
prompt += " is like"
elif relation == "used-for":
prompt += " can be used for"
elif relation == "isa":
prompt += " is a"
elif relation == "part-of":
prompt += " is a part of"
# print(prompt)
response = openai.Completion.create(engine="davinci-instruct-beta", prompt=prompt, max_tokens=300, temperature=0, frequency_penalty=1, echo=True, stop="\n")
# print(response['choices'][0]['text'])
generation = response['choices'][0]['text'].splitlines()[-1]
    generation = generation[21:]  # strip the leading "Concept description: " prefix (21 characters)
conceptb = getConceptB(generation, concept)
generations.append(generation)
scores = scorer.score(sentence, generation)
rouge1.append(scores['rouge1'][2])
rouge2.append(scores['rouge2'][2])
rougeL.append(scores['rougeL'][2])
conceptbs.append(conceptb)
relations.append(relation)
if conceptb in concept_dict.keys():
is_conceptb_forecite.append(1)
conceptb_forecite_score.append(concept_dict[conceptb])
else:
is_conceptb_forecite.append(0)
conceptb_forecite_score.append("")
print(sentence)
print("--> " + generation)
print("--> " + conceptb)
# print(scores['rouge1'][2])
# print(scores['rouge2'][2])
print(scores['rougeL'][2])
print(row['max_multilabel_pred_score'])
print("")
#----------------------------------------------------------
def getGPTGenerationsForRelation(relation, concept, df_annotations, best_sentences, best_formatted_statements):
annotations_generations = []
rouge1 = []
rouge2 = []
rougeL = []
conceptbs = []
is_conceptb_forecite = []
conceptb_forecite_score = []
relations = []
# convert prompt examples to lowercase
best_sentences = list(map(lambda x: x.lower(), best_sentences))
best_formatted_statements = list(map(lambda x: x.lower(), best_formatted_statements))
# remove sentences that are in prompt examples
df_annotations = df_annotations[~df_annotations['original_sentence'].isin(best_sentences)]
print(df_annotations)
scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'], use_stemmer=True)
for a in range(len(df_annotations)):
getGPTOutput(a, best_sentences, best_formatted_statements, df_annotations, relation, annotations_generations, conceptbs, is_conceptb_forecite, conceptb_forecite_score, rouge1, rouge2, rougeL, scorer, relations)
df_annotations['gpt_generation'] = annotations_generations
df_annotations['concept_b'] = conceptbs
df_annotations['is_conceptb_forecite'] = is_conceptb_forecite
df_annotations['conceptb_forecite_score'] = conceptb_forecite_score
df_annotations['relation'] = relations
df_annotations['rouge1'] = rouge1
df_annotations['rouge2'] = rouge2
df_annotations['rougeL'] = rougeL
print(len(df_annotations))
df_annotations.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/%s/gpt-formatted-statements-all-s2orc-%s-%s.csv" % (version, concept, relation))
#----------------------------------------------------------
best_isa_sentences = ["although word2vec has successfully been used to learn word embeddings, these kinds of word embeddings capture only co-occurrence relationships between words (levy and goldberg, 2014) .",
"there currently exist many software tools for short read alignment, including soap2 [15] , bwa [14] , and bowtie2",
"this paper presents the spatial hypertext wiki as a collaborative tool for supporting creativity in the re process.",
"cnn architecture consists of several types of layers including convolution, pooling, and fully connected. the network expert has to make multiple choices while designing a cnn such as the number and ordering of layers, the hyperparameters for each type of layer (receptive field size, stride, etc.).",
"nodetrix representations are a popular way to visualize clustered graphs; they represent clusters as adjacency matrices and intercluster edges as curves connecting the matrix boundaries. we study the complexity of constructing nodetrix representations focusing on planarity testing problems, and we show several np-completeness results and some polynomial-time algorithms.",
"unlike traditional word embeddings that represent words with fixed vectors, these embedding models encode both words and their contexts and generate context-specific representations. while contextualized embeddings are useful, we observe that a language model-based embedding model, elmo (peters et al., 2018) , cannot accurately capture the semantic equivalence of contexts."]
best_isa_formatted_statements = ["word2vec is a word embedding that captures only co-occurrence relationships between words.",
"soap2 is a software tool for short read alignment.",
"Spatial hypertext wiki is a collaborative tool that is used for supporting creativity in the re process.",
"stride is a hyperparameter for layers in a cnn.",
"nodetrix representations are a way to visualize clustered graphs that represent clusters as adjacency matrices and intercluster edges as curves connecting the matrix boundaries. ",
"elmo is a language model-based embedding model that cannot accurately capture the semantic equivalence of contexts."]
#----------------------------------------------------------
# best_compare_sentences = ["Recently, many NLP tasks have shown great improvements thanks to pre-trained language models. Models such as ELMo [10] , BERT [3] and GPT [11] include a large language model or contextual embedder with hundreds of millions of parameters trained on large datasets and were shown to produce state-of-the-art models for many NLP tasks, including low-resource scenarios where very little annotated data is available.",
# "OrthoDisease (9) and PhenomicDB (8, 26) are two other resources that allow researchers to look simultaneously at all available phenotypes for an orthologous gene group. The PhenomicDB and OrthoDisease are useful resources integrating the phenotypes with the homologous genes from a variety of species.",
# "Highway Networks [7] and Residual Networks [1] are the latest methods to tackle this problem. These network architectures introduce skip connections, which allow flow of information to later layers, empowering deeper networks with better accuracy.",
# "MCG is affected less by conductivity variations in the body (lungs, muscles, and skin) than ECG. In addition, because MCG is a fully non-contact method, therefore problems in the skinelectrode contact as encountered in ECG are avoided (Kanzaki et al., 2003; Tsukada et al., 1999; Tavarozzi et al., 2002) .",
# "In contrast to Pwrake and GXP Make, Snakemake does not rely on any password-less SSH setup or custom server processes running on the cluster nodes. Finally, Snakemake is the first system to support file name inference with multiple named wildcards in rules.",
# "the multidimensional frequencies of a single component is treated as a whole, and the probability density function is projected as independent univariate von mises distribution to perform tractable inference."]
# # alternate compare: "We refer the reader to [20] for a mathematical formulation of multilayer networks, of which multiplex networks are a subset. Unlike a multilayer network, a multiplex network only allows for a single type of inter-layer connections via which any given node is connected only to its counterpart nodes in the other layers."
# best_compare_formatted_statements = ["ELMo is like BERT in that they are both pre-trained language models that include a large language model or contextual embedder with hundreds of millions of parameters trained on large datasets and were shown to produce state-of-the-art models for many nlp tasks, including low-resource scenarios where very little annotated data is available.",
# "PhenomicDB is like OrthoDisease in that they are both resources that allow researchers to look simultaneously at all available phenotypes for an orthologous gene group and are useful resources integrating the phenotypes with the homologous genes from a variety of species.",
# "Residual Networks are like Highway Networks in that they are both network architectures that introduce skip connections, which allow flow of information to later layers, empowering deeper networks with better accuracy.",
# "MCG is like ECG, except MCG is affected less by conductivity variations in the body (lungs, muscles, and skin).",
# "Snakemake is like Pwrake, except Snakemake does not rely on any password-less ssh setup or custom server processes running on the cluster nodes.",
# "reinforcement learning is like motivated learning except that reinforcement learning only has a single value function, relies only on externally set objectives, maximizes its reward (and is therefore unstable), and is always active."]
#----------------------------------------------------------
# REVISION FOR 3CLASS SETTING and v1-4CLASS: SEPARATE COMPARE AND CONTRAST EXAMPLES
#----------------------------------------------------------
best_compare_sentences = ["Recently, many NLP tasks have shown great improvements thanks to pre-trained language models. Models such as ELMo [10] , BERT [3] and GPT [11] include a large language model or contextual embedder with hundreds of millions of parameters trained on large datasets and were shown to produce state-of-the-art models for many NLP tasks, including low-resource scenarios where very little annotated data is available.",
"OrthoDisease (9) and PhenomicDB (8, 26) are two other resources that allow researchers to look simultaneously at all available phenotypes for an orthologous gene group. The PhenomicDB and OrthoDisease are useful resources integrating the phenotypes with the homologous genes from a variety of species.",
"Highway Networks [7] and Residual Networks [1] are the latest methods to tackle this problem. These network architectures introduce skip connections, which allow flow of information to later layers, empowering deeper networks with better accuracy.",
"similarly to ftg+pm, wires [41] supports the specification and execution of model transformation workflows.",
"like the nevanlinna-pick interpolation problem, the covariance extension problem has deep roots in the mathematical literature [2] and have numerous applications in various engineering fields, ranging from systems theory [3] , [4] to control design [5] and signal processing [6] ."]
# alternate compare: "We refer the reader to [20] for a mathematical formulation of multilayer networks, of which multiplex networks are a subset. Unlike a multilayer network, a multiplex network only allows for a single type of inter-layer connections via which any given node is connected only to its counterpart nodes in the other layers."
best_compare_formatted_statements = ["ELMo is like BERT in that they are both pre-trained language models that include a large language model or contextual embedder with hundreds of millions of parameters trained on large datasets and were shown to produce state-of-the-art models for many nlp tasks, including low-resource scenarios where very little annotated data is available.",
"PhenomicDB is like OrthoDisease in that they are both resources that allow researchers to look simultaneously at all available phenotypes for an orthologous gene group and are useful resources integrating the phenotypes with the homologous genes from a variety of species.",
"Residual Networks are like Highway Networks in that they are both network architectures that introduce skip connections, which allow flow of information to later layers, empowering deeper networks with better accuracy.",
"ftg+pm is like wires in that they both support the specification and execution of model transformation workflows.",
"covariance extension problem is like the nevanlinna-pick interpolation problem in that they are both problems that have deep roots in the mathematical literature and have numerous applications in various engineering fields, ranging from systems theory, to control design and signal processing."]
#----------------------------------------------------------
best_contrast_sentences = ["MCG is affected less by conductivity variations in the body (lungs, muscles, and skin) than ECG. In addition, because MCG is a fully non-contact method, therefore problems in the skinelectrode contact as encountered in ECG are avoided (Kanzaki et al., 2003; Tsukada et al., 1999; Tavarozzi et al., 2002) .",
"In contrast to Pwrake and GXP Make, Snakemake does not rely on any password-less SSH setup or custom server processes running on the cluster nodes. Finally, Snakemake is the first system to support file name inference with multiple named wildcards in rules.",
"in comparison to reinforcement learning, a motivated learning (ml) agent has multiple value functions, sets its own objectives, solves the minimax problem, is stable, and acts when needed. in contrast, a reinforcement learning (rl) agent typically only has a single value function, relies only on externally set objectives, maximizes its reward (and is therefore unstable), and is always active.",
"suitable data for the vqg task can come from standard image datasets on which questions have been manually annotated, such as v qg coco, v qg f lickr, v qg bing (mostafazadeh et al., 2016), each consisting of 5000 images with 5 questions per image. alternatively, vqg samples can be derived from visual question answering datasets, such as v qa1.0 (antol et al., 2015), by \"reversing\" them (taking images as inputs and questions as outputs).",
"uchime either uses a database of chimera-free sequences or detects chimeras de novo by exploiting abundance data. uchime has better sensitivity than chimeraslayer (previously the most sensitive database method), especially with short, noisy sequences.",
"partially labeled lda (plda) extends labeled lda to incorporate per-label latent topics (ramage et al., 2011)."]
best_contrast_formatted_statements = ["MCG is like ECG, except MCG is affected less by conductivity variations in the body (lungs, muscles, and skin).",
"Snakemake is like Pwrake, except Snakemake does not rely on any password-less ssh setup or custom server processes running on the cluster nodes.",
"reinforcement learning is like motivated learning except that reinforcement learning only has a single value function, relies only on externally set objectives, maximizes its reward (and is therefore unstable), and is always active.",
"v qg coco is like v qa1.0, except v qg coco is a standard image dataset and v qa1.0 is a visual question answering dataset.",
"uchime is like chimeraslayer except that it has better sensitivity, especially with short, noisy sequences.",
"partially labeled lda (plda) is like labeled lda, except partially labeled lda incorporates per-label latent topics."]
#----------------------------------------------------------
best_usedfor_sentences = ["In [24] , for example, the need to construct the cloaking regions and to receive the responses from the server through other users can considerably degrade the service. Many obfuscation-based techniques are based on k-anonymity, which has been shown inadequate to protect privacy [8] , [25] .",
"Recently, hashing methods have been widely used in ANN search. They usually learn a hamming space which is refined to maintain similarity between features Song et al., 2018c;",
"Perhaps even more promising and exciting, however, is recent work on using Reo for programming multicore applications. When it comes to multicore programming, Reo has a number of advantages over conventional programming languages, which feature a fixed set of low-level synchronization constructs (locks, mutexes, etc.).",
"Supercompilation is a program transformation technique that was first described by V. F. Turchin in the 1970s. In supercompilation, Turchin's relation as a similarity relation on call-stack configurations is used both for call-by-value and call-by-name semantics to terminate unfolding of the program being transformed.",
"Recently, convolutional networks have been used for automatic feature extraction of large image databases, where they have obtained state-of-the-art results. In this work we introduce EEGNet, a compact fully convolutional network for EEG-based BCIs developed using Deep Learning approaches."]
# alternate used-for: "Convolutional neural networks (CNN) in recommendation systems have been used to capture localized item feature representations of music [31] , text [16, 29] and images [40] . Previous methods represent text as bag-of-words representations, CNN overcomes this limitation by learning weight filters to identify the most prominent phrases within the text."
best_usedfor_formatted_statements = ["K-anonymity can be used for many obfuscation-based techniques, which has been shown inadequate to protect privacy.",
"Hashing methods can be used in ANN search to usually learn a hamming space which is refined to maintain similarity between features.",
"Reo can be used for programming multicore applications.",
"Turchin's relation can be used for call-by-value and call-by-name semantics to terminate unfolding of the program being transformed in supercompilation.",
"Convolutional networks can be used for automatic feature extraction of large image databases, where they have obtained state-of-the-art results."]
#----------------------------------------------------------
best_partof_sentences = ["The pose graph is constructed by using spatial human poses (black dots and lines), spatial object poses (red dots and lines), and temporal connections (blue lines). In spatial and temporal domains, the graph is used as the input to GCNs.",
"In fact, cosegmentations promise to be useful in other bioimaging (and eventually image processing) applications beyond cell tracking. One straightforward application where cosegmentation is of high relevance are protein colocalization studies.",
"Sparse coding problems assume the data y can be represented as a sparse linear combination of columns (features) of a matrix H, termed a dictionary. Given the dictionary H, methods such as orthogonal matching pursuit [1] and basis pursuit [2] find the sparse representation.",
"In this work, the graphs we consider have a special structure, in the form of a multiplex network, in the sense that each graph can be decomposed into a sequence of subgraphs, each of which corresponds to a layer of the network, and there exist interconnections linking nodes across different layers. We refer the reader to [20] for a mathematical formulation of multilayer networks, of which multiplex networks are a subset.",
"CNNs perform very well on any visual recognition tasks. The CNN architecture consists of special layers called convolutional layers and pooling layers."]
best_partof_formatted_statements = ["The pose graph is a part of spatial and temporal domains that is constructed by using spatial human poses (black dots and lines), spatial object poses (red dots and lines), and temporal connections (blue lines).",
"Cosegmentation is a part of protein colocalization studies.",
"Sparse linear combinations are a part of sparse coding problems that represents the data y in the columns (features) of a matrix h, termed a dictionary.",
"Multiplex networks are a part of multilayer networks, that can decompose each graph into a sequence of subgraphs, each of which corresponds to a layer of the network, and there exist interconnections linking nodes across diferent layers.",
"Pooling layers are part of CNN architectures."]
#----------------------------------------------------------
#----------------------------------------------------------
version = "v1-4class/nlp-concepts"
for concept in top150_nlp_concepts:
df_concept = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/top-n-forecite-concept-source-sentences/%s/scibert-multilabel-binary-predictions-all-s2orc-with-forecite-%s.csv" % (version, concept))
df_concept = df_concept.loc[:, ~df_concept.columns.str.contains('^Unnamed')]
if len(df_concept) == 0:
continue
for index, relation in enumerate(relations):
# select the rows that belong to the same relation
df_concept_relation = df_concept[df_concept['%s_pred_score' % relation]>0]
# drop duplicate source text
df_concept_relation = df_concept_relation.drop_duplicates(subset=['original_sentence'])
print(concept, relation, len(df_concept_relation))
# generate for only top N extractions to minimize GPT costs
top_N = 100
if len(df_concept_relation) > top_N:
# sort by max multilabel pred score and take the highest N
df_concept_relation = df_concept_relation.sort_values('%s_pred_score' % relation, ascending=False)
df_concept_relation = df_concept_relation[:top_N]
print(len(df_concept_relation))
# add to running total of number of descriptions
sum_len+=len(df_concept_relation)
if relation == "compare":
best_sentences = best_compare_sentences
best_formatted_statements = best_compare_formatted_statements
elif relation == "contrast":
best_sentences = best_contrast_sentences
best_formatted_statements = best_contrast_formatted_statements
elif relation == "isa":
best_sentences = best_isa_sentences
best_formatted_statements = best_isa_formatted_statements
# elif relation == "part-of":
# best_sentences = best_partof_sentences
# best_formatted_statements = best_partof_formatted_statements
# elif relation == "used-for":
# best_sentences = best_usedfor_sentences
# best_formatted_statements = best_usedfor_formatted_statements
getGPTGenerationsForRelation(relation, concept, df_concept_relation, best_sentences, best_formatted_statements)
#----------------------------------------------------------
# EXTRA CODE (IGNORE)
#----------------------------------------------------------
# # for "compare" class,select things that are marked as positive compare_pred_score but negative contrast_pred_score
# if relation =="compare":
# df_concept_relation = df_concept[df_concept['%s_pred_score' % relation]>0]
# df_concept_relation = df_concept[df_concept['contrast_pred_score']<0]
# else:
# # select rows with a positive prediction score because >0 == >0.5 probability
# df_concept_relation = df_concept[df_concept['%s_pred_score' % relation]>0]
| ACCoRD-main | system/code/generation/get-concept-gpt-formatted-statements.py |
import numpy as np
import pandas as pd
import spacy
import random
from sklearn.model_selection import GroupShuffleSplit
import ast
import re
setting = "union-1sentence-both2sentence"
# load test set
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/%s/union-multilabel-data-withformattedstatements-§-test.csv" % setting)
sources = []
targets = []
sources_compare = []
sources_isa = []
sources_partof = []
sources_usedfor = []
targets_compare = []
targets_isa = []
targets_partof = []
targets_usedfor = []
separator = "<ENT>"
for index, row in df.iterrows():
if pd.notna(row['formatted_statement']) and ('[' not in row['formatted_statement']):
sentence = row['sentence']
formatted_statement = row['formatted_statement']
print(sentence)
matches = re.finditer('\u00a7', sentence)
# get a list containing only the start indices.
matches_positions = [match.start() for match in matches]
        # skip rows where the concept is not demarcated by two § markers
        if len(matches_positions) < 2:
            continue
        start = matches_positions[0]
        end = matches_positions[1]
# get demarcated concept from source sentence
concept = sentence[start+2:end-1]
# find and demarcate concept in target
matches = re.finditer(concept, formatted_statement)
# get a list containing only the start indices.
matches_positions = [match.start() for match in matches]
if len(matches_positions) < 1:
# print(concept)
# print(formatted_statement)
continue
start = matches_positions[0]
demarcated_formatted_statement = formatted_statement[:start] + separator + " " + formatted_statement[start:start+len(concept)] + " " + separator + formatted_statement[start+len(concept):]
# create dataset for all examples
sources.append(sentence.replace('\u00a7', '<ENT>')) # append new entity demarcated sentence to source list
targets.append(demarcated_formatted_statement)
# if 'compare' in row['relation']:
# sources_compare.append(sentence.replace('\u00a7', '<ENT>')) # append new entity demarcated sentence to source list
# targets_compare.append(demarcated_formatted_statement)
# if ('is-a' in row['relation']) or ('type-of' in row['relation']):
# sources_isa.append(sentence.replace('\u00a7', '<ENT>')) # append new entity demarcated sentence to source list
# targets_isa.append(demarcated_formatted_statement)
# if 'part-of' in row['relation']:
# sources_partof.append(sentence.replace('\u00a7', '<ENT>')) # append new entity demarcated sentence to source list
# targets_partof.append(demarcated_formatted_statement)
# if ('used-for' in row['relation']) or ('based-on' in row['relation']):
# sources_usedfor.append(sentence.replace('\u00a7', '<ENT>')) # append new entity demarcated sentence to source list
# targets_usedfor.append(demarcated_formatted_statement)
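# Example of the source/target pair built above (hypothetical strings):
#   source: "we use <ENT> word2vec <ENT> embeddings as features."
#   target: "<ENT> word2vec <ENT> is a word embedding method."
# i.e. the § markers around the concept in the annotated sentence are replaced with <ENT>,
# and the same concept span is wrapped with <ENT> in the formatted statement.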
dataset = 'train'
# create data for all examples
df_output = pd.DataFrame(list(zip(sources, targets)),
columns =['source', 'target'])
df_output.to_json("./%s/%s.json" % (setting, dataset), orient='records', lines=True)
# # create separate datasets for each relation category
# df_output = pd.DataFrame(list(zip(sources_compare, targets_compare)),
# columns =['source', 'target'])
# df_output.to_json("./%s/%s-compare.json" % (setting, dataset), orient='records', lines=True)
# df_output = pd.DataFrame(list(zip(sources_isa, targets_isa)),
# columns =['source', 'target'])
# df_output.to_json("./%s/%s-isa.json" % (setting, dataset), orient='records', lines=True)
# df_output = pd.DataFrame(list(zip(sources_partof, targets_partof)),
# columns =['source', 'target'])
# df_output.to_json("./%s/%s-partof.json" % (setting, dataset), orient='records', lines=True)
# df_output = pd.DataFrame(list(zip(sources_usedfor, targets_usedfor)),
# columns =['source', 'target'])
# df_output.to_json("./%s/%s-usedfor.json" % (setting, dataset), orient='records', lines=True) | ACCoRD-main | system/inputs/format-lightning-transformers-input.py |
import numpy as np
import pandas as pd
import spacy
import random
from sklearn.model_selection import GroupShuffleSplit
import ast
import re
import os
#----------------------------------------------------------
# aggregate all batches' 2-sentence, 1-concept instances
#----------------------------------------------------------
# df_all = pd.DataFrame()
# for i in range(0,100):
# df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-batch-%d.csv" % i)
# df_all = df_all.append(df)
# print(df_all)
# df_all = df_all.drop_duplicates(subset=['sentence', 'sentence_original_case', 'forecite_concepts'])
# print(df_all)
# df_all.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows.csv")
#----------------------------------------------------------
# aggregate all demarcated rows
#---------------------------------------------------------
df_demarcated = pd.DataFrame()
directory = "/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/demarcated-batches/"
for filename in os.listdir(directory):
print(filename)
df = pd.read_csv(directory + filename)
df_demarcated = df_demarcated.append(df)
df_demarcated = df_demarcated.drop_duplicates(subset=['paper_id','original_sentence','sentence','forecite_concept'])
print(df_demarcated)
# remove paper_ids that are in the annotated data
df_annotations = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/annotations-round2/union-1sentence-both2sentence/union-multilabel-data-§.csv")
paper_ids_to_exclude = df_annotations['paper_id']
df_demarcated = df_demarcated[~df_demarcated['paper_id'].isin(paper_ids_to_exclude)]
print(df_demarcated)
df_demarcated.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-demarcated.csv")
| ACCoRD-main | system/inputs/aggregate-data-batches.py |
import pandas as pd
from ast import literal_eval
#--------------------------------------------------------
# AGGREGATE SCIBERT MULTILABEL AND BINARY PREDICTIONS
#--------------------------------------------------------
# get df of all positive predictions from scibert
df_binary_positive = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/bart-lightning-transformers/scibert-positive-predictions-all-s2orc-with-concepta.csv")
print(df_binary_positive)
# get df of all rows with forecite concepts
df_binary_positive_with_concepts = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-demarcated.csv")
print(df_binary_positive_with_concepts)
print(df_binary_positive_with_concepts.keys())
# get df of all multilabel predictions from scibert
df_multilabel = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/scibert-multilabel-classification/run-best-model-corrected-with-contrast-class/predictions/scibert-3class-weightedBCE-cls/best-params-all-s2orc/seed=42-epochs=10-lr=0.000050-bs=32-best-params-all-s2orc.csv")
print(df_multilabel)
# select the rows from the multilabel dataset that have the same sentences as the binary label dataset
df_multilabel_positive = df_multilabel.merge(df_binary_positive, on=["sentence"])
print(df_multilabel_positive.keys())
print(df_multilabel_positive)
df_multilabel_positive_with_forecite_concepts = pd.merge(df_binary_positive_with_concepts, df_multilabel_positive, on="sentence", how="outer")
# remove rows where forecite_concept is nan
df_multilabel_positive_with_forecite_concepts = df_multilabel_positive_with_forecite_concepts.dropna()
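# The outer merge plus dropna() above effectively keeps only sentences present in both frames:
# rows missing from either side (or missing a ForeCite concept) pick up NaNs and are dropped.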
print(df_multilabel_positive_with_forecite_concepts.keys())
# Select the ones you want
df_multilabel_positive_with_forecite_concepts = df_multilabel_positive_with_forecite_concepts[['paper_id', 'original_sentence','sentence', 'forecite_concept', 'scibert_pred_x','scibert_pred_y','bart_sentences', 'concept_a']]
print(df_multilabel_positive_with_forecite_concepts)
print(df_multilabel_positive_with_forecite_concepts.keys())
print(df_multilabel_positive_with_forecite_concepts['scibert_pred_x'])
print(df_multilabel_positive_with_forecite_concepts['scibert_pred_y'])
df_multilabel_positive_with_forecite_concepts = df_multilabel_positive_with_forecite_concepts.rename(columns={"scibert_pred_x": "multilabel_pred", "scibert_pred_y": "binary_pred"})
# df_multilabel_positive_with_forecite_concepts['multilabel_pred'] = pd.eval(df_multilabel_positive_with_forecite_concepts['multilabel_pred'])
# print(type(df_multilabel_positive_with_forecite_concepts.at[0, 'multilabel_pred']))
df_multilabel_positive_with_forecite_concepts.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/scibert-multilabel-binary-predictions-all-s2orc-with-forecite-concepts-revised-3class.csv") | ACCoRD-main | system/inputs/aggregate-scibert-multilabel-binary-predictions.py |
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import transformers
import matplotlib.pyplot as plt
from transformers import *
import matplotlib.ticker as mticker
import spacy
import ast
import re
import random
import Levenshtein as lev
import os
import sys
#----------------------------------------------------------
# load data
#----------------------------------------------------------
start = int(sys.argv[1])
end = int(sys.argv[2])
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows.csv")
df = df[start:end]
# set up sentencizer
nlp = spacy.load("en_core_web_md")
tokenizer = nlp.tokenizer
nlp.add_pipe("sentencizer")
all_stopwords = nlp.Defaults.stop_words
separator = "§"
#----------------------------------------------------------
# demarcate concepts
#----------------------------------------------------------
def processPhrase(phrase):
if len(phrase.text.split()) > 1:
p = phrase.text.split()
if p[0] in all_stopwords: # if first word is a stop word
phrase_no_stopwords = " ".join(p[1:])
elif p[-1] in all_stopwords: # if last word is a stop word
phrase_no_stopwords = " ".join(p[:-1])
else: # if neither is a stop word
phrase_no_stopwords = phrase.text
# if there is only one word in this phrase, just append the text version of it
else: phrase_no_stopwords = phrase.text
return phrase_no_stopwords
def demarcateConcepts(all_phrase_keys, fc_concept, fc_phrase, sentence):
# get starting index of this phrase in the sentence
# get an iterable object containing the start and end indices of each occurrence of pattern in string
matches = re.finditer(re.escape(str(fc_phrase)), sentence)
# list comprehension to get a list containing only the start indices.
matches_positions = [match.start() for match in matches]
if fc_phrase == "":
print("sentence had no match with concept = %s" % fc_concept)
print(all_phrase_keys)
print(sentence)
print("")
txt = "sentence had no concept match"
elif len(matches_positions) == 1:
for m in matches_positions:
start = m
txt = sentence[:start] + separator + " " + sentence[start:start+len(fc_phrase)] + " " + separator + sentence[start+len(fc_phrase):]
df.at[index, 'sentence'] = txt
# if there are multiple occurrences of this forecite concept in the text
elif len(matches_positions) >= 2:
for i in range(len(matches_positions)):
start = matches_positions[i]
if i == 0:
txt = sentence[:start] + separator + " " + sentence[start:start+len(fc_phrase)] + " " + separator + sentence[start+len(fc_phrase):]
sentence = txt # set the modified sentence as the new sentence for the next iteration
if i == 1:
                start += 4 # add 4 characters to accommodate the separator and space (x2) that were added in the previous round
txt = sentence[:start] + separator + " " + sentence[start:start+len(fc_phrase)] + " " + separator + sentence[start+len(fc_phrase):]
df.at[index, 'sentence'] = txt
else:
txt = "sentence had no concept match"
return txt
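# Illustrative example of the demarcation above (not taken from the data): for the sentence
# "we fine-tune bert on squad" with fc_phrase = "bert", the result is
# "we fine-tune § bert § on squad".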
#----------------------------------------------------------
demarcated_sentences = []
lowercase_sentences = []
original_case_sentences = []
concepts = []
paper_ids = []
for index, row in df.iterrows():
if index % 1000 == 0:
print("...processing row %d" % index)
original_case_sentence = row['sentence_original_case']
sentence = row['sentence']
paper_id = row['paper_id']
all_unique_fc_concepts = list(set(list(ast.literal_eval(row['forecite_concepts']))))
for fc_concept in all_unique_fc_concepts:
# get variants of forecite concepts for matching
lemmatized_fc_concept = " ".join([t.lemma_ for t in nlp(fc_concept) if not (t.is_stop)])
no_punc_fc_concept = re.sub(r'[^\w\s]','', fc_concept)
doc = nlp(sentence)
fc_phrase = ""
all_phrase_keys = []
for phrase in doc.noun_chunks:
tokens = [t for t in phrase]
# process phrase like forecite does
phrase_key = " ".join([t.lemma_ for t in tokens if not (t.is_stop)])
all_phrase_keys.append(phrase_key)
if (fc_concept in phrase_key) or (lemmatized_fc_concept in phrase_key) or (no_punc_fc_concept in re.sub(r'[^\w\s]','', phrase_key)):
fc_phrase = processPhrase(phrase)
break
demarcated_sentence = demarcateConcepts(all_phrase_keys, fc_concept, fc_phrase, sentence)
# append to lists for dataframe
demarcated_sentences.append(demarcated_sentence)
lowercase_sentences.append(sentence)
original_case_sentences.append(original_case_sentence)
concepts.append(fc_concept)
paper_ids.append(paper_id)
# save lists as a df
df_output = pd.DataFrame(list(zip(paper_ids, original_case_sentences, lowercase_sentences, demarcated_sentences, concepts)),
columns =['paper_id', 'sentence_original_case', 'sentence_lowercase', 'sentence', 'forecite_concept'])
print(df_output)
ids = df_output['paper_id'].unique()
random.Random(4).shuffle(ids)
df_output = df_output.set_index('paper_id').loc[ids].reset_index()
print(df_output)
df_output.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/concepts-from-both-sentences/all-2sentence-1concept-rows-demarcated-%d-%d.csv" % (start, end))
| ACCoRD-main | system/inputs/demarcate-unlabeled-data.py |
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
from operator import itemgetter
import numpy as np
import re
# load final df of sentences to select concept-specific data from
df = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/scibert-multilabel-binary-predictions-all-s2orc-with-forecite-concepts.csv")
print(df.keys())
print(len(set(df['paper_id'].to_list())))
#--------------------------------------------------------
# unpickle concept dictionary
with open('/net/nfs2.s2-research/soniam/concept-rel/resources/forecite/forecite_concept_dict.pickle', 'rb') as handle:
concept_dict = pickle.load(handle)
print("...unpickled forecite concept dictionary")
# Initialize N
N = 50
# N largest values in dictionary
# Using sorted() + itemgetter() + items()
res = dict(sorted(concept_dict.items(), key = itemgetter(1), reverse = True)[:N])
# printing result
print("The top N value pairs are " + str(res))
concept_list = list(res.keys())
# print(concept_list)
#--------------------------------------------------------
selected_nlp_concepts = ['adversarial training', 'beam search', 'bert', 'elmo', 'gpt', 'glove', 'word2vec', 'resnet', 'domain shift', 'ulmfit', 'newsqa', 'squad', 'random forest', 'imagenet', 'lstm', 'roberta', 'variational autoencoder', 'dropout', 'fasttext', 'hierarchical softmax', 'distant supervision']
#--------------------------------------------------------
df_nlp_concepts = pd.read_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/gpt-generations/top-n-forecite-concepts-gpt-generations/nlp_concepts.csv")
nlp_concept_scores = []
for index, row in df_nlp_concepts.iterrows():
# get forecite score for each nlp concept
nlp_concept_scores.append(concept_dict[row['concept']])
df_nlp_concepts['score'] = nlp_concept_scores
df_nlp_concepts = df_nlp_concepts[~df_nlp_concepts.concept.isin(['bert model', 'pre - train bert model', 'pre - train bert', 'moses', 'cho et al', 'dagan', 'yarowsky', 'hochreiter', 'turney', 'ney', 'och', 'grosz', 'steedman', 'well translation'])]
# # top 150 nlp concepts by score
# top150_nlp_concepts = df_nlp_concepts.sort_values(by='score', ascending=False)[:150]['concept'].tolist()
# top 150 nlp concepts by num_papers
top150_nlp_concepts = df_nlp_concepts.sort_values(by='num_papers', ascending=False)[:150]['concept'].tolist()
print(top150_nlp_concepts)
#--------------------------------------------------------
num_sents = 0
# get predictions for all rows for a concept
for concept in top150_nlp_concepts:
df_concept = df[df['forecite_concept']==concept]
print(concept, len(df_concept))
num_sents += len(df_concept)
if len(df_concept) == 0:
continue
# process multilabel pred scores
# indices for old 4class correspond to ['compare' 'is-a' 'part-of' 'used-for']
# indices for new 3class correspond to ['compare' 'contrast' 'isa']
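    # The stored predictions appear to be numpy-style array strings, e.g. "[ 0.12 -1.30  0.45  2.10]",
    # so stripping the brackets and splitting on whitespace below recovers one float per class.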
all_preds = []
for index, row in df_concept.iterrows():
temp = row['multilabel_pred'].strip("[]").split()
preds = []
for t in temp:
preds.append(float(t))
all_preds.append(preds)
all_preds = np.array(all_preds)
# convert multilabel pred scores to a label by selecting the max pred score
result = np.argmax(all_preds, axis=1)
max = np.max(all_preds, axis=1)
df_concept['index_of_max_multilabel_pred'] = result
df_concept['max_multilabel_pred_score'] = max
# # convert multilabel pred scores to a label by taking any score above 0 (equals >0.5 probability)
# bool_ind = all_preds > 0
# print(len(bool_ind[:,1]))
# print(np.any(bool_ind[:,1]))
df_concept['compare_pred_score'] = all_preds[:,0]
df_concept['isa_pred_score'] = all_preds[:,1]
df_concept['partof_pred_score'] = all_preds[:,2]
df_concept['usedfor_pred_score'] = all_preds[:,3]
# try to find "contrast" label examples within "compare" predictions using heuristics
contrast_words = ['unlike', 'alternative to', 'alternatively', 'conversely', 'than', 'contrast', 'compared to', 'in comparison', 'instead', 'whereas', 'while', 'except', 'previous', 'different from', 'different to', 'differs', 'extends', 'extension']
possible_contrast = []
for index, row in df_concept.iterrows():
value = -1
if (row['compare_pred_score'] > 0):
for word in contrast_words:
if re.search(r'\b' + word + r'\b', row['original_sentence']):
value = row['compare_pred_score']
possible_contrast.append(value)
if value>0:
print(row['compare_pred_score'], row['original_sentence'])
df_concept['contrast_pred_score'] = possible_contrast
df_concept.to_csv("/net/nfs2.s2-research/soniam/concept-rel/abstractive-summarization/inputs/top-n-forecite-concept-source-sentences/v1-4class/nlp-concepts/scibert-multilabel-binary-predictions-all-s2orc-with-forecite-%s.csv" % concept)
print(num_sents) | ACCoRD-main | system/inputs/get-top-n-forecite-concept-source-sentences.py |
import logging
import random
import sys
import pytest
from beaker import Beaker
logger = logging.getLogger(__name__)
def _get_unique_name() -> str:
from gantry.util import unique_name
return unique_name()
@pytest.fixture
def run_name() -> str:
return _get_unique_name()
@pytest.fixture
def workspace_name() -> str:
return "ai2/gantry-testing"
@pytest.fixture
def public_workspace_name() -> str:
return "ai2/gantry-testing-public"
@pytest.fixture()
def beaker(workspace_name):
beaker_client = Beaker.from_env(default_workspace=workspace_name, default_org="ai2")
return beaker_client
def _get_free_cluster(beaker: Beaker) -> str:
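    # Pick a random on-prem cluster with an empty queue; fall back to the general
    # cluster below if every candidate is busy.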
choices = [
"ai2/general-cirrascale",
"ai2/allennlp-cirrascale",
"ai2/aristo-cirrascale",
"ai2/mosaic-cirrascale",
"ai2/s2-cirrascale",
]
random.shuffle(choices)
for cluster in choices:
utilization = beaker.cluster.utilization(cluster)
if utilization.queued_jobs == 0:
logger.info("Found suitable on-prem cluster '%s'", cluster)
return cluster
return "ai2/general-cirrascale"
@pytest.fixture()
def beaker_cluster_name(beaker: Beaker) -> str:
return _get_free_cluster(beaker)
if __name__ == "__main__":
beaker_client = Beaker.from_env()
assert len(sys.argv) == 2
fixture = sys.argv[-1]
if fixture == "run_name":
print(_get_unique_name())
elif fixture == "cluster":
print(_get_free_cluster(beaker_client))
else:
raise ValueError(f"Bad fixture argument '{fixture}'")
| beaker-gantry-main | conftest.py |
beaker-gantry-main | tests/__init__.py |
|
import pytest
from gantry.exceptions import InvalidRemoteError
from gantry.util import parse_git_remote_url
def test_parse_git_remote_url_ssh():
assert parse_git_remote_url("[email protected]:allenai/beaker-gantry.git") == (
"allenai",
"beaker-gantry",
)
def test_parse_git_remote_url_https():
assert parse_git_remote_url("https://github.com/allenai/beaker-gantry.git") == (
"allenai",
"beaker-gantry",
)
def test_invalid_git_remote_url():
with pytest.raises(InvalidRemoteError):
parse_git_remote_url("[email protected]/allenai/beaker-gantry.git")
| beaker-gantry-main | tests/util_test.py |
import subprocess
from gantry.version import VERSION
def test_help():
result = subprocess.run(["gantry", "--help"])
assert result.returncode == 0
def test_version():
result = subprocess.run(["gantry", "--version"], capture_output=True, text=True)
assert result.returncode == 0
assert VERSION in result.stdout
def test_dry_run(workspace_name: str, beaker_cluster_name: str, run_name: str):
result = subprocess.run(
[
"gantry",
"run",
"--dry-run",
"--allow-dirty",
"--name",
run_name,
"--workspace",
workspace_name,
"--cluster",
beaker_cluster_name,
"--yes",
"--",
"python",
"-c",
"print('Hello, World!')",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
| beaker-gantry-main | tests/main_test.py |
import json
from gantry import METRICS_FILE
def main():
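    # Gantry mounts the Beaker result dataset at /results, so anything written to
    # METRICS_FILE (/results/metrics.json) is surfaced as the experiment's metrics.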
with open(METRICS_FILE, "w") as f:
json.dump({"loss": 0.1, "accuracy": 0.95}, f)
print(f"\N{check mark} Done! Metrics written to {METRICS_FILE}")
if __name__ == "__main__":
main()
| beaker-gantry-main | examples/metrics/run.py |
from datetime import datetime
from pathlib import Path
from gantry.version import VERSION
def main():
changelog = Path("CHANGELOG.md")
with changelog.open() as f:
lines = f.readlines()
insert_index: int = -1
for i in range(len(lines)):
line = lines[i]
if line.startswith("## Unreleased"):
insert_index = i + 1
elif line.startswith(f"## [v{VERSION}]"):
print("CHANGELOG already up-to-date")
return
elif line.startswith("## [v"):
break
if insert_index < 0:
raise RuntimeError("Couldn't find 'Unreleased' section")
lines.insert(insert_index, "\n")
lines.insert(
insert_index + 1,
f"## [v{VERSION}](https://github.com/allenai/beaker-gantry/releases/tag/v{VERSION}) - "
f"{datetime.now().strftime('%Y-%m-%d')}\n",
)
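    # The inserted heading looks like, e.g.:
    # "## [v0.19.0](https://github.com/allenai/beaker-gantry/releases/tag/v0.19.0) - 2024-01-01"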
with changelog.open("w") as f:
f.writelines(lines)
if __name__ == "__main__":
main()
| beaker-gantry-main | scripts/prepare_changelog.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List, Optional
import packaging.version
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
new_version = packaging.version.parse(TAG)
# Get all tags sorted by version, latest first.
all_tags = os.popen("git tag -l --sort=-version:refname 'v*'").read().split("\n")
# Out of `all_tags`, find the latest previous version so that we can collect all
# commits between that version and the new version we're about to publish.
# Note that we ignore pre-releases unless the new version is also a pre-release.
last_tag: Optional[str] = None
for tag in all_tags:
if not tag.strip(): # could be blank line
continue
version = packaging.version.parse(tag)
if new_version.pre is None and version.pre is not None:
continue
if version < new_version:
last_tag = tag
break
if last_tag is not None:
commits = os.popen(f"git log {last_tag}..{TAG} --oneline --first-parent").read()
else:
commits = os.popen("git log --oneline --first-parent").read()
return "## Commits\n\n" + commits
def main():
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| beaker-gantry-main | scripts/release_notes.py |
_MAJOR = "0"
_MINOR = "19"
_PATCH = "0"
_SUFFIX = ""
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| beaker-gantry-main | gantry/version.py |
import platform
import tempfile
import time
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union, cast
import requests
import rich
from beaker import (
Beaker,
Dataset,
DatasetConflict,
DatasetNotFound,
Digest,
Experiment,
ExperimentSpec,
Job,
JobTimeoutError,
Priority,
SecretNotFound,
TaskResources,
TaskSpec,
WorkspaceNotSet,
)
from rich import print, prompt
from rich.console import Console
from . import constants
from .aliases import PathOrStr
from .constants import GITHUB_TOKEN_SECRET
from .exceptions import *
from .version import VERSION
if TYPE_CHECKING:
from datetime import timedelta
def unique_name() -> str:
import uuid
import petname
return cast(str, petname.generate()) + "-" + str(uuid.uuid4())[:7]
def stderr_console() -> Console:
return Console(stderr=True)
def print_stderr(*args, **kwargs):
stderr_console().print(*args, **kwargs)
def print_exception(*args, **kwargs):
stderr_console().print_exception(*args, **kwargs)
def parse_git_remote_url(url: str) -> Tuple[str, str]:
"""
Parse a git remote URL into a GitHub (account, repo) pair.
:raises InvalidRemoteError: If the URL can't be parsed correctly.
"""
try:
account, repo = (
url.split("https://github.com/")[-1]
.split("[email protected]:")[-1]
.split(".git")[0]
.split("/")
)
except ValueError:
raise InvalidRemoteError(f"Failed to parse GitHub repo path from remote '{url}'")
return account, repo
def display_logs(logs: Iterable[bytes], ignore_timestamp: Optional[str] = None) -> Optional[str]:
console = rich.get_console()
latest_timestamp: Optional[str] = None
def print_line(line: str):
if not line:
return
nonlocal latest_timestamp
# Remove timestamp
try:
timestamp, line = line.split("Z ", maxsplit=1)
latest_timestamp = f"{timestamp}Z"
if ignore_timestamp is not None and latest_timestamp == ignore_timestamp:
return
except ValueError:
pass
console.print(line, highlight=False, markup=False)
line_buffer = ""
for bytes_chunk in logs:
chunk = line_buffer + bytes_chunk.decode(errors="ignore")
chunk = chunk.replace("\r", "\n")
lines = chunk.split("\n")
if chunk.endswith("\n"):
line_buffer = ""
else:
# Last line chunk is probably incomplete.
lines, line_buffer = lines[:-1], lines[-1]
for line in lines:
print_line(line)
print_line(line_buffer)
return latest_timestamp
def follow_experiment(beaker: Beaker, experiment: Experiment, timeout: int = 0) -> Job:
start = time.monotonic()
# Wait for job to start...
job: Optional[Job] = beaker.experiment.tasks(experiment.id)[0].latest_job # type: ignore
if job is None:
print("Waiting for job to launch..", end="")
while job is None:
time.sleep(1.0)
print(".", end="")
job = beaker.experiment.tasks(experiment.id)[0].latest_job # type: ignore
exit_code: Optional[int] = job.status.exit_code
stream_logs = exit_code is None
if stream_logs:
print()
rich.get_console().rule("Logs")
last_timestamp: Optional[str] = None
since: Optional[Union[str, datetime]] = datetime.utcnow()
while stream_logs and exit_code is None:
job = beaker.experiment.tasks(experiment.id)[0].latest_job # type: ignore
assert job is not None
exit_code = job.status.exit_code
last_timestamp = display_logs(
beaker.job.logs(job, quiet=True, since=since),
ignore_timestamp=last_timestamp,
)
since = last_timestamp or since
time.sleep(2.0)
if timeout > 0 and time.monotonic() - start >= timeout:
raise JobTimeoutError(f"Job did not finish within {timeout} seconds")
if stream_logs:
rich.get_console().rule("End logs")
print()
return job
def display_results(beaker: Beaker, experiment: Experiment, job: Job):
exit_code = job.status.exit_code
assert exit_code is not None
if exit_code > 0:
raise ExperimentFailedError(f"Experiment exited with non-zero code ({exit_code})")
assert job.execution is not None
assert job.status.started is not None
assert job.status.exited is not None
result_dataset = None
if job.result is not None and job.result.beaker is not None:
result_dataset = job.result.beaker
print(
f"[b green]\N{check mark}[/] [b cyan]{experiment.name}[/] completed successfully\n"
f"[b]Experiment:[/] {beaker.experiment.url(experiment)}\n"
f"[b]Runtime:[/] {format_timedelta(job.status.exited - job.status.started)}\n"
f"[b]Results:[/] {None if result_dataset is None else beaker.dataset.url(result_dataset)}"
)
metrics = beaker.experiment.metrics(experiment)
if metrics is not None:
print("[b]Metrics:[/]", metrics)
def ensure_repo(allow_dirty: bool = False) -> Tuple[str, str, str, bool]:
from git.repo import Repo
repo = Repo(".")
if repo.is_dirty() and not allow_dirty:
raise DirtyRepoError("You have uncommitted changes! Use --allow-dirty to force.")
git_ref = str(repo.commit())
account, repo = parse_git_remote_url(repo.remote().url)
response = requests.get(f"https://github.com/{account}/{repo}")
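    # A 200 response means the repository is publicly visible; a 404 means it is private
    # (or does not exist). Anything else is treated as an error below.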
if response.status_code not in {200, 404}:
response.raise_for_status()
is_public = response.status_code == 200
return account, repo, git_ref, is_public
def ensure_entrypoint_dataset(beaker: Beaker) -> Dataset:
import hashlib
from importlib.resources import read_binary
import gantry
workspace_id = beaker.workspace.get().id
# Get hash of the local entrypoint source file.
sha256_hash = hashlib.sha256()
contents = replace_tags(read_binary(gantry, constants.ENTRYPOINT))
sha256_hash.update(contents)
entrypoint_dataset_name = f"gantry-v{VERSION}-{workspace_id}-{sha256_hash.hexdigest()[:6]}"
# Ensure gantry entrypoint dataset exists.
gantry_entrypoint_dataset: Dataset
try:
gantry_entrypoint_dataset = beaker.dataset.get(entrypoint_dataset_name)
except DatasetNotFound:
# Create it.
print(f"Creating entrypoint dataset '{entrypoint_dataset_name}'")
try:
with tempfile.TemporaryDirectory() as tmpdirname:
tmpdir = Path(tmpdirname)
entrypoint_path = tmpdir / constants.ENTRYPOINT
with open(entrypoint_path, "wb") as entrypoint_file:
entrypoint_file.write(contents)
gantry_entrypoint_dataset = beaker.dataset.create(
entrypoint_dataset_name, entrypoint_path
)
except DatasetConflict: # could be in a race with another `gantry` process.
time.sleep(1.0)
gantry_entrypoint_dataset = beaker.dataset.get(entrypoint_dataset_name)
# Verify contents.
err_msg = (
f"Checksum failed for entrypoint dataset {beaker.dataset.url(gantry_entrypoint_dataset)}\n"
f"This could be a bug, or it could mean someone has tampered with the dataset.\n"
f"If you're sure no one has tampered with it, you can delete the dataset from "
f"the Beaker dashboard and try again."
)
ds_files = list(beaker.dataset.ls(gantry_entrypoint_dataset))
if len(ds_files) != 1:
raise EntrypointChecksumError(err_msg)
if ds_files[0].digest != Digest.from_decoded(sha256_hash.digest(), "SHA256"):
raise EntrypointChecksumError(err_msg)
return gantry_entrypoint_dataset
def ensure_github_token_secret(
beaker: Beaker, secret_name: str = constants.GITHUB_TOKEN_SECRET
) -> str:
try:
beaker.secret.get(secret_name)
except SecretNotFound:
raise GitHubTokenSecretNotFound(
f"GitHub token secret '{secret_name}' not found in Beaker workspace!\n"
f"You can create a suitable GitHub token by going to https://github.com/settings/tokens/new "
f"and generating a token with '\N{ballot box with check} repo' scope.\n"
f"Then upload your token as a Beaker secret using the Beaker CLI or Python client."
)
return secret_name
def format_timedelta(td: "timedelta") -> str:
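    # Renders a timedelta as a human-readable string, e.g. (illustrative):
    # format_timedelta(timedelta(seconds=3725)) -> "1 hour, 2 minutes, 5 seconds"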
def format_value_and_unit(value: int, unit: str) -> str:
if value == 1:
return f"{value} {unit}"
else:
return f"{value} {unit}s"
parts = []
seconds = int(td.total_seconds())
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days:
parts.append(format_value_and_unit(days, "day"))
if hours:
parts.append(format_value_and_unit(hours, "hour"))
if minutes:
parts.append(format_value_and_unit(minutes, "minute"))
if seconds:
parts.append(format_value_and_unit(seconds, "second"))
return ", ".join(parts)
def ensure_datasets(beaker: Beaker, *datasets: str) -> List[Tuple[str, Optional[str], str]]:
out = []
for dataset_str in datasets:
dataset_name: str
path: str
sub_path: Optional[str] = None
if dataset_str.count(":") == 1:
dataset_name, path = dataset_str.split(":")
elif dataset_str.count(":") == 2:
dataset_name, sub_path, path = dataset_str.split(":")
else:
raise ValueError(
f"Bad '--dataset' specification: '{dataset_str}'\n"
f"Datasets should be in the form of 'dataset-name:/mount/location'"
f"or 'dataset-name:sub/path:/mount/location'"
)
dataset_id = beaker.dataset.get(dataset_name).id
out.append((dataset_id, sub_path, path))
return out
def build_experiment_spec(
task_name: str,
clusters: List[str],
task_resources: TaskResources,
arguments: List[str],
entrypoint_dataset: str,
github_account: str,
github_repo: str,
git_ref: str,
description: Optional[str] = None,
beaker_image: Optional[str] = None,
docker_image: Optional[str] = None,
gh_token_secret: Optional[str] = constants.GITHUB_TOKEN_SECRET,
conda: Optional[PathOrStr] = None,
pip: Optional[PathOrStr] = None,
venv: Optional[str] = None,
nfs: Optional[bool] = None,
datasets: Optional[List[Tuple[str, Optional[str], str]]] = None,
env: Optional[List[Tuple[str, str]]] = None,
env_secrets: Optional[List[Tuple[str, str]]] = None,
priority: Optional[Union[str, Priority]] = None,
install: Optional[str] = None,
replicas: Optional[int] = None,
leader_selection: bool = False,
host_networking: bool = False,
mounts: Optional[List[Tuple[str, str]]] = None,
hostnames: Optional[List[str]] = None,
):
task_spec = (
TaskSpec.new(
task_name,
beaker_image=beaker_image,
docker_image=docker_image,
result_path=constants.RESULTS_DIR,
command=["bash", "/gantry/entrypoint.sh"],
arguments=arguments,
resources=task_resources,
priority=priority,
replicas=replicas,
leader_selection=leader_selection,
host_networking=host_networking,
)
.with_env_var(name="GANTRY_VERSION", value=VERSION)
.with_env_var(name="GITHUB_REPO", value=f"{github_account}/{github_repo}")
.with_env_var(name="GIT_REF", value=git_ref)
.with_dataset("/gantry", beaker=entrypoint_dataset)
)
if clusters:
task_spec = task_spec.with_constraint(cluster=clusters)
if hostnames:
task_spec = task_spec.with_constraint(hostname=hostnames)
if gh_token_secret is not None:
task_spec = task_spec.with_env_var(name="GITHUB_TOKEN", secret=gh_token_secret)
for name, val in env or []:
task_spec = task_spec.with_env_var(name=name, value=val)
for name, secret in env_secrets or []:
task_spec = task_spec.with_env_var(name=name, secret=secret)
if conda is not None:
task_spec = task_spec.with_env_var(
name="CONDA_ENV_FILE",
value=str(conda),
)
elif Path(constants.CONDA_ENV_FILE).is_file():
task_spec = task_spec.with_env_var(
name="CONDA_ENV_FILE",
value=constants.CONDA_ENV_FILE,
)
elif Path(constants.CONDA_ENV_FILE_ALTERNATE).is_file():
task_spec = task_spec.with_env_var(
name="CONDA_ENV_FILE",
value=constants.CONDA_ENV_FILE_ALTERNATE,
)
else:
task_spec = task_spec.with_env_var(
name="PYTHON_VERSION", value=".".join(platform.python_version_tuple()[:-1])
)
if pip is not None:
task_spec = task_spec.with_env_var(
name="PIP_REQUIREMENTS_FILE",
value=str(pip),
)
if venv is not None:
task_spec = task_spec.with_env_var(
name="VENV_NAME",
value=venv,
)
if install is not None:
task_spec = task_spec.with_env_var(name="INSTALL_CMD", value=install)
if nfs is None and clusters and all(["cirrascale" in cluster for cluster in clusters]):
nfs = True
if nfs:
task_spec = task_spec.with_dataset(constants.NFS_MOUNT, host_path=constants.NFS_MOUNT)
if datasets:
for dataset_id, sub_path, path in datasets:
task_spec = task_spec.with_dataset(path, beaker=dataset_id, sub_path=sub_path)
if mounts:
for source, target in mounts:
task_spec = task_spec.with_dataset(target, host_path=source)
return ExperimentSpec(description=description, tasks=[task_spec])
def check_for_upgrades():
import packaging.version
import requests
try:
response = requests.get(
"https://api.github.com/repos/allenai/beaker-gantry/releases/latest", timeout=1
)
if response.ok:
latest_version = packaging.version.parse(response.json()["tag_name"])
if latest_version > packaging.version.parse(VERSION):
print_stderr(
f":warning: [yellow]You're using [b]gantry v{VERSION}[/], "
f"but a newer version ([b]v{latest_version}[/]) is available: "
f"https://github.com/allenai/beaker-gantry/releases/tag/v{latest_version}[/]\n"
f"[yellow i]You can upgrade by running:[/] pip install --upgrade beaker-gantry\n",
)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
pass
def ensure_workspace(
workspace: Optional[str] = None,
yes: bool = False,
gh_token_secret: str = GITHUB_TOKEN_SECRET,
public_repo: bool = False,
) -> Beaker:
beaker = (
Beaker.from_env(session=True)
if workspace is None
else Beaker.from_env(session=True, default_workspace=workspace)
)
try:
permissions = beaker.workspace.get_permissions()
if (
not public_repo
and permissions.authorizations is not None
and len(permissions.authorizations) > 1
):
print_stderr(
f"[yellow]Your workspace [b]{beaker.workspace.url()}[/] has multiple contributors! "
f"Every contributor can view your GitHub personal access token secret ('{gh_token_secret}').[/]"
)
if not yes and not prompt.Confirm.ask(
"[yellow][i]Are you sure you want to use this workspace?[/][/]"
):
raise KeyboardInterrupt
elif workspace is None:
default_workspace = beaker.workspace.get()
if not yes and not prompt.Confirm.ask(
f"Using default workspace [b cyan]{default_workspace.full_name}[/]. [i]Is that correct?[/]"
):
raise KeyboardInterrupt
except WorkspaceNotSet:
raise ConfigurationError(
"'--workspace' option is required since you don't have a default workspace set"
)
return beaker
def replace_tags(contents: bytes) -> bytes:
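    # Substitutes '${{ NAME }}' placeholders in the entrypoint script with values from
    # gantry.constants, e.g. b"results dir: ${{ RESULTS_DIR }}" -> b"results dir: /results".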
tag_start = contents.find(b"${{")
while tag_start != -1:
tag_end = contents.find(b"}}") + 2
tag = contents[tag_start:tag_end]
constant_name = tag.split(b" ")[1].decode()
contents = contents.replace(tag, getattr(constants, constant_name).encode()) # type: ignore
tag_start = contents.find(b"${{", tag_end)
assert b"${{" not in contents
return contents
| beaker-gantry-main | gantry/util.py |
DEFAULT_IMAGE = "ai2/conda"
ENTRYPOINT = "entrypoint.sh"
GITHUB_TOKEN_SECRET = "GITHUB_TOKEN"
CONDA_ENV_FILE = "environment.yml"
CONDA_ENV_FILE_ALTERNATE = "environment.yaml"
PIP_REQUIREMENTS_FILE = "requirements.txt"
RUNTIME_DIR = "/gantry-runtime"
RESULTS_DIR = "/results"
METRICS_FILE = f"{RESULTS_DIR}/metrics.json"
NFS_MOUNT = "/net/nfs.cirrascale"
| beaker-gantry-main | gantry/constants.py |
from .constants import METRICS_FILE, RESULTS_DIR
__all__ = ["METRICS_FILE", "RESULTS_DIR"]
| beaker-gantry-main | gantry/__init__.py |
from os import PathLike
from typing import Union
PathOrStr = Union[PathLike, str]
| beaker-gantry-main | gantry/aliases.py |
class GantryError(Exception):
"""
Base exception for all error types that Gantry might raise.
"""
class GitError(GantryError):
pass
class DirtyRepoError(GitError):
pass
class InvalidRemoteError(GitError):
pass
class ConfigurationError(GantryError):
pass
class ExperimentFailedError(GantryError):
pass
class EntrypointChecksumError(GantryError):
pass
class GitHubTokenSecretNotFound(GantryError):
pass
class TermInterrupt(GantryError):
pass
| beaker-gantry-main | gantry/exceptions.py |
import os
import signal
import sys
from fnmatch import fnmatch
from pathlib import Path
from typing import Optional, Tuple
import click
import rich
from beaker import (
Beaker,
ImageNotFound,
Job,
JobTimeoutError,
Priority,
SecretNotFound,
TaskResources,
)
from click_help_colors import HelpColorsCommand, HelpColorsGroup
from rich import pretty, print, prompt, traceback
from . import constants, util
from .aliases import PathOrStr
from .exceptions import *
from .util import print_stderr
from .version import VERSION
_CLICK_GROUP_DEFAULTS = {
"cls": HelpColorsGroup,
"help_options_color": "green",
"help_headers_color": "yellow",
"context_settings": {"max_content_width": 115},
}
_CLICK_COMMAND_DEFAULTS = {
"cls": HelpColorsCommand,
"help_options_color": "green",
"help_headers_color": "yellow",
"context_settings": {"max_content_width": 115},
}
def excepthook(exctype, value, tb):
"""
Used to patch `sys.excepthook` in order to customize handling of uncaught exceptions.
"""
# Ignore `GantryError` because we don't need a traceback for those.
if issubclass(exctype, (GantryError,)):
print_stderr(f"[red][bold]{exctype.__name__}:[/] [i]{value}[/][/]")
# For interruptions, call the original exception handler.
elif issubclass(exctype, (KeyboardInterrupt, TermInterrupt)):
sys.__excepthook__(exctype, value, tb)
else:
print_stderr(traceback.Traceback.from_exception(exctype, value, tb, suppress=[click]))
sys.excepthook = excepthook
def handle_sigterm(sig, frame):
del sig, frame
raise TermInterrupt
@click.group(**_CLICK_GROUP_DEFAULTS) # type: ignore
@click.version_option(version=VERSION)
def main():
# Configure rich.
if os.environ.get("GANTRY_GITHUB_TESTING"):
# Force a broader terminal when running tests in GitHub Actions.
console_width = 180
rich.reconfigure(width=console_width, force_terminal=True, force_interactive=False)
pretty.install()
else:
pretty.install()
# Handle SIGTERM just like KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_sigterm)
rich.get_console().print(
r'''
[cyan b] o=======[] [/]
[cyan b] __ _ _ _ |_ [] [/]
[cyan b] / _` | __ _ _ _ | |_ _ _ | || | [] [/]
[cyan b] \__, | / _` | | ' \ | _| | '_| \_, | _/ ]_ [/]
[cyan b] |___/ \__,_| |_||_| _\__| _|_|_ _|__/ |_____| [/]
[blue b]_|"""""|_|"""""|_|"""""|_|"""""|_|"""""|_| """"| [/]
[blue b] `---------------------------------------------' [/]
''', # noqa: W605
highlight=False,
)
util.check_for_upgrades()
@main.command(**_CLICK_COMMAND_DEFAULTS) # type: ignore
@click.argument("experiment", nargs=1, required=True, type=str)
def follow(experiment: str):
"""
Follow the logs for a running experiment.
"""
beaker = Beaker.from_env(session=True)
exp = beaker.experiment.get(experiment)
job = util.follow_experiment(beaker, exp)
util.display_results(beaker, exp, job)
@main.command(**_CLICK_COMMAND_DEFAULTS) # type: ignore
@click.argument("arg", nargs=-1)
@click.option(
"-n",
"--name",
type=str,
help="""A name to assign to the experiment on Beaker. Defaults to a randomly generated name.""",
)
@click.option(
"-t",
"--task-name",
type=str,
help="""A name to assign to the task on Beaker.""",
default="main",
show_default=True,
)
@click.option("-d", "--description", type=str, help="""A description for the experiment.""")
@click.option(
"-w",
"--workspace",
type=str,
help="""The Beaker workspace to use.
If not specified, your default workspace will be used.""",
)
@click.option(
"-c",
"--cluster",
type=str,
multiple=True,
default=None,
help="""A potential cluster to use. This option can be used multiple times to allow multiple clusters.
    You can also specify it as a wildcard, e.g. '--cluster ai2/*-cirrascale'.
If you don't specify a cluster or the priority, the priority will default to 'preemptible' and
the job will be able to run on any on-premise cluster.""",
show_default=True,
)
@click.option(
"--hostname",
type=str,
multiple=True,
default=None,
help="""Hostname constraints to apply to the experiment spec. This option can be used multiple times to alllow
multiple hosts.""",
show_default=True,
)
@click.option(
"--beaker-image",
type=str,
default=constants.DEFAULT_IMAGE,
help="""The name or ID of an image on Beaker to use for your experiment.
Mutually exclusive with --docker-image.""",
show_default=True,
)
@click.option(
"--docker-image",
type=str,
help="""The name of a public Docker image to use for your experiment.
Mutually exclusive with --beaker-image.""",
)
@click.option(
"--cpus",
type=float,
help="""Minimum number of logical CPU cores (e.g. 4.0, 0.5).""",
)
@click.option(
"--gpus",
type=int,
help="""Minimum number of GPUs (e.g. 1).""",
)
@click.option(
"--memory",
type=str,
help="""Minimum available system memory as a number with unit suffix (e.g. 2.5GiB).""",
)
@click.option(
"--shared-memory",
type=str,
help="""Size of /dev/shm as a number with unit suffix (e.g. 2.5GiB).""",
)
@click.option(
"--dataset",
type=str,
multiple=True,
help="""An input dataset in the form of 'dataset-name:/mount/location' or
'dataset-name:sub/path:/mount/location' to attach to your experiment.
You can specify this option more than once to attach multiple datasets.""",
)
@click.option(
"--gh-token-secret",
type=str,
help="""The name of the Beaker secret that contains your GitHub token.""",
default=constants.GITHUB_TOKEN_SECRET,
show_default=True,
)
@click.option(
"--conda",
type=click.Path(exists=True, dir_okay=False),
help=f"""Path to a conda environment file for reconstructing your Python environment.
If not specified, '{constants.CONDA_ENV_FILE}' will be used if it exists.""",
)
@click.option(
"--pip",
type=click.Path(exists=True, dir_okay=False),
help=f"""Path to a PIP requirements file for reconstructing your Python environment.
If not specified, '{constants.PIP_REQUIREMENTS_FILE}' will be used if it exists.""",
)
@click.option(
"--venv",
type=str,
help="""The name of an existing conda environment on the image to use.""",
)
@click.option(
"--env",
type=str,
help="""Environment variables to add the Beaker experiment. Should be in the form '{NAME}={VALUE}'.""",
multiple=True,
)
@click.option(
"--env-secret",
type=str,
help="""Environment variables to add the Beaker experiment from Beaker secrets.
Should be in the form '{NAME}={SECRET_NAME}'.""",
multiple=True,
)
@click.option(
"--nfs / --no-nfs",
default=None,
help=f"""Whether or not to mount the NFS drive ({constants.NFS_MOUNT}) to the experiment.
This only works for cirrascale clusters managed by the Beaker team.
If not specified, gantry will always mount NFS when it knows the cluster supports it.""",
)
@click.option(
"--show-logs/--no-logs",
default=True,
show_default=True,
help="""Whether or not to stream the logs to stdout as the experiment runs.
This only takes effect when --timeout is non-zero.""",
)
@click.option(
"--timeout",
type=int,
default=0,
help="""Time to wait (in seconds) for the experiment to finish.
A timeout of -1 means wait indefinitely.
A timeout of 0 means don't wait at all.""",
show_default=True,
)
@click.option(
"--allow-dirty",
is_flag=True,
help="""Allow submitting the experiment with a dirty working directory.""",
)
@click.option(
"-y",
"--yes",
is_flag=True,
help="""Skip all confirmation prompts.""",
)
@click.option("--dry-run", is_flag=True, help="""Do a dry run only.""")
@click.option(
"--save-spec",
type=click.Path(dir_okay=False, file_okay=True),
help="""A path to save the generated Beaker experiment spec to.""",
)
@click.option(
"--priority",
type=click.Choice([str(p) for p in Priority]),
help="The job priority. If you don't specify at least one cluster, priority will default to 'preemptible'.",
)
@click.option(
"--install",
type=str,
help="""Override the default installation command, e.g. '--install "python setup.py install"'""",
)
@click.option(
"--replicas",
type=int,
help="""The number of task replicas to run.""",
)
@click.option(
"--leader-selection",
is_flag=True,
help="""Specifies that the first task replica should be the leader and populates each task
with 'BEAKER_LEADER_REPLICA_HOSTNAME' and 'BEAKER_LEADER_REPLICA_NODE_ID' environment variables.
This is only applicable when '--replicas INT' and '--host-networking' are used,
although the '--host-networking' flag can be omitted in this case since it's assumed.""",
)
@click.option(
"--host-networking",
is_flag=True,
help="""Specifies that each task replica should use the host's network.
When used with '--replicas INT', this allows the replicas to communicate with each
other using their hostnames.""",
)
@click.option(
"-m",
"--mount",
type=str,
help="""Host directories to mount to the Beaker experiment. Should be in the form '{HOST_SOURCE}:{TARGET}'
similar to the '-v' option with 'docker run'.""",
multiple=True,
)
def run(
arg: Tuple[str, ...],
name: Optional[str] = None,
description: Optional[str] = None,
task_name: str = "main",
workspace: Optional[str] = None,
cluster: Optional[Tuple[str, ...]] = None,
hostname: Optional[Tuple[str, ...]] = None,
beaker_image: Optional[str] = constants.DEFAULT_IMAGE,
docker_image: Optional[str] = None,
cpus: Optional[float] = None,
gpus: Optional[int] = None,
memory: Optional[str] = None,
shared_memory: Optional[str] = None,
dataset: Optional[Tuple[str, ...]] = None,
gh_token_secret: str = constants.GITHUB_TOKEN_SECRET,
conda: Optional[PathOrStr] = None,
pip: Optional[PathOrStr] = None,
venv: Optional[str] = None,
env: Optional[Tuple[str, ...]] = None,
env_secret: Optional[Tuple[str, ...]] = None,
timeout: int = 0,
nfs: Optional[bool] = None,
show_logs: bool = True,
allow_dirty: bool = False,
dry_run: bool = False,
yes: bool = False,
save_spec: Optional[PathOrStr] = None,
priority: Optional[str] = None,
install: Optional[str] = None,
replicas: Optional[int] = None,
leader_selection: bool = False,
host_networking: bool = False,
mount: Optional[Tuple[str, ...]] = None,
):
"""
Run an experiment on Beaker.
Example:
$ gantry run --name 'hello-world' -- python -c 'print("Hello, World!")'
"""
if not arg:
raise ConfigurationError(
"[ARGS]... are required! For example:\n$ gantry run -- python -c 'print(\"Hello, World!\")'"
)
if (beaker_image is None) == (docker_image is None):
raise ConfigurationError(
"Either --beaker-image or --docker-image must be specified, but not both."
)
task_resources = TaskResources(
cpu_count=cpus, gpu_count=gpus, memory=memory, shared_memory=shared_memory
)
# Get repository account, name, and current ref.
github_account, github_repo, git_ref, is_public = util.ensure_repo(allow_dirty)
# Initialize Beaker client and validate workspace.
beaker = util.ensure_workspace(
workspace=workspace, yes=yes, gh_token_secret=gh_token_secret, public_repo=is_public
)
if beaker_image is not None and beaker_image != constants.DEFAULT_IMAGE:
try:
beaker_image = beaker.image.get(beaker_image).full_name
except ImageNotFound:
raise ConfigurationError(f"Beaker image '{beaker_image}' not found")
# Get the entrypoint dataset.
entrypoint_dataset = util.ensure_entrypoint_dataset(beaker)
# Get / set the GitHub token secret.
if not is_public:
try:
beaker.secret.get(gh_token_secret)
except SecretNotFound:
print_stderr(
f"[yellow]GitHub token secret '{gh_token_secret}' not found in workspace.[/]\n"
f"You can create a suitable GitHub token by going to https://github.com/settings/tokens/new "
f"and generating a token with the '\N{ballot box with check} repo' scope."
)
gh_token = prompt.Prompt.ask(
"[i]Please paste your GitHub token here[/]",
password=True,
)
if not gh_token:
raise ConfigurationError("token cannot be empty!")
beaker.secret.write(gh_token_secret, gh_token)
print(
f"GitHub token secret uploaded to workspace as '{gh_token_secret}'.\n"
f"If you need to update this secret in the future, use the command:\n"
f"[i]$ gantry config set-gh-token[/]"
)
gh_token_secret = util.ensure_github_token_secret(beaker, gh_token_secret)
# Validate the input datasets.
datasets_to_use = util.ensure_datasets(beaker, *dataset) if dataset else []
env_vars = []
for e in env or []:
try:
env_name, val = e.split("=")
except ValueError:
raise ValueError("Invalid --env option: {e}")
env_vars.append((env_name, val))
env_secrets = []
for e in env_secret or []:
try:
env_secret_name, secret = e.split("=")
except ValueError:
raise ValueError(f"Invalid --env-secret option: '{e}'")
env_secrets.append((env_secret_name, secret))
mounts = []
for m in mount or []:
try:
source, target = m.split(":")
except ValueError:
raise ValueError(f"Invalid --mount option: '{m}'")
mounts.append((source, target))
# Validate clusters.
if cluster:
cl_objects = beaker.cluster.list()
final_clusters = []
for pat in cluster:
matching_clusters = [cl.full_name for cl in cl_objects if fnmatch(cl.full_name, pat)]
if matching_clusters:
final_clusters.extend(matching_clusters)
else:
raise ConfigurationError(f"cluster '{pat}' did not match any Beaker clusters")
cluster = list(set(final_clusters)) # type: ignore
# Default to preemptible priority when no cluster has been specified.
if not cluster and priority is None:
priority = Priority.preemptible
# Initialize experiment and task spec.
spec = util.build_experiment_spec(
task_name=task_name,
clusters=list(cluster or []),
task_resources=task_resources,
arguments=list(arg),
entrypoint_dataset=entrypoint_dataset.id,
github_account=github_account,
github_repo=github_repo,
git_ref=git_ref,
description=description,
beaker_image=beaker_image,
docker_image=docker_image,
gh_token_secret=gh_token_secret if not is_public else None,
conda=conda,
pip=pip,
venv=venv,
nfs=nfs,
datasets=datasets_to_use,
env=env_vars,
env_secrets=env_secrets,
priority=priority,
install=install,
replicas=replicas,
leader_selection=leader_selection,
host_networking=host_networking or (bool(replicas) and leader_selection),
mounts=mounts,
hostnames=None if hostname is None else list(hostname),
)
if save_spec:
if (
Path(save_spec).is_file()
and not yes
and not prompt.Confirm.ask(
f"[yellow]The file '{save_spec}' already exists. "
f"[i]Are you sure you want to overwrite it?[/][/]"
)
):
raise KeyboardInterrupt
spec.to_file(save_spec)
print(f"Experiment spec saved to {save_spec}")
if dry_run:
rich.get_console().rule("[b]Dry run[/]")
print(
f"[b]Workspace:[/] {beaker.workspace.url()}\n"
f"[b]Commit:[/] https://github.com/{github_account}/{github_repo}/commit/{git_ref}\n"
f"[b]Experiment spec:[/]",
spec.to_json(),
)
return
name = name or prompt.Prompt.ask(
"[i]What would you like to call this experiment?[/]", default=util.unique_name()
)
if not name:
raise ConfigurationError("Experiment name cannot be empty!")
experiment = beaker.experiment.create(name, spec)
print(f"Experiment submitted, see progress at {beaker.experiment.url(experiment)}")
# Can return right away if timeout is 0.
if timeout == 0:
return
job: Optional[Job] = None
try:
if show_logs:
job = util.follow_experiment(beaker, experiment, timeout=timeout)
else:
experiment = beaker.experiment.wait_for(
experiment, timeout=timeout if timeout > 0 else None
)[0]
job = beaker.experiment.tasks(experiment)[0].latest_job # type: ignore
assert job is not None
except (KeyboardInterrupt, TermInterrupt, JobTimeoutError) as exc:
print_stderr(f"[red][bold]{exc.__class__.__name__}:[/] [i]{exc}[/][/]")
beaker.experiment.stop(experiment)
print_stderr("[yellow]Experiment cancelled.[/]")
sys.exit(1)
util.display_results(beaker, experiment, job)
@main.group(**_CLICK_GROUP_DEFAULTS)
def config():
"""
Configure Gantry for a specific Beaker workspace.
"""
@config.command(**_CLICK_COMMAND_DEFAULTS) # type: ignore
@click.argument("token")
@click.option(
"-w",
"--workspace",
type=str,
help="""The Beaker workspace to use.
If not specified, your default workspace will be used.""",
)
@click.option(
"-s",
"--secret",
type=str,
help="""The name of the Beaker secret to write to.""",
default=constants.GITHUB_TOKEN_SECRET,
show_default=True,
)
@click.option(
"-y",
"--yes",
is_flag=True,
help="""Skip all confirmation prompts.""",
)
def set_gh_token(
token: str,
workspace: Optional[str] = None,
secret: str = constants.GITHUB_TOKEN_SECRET,
yes: bool = False,
):
"""
Set or update Gantry's GitHub token for the workspace.
You can create a suitable GitHub token by going to https://github.com/settings/tokens/new
and generating a token with the '\N{ballot box with check} repo' scope.
Example:
$ gantry config set-gh-token "$GITHUB_TOKEN"
"""
# Initialize Beaker client and validate workspace.
beaker = util.ensure_workspace(workspace=workspace, yes=yes, gh_token_secret=secret)
# Write token to secret.
beaker.secret.write(secret, token)
print(
f"[green]\N{check mark} GitHub token added to workspace "
f"'{beaker.config.default_workspace}' as the secret '{secret}'"
)
@main.group(**_CLICK_GROUP_DEFAULTS)
def cluster():
"""
Get information on Beaker clusters.
"""
@cluster.command(name="list", **_CLICK_COMMAND_DEFAULTS) # type: ignore
@click.option(
"--cloud",
is_flag=True,
help="""Only show cloud clusters.""",
)
def list_clusters(cloud: bool = False):
"""
List available clusters.
By default only on-premise clusters are displayed.
"""
beaker = Beaker.from_env(session=True)
clusters = [c for c in beaker.cluster.list() if c.is_cloud == cloud]
for cluster in clusters:
icon = "☁️" if cluster.is_cloud else "🏠"
print(f"{icon} [b magenta]{cluster.full_name}[/]")
for node in sorted(beaker.cluster.nodes(cluster), key=lambda node: node.hostname):
print(
f" [i cyan]{node.hostname}[/] - "
f"CPUs: {node.limits.cpu_count}, "
f"GPUs: {node.limits.gpu_count or 0} {'x' if node.limits.gpu_type else ''} {node.limits.gpu_type or ''}"
)
if cluster.node_spec is not None:
limits = cluster.node_spec
print(
f" CPUs: {limits.cpu_count}, "
f"GPUs: {limits.gpu_count or 0} {'x' if limits.gpu_type else ''} {limits.gpu_type or ''}"
)
@cluster.command(name="util", **_CLICK_COMMAND_DEFAULTS) # type: ignore
@click.argument("cluster", nargs=1, required=True, type=str)
def cluster_util(cluster: str):
"""
Get the current status and utilization for a cluster.
"""
beaker = Beaker.from_env(session=True)
cluster_util = beaker.cluster.utilization(cluster)
cluster = cluster_util.cluster
icon = "☁️" if cluster.is_cloud else "🏠"
print(
f"{icon} [b magenta]{cluster.full_name}[/]\n\n"
f"running jobs: {cluster_util.running_jobs} ({cluster_util.running_preemptible_jobs} preemptible)\n"
f"queued jobs: {cluster_util.queued_jobs}"
)
if cluster_util.nodes:
print("nodes:")
for node in sorted(cluster_util.nodes, key=lambda n: n.hostname):
print(
f" [i cyan]{node.hostname}[/] - {node.running_jobs} jobs ({node.running_preemptible_jobs} preemptible)\n"
f" CPUs free: [{'green' if node.free.cpu_count else 'red'}]"
f"{node.free.cpu_count} / {node.limits.cpu_count}[/]\n"
f" GPUs free: [{'green' if node.free.gpu_count else 'red'}]"
f"{node.free.gpu_count or 0} / {node.limits.gpu_count}[/] {node.free.gpu_type or ''}\n"
)
if __name__ == "__main__":
main()
| beaker-gantry-main | gantry/__main__.py |
from allennlp.common.testing import ModelTestCase
class TestSimpleClassifier(ModelTestCase):
def test_model_can_train(self):
# This built-in test makes sure that your data can load, that it gets passed to the model
# correctly, that your model computes a loss in a way that we can get gradients from it,
# that all of your parameters get non-zero gradient updates, and that we can save and load
# your model and have the model's predictions remain consistent.
param_file = "tests/fixtures/config.json"
self.ensure_model_can_train_save_and_load(param_file)
| allennlp-template-config-files-master | tests/test_model.py |
allennlp-template-config-files-master | tests/__init__.py |
|
from my_project.dataset_reader import ClassificationTsvReader
class TestTextClassificationJsonReader:
def test_read_from_file_ag_news_corpus_and_truncates_properly(self):
reader = ClassificationTsvReader()
data_path = "tests/fixtures/toy_data.tsv"
instances = list(reader.read(data_path))
assert len(instances) == 2
fields = instances[0].fields
expected_tokens = ["it", "is", "movies", "like", "these"]
assert [t.text for t in fields["text"].tokens][:5] == expected_tokens
assert fields["label"].label == "neg"
fields = instances[1].fields
expected_tokens = ["the", "music", "is", "well-chosen", "and"]
assert [t.text for t in fields["text"].tokens][:5] == expected_tokens
assert fields["label"].label == "pos"
| allennlp-template-config-files-master | tests/test_dataset_reader.py |
# These imports are important for making the configuration files find the classes that you wrote.
# If you don't have these, you'll get errors about allennlp not being able to find
# "simple_classifier", or whatever name you registered your model with. These imports and the
# contents of .allennlp_plugins makes it so you can just use `allennlp train`, and we will find your
# classes and use them. If you change the name of `my_project`, you'll also need to change it in
# the same way in the .allennlp_plugins file.
from my_project.model import *
from my_project.dataset_reader import *
| allennlp-template-config-files-master | my_project/__init__.py |
from typing import Dict
import torch
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
self.accuracy = CategoricalAccuracy()
def forward( # type: ignore
self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
probs = torch.nn.functional.softmax(logits, dim=-1)
        output = {"probs": probs}
        if label is not None:
            self.accuracy(logits, label)
            # Shape: (1,)
            output["loss"] = torch.nn.functional.cross_entropy(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self.accuracy.get_metric(reset)}
| allennlp-template-config-files-master | my_project/model.py |
from typing import Dict, Iterable
from allennlp.data import DatasetReader, Instance, Field
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, WhitespaceTokenizer
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance: # type: ignore
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields: Dict[str, Field] = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
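# Illustrative sketch (an addition): reading the toy fixture referenced in the tests.
# The path and max_tokens value are assumptions.
#
#     reader = ClassificationTsvReader(max_tokens=64)
#     for instance in reader.read("tests/fixtures/toy_data.tsv"):
#         print(instance.fields["text"], instance.fields["label"])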
| allennlp-template-config-files-master | my_project/dataset_reader.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import unittest
import torch
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned
from models.backbone import Backbone, Joiner, BackboneBase
from util import box_ops
from util.misc import nested_tensor_from_tensor_list
from hubconf import detr_resnet50, detr_resnet50_panoptic
# onnxruntime requires python 3.5 or above
try:
import onnxruntime
except ImportError:
onnxruntime = None
class Tester(unittest.TestCase):
def test_box_cxcywh_to_xyxy(self):
t = torch.rand(10, 4)
r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t))
self.assertLess((t - r).abs().max(), 1e-5)
@staticmethod
def indices_torch2python(indices):
return [(i.tolist(), j.tolist()) for i, j in indices]
def test_hungarian(self):
n_queries, n_targets, n_classes = 100, 15, 91
logits = torch.rand(1, n_queries, n_classes + 1)
boxes = torch.rand(1, n_queries, 4)
tgt_labels = torch.randint(high=n_classes, size=(n_targets,))
tgt_boxes = torch.rand(n_targets, 4)
matcher = HungarianMatcher()
targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}]
indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets)
indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1),
'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2)
self.assertEqual(len(indices_single[0][0]), n_targets)
self.assertEqual(len(indices_single[0][1]), n_targets)
self.assertEqual(self.indices_torch2python(indices_single),
self.indices_torch2python([indices_batched[0]]))
self.assertEqual(self.indices_torch2python(indices_single),
self.indices_torch2python([indices_batched[1]]))
# test with empty targets
tgt_labels_empty = torch.randint(high=n_classes, size=(0,))
tgt_boxes_empty = torch.rand(0, 4)
targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}]
indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty)
self.assertEqual(len(indices[1][0]), 0)
indices = matcher({'pred_logits': logits.repeat(2, 1, 1),
'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2)
self.assertEqual(len(indices[0][0]), 0)
def test_position_encoding_script(self):
m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned()
mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa
def test_backbone_script(self):
backbone = Backbone('resnet50', True, False, False)
torch.jit.script(backbone) # noqa
def test_model_script_detection(self):
model = detr_resnet50(pretrained=False).eval()
scripted_model = torch.jit.script(model)
x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
out = model(x)
out_script = scripted_model(x)
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
def test_model_script_panoptic(self):
model = detr_resnet50_panoptic(pretrained=False).eval()
scripted_model = torch.jit.script(model)
x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
out = model(x)
out_script = scripted_model(x)
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"]))
self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"]))
self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"]))
def test_model_detection_different_inputs(self):
model = detr_resnet50(pretrained=False).eval()
# support NestedTensor
x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)])
out = model(x)
self.assertIn('pred_logits', out)
# and 4d Tensor
x = torch.rand(1, 3, 200, 200)
out = model(x)
self.assertIn('pred_logits', out)
# and List[Tensor[C, H, W]]
x = torch.rand(3, 200, 200)
out = model([x])
self.assertIn('pred_logits', out)
@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable')
class ONNXExporterTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
torch.manual_seed(123)
def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None,
output_names=None, input_names=None):
model.eval()
onnx_io = io.BytesIO()
# export to onnx with the first input
torch.onnx.export(model, inputs_list[0], onnx_io,
do_constant_folding=do_constant_folding, opset_version=12,
dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names)
# validate the exported model with onnx runtime
for test_inputs in inputs_list:
with torch.no_grad():
if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list):
test_inputs = (nested_tensor_from_tensor_list(test_inputs),)
                test_outputs = model(*test_inputs)
                if isinstance(test_outputs, torch.Tensor):
                    test_outputs = (test_outputs,)
            self.ort_validate(onnx_io, test_inputs, test_outputs, tolerate_small_mismatch)
def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):
inputs, _ = torch.jit._flatten(inputs)
outputs, _ = torch.jit._flatten(outputs)
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
else:
return tensor.cpu().numpy()
inputs = list(map(to_numpy, inputs))
outputs = list(map(to_numpy, outputs))
ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())
# compute onnxruntime output prediction
ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))
ort_outs = ort_session.run(None, ort_inputs)
for i in range(0, len(outputs)):
try:
torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)
except AssertionError as error:
if tolerate_small_mismatch:
self.assertIn("(0.00%)", str(error), str(error))
else:
raise
def test_model_onnx_detection(self):
model = detr_resnet50(pretrained=False).eval()
dummy_image = torch.ones(1, 3, 800, 800) * 0.3
model(dummy_image)
# Test exported model on images of different size, or dummy input
self.run_model(
model,
[(torch.rand(1, 3, 750, 800),)],
input_names=["inputs"],
output_names=["pred_logits", "pred_boxes"],
tolerate_small_mismatch=True,
)
if __name__ == '__main__':
unittest.main()
| detr-master | test_all.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
detection_parser = detection.get_args_parser()
parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=60, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as detection
self._setup_gpu_args()
detection.main(self.args)
def checkpoint(self):
import os
import submitit
from pathlib import Path
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
# cluster setup is defined by environment variables
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
)
executor.update_parameters(name="detr")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
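# Example SLURM submission (a sketch added here; the dataset path is an assumption).
# --timeout is given in minutes and the executor caps it at 60 * 72:
#
#     python run_with_submitit.py --timeout 3000 --coco_path /path/to/coco
#
# --ngpus and --nodes default to 8 and 4 respectively and can be overridden per cluster.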
| detr-master | run_with_submitit.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, max_norm: float = 0):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Test:'
iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
coco_evaluator = CocoEvaluator(base_ds, iou_types)
# coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
panoptic_evaluator = None
if 'panoptic' in postprocessors.keys():
panoptic_evaluator = PanopticEvaluator(
data_loader.dataset.ann_file,
data_loader.dataset.ann_folder,
output_dir=os.path.join(output_dir, "panoptic_eval"),
)
for samples, targets in metric_logger.log_every(data_loader, 10, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors['bbox'](outputs, orig_target_sizes)
if 'segm' in postprocessors.keys():
target_sizes = torch.stack([t["size"] for t in targets], dim=0)
results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
if coco_evaluator is not None:
coco_evaluator.update(res)
if panoptic_evaluator is not None:
res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
for i, target in enumerate(targets):
image_id = target["image_id"].item()
file_name = f"{image_id:012d}.png"
res_pano[i]["image_id"] = image_id
res_pano[i]["file_name"] = file_name
panoptic_evaluator.update(res_pano)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
if panoptic_evaluator is not None:
panoptic_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
panoptic_res = None
if panoptic_evaluator is not None:
panoptic_res = panoptic_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if 'bbox' in postprocessors.keys():
stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
if 'segm' in postprocessors.keys():
stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
if panoptic_res is not None:
stats['PQ_all'] = panoptic_res["All"]
stats['PQ_th'] = panoptic_res["Things"]
stats['PQ_st'] = panoptic_res["Stuff"]
return stats, coco_evaluator
| detr-master | engine.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
dependencies = ["torch", "torchvision"]
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
hidden_dim = 256
backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
backbone_with_pos_enc = Joiner(backbone, pos_enc)
backbone_with_pos_enc.num_channels = backbone.num_channels
transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
if mask:
return DETRsegm(detr)
return detr
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 42/62.4 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 43.3/63.1 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
"""
    DETR R101 with 6 encoder and 6 decoder layers.
Achieves 43.5/63.8 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
The last block of ResNet-101 has dilation to increase
output resolution.
Achieves 44.9/64.7 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 43.4 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet50_dc5_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
    Achieves 44.6 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet101_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
    DETR R101 with 6 encoder and 6 decoder layers.
Achieves 45.1 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
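# Hedged usage sketch (an addition, not part of hubconf.py): loading one of the entry points
# above through torch.hub and running it on a random image. "facebookresearch/detr" is the
# public hub id; treat it as an assumption when working from a fork.
#
#     import torch
#     model = torch.hub.load("facebookresearch/detr", "detr_resnet50", pretrained=True)
#     model.eval()
#     with torch.no_grad():
#         outputs = model(torch.rand(1, 3, 800, 800))
#     print(outputs["pred_logits"].shape, outputs["pred_boxes"].shape)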
| detr-master | hubconf.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location='cpu')
model_without_ddp.detr.load_state_dict(checkpoint['model'])
output_dir = Path(args.output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
data_loader_val, base_ds, device, args.output_dir)
if args.output_dir:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer, device, epoch,
args.clip_max_norm)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args,
}, checkpoint_path)
test_stats, coco_evaluator = evaluate(
model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
(output_dir / 'eval').mkdir(exist_ok=True)
if "bbox" in coco_evaluator.coco_eval:
filenames = ['latest.pth']
if epoch % 50 == 0:
filenames.append(f'{epoch:03}.pth')
for name in filenames:
torch.save(coco_evaluator.coco_eval["bbox"].eval,
output_dir / "eval" / name)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
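# Example launches (a sketch added for reference; paths are assumptions). Single GPU:
#
#     python main.py --coco_path /path/to/coco --output_dir output/detr_r50
#
# Multi-GPU on one node, mirroring the distributed setup handled by init_distributed_mode:
#
#     python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py \
#         --coco_path /path/to/coco --output_dir output/detr_r50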
| detr-master | main.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[:3]) < 0.7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list):
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
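# Illustrative example (an addition): batching two images of different sizes. The padded
# tensor takes the per-axis maxima of the input shapes, and the mask is True at padded
# positions and False where real pixels are.
#
#     imgs = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
#     tensors, mask = nested_tensor_from_tensor_list(imgs).decompose()
#     # tensors.shape == (2, 3, 200, 250); mask.shape == (2, 200, 250)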
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
    function can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| detr-master | util/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
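# Minimal sanity-check sketch (an addition, not in the original file):
#
#     boxes_cxcywh = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
#     boxes_xyxy = box_cxcywh_to_xyxy(boxes_cxcywh)        # -> [[0.4, 0.3, 0.6, 0.7]]
#     giou = generalized_box_iou(boxes_xyxy, boxes_xyxy)   # identical boxes give 1.0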
| detr-master | util/box_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| detr-master | util/__init__.py |
"""
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
'''
Function to plot specific fields from training log(s). Plots both training and test results.
:: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
- fields = which results to plot from each log file - plots both training and test for each field.
- ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
- log_name = optional, name of log file if different than default 'log.txt'.
:: Outputs - matplotlib plots of results in fields, color coded for each log file.
- solid lines are training results, dashed lines are test results.
'''
func_name = "plot_utils.py::plot_logs"
# verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
# convert single Path to list to avoid 'not iterable' error
if not isinstance(logs, list):
if isinstance(logs, PurePath):
logs = [logs]
print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
else:
raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
Expect list[Path] or single Path obj, received {type(logs)}")
# verify valid dir(s) and that every item in list is Path object
for i, dir in enumerate(logs):
if not isinstance(dir, PurePath):
raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
if dir.exists():
continue
raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
# load log file(s) and plot
dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
for j, field in enumerate(fields):
if field == 'mAP':
coco_eval = pd.DataFrame(
pd.np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1]
).ewm(com=ewm_col).mean()
axs[j].plot(coco_eval, c=color)
else:
df.interpolate().ewm(com=ewm_col).mean().plot(
y=[f'train_{field}', f'test_{field}'],
ax=axs[j],
color=[color] * 2,
style=['-', '--']
)
for ax, field in zip(axs, fields):
ax.legend([Path(p).name for p in logs])
ax.set_title(field)
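# Hedged usage sketch (an addition); the output directories are assumptions and each must
# contain a log.txt written during training:
#
#     plot_logs([Path("output/run1"), Path("output/run2")], fields=("loss", "mAP"), ewm_col=5)
#     plt.show()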
def plot_precision_recall(files, naming_scheme='iter'):
if naming_scheme == 'exp_id':
# name becomes exp_id
names = [f.parts[-3] for f in files]
elif naming_scheme == 'iter':
names = [f.stem for f in files]
else:
raise ValueError(f'not supported {naming_scheme}')
fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
data = torch.load(f)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision = data['precision']
recall = data['params'].recThrs
scores = data['scores']
# take precision for all classes, all areas and 100 detections
precision = precision[0, :, :, 0, -1].mean(1)
scores = scores[0, :, :, 0, -1].mean(1)
prec = precision.mean()
rec = data['recall'][0, :, 0, -1].mean()
print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
f'score={scores.mean():0.3f}, ' +
f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
)
axs[0].plot(recall, precision, c=color)
axs[1].plot(recall, scores, c=color)
axs[0].set_title('Precision / Recall')
axs[0].legend(names)
axs[1].set_title('Scores / Recall')
axs[1].legend(names)
return fig, axs
| detr-master | util/plot_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
from pathlib import Path
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = {'image_id': image_id, 'annotations': target}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __init__(self, return_masks=False):
self.return_masks = return_masks
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
if self.return_masks:
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if self.return_masks:
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if self.return_masks:
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
return image, target
def make_coco_transforms(image_set):
normalize = T.Compose([
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
if image_set == 'train':
return T.Compose([
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose([
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
])
),
normalize,
])
if image_set == 'val':
return T.Compose([
T.RandomResize([800], max_size=1333),
normalize,
])
raise ValueError(f'unknown {image_set}')
def build(image_set, args):
root = Path(args.coco_path)
assert root.exists(), f'provided COCO path {root} does not exist'
mode = 'instances'
PATHS = {
"train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
"val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
}
img_folder, ann_file = PATHS[image_set]
dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks)
return dataset
| detr-master | datasets/coco.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Transforms and data augmentation for both image + bbox.
"""
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def crop(image, target, region):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target['masks'] = target['masks'][:, i:i + h, j:j + w]
fields.append("masks")
    # remove elements for which the boxes or masks have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target['boxes'].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target['masks'].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
if "masks" in target:
target['masks'] = target['masks'].flip(-1)
return flipped_image, target
def resize(image, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target['masks'] = interpolate(
target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
return rescaled_image, target
def pad(image, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image[::-1])
if "masks" in target:
target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
return padded_image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, target, region)
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
class RandomPad(object):
def __init__(self, max_pad):
self.max_pad = max_pad
def __call__(self, img, target):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, target, (pad_x, pad_y))
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
class ToTensor(object):
def __call__(self, img, target):
return F.to_tensor(img), target
class RandomErasing(object):
def __init__(self, *args, **kwargs):
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, target):
return self.eraser(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
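# ---------------------------------------------------------------------------
# Sketch (not part of the original file): composing the transforms above in
# the same spirit as make_coco_transforms in datasets/coco.py, on a dummy
# image and a single ground-truth box, so it runs without any dataset.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_transforms = Compose([
        RandomHorizontalFlip(p=1.0),
        RandomResize([64], max_size=128),
        ToTensor(),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    demo_img = PIL.Image.new("RGB", (100, 60))
    demo_target = {
        "boxes": torch.tensor([[10., 5., 30., 25.]]),
        "labels": torch.tensor([1]),
        "area": torch.tensor([400.]),
        "iscrowd": torch.tensor([0]),
    }
    out_img, out_target = demo_transforms(demo_img, demo_target)
    # Normalize also converts boxes to normalized (cx, cy, w, h)
    print(out_img.shape, out_target["boxes"])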
| detr-master | datasets/transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import util.misc as utils
try:
from panopticapi.evaluation import pq_compute
except ImportError:
pass
class PanopticEvaluator(object):
def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
self.gt_json = ann_file
self.gt_folder = ann_folder
if utils.is_main_process():
if not os.path.exists(output_dir):
os.mkdir(output_dir)
self.output_dir = output_dir
self.predictions = []
def update(self, predictions):
for p in predictions:
with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
f.write(p.pop("png_string"))
self.predictions += predictions
def synchronize_between_processes(self):
all_predictions = utils.all_gather(self.predictions)
merged_predictions = []
for p in all_predictions:
merged_predictions += p
self.predictions = merged_predictions
def summarize(self):
if utils.is_main_process():
json_data = {"annotations": self.predictions}
predictions_json = os.path.join(self.output_dir, "predictions.json")
with open(predictions_json, "w") as f:
f.write(json.dumps(json_data))
return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
return None
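# ---------------------------------------------------------------------------
# Sketch (not part of the original file) of the intended call order, mirroring
# how an evaluation loop would drive this class: construct once, update with
# per-image predictions, synchronize across processes, then summarize. Paths
# are placeholders, and panopticapi must be installed for pq_compute to exist.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    evaluator = PanopticEvaluator(
        ann_file="annotations/panoptic_val2017.json",
        ann_folder="annotations/panoptic_val2017",
        output_dir="panoptic_eval",
    )
    # Each prediction dict needs at least "file_name" and a PNG-encoded
    # "png_string" payload:
    # evaluator.update(predictions)
    # evaluator.synchronize_between_processes()
    # stats = evaluator.summarize()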
| detr-master | datasets/panoptic_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
from .coco import build as build_coco
def get_coco_api_from_dataset(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
def build_dataset(image_set, args):
if args.dataset_file == 'coco':
return build_coco(image_set, args)
if args.dataset_file == 'coco_panoptic':
# to avoid making panopticapi required for coco
from .coco_panoptic import build as build_coco_panoptic
return build_coco_panoptic(image_set, args)
raise ValueError(f'dataset {args.dataset_file} not supported')
| detr-master | datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib
"""
import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
# suppress pycocotools prints
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = all_gather(img_ids)
all_eval_imgs = all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
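# ---------------------------------------------------------------------------
# Sketch (not part of the original file). convert_to_xywh turns [x0, y0, x1, y1]
# boxes into COCO's [x, y, w, h]; the commented lines show the intended
# evaluation flow (they need a real COCO ground-truth file and predictions
# keyed by image_id with "boxes", "scores" and "labels" tensors).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(convert_to_xywh(torch.tensor([[10., 20., 50., 80.]])))  # [[10., 20., 40., 60.]]
    # coco_gt = COCO("annotations/instances_val2017.json")  # placeholder path
    # evaluator = CocoEvaluator(coco_gt, iou_types=["bbox"])
    # evaluator.update(predictions)
    # evaluator.synchronize_between_processes()
    # evaluator.accumulate()
    # evaluator.summarize()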
| detr-master | datasets/coco_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
from util.box_ops import masks_to_boxes
from .coco import make_coco_transforms
class CocoPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
with open(ann_file, 'r') as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id'])
# sanity check
if "annotations" in self.coco:
for img, ann in zip(self.coco['images'], self.coco['annotations']):
assert img['file_name'][:-4] == ann['file_name'][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx]
img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg')
ann_path = Path(self.ann_folder) / ann_info['file_name']
img = Image.open(img_path).convert('RGB')
w, h = img.size
if "segments_info" in ann_info:
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann['id'] for ann in ann_info['segments_info']])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64)
target = {}
target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
if self.return_masks:
target['masks'] = masks
target['labels'] = labels
target["boxes"] = masks_to_boxes(masks)
target['size'] = torch.as_tensor([int(h), int(w)])
target['orig_size'] = torch.as_tensor([int(h), int(w)])
if "segments_info" in ann_info:
for name in ['iscrowd', 'area']:
target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.coco['images'])
def get_height_and_width(self, idx):
img_info = self.coco['images'][idx]
height = img_info['height']
width = img_info['width']
return height, width
def build(image_set, args):
img_folder_root = Path(args.coco_path)
ann_folder_root = Path(args.coco_panoptic_path)
assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist'
assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist'
mode = 'panoptic'
PATHS = {
"train": ("train2017", Path("annotations") / f'{mode}_train2017.json'),
"val": ("val2017", Path("annotations") / f'{mode}_val2017.json'),
}
img_folder, ann_file = PATHS[image_set]
img_folder_path = img_folder_root / img_folder
ann_folder = ann_folder_root / f'{mode}_{img_folder}'
ann_file = ann_folder_root / ann_file
dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file,
transforms=make_coco_transforms(image_set), return_masks=args.masks)
return dataset
| detr-master | datasets/coco_panoptic.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer
class DETR(nn.Module):
""" This is the DETR module that performs object detection """
def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
def forward(self, samples: NestedTensor):
""" The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks[tgt_idx].flatten(1)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
'masks': self.loss_masks
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
return results
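# ---------------------------------------------------------------------------
# Sketch (not part of the original file) of the coordinate handling PostProcess
# performs, on a single normalized (cx, cy, w, h) box rescaled to a 640x480
# image. Only box_ops is used, so it runs standalone (via python -m).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_box = torch.tensor([[0.5, 0.5, 0.2, 0.4]])            # normalized cxcywh
    demo_xyxy = box_ops.box_cxcywh_to_xyxy(demo_box)           # [[0.4, 0.3, 0.6, 0.7]]
    print(demo_xyxy * torch.tensor([640., 480., 640., 480.]))  # absolute pixel coords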
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
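# ---------------------------------------------------------------------------
# Sketch (not part of the original file): the 3-layer MLP used as the box head
# maps each hidden_dim-sized query embedding to 4 box coordinates, which the
# model then squashes with a sigmoid.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    box_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    queries = torch.randn(2, 100, 256)          # (batch, num_queries, hidden_dim)
    print(box_head(queries).sigmoid().shape)    # torch.Size([2, 100, 4])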
def build(args):
num_classes = 20 if args.dataset_file != 'coco' else 91
if args.dataset_file == "coco_panoptic":
num_classes = 250
device = torch.device(args.device)
backbone = build_backbone(args)
transformer = build_transformer(args)
model = DETR(
backbone,
transformer,
num_classes=num_classes,
num_queries=args.num_queries,
aux_loss=args.aux_loss,
)
if args.masks:
model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
matcher = build_matcher(args)
weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
weight_dict['loss_giou'] = args.giou_loss_coef
if args.masks:
weight_dict["loss_mask"] = args.mask_loss_coef
weight_dict["loss_dice"] = args.dice_loss_coef
# TODO this is a hack
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ['labels', 'boxes', 'cardinality']
if args.masks:
losses += ["masks"]
criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
eos_coef=args.eos_coef, losses=losses)
criterion.to(device)
postprocessors = {'bbox': PostProcess()}
if args.masks:
postprocessors['segm'] = PostProcessSegm()
if args.dataset_file == "coco_panoptic":
is_thing_map = {i: i <= 90 for i in range(201)}
postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
return model, criterion, postprocessors
| detr-master | models/detr.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
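# ---------------------------------------------------------------------------
# Sketch (not part of the original file): the matcher run on random predictions
# for a batch of 2 images with 2 and 3 ground-truth boxes. The cost weights are
# illustrative, not necessarily the training defaults.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
    demo_outputs = {
        "pred_logits": torch.randn(2, 10, 92),  # (batch, num_queries, num_classes + 1)
        "pred_boxes": torch.rand(2, 10, 4),     # normalized (cx, cy, w, h)
    }
    demo_targets = [
        {"labels": torch.tensor([3, 17]), "boxes": torch.rand(2, 4)},
        {"labels": torch.tensor([1, 1, 5]), "boxes": torch.rand(3, 4)},
    ]
    for i, (pred_idx, tgt_idx) in enumerate(demo_matcher(demo_outputs, demo_targets)):
        print(f"image {i}: queries {pred_idx.tolist()} -> targets {tgt_idx.tolist()}")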
| detr-master | models/matcher.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
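# ---------------------------------------------------------------------------
# Sketch (not part of the original file): with freshly initialized buffers
# (mean 0, var 1, weight 1, bias 0) the frozen BN is the identity up to the
# eps term; in practice the buffers are loaded from a pretrained checkpoint
# and the layer applies a fixed per-channel affine transform.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    frozen_bn = FrozenBatchNorm2d(3)
    x_demo = torch.randn(1, 3, 4, 4)
    print(torch.allclose(frozen_bn(x_demo), x_demo, atol=1e-4))  # True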
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| detr-master | models/backbone.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
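# ---------------------------------------------------------------------------
# Sketch (not part of the original file): the sine embedding maps a (B, H, W)
# padding mask to a (B, 2 * num_pos_feats, H, W) positional tensor; with
# hidden_dim=256 the builder below uses num_pos_feats=128, giving 256 channels.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
    feats = torch.randn(2, 256, 16, 20)
    pad_mask = torch.zeros(2, 16, 20, dtype=torch.bool)   # no padding
    print(pe(NestedTensor(feats, pad_mask)).shape)        # torch.Size([2, 256, 16, 20])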
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = torch.cat([
x_emb.unsqueeze(0).repeat(h, 1, 1),
y_emb.unsqueeze(1).repeat(1, w, 1),
], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim // 2
if args.position_embedding in ('v2', 'sine'):
# TODO find a better way of exposing other arguments
position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
elif args.position_embedding in ('v3', 'learned'):
position_embedding = PositionEmbeddingLearned(N_steps)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| detr-master | models/position_encoding.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .detr import build
def build_model(args):
return build(args)
| detr-master | models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False,
return_intermediate_dec=False):
super().__init__()
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
mask = mask.flatten(1)
tgt = torch.zeros_like(query_embed)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| detr-master | models/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
class DETRsegm(nn.Module):
def __init__(self, detr, freeze_detr=False):
super().__init__()
self.detr = detr
if freeze_detr:
for p in self.parameters():
p.requires_grad_(False)
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)
self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
def forward(self, samples: NestedTensor):
if not isinstance(samples, NestedTensor):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.detr.backbone(samples)
bs = features[-1].tensors.shape[0]
src, mask = features[-1].decompose()
assert mask is not None
src_proj = self.detr.input_proj(src)
hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])
outputs_class = self.detr.class_embed(hs)
outputs_coord = self.detr.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.detr.aux_loss:
out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)
# FIXME h_boxes takes the last one computed, keep this in mind
bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])
outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])
out["pred_masks"] = outputs_seg_masks
return out
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask: Optional[Tensor] = None):
q = self.q_linear(q)
k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
weights = self.dropout(weights)
return weights
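# ---------------------------------------------------------------------------
# Sketch (not part of the original file): the attention map produced for each
# query over the feature map, one map per head, with no padding mask.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_attn = MHAttentionMap(query_dim=64, hidden_dim=64, num_heads=8)
    q_demo = torch.randn(2, 10, 64)         # decoder output, one row per query
    k_demo = torch.randn(2, 64, 8, 12)      # projected backbone feature map
    print(demo_attn(q_demo, k_demo).shape)  # torch.Size([2, 10, 8, 8, 12])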
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha: (optional) Weighting factor in range (0,1) to balance
positive vs negative examples. Default = 0.25; a negative value disables the weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
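# Toy example (illustrative), using the same flattened logits/targets layout as
# dice_loss. gamma down-weights pixels that are already classified confidently,
# while alpha weights positive pixels by alpha and negative ones by (1 - alpha).
#
#   logits = torch.zeros(2, 16)                       # maximally uncertain predictions
#   targets = torch.ones(2, 16)
#   sigmoid_focal_loss(logits, targets, num_boxes=2)  # -> 0.25 * 0.5**2 * log(2) ≈ 0.043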
class PostProcessSegm(nn.Module):
def __init__(self, threshold=0.5):
super().__init__()
self.threshold = threshold
@torch.no_grad()
def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
assert len(orig_target_sizes) == len(max_target_sizes)
max_h, max_w = max_target_sizes.max(0)[0].tolist()
outputs_masks = outputs["pred_masks"].squeeze(2)
outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
img_h, img_w = t[0], t[1]
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
results[i]["masks"] = F.interpolate(
results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
).byte()
return results
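# Usage note (a sketch, not part of the original file): this module is meant to run
# after the box post-processor, which produces the per-image `results` dicts; it adds
# a "masks" entry by thresholding the predicted mask logits and resizing them back to
# the original image resolution.
#
#   postprocess_segm = PostProcessSegm(threshold=0.5)
#   results = postprocess_segm(results, outputs, orig_target_sizes, max_target_sizes)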
class PostProcessPanoptic(nn.Module):
"""This class converts the output of the model to the final panoptic result, in the format expected by the
coco panoptic API """
def __init__(self, is_thing_map, threshold=0.85):
"""
Parameters:
is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
the class is a thing (True) or a stuff (False) class
threshold: confidence threshold: segments with confidence lower than this will be deleted
"""
super().__init__()
self.threshold = threshold
self.is_thing_map = is_thing_map
def forward(self, outputs, processed_sizes, target_sizes=None):
""" This function computes the panoptic prediction from the model's predictions.
Parameters:
outputs: This is a dict coming directly from the model. See the model doc for the content.
processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
model, i.e. the size after data augmentation but before batching.
target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
of each prediction. If left to None, it will default to the processed_sizes
"""
if target_sizes is None:
target_sizes = processed_sizes
assert len(processed_sizes) == len(target_sizes)
out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
assert len(out_logits) == len(raw_masks) == len(target_sizes)
preds = []
def to_tuple(tup):
if isinstance(tup, tuple):
return tup
return tuple(tup.cpu().tolist())
for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
):
# we filter out empty queries and detections below the confidence threshold
scores, labels = cur_logits.softmax(-1).max(-1)
keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
cur_scores = cur_scores[keep]
cur_classes = cur_classes[keep]
cur_masks = cur_masks[keep]
cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0)
cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
h, w = cur_masks.shape[-2:]
assert len(cur_boxes) == len(cur_classes)
# It may be that we have several predicted masks for the same stuff class.
# In the following, we track the list of mask ids for each stuff class (they are merged later on)
cur_masks = cur_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
for k, label in enumerate(cur_classes):
if not self.is_thing_map[label.item()]:
stuff_equiv_classes[label.item()].append(k)
def get_ids_area(masks, scores, dedup=False):
# This helper function creates the final panoptic segmentation image
# It also returns the area of the masks that appears on the image
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
final_h, final_w = to_tuple(target_size)
seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
np_seg_img = (
torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
)
m_id = torch.from_numpy(rgb2id(np_seg_img))
area = []
for i in range(len(scores)):
area.append(m_id.eq(i).sum().item())
return area, seg_img
area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
if cur_classes.numel() > 0:
# We now filter out masks that are too small, repeating as long as we find some
while True:
filtered_small = torch.as_tensor(
[area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
)
if filtered_small.any().item():
cur_scores = cur_scores[~filtered_small]
cur_classes = cur_classes[~filtered_small]
cur_masks = cur_masks[~filtered_small]
area, seg_img = get_ids_area(cur_masks, cur_scores)
else:
break
else:
cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
segments_info = []
for i, a in enumerate(area):
cat = cur_classes[i].item()
segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
del cur_classes
with io.BytesIO() as out:
seg_img.save(out, format="PNG")
predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
preds.append(predictions)
return preds
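# Usage sketch (illustrative; `model` and the class-id split below are assumptions,
# not definitions from this file): is_thing_map marks each class id as a "thing"
# (instance) or "stuff" (amorphous region) class, and segments whose confidence is
# below the threshold are dropped.
#
#   is_thing_map = {i: i <= 90 for i in range(201)}   # hypothetical COCO-style split
#   postprocessor = PostProcessPanoptic(is_thing_map, threshold=0.85)
#   # outputs = model(samples)                        # dict with pred_logits / pred_masks / pred_boxes
#   # preds = postprocessor(outputs, processed_sizes=[(800, 1066)])
#   # preds[0]["png_string"] is a PNG-encoded id map; preds[0]["segments_info"] lists the segments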
| detr-master | models/segmentation.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version.
"""
import json
import argparse
import numpy as np
import torch
def parse_args():
parser = argparse.ArgumentParser("D2 model converter")
parser.add_argument("--source_model", default="", type=str, help="Path or url to the DETR model to convert")
parser.add_argument("--output_model", default="", type=str, help="Path where to save the converted model")
return parser.parse_args()
def main():
args = parse_args()
# D2 expects contiguous classes, so we need to remap the 92 classes from DETR
# fmt: off
coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
# fmt: on
coco_idx = np.array(coco_idx)
if args.source_model.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True)
else:
checkpoint = torch.load(args.source_model, map_location="cpu")
model_to_convert = checkpoint["model"]
model_converted = {}
for k in model_to_convert.keys():
old_k = k
if "backbone" in k:
k = k.replace("backbone.0.body.", "")
if "layer" not in k:
k = "stem." + k
for t in [1, 2, 3, 4]:
k = k.replace(f"layer{t}", f"res{t + 1}")
for t in [1, 2, 3]:
k = k.replace(f"bn{t}", f"conv{t}.norm")
k = k.replace("downsample.0", "shortcut")
k = k.replace("downsample.1", "shortcut.norm")
k = "backbone.0.backbone." + k
k = "detr." + k
print(old_k, "->", k)
if "class_embed" in old_k:
v = model_to_convert[old_k].detach()
if v.shape[0] == 92:
shape_old = v.shape
model_converted[k] = v[coco_idx]
print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape))
continue
model_converted[k] = model_to_convert[old_k].detach()
model_to_save = {"model": model_converted}
torch.save(model_to_save, args.output_model)
if __name__ == "__main__":
main()
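# Example invocation (a sketch; both paths are placeholders): produce or download a
# DETR checkpoint first, then convert it for the Detectron2 wrapper in d2/.
#
#   python converter.py --source_model detr-r50.pth --output_model converted_model.pth
#
# --source_model also accepts an https:// URL, in which case the checkpoint is fetched
# via torch.hub.load_state_dict_from_url.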
| detr-master | d2/converter.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
import sys
# fmt: off
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
import time
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from d2.detr import DetrDatasetMapper, add_detr_config
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import AutogradProfiler, DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from detectron2.solver.build import maybe_add_gradient_clipping
class Trainer(DefaultTrainer):
"""
Extension of the Trainer class adapted to DETR.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
self.clip_norm_val = 0.0
if cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
if cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
self.clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
super().__init__(cfg)
def run_step(self):
assert self.model.training, "[Trainer] model was changed to eval mode!"
start = time.perf_counter()
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
loss_dict = self.model(data)
losses = sum(loss_dict.values())
self._detect_anomaly(losses, loss_dict)
metrics_dict = loss_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
self.optimizer.zero_grad()
losses.backward()
if self.clip_norm_val > 0.0:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_norm_val)
self.optimizer.step()
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return COCOEvaluator(dataset_name, cfg, True, output_folder)
@classmethod
def build_train_loader(cls, cfg):
if "Detr" == cfg.MODEL.META_ARCHITECTURE:
mapper = DetrDatasetMapper(cfg, True)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_optimizer(cls, cfg, model):
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for key, value in model.named_parameters(recurse=True):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "backbone" in key:
lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
elif optimizer_type == "ADAMW":
optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_detr_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
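# Example launches (a sketch; the config file name is assumed to follow the layout of
# the d2/configs directory in this repository and may differ in your checkout):
#
#   python train_net.py --config-file configs/detr_256_6_6_torchvision.yaml --num-gpus 8
#
#   # evaluation only, starting from weights produced by d2/converter.py
#   python train_net.py --config-file configs/detr_256_6_6_torchvision.yaml \
#       --eval-only MODEL.WEIGHTS converted_model.pth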
| detr-master | d2/train_net.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from detectron2.layers import ShapeSpec
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.utils.logger import log_first_n
from fvcore.nn import giou_loss, smooth_l1_loss
from models.backbone import Joiner
from models.detr import DETR, SetCriterion
from models.matcher import HungarianMatcher
from models.position_encoding import PositionEmbeddingSine
from models.transformer import Transformer
from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from util.misc import NestedTensor
__all__ = ["Detr"]
class MaskedBackbone(nn.Module):
""" This is a thin wrapper around D2's backbone to provide padding masking"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
backbone_shape = self.backbone.output_shape()
self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()]
self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels
def forward(self, images):
features = self.backbone(images.tensor)
masks = self.mask_out_padding(
[features_per_level.shape for features_per_level in features.values()],
images.image_sizes,
images.tensor.device,
)
assert len(features) == len(masks)
for i, k in enumerate(features.keys()):
features[k] = NestedTensor(features[k], masks[i])
return features
def mask_out_padding(self, feature_shapes, image_sizes, device):
masks = []
assert len(feature_shapes) == len(self.feature_strides)
for idx, shape in enumerate(feature_shapes):
N, _, H, W = shape
masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device)
for img_idx, (h, w) in enumerate(image_sizes):
masks_per_feature_level[
img_idx,
: int(np.ceil(float(h) / self.feature_strides[idx])),
: int(np.ceil(float(w) / self.feature_strides[idx])),
] = 0
masks.append(masks_per_feature_level)
return masks
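# Worked example (illustrative): for an image of size (h=500, w=600) padded into a
# (512, 640) batch tensor, a feature level with stride 32 yields a (16, 20) mask;
# the valid region of ceil(500/32)=16 rows and ceil(600/32)=19 columns is set to 0
# (not padding), and the remaining right-most column stays 1 (padding).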
@META_ARCH_REGISTRY.register()
class Detr(nn.Module):
"""
Implement Detr
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
# Transformer parameters:
nheads = cfg.MODEL.DETR.NHEADS
dropout = cfg.MODEL.DETR.DROPOUT
dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
enc_layers = cfg.MODEL.DETR.ENC_LAYERS
dec_layers = cfg.MODEL.DETR.DEC_LAYERS
pre_norm = cfg.MODEL.DETR.PRE_NORM
# Loss parameters:
giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
l1_weight = cfg.MODEL.DETR.L1_WEIGHT
deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT
N_steps = hidden_dim // 2
d2_backbone = MaskedBackbone(cfg)
backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True))
backbone.num_channels = d2_backbone.num_channels
transformer = Transformer(
d_model=hidden_dim,
dropout=dropout,
nhead=nheads,
dim_feedforward=dim_feedforward,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
normalize_before=pre_norm,
return_intermediate_dec=deep_supervision,
)
self.detr = DETR(
backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision
)
self.detr.to(self.device)
# building criterion
matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight)
weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
weight_dict["loss_giou"] = giou_weight
if deep_supervision:
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes", "cardinality"]
self.criterion = SetCriterion(
self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses
)
self.criterion.to(self.device)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
images = self.preprocess_image(batched_inputs)
output = self.detr(images)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
targets = self.prepare_targets(gt_instances)
loss_dict = self.criterion(output, targets)
weight_dict = self.criterion.weight_dict
for k in loss_dict.keys():
if k in weight_dict:
loss_dict[k] *= weight_dict[k]
return loss_dict
else:
box_cls = output["pred_logits"]
box_pred = output["pred_boxes"]
results = self.inference(box_cls, box_pred, images.image_sizes)
processed_results = []
for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
def prepare_targets(self, targets):
new_targets = []
for targets_per_image in targets:
h, w = targets_per_image.image_size
image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)
gt_classes = targets_per_image.gt_classes
gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy
gt_boxes = box_xyxy_to_cxcywh(gt_boxes)
new_targets.append({"labels": gt_classes, "boxes": gt_boxes})
return new_targets
def inference(self, box_cls, box_pred, image_sizes):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_queries, K).
The tensor predicts the classification probability for each query.
box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every query.
image_sizes (List[torch.Size]): the input image sizes
Returns:
results (List[Instances]): a list of #images elements.
"""
assert len(box_cls) == len(image_sizes)
results = []
# For each box we assign the best class, or the second best if the best one is `no_object`.
scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)
for scores_per_image, labels_per_image, box_pred_per_image, image_size in zip(
scores, labels, box_pred, image_sizes
):
result = Instances(image_size)
result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))
result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])
result.scores = scores_per_image
result.pred_classes = labels_per_image
results.append(result)
return results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs]
images = ImageList.from_tensors(images)
return images
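# Input-format sketch (illustrative; values are placeholders): each element of
# batched_inputs is a dict produced by the dataset mapper, containing at minimum the
# image tensor in (C, H, W) format and, during training, an Instances object with the
# ground-truth boxes and classes.
#
#   batched_inputs = [{
#       "image": torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8),
#       "height": 480, "width": 640,
#       # "instances": Instances(...)   # required only in training mode
#   }]
#   # detr_model = Detr(cfg); detr_model.eval(); detr_model(batched_inputs)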
| detr-master | d2/detr/detr.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_detr_config(cfg):
"""
Add config for DETR.
"""
cfg.MODEL.DETR = CN()
cfg.MODEL.DETR.NUM_CLASSES = 80
# LOSS
cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
cfg.MODEL.DETR.L1_WEIGHT = 5.0
cfg.MODEL.DETR.DEEP_SUPERVISION = True
cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
# TRANSFORMER
cfg.MODEL.DETR.NHEADS = 8
cfg.MODEL.DETR.DROPOUT = 0.1
cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048
cfg.MODEL.DETR.ENC_LAYERS = 6
cfg.MODEL.DETR.DEC_LAYERS = 6
cfg.MODEL.DETR.PRE_NORM = False
cfg.MODEL.DETR.HIDDEN_DIM = 256
cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
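# Minimal usage sketch (assumes detectron2 is installed): extend a fresh config with
# the DETR options defined above, then override them like any other detectron2 option.
#
#   from detectron2.config import get_cfg
#   cfg = get_cfg()
#   add_detr_config(cfg)
#   cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 300   # example override, not a recommended value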
| detr-master | d2/detr/config.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_detr_config
from .detr import Detr
from .dataset_mapper import DetrDatasetMapper
| detr-master | d2/detr/__init__.py |