| python_code | repo_name | file_path |
|---|---|---|
"""
This script handles the skimage exif problem.
"""
from PIL import Image
import numpy as np
ORIENTATIONS = { # used in apply_orientation
2: (Image.FLIP_LEFT_RIGHT,),
3: (Image.ROTATE_180,),
4: (Image.FLIP_TOP_BOTTOM,),
5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
6: (Image.ROTATE_270,),
7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
8: (Image.ROTATE_90,)
}
def open_oriented_im(im_path):
im = Image.open(im_path)
if hasattr(im, '_getexif'):
exif = im._getexif()
        if exif is not None and 274 in exif:  # tag 274 is EXIF Orientation
            orientation = exif[274]
im = apply_orientation(im, orientation)
img = np.asarray(im).astype(np.float32) / 255.
if img.ndim == 2:
img = img[:, :, np.newaxis]
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def apply_orientation(im, orientation):
if orientation in ORIENTATIONS:
for method in ORIENTATIONS[orientation]:
im = im.transpose(method)
return im
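# Usage sketch (the path below is hypothetical): load an image with its
# EXIF rotation already applied; the result is always a float32 array of
# shape H x W x 3 with values in [0, 1].
if __name__ == '__main__':
    img = open_oriented_im('/tmp/photo.jpg')
    print(img.shape)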
| colorization-master | caffe-colorization/examples/web_demo/exifutil.py |
import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil
import caffe
REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
        # If reading the image fails for any reason, just log it and render
        # an error page instead of continuing.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = Image.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 256
default_args['raw_scale'] = 255.
def __init__(self, model_def_file, pretrained_model_file, mean_file,
raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
logging.info('Loading net and associated files...')
if gpu_mode:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(image_dim, image_dim), raw_scale=raw_scale,
mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-g', '--gpu',
help="use gpu mode",
action='store_true', default=False)
opts, args = parser.parse_args()
ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
# Initialize classifier + warm start by forward for allocation
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
app.clf.net.forward()
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
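# Client sketch for the /classify_url route above. The server address and
# image URL are hypothetical; run this from a separate process while the
# demo server is up:
#
#   import urllib
#   html = urllib.urlopen('http://localhost:5000/classify_url'
#                         '?imageurl=http://example.com/cat.jpg').read()
#
# The response is the rendered index.html carrying the classification
# result tuple produced by ImagenetClassifier.classify_image.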
| colorization-master | caffe-colorization/examples/web_demo/app.py |
#!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
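    # Note on the filtering step above: pool.map returns one bool per row,
    # in input order, so df[results] keeps exactly the rows whose image was
    # downloaded and hashed successfully. Toy sketch (hypothetical data):
    #   >>> toy = pd.DataFrame({'image_url': ['a.jpg', 'b.jpg', 'c.jpg']})
    #   >>> toy[[True, False, True]]['image_url'].tolist()
    #   ['a.jpg', 'c.jpg']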
| colorization-master | caffe-colorization/examples/finetune_flickr_style/assemble_data.py |
#!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
filename = sys.argv[1]
output_filename = sys.argv[2]
content = json.load(open(filename))
if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
yaml_frontmatter = ['---']
for key, val in content['metadata'].iteritems():
if key == 'example_name':
key = 'title'
if val == '':
val = os.path.basename(filename)
yaml_frontmatter.append('{}: {}'.format(key, val))
yaml_frontmatter += ['category: notebook']
yaml_frontmatter += ['original_path: ' + filename]
with open(output_filename, 'w') as fo:
fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
fo.write(open(filename).read())
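# Front-matter sketch: for a notebook whose metadata is
# {'include_in_docs': True, 'example_name': 'My Demo'} saved at the
# hypothetical path examples/my_demo.ipynb, the output file begins with
# (key order may vary, since dict iteration order is arbitrary):
#
#   ---
#   include_in_docs: True
#   title: My Demo
#   category: notebook
#   original_path: examples/my_demo.ipynb
#   ---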
| colorization-master | caffe-colorization/scripts/copy_notebook.py |
#!/usr/bin/python2
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. Nor does it attempt to
find all problems, or to ensure that everything it finds is
legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
      Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_dir',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'caffe/alt_fn',
'caffe/data_layer_setup',
'caffe/random_fn',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = [
'-build/include_dir',
'-readability/todo',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
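# Quick sanity sketch of the tables just built (illustrative spot checks,
# not part of the lint logic):
assert _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
assert _CHECK_REPLACEMENT['EXPECT_TRUE']['<'] == 'EXPECT_LT'
assert _CHECK_REPLACEMENT['EXPECT_FALSE']['<'] == 'EXPECT_GE'  # inverted sense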
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# Finds Copyright (case-insensitive, matching how callers use it).
_RE_COPYRIGHT = re.compile(r'Copyright', re.IGNORECASE)
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'hpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
if matched.group(1) == '_NEXT_LINE':
linenum += 1
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
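# Behavior sketch for the cached helpers above (doctest-style):
#   >>> bool(Match(r'\s*#\s*include', '  #include <map>'))
#   True
#   >>> Search(r'<(\w+)>', '#include <map>').group(1)
#   'map'
#   >>> ReplaceAll(r'\s+', ' ', 'a \t b')
#   'a b'
# All three share _regexp_compile_cache, so each distinct pattern is
# compiled at most once.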
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
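# Ordering sketch: a C system header arriving after a C++ system header is
# out of order (messages are built from the _TYPE_NAMES/_SECTION_NAMES
# tables above):
#   >>> state = _IncludeState()
#   >>> state.CheckNextIncludeOrder(_CPP_SYS_HEADER)
#   ''
#   >>> state.CheckNextIncludeOrder(_C_SYS_HEADER)
#   'Found C system header after C++ system header'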
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # lines_in_function/base_trigger in [1,2) => 0, [2,4) => 1, [4,8) => 2,
      # and so on (capped at 5 just below).
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
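# Filter semantics sketch: filters are applied left to right, so a later
# '+' rule can re-enable a subcategory that an earlier '-' rule removed:
#   >>> _cpplint_state.SetFilters('-whitespace,+whitespace/braces')
#   >>> _ShouldPrintError('whitespace/indent', 5, 1)
#   False
#   >>> _ShouldPrintError('whitespace/braces', 5, 1)
#   True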
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
  False positives can be suppressed by the use of
  "NOLINT(category)" comments on the offending line. These are
  parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to be careful about how surrounding spaces are removed, so that
# comments inside statements are handled well.
# The current rule is: we only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if this doesn't work, we try the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
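# Example: one unescaped double quote leaves the parser inside a string.
#   >>> IsCppString('printf("hello')
#   True
#   >>> IsCppString('printf("hello")')
#   False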
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
  All three members are lists of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
"""
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
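# Example: starting at the '(' at position 3, the matching ')' is at
# position 13, so the index just past it is returned; with no match, the
# open depth at end of line is reported instead:
#   >>> FindEndOfExpressionInLine('foo(bar(x), y)', 3, 0, '(', ')')
#   (14, 0)
#   >>> FindEndOfExpressionInLine('foo(bar(x),', 3, 0, '(', ')')
#   (-1, 1)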
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[<':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
if startchar == '<': endchar = '>'
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, num_open) = FindEndOfExpressionInLine(
line, 0, num_open, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in xrange(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
endchar = line[pos]
if endchar not in ')}]>':
return (line, 0, -1)
if endchar == ')': startchar = '('
if endchar == ']': startchar = '['
if endchar == '}': startchar = '{'
if endchar == '>': startchar = '<'
# Check last line
(start_pos, num_open) = FindStartOfExpressionInLine(
line, pos, 0, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, num_open) = FindStartOfExpressionInLine(
line, len(line) - 1, num_open, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find startchar before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if a Copyright message appears at the top of the file."""
# We'll check up to line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
    if _RE_COPYRIGHT.search(lines[line]):
error(filename, 0, 'legal/copyright', 5,
'Copyright message found. '
'You should not include a copyright line.')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
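# Example (mirroring the _USAGE discussion of --root): assuming
# RepositoryName() resolves a header to 'chrome/browser/ui/browser.h',
# every '-', '.', '/' and whitespace character becomes '_', the result is
# uppercased, and a trailing '_' is appended, yielding
# 'CHROME_BROWSER_UI_BROWSER_H_'.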
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
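# Illustrative constructs the check above warns about (hypothetical C++):
#   /* a comment that
#      continues here */        -> readability/multiline_comment
#   printf("a string that
#           continues here");   -> readability/multiline_string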
caffe_alt_function_list = (
('memset', ['caffe_set', 'caffe_memset']),
('cudaMemset', ['caffe_gpu_set', 'caffe_gpu_memset']),
('memcpy', ['caffe_copy']),
('cudaMemcpy', ['caffe_copy', 'caffe_gpu_memcpy']),
)
def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
"""Checks for C(++) functions for which a Caffe substitute should be used.
For certain native C functions (memset, memcpy), there is a Caffe alternative
which should be used instead.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function, alts in caffe_alt_function_list:
ix = line.find(function + '(')
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
disp_alts = ['%s(...)' % alt for alt in alts]
error(filename, linenum, 'caffe/alt_fn', 2,
'Use Caffe function %s instead of %s(...).' %
(' or '.join(disp_alts), function))
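# Illustrative lines for CheckCaffeAlternatives above (hypothetical C++); the
# last one is skipped because the match is preceded by '>', i.e. member access:
#   memset(data, 0, count);         -> use caffe_set(...) or caffe_memset(...)
#   cudaMemcpy(dst, src, n, kind);  -> use caffe_copy(...) or caffe_gpu_memcpy(...)
#   blob->memset(0);                -> not flagged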
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
"""Except the base classes, Caffe DataLayer should define DataLayerSetUp
instead of LayerSetUp.
The base DataLayers define common SetUp steps, the subclasses should
not override them.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
ix = line.find('DataLayer<Dtype>::LayerSetUp')
if ix >= 0 and (
line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
          'Except for the base classes, Caffe DataLayers should define'
          + ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
          + ' define the common SetUp steps; the subclasses should'
          + ' not override them.')
ix = line.find('DataLayer<Dtype>::DataLayerSetUp')
if ix >= 0 and (
line.find('void Base') == -1 and
line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
          'Except for the base classes, Caffe DataLayers should define'
          + ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
          + ' define the common SetUp steps; the subclasses should'
          + ' not override them.')
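# Illustrative lines for CheckCaffeDataLayerSetUp above (hypothetical C++):
#   void DataLayer<Dtype>::LayerSetUp(...)      -> flagged: define DataLayerSetUp
#   void BaseDataLayer<Dtype>::LayerSetUp(...)  -> allowed: base class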
c_random_function_list = (
'rand(',
'rand_r(',
'random(',
)
def CheckCaffeRandom(filename, clean_lines, linenum, error):
"""Checks for calls to C random functions (rand, rand_r, random, ...).
  Caffe code should (almost) always use the caffe_rng_* functions rather
  than these, as the internal state of these C functions is independent of
  the native Caffe RNG system, which is meant to produce deterministic
  results for a fixed Caffe seed set using Caffe::set_random_seed(...).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function in c_random_function_list:
ix = line.find(function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'caffe/random_fn', 2,
'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
+ function +
') to ensure results are deterministic for a fixed Caffe seed.')
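# Illustrative lines for CheckCaffeRandom above (hypothetical C++):
#   int r = rand();   -> flagged: use caffe_rng_rand()
#   my_rand(seed);    -> skipped: 'rand(' is preceded by '_'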
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
  Much code was originally written without consideration for
  multi-threading; engineers also tend to rely on old habits, having
  learned POSIX before the threading extensions were added. These tests
  guide engineers toward the thread-safe functions (when using POSIX
  directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
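# Illustrative lines for CheckPosixThreading above (hypothetical C++):
#   char* tok = strtok(buf, ",");    -> suggest strtok_r(...)
#   struct tm* t = localtime(&now);  -> suggest localtime_r(...)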
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
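# Illustrative lines for CheckVlogArguments above (hypothetical C++):
#   VLOG(2) << "details";     -> OK: numeric verbosity level
#   VLOG(ERROR) << "oops";    -> flagged: use LOG(ERROR) instead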
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid because it effectively does count++ (moving the pointer) and
  should be replaced with ++*count, (*count)++, or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
if initial_indent:
self.class_indent = len(initial_indent.group(1))
else:
self.class_indent = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented only by whitespace.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue
    # a warning for missing namespace comments if there aren't enough
    # lines. However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
    # preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when the existing comment is a substring of the
    # expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
      # Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if the keyword isn't followed by whitespace,
      # so that we don't confuse our namespace checker. The missing
      # space will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
    # Because this parser has no nesting state about templates, by the
    # time it sees "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If the namespace or class hasn't seen an opening brace yet, mark
        # the namespace/class head as complete. Otherwise push a new block
        # onto the stack.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
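# Illustrative lines for CheckForNonStandardConstructs above (hypothetical
# C++; the constructor check applies inside a class body):
#   const static int kMax = 5;  -> build/storage_class: storage class first
#   class Foo {
#     Foo(int x);               -> runtime/explicit: single-argument
#   };                             constructor not marked explicit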
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
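# Illustrative lines for CheckSpacingForFunctionCall above (hypothetical C++):
#   DoStuff( arg );   -> 'Extra space after ( in function call'
#   DoStuff (arg);    -> 'Extra space before ( in function call'
#   if (a < b) { ...  -> exempt: control-flow parens follow looser rules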
def IsBlankLine(line):
"""Returns true if the given line is blank.
  We consider a line to be blank if the line is empty or consists only
  of whitespace.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
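# Illustrative TODO comments for CheckComment above (hypothetical):
#   // TODO(alice): refactor this.  -> OK
#   //    TODO(bob): fix.           -> 'Too many spaces before TODO'
#   // TODO: fix.                   -> 'Missing username in TODO'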
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
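# Illustrative case for CheckAccess above (hypothetical C++): DISALLOW*
# macros must appear in the private: section:
#   class Foo {
#    public:
#     DISALLOW_COPY_AND_ASSIGN(Foo);  -> flagged: must be in private:
#   };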
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments and other template expressions.
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
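# Illustrative inputs for FindNextMatchingAngleBracket above (hypothetical):
#   init_suffix "int> x;"  (from "Foo<int> x;")   -> True: matching '>' found
#   init_suffix " b) { }"  (from "if (a < b) {")  -> False: ')' seen first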
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80-column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80-column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
        # Search for the function arguments or an initializer list. We use a
        # simple heuristic here: if the line is indented 4 spaces and we have
        # a closing paren without the opening paren, followed by an opening
        # brace or colon (for initializer lists), we assume that it is the
        # last line of a function header. If we have a colon indented 4
        # spaces, it is an initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
  # We run this check in two passes: a first pass on elided lines to
  # find the missing spaces, and a second pass on raw lines to confirm
  # that those missing spaces are not due to elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
  # space after the ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
  # In range-based for, we want spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
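# Illustrative lines for a few of the CheckSpacing rules above (hypothetical
# C++, shown as they would appear inside an indented function body):
#   x=y+1;               -> 'Missing spaces around ='
#   if(ready) { ... }    -> 'Missing space before ( in if('
#   for (int i: v) {}    -> 'Missing space around colon in range-based for loop'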
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
    # Also ignore cases where the previous line ends with a backslash, as is
    # common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
  # Likewise, the body of an else should never be on the same line as the else
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
  # whitespace is allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with a closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
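# For instance (illustrative): 'while (Poll());' is reported as an empty loop
# body and 'if (done);' as an empty conditional body, since in both cases the
# character right after the matching ')' is ';'.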
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
check_macro = None
start_pos = -1
for macro in _CHECK_MACROS:
i = lines[linenum].find(macro)
if i >= 0:
check_macro = macro
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
if not matched:
continue
start_pos = len(matched.group(1))
break
if not check_macro or start_pos < 0:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
  # Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
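# Outcome sketch (assuming the usual _CHECK_REPLACEMENT table): CHECK(x == 42)
# yields 'Consider using CHECK_EQ instead of CHECK(a == b)', while
# CHECK(42 < a && a < b) is left alone because '&&' makes it irreplaceable.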
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
  # it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
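# For example (assuming _ALT_TOKEN_REPLACEMENT maps 'and' to '&&'):
# 'if (a and b)' would be reported as 'Use operator && instead of and'.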
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
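# Width sketch (illustrative): GetLineWidth(u'\u4e00') is 2, since CJK
# ideographs are East Asian Wide, while GetLineWidth('abc') is simply 3.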
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations in which we allow one space, notably for
  # section labels.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
    extended_length = int(_line_length * 1.25)
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
  return (filename.endswith('_test.cc') or
          filename.endswith('_unittest.cc') or
          filename.endswith('_regtest.cc'))
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include_dir', 4,
'Include the directory when naming .h files')
  # We shouldn't include a file more than once. Actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
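# Illustrative outcomes for this check (messages abbreviated): repeating
# '#include "foo/bar.h"' yields '"foo/bar.h" already included at <file>:<line>',
# '#include "bar.h"' (no directory) yields the build/include_dir warning, and
# '#include <iostream>' in a non-test file yields 'Streams are highly
# discouraged.'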
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and currently use inferior regexp matching).
  # Map each opening punctuation to its matching closing punctuation.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
  # All punctuation matched.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
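# Rough behavior of these patterns (illustrative): in a parameter list,
# 'string &name,' matches _RE_PATTERN_REF_PARAM but not
# _RE_PATTERN_CONST_REF_PARAM, so it is the shape CheckForNonConstReference
# warns about; 'const string &name,' matches both and is therefore accepted.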
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
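# A few concrete lines and the warnings CheckLanguage would emit for them
# (illustrative, not exhaustive):
#   'int n = int(f);'       -> readability/casting (use static_cast<int>(...))
#   'short port;'           -> runtime/int (use "unsigned short" for ports)
#   'char buf[len + 1];'    -> runtime/arrays (variable-length array)
#   'using namespace std;'  -> build/namespaces (use using-declarations)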
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for preprocessor directives.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
check_params = False
if not nesting_state.stack:
check_params = True # top level
elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
check_params = True # within class or namespace
elif Match(r'.*{\s*$', line):
if (len(nesting_state.stack) == 1 or
isinstance(nesting_state.stack[-2], _ClassInfo) or
isinstance(nesting_state.stack[-2], _NamespaceInfo)):
check_params = True # just opened global/class/namespace block
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
check_params = False
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
check_params = False
break
if check_params:
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with sizeof, since sizeof looks like a cast.
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
  # explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
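# Sketch of CheckCStyleCast outcomes when called with the static_cast pattern
# used earlier in CheckLanguage (illustrative): 'int x = (int)y;' draws the
# readability/casting warning, while 'Function(int);' is treated as an unnamed
# parameter and draws readability/function instead; 'sizeof(int)' is
# explicitly excluded.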
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
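# Sketch of what the loop above builds (illustrative): for the '<map>' entry,
# _re_pattern_templates gains a pattern matching 'map<', so a line like
# 'std::map<int, string> m;' makes <map> a required header in
# CheckForIncludeWhatYouUse.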
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
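# Worked example (mirroring the docstring):
#   FilesBelongToSameModule('/absolute/path/to/base/sysinfo.cc',
#                           'base/sysinfo.h')
# returns (True, '/absolute/path/to/'), the prefix the caller prepends to
# open the header.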
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
    True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only the one appearing later in the file will be
  reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
    # The following test is just a speed-up; no semantics are changed.
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
      continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
  # Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore the original file name here so that the corresponding header file
  # can be found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
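# Illustrative outcome: a .cc file that calls std::min(a, b) but never pulls
# in <algorithm> (directly or via its own header) gets
# 'Add #include <algorithm> for min' at the offending line.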
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
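# For example (illustrative): 'make_pair<int, int>(1, 2)' is flagged, while
# 'make_pair(1, 2)' and an explicit 'std::pair<int, int>(1, 2)' are not.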
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckCaffeAlternatives(filename, clean_lines, line, error)
CheckCaffeDataLayerSetUp(filename, clean_lines, line, error)
CheckCaffeRandom(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
          'One or more unexpected \\r (^M) found; '
          'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
|
colorization-master
|
caffe-colorization/scripts/cpp_lint.py
|
#!/usr/bin/env python
import os
import sys
import time
import yaml
import urllib
import hashlib
import argparse
required_keys = ['caffemodel', 'caffemodel_url', 'sha1']
def reporthook(count, block_size, total_size):
"""
From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
"""
global start_time
if count == 0:
start_time = time.time()
return
duration = (time.time() - start_time) or 0.01
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def parse_readme_frontmatter(dirname):
readme_filename = os.path.join(dirname, 'readme.md')
with open(readme_filename) as f:
lines = [line.strip() for line in f.readlines()]
top = lines.index('---')
bottom = lines.index('---', top + 1)
frontmatter = yaml.load('\n'.join(lines[top + 1:bottom]))
assert all(key in frontmatter for key in required_keys)
return dirname, frontmatter
def valid_dirname(dirname):
try:
return parse_readme_frontmatter(dirname)
except Exception as e:
print('ERROR: {}'.format(e))
raise argparse.ArgumentTypeError(
'Must be valid Caffe model directory with a correct readme.md')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download trained model binary.')
parser.add_argument('dirname', type=valid_dirname)
args = parser.parse_args()
# A tiny hack: the dirname validator also returns readme YAML frontmatter.
dirname = args.dirname[0]
frontmatter = args.dirname[1]
model_filename = os.path.join(dirname, frontmatter['caffemodel'])
# Closure-d function for checking SHA1.
def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
        with open(filename, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest() == sha1
# Check if model exists.
if os.path.exists(model_filename) and model_checks_out():
print("Model already exists.")
sys.exit(0)
# Download and verify model.
urllib.urlretrieve(
frontmatter['caffemodel_url'], model_filename, reporthook)
if not model_checks_out():
print('ERROR: model did not download correctly! Run this again.')
sys.exit(1)
|
colorization-master
|
caffe-colorization/scripts/download_model_binary.py
|
"""
Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
"""
import os
import numpy as np
import h5py
script_dir = os.path.dirname(os.path.abspath(__file__))
# Generate HDF5DataLayer sample_data.h5
num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width
data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')
# We add an extra label2 dataset to test HDF5 layer's ability
# to handle arbitrary number of output ("top") Blobs.
label2 = label + 1
print data
print label
with h5py.File(script_dir + '/sample_data.h5', 'w') as f:
f['data'] = data
f['label'] = label
f['label2'] = label2
with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f:
f.create_dataset(
'data', data=data + total_size,
compression='gzip', compression_opts=1
)
f.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype='uint8',
)
f.create_dataset(
'label2', data=label2,
compression='gzip', compression_opts=1,
dtype='uint8',
)
with open(script_dir + '/sample_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/sample_data.h5\n')
f.write('src/caffe/test/test_data/sample_data_2_gzip.h5\n')
# Generate GradientBasedSolver solver_data.h5
num_cols = 3
num_rows = 8
height = 10
width = 10
data = np.random.randn(num_rows, num_cols, height, width)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
targets = np.random.randn(num_rows, 1)
targets = targets.astype('float32')
print data
print targets
with h5py.File(script_dir + '/solver_data.h5', 'w') as f:
f['data'] = data
f['targets'] = targets
with open(script_dir + '/solver_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/solver_data.h5\n')
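# A minimal read-back sketch (commented out; not part of the original script)
# to verify the files written above -- h5py datasets slice like numpy arrays:
#   with h5py.File(script_dir + '/sample_data.h5', 'r') as f:
#       assert f['data'].shape == (10, 8, 6, 5)
#       assert f['label'][0, 0] == 1.0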
|
colorization-master
|
caffe-colorization/src/caffe/test/test_data/generate_sample_data.py
|
import sys
import argparse
import caffe
from caffe import score, surgery # score, surgery function from caffe-fcn
import numpy as np
import os
import warnings
print sys.argv
def parse_args():
parser = argparse.ArgumentParser(description='')
# ***** FLAGS *****
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--phase', dest='phase', help='{0: 0-50k iters, 1: 50-100k, 2: 100-150k}', type=int, default=0)
parser.add_argument('--caffemodel',dest='caffemodel',help='path to caffemodel', type=str, \
default='./models/alexnet_release_450000_nobn_rs.caffemodel') # no strokes
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
EXP_DIR = os.path.join('./feature_learning_tests/segmentation')
weights = args.caffemodel
# make models directory
# os.chdir(EXP_DIR)
if(not os.path.exists(os.path.join(EXP_DIR,'models'))):
print('Making models directory')
os.mkdir(os.path.join(EXP_DIR,'models'))
save_format = os.path.join(EXP_DIR,'out_{}')
if(args.phase==0):
restore = None
solver = caffe.SGDSolver(os.path.join(EXP_DIR,'solver0.prototxt'))
elif(args.phase==1):
restore = os.path.join(EXP_DIR,'models','fcn_iter_50000.solverstate')
solver = caffe.SGDSolver(os.path.join(EXP_DIR,'solver1.prototxt'))
elif(args.phase==2):
restore = os.path.join(EXP_DIR,'models','fcn_iter_100000.solverstate')
solver = caffe.SGDSolver(os.path.join(EXP_DIR,'solver2.prototxt'))
# resume = False
if restore is not None:
solver.restore(restore)
# elif resume:
# solver.net.copy_from(weights)
else:
solver.net.copy_from(weights) # initialize with weights
# add bilinear upsampling weights
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
net = solver.net
tnet = solver.test_nets[0]
solver.test_nets[0].share_with(solver.net)
LAYER_SCORE = 'score'
LAYER_LOSS = 'loss'
# warnings.filterwarnings("ignore")
# scoring
val = np.loadtxt(os.path.join(EXP_DIR,'./segvalid11.txt'), dtype=str)
for aa in range(50000):
# if(np.mod(aa,100)==0):
# print 'Running: %i'%aa
if(np.mod(aa,1000)==0):
print 'Evaluating: %i'%aa
score.seg_tests(solver, save_format, val, layer=LAYER_SCORE)
solver.step(1)
|
colorization-master
|
feature_learning_tests/segmentation/solve.py
|
from __future__ import division
import caffe
import numpy as np
def transplant(new_net, net):
for p in net.params:
if p not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if net.params[p][i].data.shape != new_net.params[p][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p][i].data.shape
else:
print 'copying', p, i
new_net.params[p][i].data.flat = net.params[p][i].data.flat
def expand_score(new_net, new_layer, net, layer):
old_cl = net.params[layer][0].num
new_net.params[new_layer][0].data[:old_cl][...] = net.params[layer][0].data
new_net.params[new_layer][1].data[0,0,0,:old_cl][...] = net.params[layer][1].data
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
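# Worked example (illustrative): upsample_filt(4) gives factor=2, center=1.5,
# i.e. the outer product of [0.25, 0.75, 0.75, 0.25] with itself:
#   [[0.0625, 0.1875, 0.1875, 0.0625],
#    [0.1875, 0.5625, 0.5625, 0.1875],
#    [0.1875, 0.5625, 0.5625, 0.1875],
#    [0.0625, 0.1875, 0.1875, 0.0625]]
# which is the standard 2x bilinear interpolation kernel.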
def interp(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
            raise ValueError('input + output channels need to be the same or |output| == 1')
if h != w:
            raise ValueError('filters need to be square')
filt = upsample_filt(h)
net.params[l][0].data[range(m), range(k), :, :] = filt
def upsample_filt2(size1,size2):
size = np.maximum(size1,size2)
factor = (size + 1) // 2
if size1 % 2 == 1:
center1 = factor - 1
else:
center1 = factor - 0.5
if size2 % 2 == 1:
center2 = factor - 1
else:
center2 = factor - 0.5
og = np.ogrid[:size1, :size2]
return (1 - abs(og[0] - center1) / factor) * \
(1 - abs(og[1] - center2) / factor)
def interp2(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
            raise ValueError('input + output channels need to be the same or |output| == 1')
filt = upsample_filt2(h,w)
net.params[l][0].data[range(m), range(k), :, :] = filt
|
colorization-master
|
feature_learning_tests/segmentation/caffe/surgery.py
|
from __future__ import division
import caffe
import numpy as np
import os
import sys
from datetime import datetime
from PIL import Image
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)
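# Worked example (illustrative): with n=2 classes, ground truth a=[0, 1, 1] and
# predictions b=[0, 1, 0], n * a[k] + b[k] = [0, 3, 2]; bincount over 4 bins is
# [1, 0, 1, 1], so the reshaped confusion matrix is
#   [[1, 0],
#    [1, 1]]
# with rows indexing ground truth and columns indexing predictions.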
def compute_hist(net, save_dir, dataset, layer='score', gt='label'):
n_cl = net.blobs[layer].channels
if save_dir:
os.mkdir(save_dir)
hist = np.zeros((n_cl, n_cl))
loss = 0
for idx in dataset:
net.forward()
hist += fast_hist(net.blobs[gt].data[0, 0].flatten(),
net.blobs[layer].data[0].argmax(0).flatten(),
n_cl)
if save_dir:
im = Image.fromarray(net.blobs[layer].data[0].argmax(0).astype(np.uint8), mode='P')
im.save(os.path.join(save_dir, idx + '.png'))
# compute the loss as well
loss += net.blobs['loss'].data.flat[0]
return hist, loss / len(dataset)
def seg_tests(solver, save_format, dataset, layer='score', gt='label'):
print '>>>', datetime.now(), 'Begin seg tests'
solver.test_nets[0].share_with(solver.net)
do_seg_tests(solver.test_nets[0], solver.iter, save_format, dataset, layer, gt)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
n_cl = net.blobs[layer].channels
if save_format:
save_format = save_format.format(iter)
hist, loss = compute_hist(net, save_format, dataset, layer, gt)
# mean loss
print '>>>', datetime.now(), 'Iteration', iter, 'loss', loss
# overall accuracy
acc = np.diag(hist).sum() / hist.sum()
print '>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc
# per-class accuracy
acc = np.diag(hist) / hist.sum(1)
print '>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc)
# per-class IU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print '>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu)
freq = hist.sum(1) / hist.sum()
print '>>>', datetime.now(), 'Iteration', iter, 'fwavacc', \
(freq[freq > 0] * iu[freq > 0]).sum()
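# Reference for the metrics above, using the toy hist [[1, 0], [1, 1]] from the
# fast_hist example (illustrative numbers): overall accuracy = trace/sum = 2/3;
# per-class accuracy = diag/row sums = [1, 0.5], so mean accuracy = 0.75;
# per-class IU = diag/(row + col - diag) = [0.5, 0.5], so mean IU = 0.5;
# fwavacc weights each class IU by its ground-truth frequency [1/3, 2/3],
# also giving 0.5 here.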
|
colorization-master
|
feature_learning_tests/segmentation/caffe/score.py
|
import caffe
import os
import string
import numpy as np
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser(description='Convert conv layers into FC layers')
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--prototxt_in',dest='prototxt_in',help='prototxt with conv layers', type=str, default='')
parser.add_argument('--prototxt_out',dest='prototxt_out',help='prototxt with fc layers', type=str, default='')
parser.add_argument('--caffemodel_in',dest='caffemodel_in',help='caffemodel with conv layers', type=str, default='')
parser.add_argument('--caffemodel_out',dest='caffemodel_out',help='caffemodel with fc layers, to be saved', type=str, default='')
parser.add_argument('--dummymodel',dest='dummymodel',help='blank caffemodel',type=str,default='./models/dummy.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
gpu_id = args.gpu
PROTOTXT1_PATH = args.prototxt_in
PROTOTXT2_PATH = args.prototxt_out # no batch norm
MODEL_PATH = args.caffemodel_in
DUMMYMODEL_PATH = args.dummymodel
MODEL2_PATH = args.caffemodel_out # to be saved off
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net1 = caffe.Net(PROTOTXT1_PATH, MODEL_PATH, caffe.TEST)
net2 = caffe.Net(PROTOTXT2_PATH, DUMMYMODEL_PATH, caffe.TEST)
# commenting out since packages don't exist in the repo
# import rz_fcns as rz
# rz.caffe_param_shapes(net1,to_print=True)
# rz.caffe_param_shapes(net2,to_print=True)
# rz.caffe_shapes(net2,to_print=True)
# CONV_INDS = np.where(np.array([layer.type for layer in net1.layers])=='Convolution')[0]
print net1.params.keys()
print net2.params.keys()
for (ll,layer) in enumerate(net2.params.keys()):
P = len(net2.params[layer]) # number of blobs
if(P>0):
for pp in range(P):
ndim1 = net1.params[layer][pp].data.ndim
ndim2 = net2.params[layer][pp].data.ndim
print('Copying layer %s, param blob %i (%i-dim => %i-dim)'%(layer,pp,ndim1,ndim2))
if(ndim1==ndim2):
print(' Same dimensionality...')
net2.params[layer][pp].data[...] = net1.params[layer][pp].data[...]
else:
print(' Different dimensionality...')
net2.params[layer][pp].data[...] = net1.params[layer][pp].data[...].reshape(net2.params[layer][pp].data[...].shape)
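    # Classic case this loop handles (illustrative): an AlexNet-style fc6 blob of
    # shape (4096, 9216) and its convolutional counterpart of shape
    # (4096, 256, 6, 6) hold the same 4096*9216 = 37,748,736 values, so a plain
    # reshape transfers the weights between the two layouts.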
net2.save(MODEL2_PATH)
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
print 'Saved model into: %s'%MODEL2_PATH
|
colorization-master
|
resources/conv_into_fc.py
|
# **************************************
# ***** Richard Zhang / 2016.08.06 *****
# **************************************
import numpy as np
import warnings
import os
import sklearn.neighbors as nn
import caffe
from skimage import color
# ************************
# ***** CAFFE LAYERS *****
# ************************
class BGR2LabLayer(caffe.Layer):
''' Layer converts BGR to Lab
INPUTS
bottom[0].data Nx3xXxY
OUTPUTS
top[0].data Nx3xXxY
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if(len(bottom)!=1):
raise Exception("Layer should a single input")
if(bottom[0].data.shape[1]!=3):
raise Exception("Input should be 3-channel BGR image")
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,3,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = color.rgb2lab(bottom[0].data[:,::-1,:,:].astype('uint8').transpose((2,3,0,1))).transpose((2,3,0,1))
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
# bottom[i].diff[...] = np.zeros_like(bottom[i].data)
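# Shape walkthrough for the forward pass above (illustrative): bottom[0].data is
# Nx3xXxY in BGR order; [:,::-1,:,:] flips the channels to RGB, the first
# transpose (2,3,0,1) rearranges to XxYxNx3 so skimage's rgb2lab sees channels
# last, and the second transpose restores Nx3xXxY with L,a,b channels.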
class NNEncLayer(caffe.Layer):
''' Layer which encodes ab map into Q colors
INPUTS
bottom[0] Nx2xXxY
OUTPUTS
top[0].data NxQ
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if len(bottom) == 0:
raise Exception("Layer should have inputs")
self.NN = 10
self.sigma = 5.
self.ENC_DIR = './resources/'
self.nnenc = NNEncode(self.NN,self.sigma,km_filepath=os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
self.Q = self.nnenc.K
def reshape(self, bottom, top):
top[0].reshape(self.N,self.Q,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = self.nnenc.encode_points_mtx_nd(bottom[0].data[...],axis=1)
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class PriorBoostLayer(caffe.Layer):
''' Layer boosts ab values based on their rarity
INPUTS
bottom[0] NxQxXxY
OUTPUTS
top[0].data Nx1xXxY
'''
def setup(self,bottom, top):
if len(bottom) == 0:
raise Exception("Layer should have inputs")
self.ENC_DIR = './resources/'
self.gamma = .5
self.alpha = 1.
self.pc = PriorFactor(self.alpha,gamma=self.gamma,priorFile=os.path.join(self.ENC_DIR,'prior_probs.npy'))
self.N = bottom[0].data.shape[0]
self.Q = bottom[0].data.shape[1]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,1,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = self.pc.forward(bottom[0].data[...],axis=1)
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class NonGrayMaskLayer(caffe.Layer):
    ''' Layer outputs a mask based on whether the image is grayscale or not
INPUTS
bottom[0] Nx2xXxY ab values
OUTPUTS
top[0].data Nx1xXxY 1 if image is NOT grayscale
0 if image is grayscale
'''
def setup(self,bottom, top):
if len(bottom) == 0:
raise Exception("Layer should have inputs")
self.thresh = 5 # threshold on ab value
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,1,self.X,self.Y)
def forward(self, bottom, top):
# if an image has any (a,b) value which exceeds threshold, output 1
top[0].data[...] = (np.sum(np.sum(np.sum(np.abs(bottom[0].data) > self.thresh,axis=1),axis=1),axis=1) > 0)[:,na(),na(),na()]
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class ClassRebalanceMultLayer(caffe.Layer):
''' INPUTS
bottom[0] NxMxXxY feature map
bottom[1] Nx1xXxY boost coefficients
OUTPUTS
top[0] NxMxXxY on forward, gets copied from bottom[0]
FUNCTIONALITY
On forward pass, top[0] passes bottom[0]
On backward pass, bottom[0] gets boosted by bottom[1]
through pointwise multiplication (with singleton expansion) '''
def setup(self, bottom, top):
# check input pair
if len(bottom)==0:
raise Exception("Specify inputs")
def reshape(self, bottom, top):
i = 0
if(bottom[i].data.ndim==1):
top[i].reshape(bottom[i].data.shape[0])
elif(bottom[i].data.ndim==2):
top[i].reshape(bottom[i].data.shape[0], bottom[i].data.shape[1])
elif(bottom[i].data.ndim==4):
top[i].reshape(bottom[i].data.shape[0], bottom[i].data.shape[1], bottom[i].data.shape[2], bottom[i].data.shape[3])
def forward(self, bottom, top):
        # forward pass simply copies the input through to the output
top[0].data[...] = bottom[0].data[...]
# top[0].data[...] = bottom[0].data[...]*bottom[1].data[...] # this was bad, would mess up the gradients going up
def backward(self, top, propagate_down, bottom):
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[0].diff[...] = top[0].diff[...]*bottom[1].data[...]
# print 'Back-propagating class rebalance, %i'%i
class LossMeterLayer(caffe.Layer):
''' Layer acts as a "meter" to track loss values '''
def setup(self,bottom,top):
if(len(bottom)==0):
raise Exception("Layer needs inputs")
self.param_str_split = self.param_str.split(' ')
self.LOSS_DIR = self.param_str_split[0]
self.P = int(self.param_str_split[1])
self.H = int(self.param_str_split[2])
if(len(self.param_str_split)==4):
self.prefix = self.param_str_split[3]
else:
self.prefix = ''
self.cnt = 0 # loss track counter
# self.P = 1 # interval to print losses
self.h = 0 # index into history
self.L = len(bottom)
self.losses = np.zeros((self.L,self.H))
self.ITER_PATH = os.path.join(self.LOSS_DIR,'iter.npy')
self.LOG_PATH = os.path.join(self.LOSS_DIR,'loss_log')
if(not os.path.exists(self.LOSS_DIR)):
os.mkdir(self.LOSS_DIR)
if(os.path.exists(self.ITER_PATH)):
self.iter = np.load(self.ITER_PATH)
else:
self.iter = 0 # iteration counter
print 'Initial iteration: %i'%(self.iter+1)
def reshape(self,bottom,top):
        pass
def forward(self,bottom,top):
for ll in range(self.L):
self.losses[ll,self.h] = bottom[ll].data[...]
if(np.mod(self.cnt,self.P)==self.P-1): # print
if(self.cnt >= self.H-1):
tmp_str = 'NumAvg %i, Loss '%(self.H)
for ll in range(self.L):
tmp_str += '%.3f, '%np.mean(self.losses[ll,:])
else:
tmp_str = 'NumAvg %i, Loss '%(self.h)
for ll in range(self.L):
tmp_str += '%.3f, '%np.mean(self.losses[ll,:self.cnt+1])
print_str = '%s: Iter %i, %s'%(self.prefix,self.iter+1,tmp_str)
print print_str
self.f = open(self.LOG_PATH,'a')
self.f.write(print_str)
self.f.write('\n')
self.f.close()
np.save(self.ITER_PATH,self.iter)
self.h = np.mod(self.h+1,self.H) # roll through history
self.cnt = self.cnt+1
self.iter = self.iter+1
def backward(self,top,propagate_down,bottom):
for ll in range(self.L):
continue
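# Example param_str for the layer above (illustrative values): 'losses/ 100 1000 train'
# logs to ./losses/, prints every P=100 iterations, averages over an H=1000-entry
# history, and prefixes messages with 'train'.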
# ***************************
# ***** SUPPORT CLASSES *****
# ***************************
class PriorFactor():
''' Class handles prior factor '''
def __init__(self,alpha,gamma=0,verbose=True,priorFile=''):
# INPUTS
        # alpha        scalar    prior correction factor, 0 to ignore prior, 1 to divide by prior, alpha to divide by prior**alpha
        # gamma        scalar    percentage (0 to 1) to mix in uniform prior with empirical prior
# priorFile file file which contains prior probabilities across classes
# settings
self.alpha = alpha
self.gamma = gamma
self.verbose = verbose
# empirical prior probability
self.prior_probs = np.load(priorFile)
# define uniform probability
self.uni_probs = np.zeros_like(self.prior_probs)
self.uni_probs[self.prior_probs!=0] = 1.
self.uni_probs = self.uni_probs/np.sum(self.uni_probs)
# convex combination of empirical prior and uniform distribution
self.prior_mix = (1-self.gamma)*self.prior_probs + self.gamma*self.uni_probs
# set prior factor
self.prior_factor = self.prior_mix**-self.alpha
self.prior_factor = self.prior_factor/np.sum(self.prior_probs*self.prior_factor) # re-normalize
# implied empirical prior
self.implied_prior = self.prior_probs*self.prior_factor
self.implied_prior = self.implied_prior/np.sum(self.implied_prior) # re-normalize
if(self.verbose):
self.print_correction_stats()
def print_correction_stats(self):
print 'Prior factor correction:'
print ' (alpha,gamma) = (%.2f, %.2f)'%(self.alpha,self.gamma)
print ' (min,max,mean,med,exp) = (%.2f, %.2f, %.2f, %.2f, %.2f)'%(np.min(self.prior_factor),np.max(self.prior_factor),np.mean(self.prior_factor),np.median(self.prior_factor),np.sum(self.prior_factor*self.prior_probs))
def forward(self,data_ab_quant,axis=1):
data_ab_maxind = np.argmax(data_ab_quant,axis=axis)
corr_factor = self.prior_factor[data_ab_maxind]
if(axis==0):
return corr_factor[na(),:]
elif(axis==1):
return corr_factor[:,na(),:]
elif(axis==2):
return corr_factor[:,:,na(),:]
elif(axis==3):
return corr_factor[:,:,:,na()]
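# The math above in one line (illustrative): with empirical prior p and uniform
# distribution u over the non-empty bins,
#   prior_factor = ((1 - gamma) * p + gamma * u) ** -alpha,
# renormalized so that E_p[prior_factor] = 1; forward() then looks up the factor
# of each pixel's argmax bin and broadcasts it back along `axis`.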
class NNEncode():
''' Encode points using NN search and Gaussian kernel '''
def __init__(self,NN,sigma,km_filepath='',cc=-1):
if(check_value(cc,-1)):
self.cc = np.load(km_filepath)
else:
self.cc = cc
self.K = self.cc.shape[0]
self.NN = int(NN)
self.sigma = sigma
self.nbrs = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(self.cc)
self.alreadyUsed = False
def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False,sameBlock=True):
pts_flt = flatten_nd_array(pts_nd,axis=axis)
P = pts_flt.shape[0]
if(sameBlock and self.alreadyUsed):
self.pts_enc_flt[...] = 0 # already pre-allocated
else:
self.alreadyUsed = True
self.pts_enc_flt = np.zeros((P,self.K))
self.p_inds = np.arange(0,P,dtype='int')[:,na()]
P = pts_flt.shape[0]
(dists,inds) = self.nbrs.kneighbors(pts_flt)
wts = np.exp(-dists**2/(2*self.sigma**2))
wts = wts/np.sum(wts,axis=1)[:,na()]
self.pts_enc_flt[self.p_inds,inds] = wts
pts_enc_nd = unflatten_2d_array(self.pts_enc_flt,pts_nd,axis=axis)
return pts_enc_nd
def decode_points_mtx_nd(self,pts_enc_nd,axis=1):
pts_enc_flt = flatten_nd_array(pts_enc_nd,axis=axis)
pts_dec_flt = np.dot(pts_enc_flt,self.cc)
pts_dec_nd = unflatten_2d_array(pts_dec_flt,pts_enc_nd,axis=axis)
return pts_dec_nd
def decode_1hot_mtx_nd(self,pts_enc_nd,axis=1,returnEncode=False):
pts_1hot_nd = nd_argmax_1hot(pts_enc_nd,axis=axis)
pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd,axis=axis)
if(returnEncode):
return (pts_dec_nd,pts_1hot_nd)
else:
return pts_dec_nd
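# Usage sketch (illustrative; assumes pts_in_hull.npy holds the Q=313 ab cluster
# centers used by the colorization model): encoding an Nx2xXxY ab map returns an
# NxQxXxY soft assignment where each pixel has NN=10 nonzero Gaussian-weighted
# entries summing to 1, and decoding maps it back to ab space:
#   nnenc = NNEncode(10, 5., km_filepath='./resources/pts_in_hull.npy')
#   enc = nnenc.encode_points_mtx_nd(data_ab, axis=1)  # NxQxXxY
#   rec = nnenc.decode_points_mtx_nd(enc, axis=1)      # Nx2xXxY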
# *****************************
# ***** Utility functions *****
# *****************************
def check_value(inds, val):
''' Check to see if an array is a single element equaling a particular value
for pre-processing inputs in a function '''
if(np.array(inds).size==1):
if(inds==val):
return True
return False
def na(): # shorthand for new axis
return np.newaxis
def flatten_nd_array(pts_nd,axis=1):
''' Flatten an nd array into a 2d array with a certain axis
INPUTS
pts_nd N0xN1x...xNd array
axis integer
OUTPUTS
pts_flt prod(N \ N_axis) x N_axis array '''
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
pts_flt = pts_nd.transpose((axorder))
pts_flt = pts_flt.reshape(NPTS,SHP[axis])
return pts_flt
def unflatten_2d_array(pts_flt,pts_nd,axis=1,squeeze=False):
''' Unflatten a 2d array with a certain axis
INPUTS
pts_flt prod(N \ N_axis) x M array
pts_nd N0xN1x...xNd array
axis integer
squeeze bool if true, M=1, squeeze it out
OUTPUTS
pts_out N0xN1x...xNd array '''
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
if(squeeze):
axorder = nax
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
else:
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
NEW_SHP.append(M)
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
return pts_out
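# Round-trip sketch (illustrative): for pts_nd of shape (N, C, X, Y) and axis=1,
# flatten_nd_array returns an (N*X*Y, C) matrix; feeding an (N*X*Y, M) result back
# through unflatten_2d_array(pts_flt, pts_nd, axis=1) yields (N, M, X, Y), so
#   unflatten_2d_array(flatten_nd_array(p, axis=1), p, axis=1)
# reproduces p exactly.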
|
colorization-master
|
resources/caffe_traininglayers.py
|
# **************************************
# ***** Richard Zhang / 2016.06.04 *****
# **************************************
# Absorb batch norm into convolution layers
# This script only supports the conv-batchnorm configuration
# Currently unsupported:
# - deconv layers
# - fc layers
# - batchnorm before linear layer
import caffe
import os
import string
import numpy as np
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser(description='BatchNorm absorption')
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--prototxt_in',dest='prototxt_in',help='prototxt with batchnorm', type=str, default='')
parser.add_argument('--prototxt_out',dest='prototxt_out',help='prototxt without batchnorm', type=str, default='')
parser.add_argument('--caffemodel_in',dest='caffemodel_in',help='caffemodel with batchnorm', type=str, default='')
parser.add_argument('--caffemodel_out',dest='caffemodel_out',help='caffemodel without batchnorm, to be saved', type=str, default='')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
gpu_id = args.gpu
PROTOTXT1_PATH = args.prototxt_in
PROTOTXT2_PATH = args.prototxt_out # no batch norm
MODEL_PATH = args.caffemodel_in
MODEL2_PATH = args.caffemodel_out # to be saved off
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net1 = caffe.Net(PROTOTXT1_PATH, MODEL_PATH, caffe.TEST)
net2 = caffe.Net(PROTOTXT2_PATH, MODEL_PATH, caffe.TEST)
# call forward on net1, net2
net1.forward()
net2.forward()
# identify batch norms and paired linear layers
BN_INDS = np.where(np.array([layer.type for layer in net1.layers])=='BatchNorm')[0]
BN_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # batch norm layer names
LIN_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # linear layer names
PRE_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # blob right before
POST_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # blob right after
PRE_POST = -1+np.zeros(BN_INDS.shape) # 0 - pre, 1 - post
CONV_DECONV = -1+np.zeros(BN_INDS.shape) # 0 - conv, 1 - deconv
# identify layers which are paired with batch norms (only supporting convolution)
for (ll,bn_ind) in enumerate(BN_INDS):
BN_NAMES[ll] = net1._layer_names[bn_ind]
if(net1.layers[bn_ind-1].type=='Convolution' or net1.layers[bn_ind-1].type=='Deconvolution'):
PRE_POST[ll] = 0
LIN_NAMES[ll] = net1._layer_names[bn_ind-1]
POST_NAMES[ll] = net1._layer_names[bn_ind+1]
if(net1.layers[bn_ind-1].type=='Convolution'):
CONV_DECONV[ll] = 0
elif(net1.layers[bn_ind-1].type=='Deconvolution'):
CONV_DECONV[ll] = 1
elif(net1.layers[bn_ind+1].type=='Convolution' or net1.layers[bn_ind+1].type=='Deconvolution'):
PRE_POST[ll] = 1
LIN_NAMES[ll] = net1._layer_names[bn_ind+1]
POST_NAMES[ll] = net1._layer_names[bn_ind+3]
if(net1.layers[bn_ind+1].type=='Convolution'):
CONV_DECONV[ll] = 0
elif(net1.layers[bn_ind+1].type=='Deconvolution'):
CONV_DECONV[ll] = 1
else:
PRE_POST[ll] = -1
PRE_NAMES[ll] = net1.bottom_names[BN_NAMES[ll]][0]
LIN_INDS = BN_INDS+PRE_POST # linear layer indices
ALL_SLOPES = {}
# compute batch norm parameters on net1 in first layer
# absorb into weights in first layer
for ll in range(BN_INDS.size):
bn_ind = BN_INDS[ll]
BN_NAME = BN_NAMES[ll]
PRE_NAME = PRE_NAMES[ll]
POST_NAME = POST_NAMES[ll]
LIN_NAME = LIN_NAMES[ll]
print 'LAYERS %s, %s'%(PRE_NAME,BN_NAME)
# print net1.blobs[BN_NAME].data.shape
# print net1.blobs[PRE_NAME].data.shape
C = net1.blobs[BN_NAME].data.shape[1]
in_blob = net1.blobs[PRE_NAME].data
bn_blob = net1.blobs[BN_NAME].data
scale_factor = 1./net1.params[BN_NAME][2].data[...]
mean = scale_factor * net1.params[BN_NAME][0].data[...]
scale = scale_factor * net1.params[BN_NAME][1].data[...]
slopes = np.sqrt(1./scale)
offs = -mean*slopes
print ' Computing error on data...'
bn_blob_rep = in_blob*slopes[np.newaxis,:,np.newaxis,np.newaxis]+offs[np.newaxis,:,np.newaxis,np.newaxis]
# Visually verify that factors are correct
print ' Maximum error: %.3e'%np.max(np.abs(bn_blob_rep[bn_blob>0] - bn_blob[bn_blob>0]))
print ' RMS error: %.3e'%np.linalg.norm(bn_blob_rep[bn_blob>0] - bn_blob[bn_blob>0])
print ' RMS signal: %.3e'%np.linalg.norm(bn_blob_rep[bn_blob>0])
print ' Absorbing slope and offset...'
# absorb slope and offset into appropriate parameter
if(PRE_POST[ll]==0): # linear layer is before
if(CONV_DECONV[ll]==0): # convolution
net2.params[LIN_NAME][0].data[...] = net1.params[LIN_NAME][0].data[...]*slopes[:,np.newaxis,np.newaxis,np.newaxis]
net2.params[LIN_NAME][1].data[...] = offs + (slopes*net1.params[LIN_NAME][1].data)
elif(CONV_DECONV[ll]==1): # deconvolution
print '*** Deconvolution not implemented ***'
elif(PRE_POST[ll]==1): # batchnorm is BEFORE linear layer
print '*** Not implemented ***'
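    # Algebra being absorbed above (illustrative summary): Caffe's BatchNorm stores
    # a running mean (blob 0), running variance (blob 1) and a scale factor
    # (blob 2), so BN(x) = (x - mean) / sqrt(var) = slopes * x + offs with
    # slopes = 1 / sqrt(var) and offs = -mean * slopes. For a preceding convolution
    # y = W * x + b, the replacement W' = slopes * W and b' = slopes * b + offs
    # makes the convolution alone reproduce BN(conv(x)).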
net2.save(MODEL2_PATH)
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
print 'Saved model into: %s'%MODEL2_PATH
|
colorization-master
|
resources/batch_norm_absorb.py
|
from __future__ import print_function, division
INPUT_LAYERS = ['Data', 'ImageData']
# Layers that only support elwise
ELWISE_LAYERS = ['Deconvolution']
# Layers that support parameters
PARAMETER_LAYERS = ['Convolution', 'InnerProduct']+ELWISE_LAYERS
# All supported layers
SUPPORTED_LAYERS = ['ReLU', 'Sigmoid', 'LRN', 'Pooling', 'Eltwise'] + PARAMETER_LAYERS + INPUT_LAYERS
STRIP_LAYER = ['Softmax', 'SoftmaxWithLoss', 'SigmoidCrossEntropyLoss']
# Use 'Dropout' at your own risk
# Unless Jon merges #2865, 'Split' cannot be supported
UNSUPPORTED_LAYERS = ['Split', 'BatchNorm', 'Reshape', 'Scale']
def forward(net, i, NIT, data, output_names):
n = net._layer_names[i]
# Create the top data if needed
output = {t: [None]*NIT for t in output_names}
for it in range(NIT):
for b in data:
net.blobs[b].data[...] = data[b][it]
net._forward(i, i)
for t in output_names:
output[t][it] = 1*net.blobs[t].data
return output
def flattenData(data):
import numpy as np
return np.concatenate([d.swapaxes(0, 1).reshape((d.shape[1],-1)) for d in data], axis=1).T
def gatherInputData(net, layer_id, bottom_data, top_name, fast=False, max_data=None):
    # This function gathers all input data.
    # In order to not replicate all the internal functionality of convolutions (e.g. padding ...)
    # we gather the data in the output space and use random gaussian weights. The output of this
    # function is W and D, where the input data I = D * W^-1 [with some abuse of tensor notation].
    # If we now compute an initialization A for D, we then simply multiply A by W to obtain the
    # proper initialization in the input space.
import numpy as np
l = net.layers[layer_id]
NIT = len(list(bottom_data.values())[0])
# How many times do we need to over-sample to get a full basis (out of random projections)
OS = int(np.ceil( np.prod(l.blobs[0].data.shape[1:]) / l.blobs[0].data.shape[0] ))
if fast: OS = 1
    # If we are over-sampling we might run out of memory at some point, especially for filters higher up.
    # To avoid any issues we never return more than max_data number of elements.
subsample = None
# Note this could cause some memory issues in the FC layers
W, D = [], []
for i in range(OS):
d = l.blobs[0].data
d[...] = np.random.normal(0, 1, d.shape)
W.append(1*d)
# Collect the data and flatten out the convs
data = np.concatenate([i.swapaxes(0, 1).reshape((i.shape[1],-1)).T for i in forward(net, layer_id, NIT, bottom_data, [top_name])[top_name]], axis=0)
# Do we need to subsample the data to save memory?
if subsample is None and max_data is not None:
# Randomly select n data representative samples
N = int(max_data / (data.shape[1]*OS))
subsample = np.arange(data.shape[0])
if N < data.shape[0]:
np.random.shuffle(subsample)
subsample = subsample[:N]
if subsample is not None:
data = data[subsample]
D.append(data)
# In order to handle any sort of groups we want to have the samples packed in the following order:
# a1 a2 a3 a4 b1 b2 b3 b4 c1 ... (where the original data was a b c and OS=4)
W, D = np.concatenate([w[:,None] for w in W], axis=1), np.concatenate([d[:,:,None] for d in D], axis=2)
return W.reshape((-1,)+W.shape[2:]), D.reshape((D.shape[0], -1)+D.shape[3:])
def initializeWeight(D, type, N_OUT):
# Here we first whiten the data (PCA or ZCA) and then optionally run k-means
# on this whitened data.
import numpy as np
if D.shape[0] < N_OUT:
print( " Not enough data for '%s' estimation, using elwise"%type )
return np.random.normal(0, 1, (N_OUT,D.shape[1]))
D = D - np.mean(D, axis=0, keepdims=True)
# PCA, ZCA, K-Means
assert type in ['pca', 'zca', 'kmeans', 'rand'], "Unknown initialization type '%s'"%type
C = D.T.dot(D)
s, V = np.linalg.eigh(C)
# order the eigenvalues
ids = np.argsort(s)[-N_OUT:]
s = s[ids]
V = V[:,ids]
s[s<1e-6] = 0
s[s>=1e-6] = 1. / np.sqrt(s[s>=1e-6]+1e-3)
S = np.diag(s)
if type == 'pca':
return S.dot(V.T)
elif type == 'zca':
return V.dot(S.dot(V.T))
# Whiten the data
wD = D.dot(V.dot(S))
wD /= np.linalg.norm(wD, axis=1)[:,None]
if type == 'kmeans':
# Run k-means
from sklearn.cluster import MiniBatchKMeans
km = MiniBatchKMeans(n_clusters = wD.shape[1], batch_size=10*wD.shape[1]).fit(wD).cluster_centers_
elif type == 'rand':
km = wD[np.random.choice(wD.shape[0], wD.shape[1], False)]
C = km.dot(S.dot(V.T))
C /= np.std(D.dot(C.T), axis=0, keepdims=True).T
return C
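# Whitening property being used (illustrative): with C = D^T D and the
# eigendecomposition C = V diag(s) V^T, the returned W satisfies
# Cov(D W^T) ~= I for 'pca' (W = S V^T) and 'zca' (W = V S V^T, the symmetric
# whitener); 'kmeans' and 'rand' pick row directions in the whitened space and
# rescale them so the projected data has unit standard deviation per output.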
def initializeLayer(net, layer_id, bottom_data, top_name, bias=0, type='elwise', max_data=None):
import numpy as np
l = net.layers[layer_id]
NIT = len(list(bottom_data.values())[0])
if type!='elwise' and l.type in ELWISE_LAYERS:
print( "Only 'elwise' supported for layer '%s'. Falling back."%net._layer_names[layer_id] )
type = 'elwise'
for p in l.blobs: p.data[...] = 0
fast = 'fast_' in type
if fast:
type = type.replace('fast_', '')
# Initialize the weights [k-means, ...]
if type == 'elwise':
d = l.blobs[0].data
d[...] = np.random.normal(0, 1, d.shape)
else: # Use the input data
# Are there any groups?
G = 1
bottom_names = net.bottom_names[net._layer_names[layer_id]]
if len(bottom_names) == 1:
N1 = net.blobs[bottom_names[0]].shape[1]
N2 = l.blobs[0].shape[1]
G = N1 // N2
# Gather the input data
print( " Gathering input data")
T, D = gatherInputData(net, layer_id, bottom_data, top_name, fast, max_data=max_data)
# Figure out the output dimensionality of d
d = l.blobs[0].data
print( " Initializing weights" )
# Loop over groups
for g in range(G):
dg, Dg = d[g*(d.shape[0]//G):(g+1)*(d.shape[0]//G)], D[:,g*(D.shape[1]//G):(g+1)*(D.shape[1]//G):]
Tg = T[g*(T.shape[0]//G):(g+1)*(T.shape[0]//G)]
# Compute the weights
W = initializeWeight(Dg, type, N_OUT=dg.shape[0])
# Multiply the weights by the random basis
# NOTE: This matrix multiplication is a bit large, if it's too slow,
# reduce the oversampling in gatherInputData
dg[...] = np.dot(W, Tg.reshape((Tg.shape[0],-1))).reshape(dg.shape)
# Scale the mean and initialize the bias
print( " Scale the mean and initialize the bias" )
top_data = forward(net, layer_id, NIT, bottom_data, [top_name])[top_name]
flat_data = flattenData(top_data)
mu = flat_data.mean(axis=0)
std = flat_data.std(axis=0)
for ii in range(np.minimum(mu.size,5)):
print(" mu+/-std: (%.3f,%.3f)"%(mu[ii],std[ii]))
if l.type == 'Deconvolution':
l.blobs[0].data[...] /= std.reshape((1,-1,)+(1,)*(len(l.blobs[0].data.shape)-2))
else:
l.blobs[0].data[...] /= std.reshape((-1,)+(1,)*(len(l.blobs[0].data.shape)-1))
for b in l.blobs[1:]:
b.data[...] = -mu / std + bias
def magicInitialize(net, bias=0, NIT=10, type='elwise', max_data=None):
import numpy as np
# When was a blob last used
last_used = {}
# Make sure all layers are supported, and compute the last time each blob is used
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if l.type in UNSUPPORTED_LAYERS:
print( "WARNING: Layer type '%s' not supported! Things might go very wrong..."%l.type )
elif l.type not in SUPPORTED_LAYERS+STRIP_LAYER:
print( "Unknown layer type '%s'. double check if it is supported"%l.type )
for b in net.bottom_names[n]:
last_used[b] = i
active_data = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
# Initialize the layer
if (len(l.blobs) > 0) and (l.type not in UNSUPPORTED_LAYERS):
# if len(l.blobs) > 0:
if np.sum(np.abs(l.blobs[0].data)) <= 1e-10:
print( "Initializing layer '%s'"%n )
assert l.type in PARAMETER_LAYERS, "Unsupported parameter layer"
assert len(net.top_names[n]) == 1, "Exactly one output supported"
# Fill the parameters
initializeLayer(net, i, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n][0], bias, type, max_data=max_data)
else:
print( "Skipping layer '%s'"%n )
else:
print( "Skipping layer '%s'"%n )
# TODO: Estimate and rescale the values [TODO: Record and undo this scaling above]
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
active_data.update(new_data)
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
def load(net, blobs):
for l,n in zip(net.layers, net._layer_names):
if n in blobs:
for b, sb in zip(l.blobs, blobs[n]):
b.data[...] = sb
def save(net):
import numpy as np
r = {}
for l,n in zip(net.layers, net._layer_names):
if len(l.blobs) > 0:
r[n] = [np.copy(b.data) for b in l.blobs]
return r
def estimateHomogenety(net):
# Estimate if a certain layer is homogeneous and if yes return the degree k
# by which the output is scaled (if input is scaled by alpha then the output
# is scaled by alpha^k). Return None if the layer is not homogeneous.
import numpy as np
print("Estimating homogenety")
# When was a blob last used
last_used = {}
# Make sure all layers are supported, and compute the range each blob is used in
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
for b in net.bottom_names[n]:
last_used[b] = i
active_data = {}
homogenety = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
# Run the network forward
new_data1 = forward(net, i, 1, {b: [1*d for d in active_data[b]] for b in net.bottom_names[n]}, net.top_names[n])
new_data2 = forward(net, i, 1, {b: [2*d for d in active_data[b]] for b in net.bottom_names[n]}, net.top_names[n])
active_data.update(new_data1)
if len(new_data1) == 1:
m = list(new_data1.keys())[0]
d1, d2 = flattenData(new_data1[m]), flattenData(new_data2[m])
f = np.mean(np.abs(d1), axis=0) / np.mean(np.abs(d2), axis=0)
if 1e-3*np.mean(f) < np.std(f):
# Not homogeneous
homogenety[n] = None
else:
# Compute the degree of the homogeneous transformation
homogenety[n] = (np.log(np.mean(np.abs(d2))) - np.log(np.mean(np.abs(d1)))) / np.log(2)
else:
homogenety[n] = None
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
return homogenety
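# Worked example (illustrative): a Convolution followed by ReLU is homogeneous of
# degree k=1, since feeding 2*x yields 2*relu(conv(x)) (ignoring the bias), so
# (log(mean|d2|) - log(mean|d1|)) / log(2) = 1; a Sigmoid layer breaks this
# proportionality and should come back as None.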
def calibrateGradientRatio(net, NIT=1):
print('Calibrate gradient ratio')
import numpy as np
# When was a blob last used
last_used = {}
# Find the last layer to use
last_layer = 0
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
print('\tLayer %s'%n)
if l.type not in STRIP_LAYER:
last_layer = i
for b in net.bottom_names[n]:
last_used[b] = i
# Figure out which tops are involved
last_tops = net.top_names[net._layer_names[last_layer]]
for t in last_tops:
last_used[t] = len(net.layers)
# Call forward and store the data of all data layers
print('Call forward and store the data of all data layers')
active_data, input_data, bottom_scale = {}, {}, {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
print('\tLayer %s'%n)
if i > last_layer: break
# Compute the input scale for parameter layers
if len(l.blobs) > 0:
bottom_scale[n] = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
if l.type in INPUT_LAYERS:
input_data.update(new_data)
active_data.update(new_data)
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
for it in range(10):
# for it in range(1):
print('Iteration %i'%it)
# Reset the diffs
for l in net.layers:
for b in l.blobs:
b.diff[...] = 0
# Set the top diffs
print('Last layer')
print(last_tops)
print(last_layer)
for t in last_tops:
print(t)
net.blobs[t].diff[...] = np.random.normal(0, 1, net.blobs[t].shape)
# Compute all gradients
# print(np.mean(net.blobs[t].diff[...]**2))
# print(np.mean(net.blobs[t].data[...]**2))
net._backward(last_layer, 0)
# # net.backward()
# print(np.mean(net.blobs[t].diff[...]**2))
# print(np.mean(net.blobs[t].data[...]**2))
# print(np.mean(net.blobs['da_conv1'].data[...]**2))
# Compute the gradient ratio
ratio={}
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
print('layer index %i, layer name %s'%(i,n))
if len(l.blobs) > 0:
# if (len(l.blobs) > 0) and (l.type in PARAMETER_LAYERS):
assert l.type in PARAMETER_LAYERS, "Parameter layer '%s' currently not supported"%l.type
b = l.blobs[0]
ratio[n] = np.sqrt(np.mean(b.diff**2) / np.mean(b.data**2))
                print('Ratio = sqrt(diff/data), %.3e=sqrt(%.3e/%.3e)'%(ratio[n],np.mean(b.diff**2),np.mean(b.data**2)))
# print(ratio)
# If all layers are homogeneous, then the target ratio is the geometric mean of all ratios
# (assuming we want the same output)
        # To deal with non-homogeneous layers we scale by output_std in the hope of correcting the
        # estimation over time.
# NOTE: for non feed-forward networks the geometric mean might not be the right scaling factor
target_ratio = np.exp(np.mean(np.log(np.array(list(ratio.values()))))) * (output_std)**(1. / len(ratio))
for val in np.array(list(ratio.values())):
print(val)
# np.exp(np.mean(np.log(np.array(list(ratio.values())))))
# (output_std)**(1. / len(ratio))
# print(len(ratio))
print('Num ratios: %i'%len(ratio))
print('Target ratio: %.0f'%target_ratio)
print('Current ratios (mean/std): %.0f+/-%.0f'%(np.mean(np.array(list(ratio.values()))),np.std(np.array(list(ratio.values())))))
# Terminate if the relative change is less than 1% for all values
log_ratio = np.log( np.array(list(ratio.values())) )
print('Max relative change: %.3f'%np.max(np.abs(log_ratio/np.log(target_ratio)-1)))
if np.all( np.abs(log_ratio/np.log(target_ratio) - 1) < 0.01 ):
break
# Update all the weights and biases
active_data = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if i > last_layer: break
# Use the stored input
if l.type in INPUT_LAYERS:
active_data.update({b: input_data[b] for b in net.top_names[n]})
else:
if len(l.blobs) > 0:
# if (len(l.blobs) > 0) and (l.type in PARAMETER_LAYERS):
# Add the scaling from the bottom to the biases
current_scale = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
adj = current_scale / bottom_scale[n]
for b in list(l.blobs)[1:]:
b.data[...] *= adj
bottom_scale[n] = current_scale
# Scale to obtain the target ratio
scale = np.sqrt(ratio[n] / target_ratio)
for b in l.blobs:
b.data[...] *= scale
active_data.update(forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n]))
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
new_output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
print(np.linalg.norm(active_data[last_tops[0]]))
print(last_tops[0])
print(new_output_std)
if np.abs(np.log(output_std) - np.log(new_output_std)) > 0.25:
# If we diverge by a factor of exp(0.25) = ~1.3, then we should check if the network is really
# homogeneous
print( "WARNING: It looks like one or more layers are not homogeneous! Trying to correct for this..." )
print( " Output std = %f" % new_output_std )
output_std = new_output_std
print('')
def netFromString(s, t=None):
import caffe
from tempfile import NamedTemporaryFile
if t is None: t = caffe.TEST
f = NamedTemporaryFile('w')
f.write(s)
f.flush()
r = caffe.Net(f.name, t)
f.close()
return r
def getFileList(f):
from glob import glob
from os import path
return [f for f in glob(f) if path.isfile(f)]
def main():
from argparse import ArgumentParser
from os import path
import numpy as np
parser = ArgumentParser()
parser.add_argument('prototxt')
parser.add_argument('output_caffemodel')
parser.add_argument('-l', '--load', help='Load a pretrained model and rescale it [bias and type are not supported]')
parser.add_argument('-d', '--data', default=None, help='Image list to use [default prototxt data]')
parser.add_argument('-b', '--bias', type=float, default=0.1, help='Bias')
parser.add_argument('-t', '--type', default='elwise', help='Type: elwise, pca, zca, kmeans, rand (random input patches). Add fast_ to speed up the initialization, but you might lose in precision.')
parser.add_argument('-z', action='store_true', help='Zero all weights and reinitialize')
parser.add_argument('-cs', action='store_true', help='Correct for scaling')
parser.add_argument('-q', action='store_true', help='Quiet execution')
parser.add_argument('-s', type=float, default=1.0, help='Scale the input [only custom data "-d"]')
parser.add_argument('-bs', type=int, default=16, help='Batch size [only custom data "-d"]')
parser.add_argument('-nit', type=int, default=10, help='Number of iterations')
parser.add_argument('--mem-limit', type=int, default=500, help='How much memory should we use for the data buffer (MB)?')
parser.add_argument('--gpu', type=int, default=0, help='What gpu to run it on?')
args = parser.parse_args()
if args.q:
from os import environ
environ['GLOG_minloglevel'] = '2'
import caffe, load
from caffe import NetSpec, layers as L
caffe.set_mode_gpu()
if args.gpu is not None:
caffe.set_device(args.gpu)
if args.data is not None:
model = load.ProtoDesc(args.prototxt)
net = NetSpec()
fl = getFileList(args.data)
if len(fl) == 0:
print("Unknown data type for '%s'"%args.data)
exit(1)
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile('w')
f.write('\n'.join([path.abspath(i)+' 0' for i in fl]))
f.flush()
net.data, net.label = L.ImageData(source=f.name, batch_size=args.bs, new_width=model.input_dim[-1], new_height=model.input_dim[-1], transform_param=dict(mean_value=[104,117,123], scale=args.s),ntop=2)
net.out = model(data=net.data, label=net.label)
n = netFromString('force_backward:true\n'+str(net.to_proto()), caffe.TRAIN )
else:
n = caffe.Net(args.prototxt, caffe.TRAIN)
# forward call on network
n.forward()
if args.load is not None:
n.copy_from(args.load)
# Rescale existing layers?
#if args.fix:
#magicFix(n, args.nit)
if args.z:
# Zero out all layers
for l in n.layers:
for b in l.blobs:
b.data[...] = 0
if any([np.abs(l.blobs[0].data).sum() < 1e-10 for l in n.layers if len(l.blobs) > 0]):
print( [m for l,m in zip(n.layers, n._layer_names) if len(l.blobs) > 0 and np.abs(l.blobs[0].data).sum() < 1e-10] )
magicInitialize(n, args.bias, NIT=args.nit, type=args.type, max_data=args.mem_limit*1024*1024/4)
else:
print( "Network already initialized, skipping magic init" )
if args.cs:
        # A simple helper function that lets you figure out which layers are not
        # homogeneous
# print( estimateHomogenety(n) )
calibrateGradientRatio(n)
n.save(args.output_caffemodel)
if __name__ == "__main__":
main()
|
colorization-master
|
resources/magic_init/magic_init_mod.py
|
from __future__ import print_function
from magic_init import *
class BCOLORS:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class NOCOLORS:
HEADER = ''
OKBLUE = ''
OKGREEN = ''
WARNING = ''
FAIL = ''
ENDC = ''
BOLD = ''
UNDERLINE = ''
def coloredNumbers(v, color=None, fmt='%6.2f', max_display=300, bcolors=BCOLORS):
import numpy as np
    # Display a numpy array and highlight the min and max values [requires a nice linux
    # terminal supporting colors]
r = ""
mn, mx = np.min(v), np.max(v)
for k,i in enumerate(v):
if len(v) > max_display and k > max_display/2 and k < len(v) - max_display/2:
if r[-1] != '.':
r += '...'
continue
if i <= mn + 1e-3:
r += bcolors.BOLD+bcolors.FAIL
elif i + 1e-3 >= mx:
r += bcolors.BOLD+bcolors.FAIL
elif color is not None:
r += color
r += (fmt+' ')%i
r += bcolors.ENDC
r += bcolors.ENDC
return r
def computeGradientRatio(net, NIT=1):
import numpy as np
last_layer = 0
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if l.type not in STRIP_LAYER:
last_layer = i
last_tops = net.top_names[net._layer_names[last_layer]]
var = {}
for it in range(NIT):
net._forward(0, last_layer)
# Reset the diffs
for l in net.layers:
for b in l.blobs:
b.diff[...] = 0
# Set the top diffs
for t in last_tops:
net.blobs[t].diff[...] = np.random.normal(0, 1, net.blobs[t].shape)
net._backward(last_layer, 0)
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if len(l.blobs) > 0:
assert l.type in PARAMETER_LAYERS, "Parameter layer '%s' currently not supported"%l.type
b = l.blobs[0]
r = np.mean(b.diff.swapaxes(0,1).reshape((b.diff.shape[1],-1))**2, axis=1) / np.mean(b.data**2)
if n in var: var[n] += r / NIT
else: var[n] = r / NIT
std = {n: np.sqrt(var[n]) for n in var}
return {n: np.std(s) / np.mean(s) for n,s in std.items()}, {n: np.mean(s) for n,s in std.items()}
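# computeGradientRatio returns two dicts keyed by parameter-layer name: the
# coefficient of variation of the per-channel gradient/weight RMS ratio (how
# uneven the ratio is across output channels) and its mean (the overall
# gradient-to-weight scale).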
def printMeanStddev(net, NIT=10, show_all=False, show_color=True, quiet=False):
import numpy as np
bcolors = NOCOLORS
if show_color: bcolors = BCOLORS
layer_names = list(net._layer_names)
if not show_all:
layer_names = [n for n, l in zip(net._layer_names, net.layers) if len(l.blobs)>0]
if 'data' in net._layer_names:
layer_names.append('data')
# When was a blob last used
last_used = {}
# Make sure all layers are supported, and compute the range each blob is used in
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
for b in net.bottom_names[n]:
last_used[b] = i
active_data, cvar = {}, {}
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
active_data.update(new_data)
if len(net.top_names[n]) > 0 and n in layer_names:
m = net.top_names[n][0]
D = flattenData(new_data[m])
mean = np.mean(D, axis=0)
stddev = np.std(D, axis=0)
if not quiet:
print( bcolors.BOLD, ' '*5, n, ':', m, ' '*5, bcolors.ENDC )
print( 'mean ', coloredNumbers(mean, bcolors.OKGREEN, bcolors=bcolors) )
print( 'stddev', coloredNumbers(stddev, bcolors.OKBLUE, bcolors=bcolors) )
print( 'coef of variation ', bcolors.OKGREEN, stddev.std() / stddev.mean(), bcolors.ENDC )
print()
cvar[n] = stddev.std() / stddev.mean()
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
return cvar
def main():
from argparse import ArgumentParser
from os import path
parser = ArgumentParser()
parser.add_argument('prototxt')
parser.add_argument('-l', '--load', help='Load a caffemodel')
parser.add_argument('-d', '--data', default=None, help='Image list to use [default prototxt data]')
#parser.add_argument('-q', action='store_true', help='Quiet execution')
parser.add_argument('-sm', action='store_true', help='Summary only')
parser.add_argument('-q', action='store_true', help='Quiet execution')
parser.add_argument('-a', '--all', action='store_true', help='Show the statistic for all layers')
parser.add_argument('-nc', action='store_true', help='Do not use color')
parser.add_argument('-s', type=float, default=1.0, help='Scale the input [only custom data "-d"]')
parser.add_argument('-bs', type=int, default=16, help='Batch size [only custom data "-d"]')
parser.add_argument('-nit', type=int, default=10, help='Number of iterations')
parser.add_argument('--gpu', type=int, default=0, help='What gpu to run it on?')
args = parser.parse_args()
if args.q:
from os import environ
environ['GLOG_minloglevel'] = '2'
import caffe, load
from caffe import NetSpec, layers as L
caffe.set_mode_gpu()
if args.gpu is not None:
caffe.set_device(args.gpu)
if args.data is not None:
model = load.ProtoDesc(args.prototxt)
net = NetSpec()
fl = getFileList(args.data)
if len(fl) == 0:
print("Unknown data type for '%s'"%args.data)
exit(1)
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile('w')
f.write('\n'.join([path.abspath(i)+' 0' for i in fl]))
f.flush()
net.data, net.label = L.ImageData(source=f.name, batch_size=args.bs, new_width=model.input_dim[-1], new_height=model.input_dim[-1], transform_param=dict(mean_value=[104,117,123], scale=args.s),ntop=2)
net.out = model(data=net.data, label=net.label)
n = netFromString('force_backward:true\n'+str(net.to_proto()), caffe.TRAIN )
else:
n = caffe.Net(args.prototxt, caffe.TRAIN)
if args.load is not None:
n.copy_from(args.load)
cvar = printMeanStddev(n, NIT=args.nit, show_all=args.all, show_color=not args.nc, quiet=args.sm)
cv, gr = computeGradientRatio(n, NIT=args.nit)
print()
print(' Summary ')
print('-----------')
print()
print('layer name out cvar rate cvar rate mean')
for l in n._layer_names:
if l in cvar and l in cv and l in gr:
print('%-30s %10.2f %10.2f %10.2f'%(l, cvar[l], cv[l], gr[l]) )
if __name__ == "__main__":
main()
|
colorization-master
|
resources/magic_init/measure_stat.py
|
import caffe
def parseProtoString(s):
from google.protobuf import text_format
from caffe.proto import caffe_pb2 as pb
proto_net = pb.NetParameter()
text_format.Merge(s, proto_net)
return proto_net
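# Usage sketch (hypothetical inline prototxt): parse a net definition from a
# string and inspect its layers:
#   s = 'name: "tiny"\nlayer { name: "fc1" type: "InnerProduct" bottom: "data" top: "fc1" }'
#   proto = parseProtoString(s)
#   print([l.name for l in proto.layer])   # -> ['fc1']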
def get_param(l, exclude=set(['top', 'bottom', 'name', 'type'])):
if not hasattr(l,'ListFields'):
if hasattr(l,'__delitem__'):
return list(l)
return l
r = dict()
for f, v in l.ListFields():
if f.name not in exclude:
r[f.name] = get_param(v, [])
return r
class ProtoDesc:
def __init__(self, prototxt):
from os import path
self.prototxt = prototxt
self.parsed_proto = parseProtoString(open(self.prototxt, 'r').read())
# Guess the input dimension
self.input_dim = (3, 227, 227)
net = self.parsed_proto
if len(net.input_dim) > 0:
self.input_dim = net.input_dim[1:]
else:
lrs = net.layer
cs = [l.transform_param.crop_size for l in lrs
if l.HasField('transform_param')]
if len(cs):
self.input_dim = (3, cs[0], cs[0])
def __call__(self, clip=None, **inputs):
from caffe import layers as L
from collections import OrderedDict
net = self.parsed_proto
blobs = OrderedDict(inputs)
for l in net.layer:
if l.name not in inputs:
in_place = l.top == l.bottom
param = get_param(l)
assert all([b in blobs for b in l.bottom]), "Some bottoms not found: " + ', '.join([b for b in l.bottom if not b in blobs])
tops = getattr(L, l.type)(*[blobs[b] for b in l.bottom],
ntop=len(l.top), in_place=in_place,
name=l.name,
**param)
if len(l.top) <= 1:
tops = [tops]
for i, t in enumerate(l.top):
blobs[t] = tops[i]
if l.name == clip:
break
return list(blobs.values())[-1]
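# Usage sketch (hypothetical paths), mirroring how magic_init's main() uses
# this class:
#   from caffe import NetSpec, layers as L
#   model = ProtoDesc('deploy.prototxt')           # replayable layer description
#   ns = NetSpec()
#   ns.data, ns.label = L.ImageData(source='list.txt', batch_size=8, ntop=2)
#   ns.out = model(data=ns.data, label=ns.label)   # rebuild the layers on new inputs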
|
colorization-master
|
resources/magic_init/load.py
|
import numpy as np
import os
import skimage.color as color
import matplotlib.pyplot as plt
import scipy.ndimage.interpolation as sni
import caffe
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('-img_in',dest='img_in',help='grayscale image to read in', type=str)
parser.add_argument('-img_out',dest='img_out',help='colorized image to save off', type=str)
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--prototxt',dest='prototxt',help='prototxt filepath', type=str, default='./models/colorization_deploy_v2.prototxt')
parser.add_argument('--caffemodel',dest='caffemodel',help='caffemodel filepath', type=str, default='./models/colorization_release_v2.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
# Select desired model
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
(H_in,W_in) = net.blobs['data_l'].data.shape[2:] # get input shape
(H_out,W_out) = net.blobs['class8_ab'].data.shape[2:] # get output shape
pts_in_hull = np.load('./resources/pts_in_hull.npy') # load cluster centers
net.params['class8_ab'][0].data[:,:,0,0] = pts_in_hull.transpose((1,0)) # populate cluster centers as 1x1 convolution kernel
# print 'Annealed-Mean Parameters populated'
# load the original image
img_rgb = caffe.io.load_image(args.img_in)
img_lab = color.rgb2lab(img_rgb) # convert image to lab color space
img_l = img_lab[:,:,0] # pull out L channel
(H_orig,W_orig) = img_rgb.shape[:2] # original image size
# create grayscale version of image (just for displaying)
img_lab_bw = img_lab.copy()
img_lab_bw[:,:,1:] = 0
img_rgb_bw = color.lab2rgb(img_lab_bw)
# resize image to network input size
img_rs = caffe.io.resize_image(img_rgb,(H_in,W_in)) # resize image to network input size
img_lab_rs = color.rgb2lab(img_rs)
img_l_rs = img_lab_rs[:,:,0]
net.blobs['data_l'].data[0,0,:,:] = img_l_rs-50 # subtract 50 for mean-centering
net.forward() # run network
ab_dec = net.blobs['class8_ab'].data[0,:,:,:].transpose((1,2,0)) # this is our result
ab_dec_us = sni.zoom(ab_dec,(1.*H_orig/H_out,1.*W_orig/W_out,1)) # upsample to match size of original image L
img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2) # concatenate with original image L
img_rgb_out = (255*np.clip(color.lab2rgb(img_lab_out),0,1)).astype('uint8') # convert back to rgb
plt.imsave(args.img_out, img_rgb_out)
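# Example invocation (paths are placeholders):
#   python colorize.py -img_in imgs/example.jpg -img_out out/example_colorized.png --gpu 0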
|
colorization-master
|
colorization/colorize.py
|
#!/usr/bin/python
import os
import sys
import argparse
import numpy as np
from skimage import color, io
import scipy.ndimage.interpolation as sni
import caffe
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('flist', type=str,
help='file containing list of images to process')
parser.add_argument('output', type=str,
help='output directory')
parser.add_argument('-p', '--proto', type=str,
default='../models/colorization_deploy_v2.prototxt',
help='prototxt file of the net model')
parser.add_argument('-m', '--model', type=str,
default='../models/colorization_release_v2.caffemodel',
help='caffemodel file of the net model')
parser.add_argument('-c', '--cluster', type=str,
default='../resources/pts_in_hull.npy',
help='cluster centers (pts in hull)')
parser.add_argument('-g', '--gpu', type=int,
default=0,
help='gpu id')
args = parser.parse_args(args=argv)
return args
# Prepare network
def prepare_net(proto, model, cluster):
net = caffe.Net(proto, model, caffe.TEST)
in_shape = net.blobs['data_l'].data.shape[2:] # get input shape
out_shape = net.blobs['class8_ab'].data.shape[2:] # get output shape
print 'Input dimensions: %s' % str(in_shape)
print 'Output dimensions: %s' % str(out_shape)
pts_in_hull = np.load(cluster) # load cluster centers
net.params['class8_ab'][0].data[:,:,0,0] = pts_in_hull.transpose((1,0)) # populate cluster centers as 1x1 convolution kernel
print 'Annealed-Mean Parameters populated'
return net, in_shape, out_shape
# Prepare image for network
def prepare_img(fpath, in_shape):
# load the original image
img_rgb = caffe.io.load_image(fpath)
img_lab = color.rgb2lab(img_rgb) # convert image to lab color space
img_l = img_lab[:,:,0] # pull out L channel
orig_shape = img_rgb.shape[:2] # original image size
# resize image to network input size
img_rs = caffe.io.resize_image(img_rgb, in_shape) # resize image to network input size
img_lab_rs = color.rgb2lab(img_rs)
img_l_rs = img_lab_rs[:,:,0]
return img_l_rs, img_l, orig_shape
# Process image
def process(net, in_shape, out_shape, fpath):
img_l_rs, img_l, orig_shape = prepare_img(fpath, in_shape)
net.blobs['data_l'].data[0,0,:,:] = img_l_rs-50 # subtract 50 for mean-centering
net.forward() # run network
ab_dec = net.blobs['class8_ab'].data[0,:,:,:].transpose((1,2,0)) # this is our result
shape = (1.*orig_shape[0]/out_shape[0], 1.*orig_shape[1]/out_shape[1])
ab_dec_us = sni.zoom(ab_dec,(shape[0],shape[1],1)) # upsample to match size of original image L
img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2) # concatenate with original image L
img_rgb_out = np.clip(color.lab2rgb(img_lab_out),0,1) # convert back to rgb
return img_rgb_out
# Save image
def save_img(img, fpath, out_dir):
fname_in = os.path.basename(fpath)
fpath_out = os.path.join(out_dir, fname_in)
io.imsave(fpath_out, img)
# Main
def main(argv):
# Parse arguments
args = parse_args(argv)
print args
# Prepare caffe and net
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
net, in_shape, out_shape = prepare_net(args.proto, args.model, args.cluster)
# Process files
with open(args.flist) as flist:
for fpath in flist:
fpath = fpath.rstrip('\n')
print 'Processing file %s ...' % fpath
img = process(net, in_shape, out_shape, fpath)
save_img(img, fpath, args.output)
print 'Done!'
if __name__ == "__main__":
main(sys.argv[1:])
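# Example invocation (paths are placeholders): colorize every image listed in
# flist.txt and write the results into ./out :
#   python batch_process.py flist.txt ./out --gpu 0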
|
colorization-master
|
colorization/demo/batch_process.py
|
import sys
import argparse
import caffe
from caffe import score, surgery # score, surgery function from caffe-fcn
import numpy as np
import os
import warnings
print sys.argv
def parse_args():
parser = argparse.ArgumentParser(description='')
# ***** FLAGS *****
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--phase', dest='phase', help='{0: 0-50k iters, 1: 50-100k, 2: 100-150k}', type=int, default=0)
parser.add_argument('--caffemodel',dest='caffemodel',help='path to caffemodel', type=str, \
default='./models/alexnet_release_450000_nobn_rs.caffemodel') # no strokes
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
args = parse_args()
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
EXP_DIR = os.path.join('./feature_learning_tests/segmentation')
weights = args.caffemodel
# make models directory
# os.chdir(EXP_DIR)
if(not os.path.exists(os.path.join(EXP_DIR,'models'))):
print('Making models directory')
os.mkdir(os.path.join(EXP_DIR,'models'))
save_format = os.path.join(EXP_DIR,'out_{}')
if(args.phase==0):
restore = None
solver = caffe.SGDSolver(os.path.join(EXP_DIR,'solver0.prototxt'))
elif(args.phase==1):
restore = os.path.join(EXP_DIR,'models','fcn_iter_50000.solverstate')
solver = caffe.SGDSolver(os.path.join(EXP_DIR,'solver1.prototxt'))
elif(args.phase==2):
restore = os.path.join(EXP_DIR,'models','fcn_iter_100000.solverstate')
solver = caffe.SGDSolver(os.path.join(EXP_DIR,'solver2.prototxt'))
# resume = False
if restore is not None:
solver.restore(restore)
# elif resume:
# solver.net.copy_from(weights)
else:
solver.net.copy_from(weights) # initialize with weights
# add bilinear upsampling weights
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
net = solver.net
tnet = solver.test_nets[0]
solver.test_nets[0].share_with(solver.net)
LAYER_SCORE = 'score'
LAYER_LOSS = 'loss'
# warnings.filterwarnings("ignore")
# scoring
val = np.loadtxt(os.path.join(EXP_DIR,'./segvalid11.txt'), dtype=str)
for aa in range(50000):
# if(np.mod(aa,100)==0):
# print 'Running: %i'%aa
if(np.mod(aa,1000)==0):
print 'Evaluating: %i'%aa
score.seg_tests(solver, save_format, val, layer=LAYER_SCORE)
solver.step(1)
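# Example invocation (the caffemodel path shown is the script default):
#   python solve.py --gpu 0 --phase 0 --caffemodel ./models/alexnet_release_450000_nobn_rs.caffemodel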
|
colorization-master
|
colorization/feature_learning_tests/segmentation/solve.py
|
from __future__ import division
import caffe
import numpy as np
def transplant(new_net, net):
for p in net.params:
if p not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if net.params[p][i].data.shape != new_net.params[p][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p][i].data.shape
else:
print 'copying', p, i
new_net.params[p][i].data.flat = net.params[p][i].data.flat
def expand_score(new_net, new_layer, net, layer):
old_cl = net.params[layer][0].num
new_net.params[new_layer][0].data[:old_cl][...] = net.params[layer][0].data
new_net.params[new_layer][1].data[0,0,0,:old_cl][...] = net.params[layer][1].data
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
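# Worked example: upsample_filt(4) is the separable bilinear kernel
# np.outer([0.25, 0.75, 0.75, 0.25], [0.25, 0.75, 0.75, 0.25]); each weight
# equals (1 - |i - 1.5| / 2), tapering linearly away from the filter center.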
def interp(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
if h != w:
print 'filters need to be square'
raise
filt = upsample_filt(h)
net.params[l][0].data[range(m), range(k), :, :] = filt
def upsample_filt2(size1,size2):
size = np.maximum(size1,size2)
factor = (size + 1) // 2
if size1 % 2 == 1:
center1 = factor - 1
else:
center1 = factor - 0.5
if size2 % 2 == 1:
center2 = factor - 1
else:
center2 = factor - 0.5
og = np.ogrid[:size1, :size2]
return (1 - abs(og[0] - center1) / factor) * \
(1 - abs(og[1] - center2) / factor)
def interp2(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
filt = upsample_filt2(h,w)
net.params[l][0].data[range(m), range(k), :, :] = filt
|
colorization-master
|
colorization/feature_learning_tests/segmentation/caffe/surgery.py
|
from __future__ import division
import caffe
import numpy as np
import os
import sys
from datetime import datetime
from PIL import Image
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)
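# Worked example (with numpy int arrays): fast_hist([0,0,1,2], [0,1,1,2], 3)
# yields the 3x3 confusion matrix [[1,1,0],[0,1,0],[0,0,1]]; hist[i,j] counts
# pixels with ground truth i predicted as j, and labels outside [0,n) are
# masked out by k.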
def compute_hist(net, save_dir, dataset, layer='score', gt='label'):
n_cl = net.blobs[layer].channels
if save_dir:
os.mkdir(save_dir)
hist = np.zeros((n_cl, n_cl))
loss = 0
for idx in dataset:
net.forward()
hist += fast_hist(net.blobs[gt].data[0, 0].flatten(),
net.blobs[layer].data[0].argmax(0).flatten(),
n_cl)
if save_dir:
im = Image.fromarray(net.blobs[layer].data[0].argmax(0).astype(np.uint8), mode='P')
im.save(os.path.join(save_dir, idx + '.png'))
# compute the loss as well
loss += net.blobs['loss'].data.flat[0]
return hist, loss / len(dataset)
def seg_tests(solver, save_format, dataset, layer='score', gt='label'):
print '>>>', datetime.now(), 'Begin seg tests'
solver.test_nets[0].share_with(solver.net)
do_seg_tests(solver.test_nets[0], solver.iter, save_format, dataset, layer, gt)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
n_cl = net.blobs[layer].channels
if save_format:
save_format = save_format.format(iter)
hist, loss = compute_hist(net, save_format, dataset, layer, gt)
# mean loss
print '>>>', datetime.now(), 'Iteration', iter, 'loss', loss
# overall accuracy
acc = np.diag(hist).sum() / hist.sum()
print '>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc
# per-class accuracy
acc = np.diag(hist) / hist.sum(1)
print '>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc)
# per-class IU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print '>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu)
freq = hist.sum(1) / hist.sum()
print '>>>', datetime.now(), 'Iteration', iter, 'fwavacc', \
(freq[freq > 0] * iu[freq > 0]).sum()
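# Metric notes (as computed above): per-class IU = TP / (TP + FP + FN), read
# off the confusion matrix hist; fwavacc is the frequency-weighted average of
# the per-class IU values.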
|
colorization-master
|
colorization/feature_learning_tests/segmentation/caffe/score.py
|
import caffe
import os
import string
import numpy as np
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser(description='Convert conv layers into FC layers')
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--prototxt_in',dest='prototxt_in',help='prototxt with conv layers', type=str, default='')
parser.add_argument('--prototxt_out',dest='prototxt_out',help='prototxt with fc layers', type=str, default='')
parser.add_argument('--caffemodel_in',dest='caffemodel_in',help='caffemodel with conv layers', type=str, default='')
parser.add_argument('--caffemodel_out',dest='caffemodel_out',help='caffemodel with fc layers, to be saved', type=str, default='')
parser.add_argument('--dummymodel',dest='dummymodel',help='blank caffemodel',type=str,default='./models/dummy.caffemodel')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
gpu_id = args.gpu
PROTOTXT1_PATH = args.prototxt_in
PROTOTXT2_PATH = args.prototxt_out # no batch norm
MODEL_PATH = args.caffemodel_in
DUMMYMODEL_PATH = args.dummymodel
MODEL2_PATH = args.caffemodel_out # to be saved off
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net1 = caffe.Net(PROTOTXT1_PATH, MODEL_PATH, caffe.TEST)
net2 = caffe.Net(PROTOTXT2_PATH, DUMMYMODEL_PATH, caffe.TEST)
import rz_fcns as rz
rz.caffe_param_shapes(net1,to_print=True)
rz.caffe_param_shapes(net2,to_print=True)
rz.caffe_shapes(net2,to_print=True)
# CONV_INDS = np.where(np.array([layer.type for layer in net1.layers])=='Convolution')[0]
print net1.params.keys()
print net2.params.keys()
for (ll,layer) in enumerate(net2.params.keys()):
P = len(net2.params[layer]) # number of blobs
if(P>0):
for pp in range(P):
ndim1 = net1.params[layer][pp].data.ndim
ndim2 = net2.params[layer][pp].data.ndim
print('Copying layer %s, param blob %i (%i-dim => %i-dim)'%(layer,pp,ndim1,ndim2))
if(ndim1==ndim2):
print(' Same dimensionality...')
net2.params[layer][pp].data[...] = net1.params[layer][pp].data[...]
else:
print(' Different dimensionality...')
net2.params[layer][pp].data[...] = net1.params[layer][pp].data[...].reshape(net2.params[layer][pp].data[...].shape)
net2.save(MODEL2_PATH)
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
print 'Saving model into: %s'%MODEL2_PATH
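# Example invocation (paths are placeholders):
#   python conv_into_fc.py --prototxt_in conv.prototxt --prototxt_out fc.prototxt \
#       --caffemodel_in conv.caffemodel --caffemodel_out fc.caffemodel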
|
colorization-master
|
colorization/resources/conv_into_fc.py
|
# **************************************
# ***** Richard Zhang / 2016.08.06 *****
# **************************************
import numpy as np
import warnings
import os
import sklearn.neighbors as nn
import caffe
from skimage import color
# ************************
# ***** CAFFE LAYERS *****
# ************************
class BGR2LabLayer(caffe.Layer):
''' Layer converts BGR to Lab
INPUTS
bottom[0].data Nx3xXxY
OUTPUTS
top[0].data Nx3xXxY
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if(len(bottom)!=1):
raise Exception("Layer should a single input")
if(bottom[0].data.shape[1]!=3):
raise Exception("Input should be 3-channel BGR image")
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,3,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = color.rgb2lab(bottom[0].data[:,::-1,:,:].astype('uint8').transpose((2,3,0,1))).transpose((2,3,0,1))
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
# bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class NNEncLayer(caffe.Layer):
''' Layer which encodes ab map into Q colors
INPUTS
bottom[0] Nx2xXxY
OUTPUTS
top[0].data NxQ
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if len(bottom) == 0:
raise Exception("Layer should have inputs")
self.NN = 10.
self.sigma = 5.
self.ENC_DIR = './resources/'
self.nnenc = NNEncode(self.NN,self.sigma,km_filepath=os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
self.Q = self.nnenc.K
def reshape(self, bottom, top):
top[0].reshape(self.N,self.Q,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = self.nnenc.encode_points_mtx_nd(bottom[0].data[...],axis=1)
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class PriorBoostLayer(caffe.Layer):
''' Layer boosts ab values based on their rarity
INPUTS
bottom[0] NxQxXxY
OUTPUTS
top[0].data Nx1xXxY
'''
def setup(self,bottom, top):
if len(bottom) == 0:
raise Exception("Layer should have inputs")
self.ENC_DIR = './resources/'
self.gamma = .5
self.alpha = 1.
self.pc = PriorFactor(self.alpha,gamma=self.gamma,priorFile=os.path.join(self.ENC_DIR,'prior_probs.npy'))
self.N = bottom[0].data.shape[0]
self.Q = bottom[0].data.shape[1]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,1,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = self.pc.forward(bottom[0].data[...],axis=1)
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class NonGrayMaskLayer(caffe.Layer):
''' Layer outputs a mask based on whether the image is grayscale
INPUTS
bottom[0] Nx2xXxY ab values
OUTPUTS
top[0].data Nx1xXxY 1 if image is NOT grayscale
0 if image is grayscale
'''
def setup(self,bottom, top):
if len(bottom) == 0:
raise Exception("Layer should have inputs")
self.thresh = 5 # threshold on ab value
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,1,self.X,self.Y)
def forward(self, bottom, top):
# if an image has any (a,b) value which exceeds threshold, output 1
top[0].data[...] = (np.sum(np.sum(np.sum(np.abs(bottom[0].data) > self.thresh,axis=1),axis=1),axis=1) > 0)[:,na(),na(),na()]
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class ClassRebalanceMultLayer(caffe.Layer):
''' INPUTS
bottom[0] NxMxXxY feature map
bottom[1] Nx1xXxY boost coefficients
OUTPUTS
top[0] NxMxXxY on forward, gets copied from bottom[0]
FUNCTIONALITY
On forward pass, top[0] passes bottom[0]
On backward pass, bottom[0] gets boosted by bottom[1]
through pointwise multiplication (with singleton expansion) '''
def setup(self, bottom, top):
# check input pair
if len(bottom)==0:
raise Exception("Specify inputs")
def reshape(self, bottom, top):
i = 0
if(bottom[i].data.ndim==1):
top[i].reshape(bottom[i].data.shape[0])
elif(bottom[i].data.ndim==2):
top[i].reshape(bottom[i].data.shape[0], bottom[i].data.shape[1])
elif(bottom[i].data.ndim==4):
top[i].reshape(bottom[i].data.shape[0], bottom[i].data.shape[1], bottom[i].data.shape[2], bottom[i].data.shape[3])
def forward(self, bottom, top):
# forward pass simply copies bottom[0] through to top[0]
top[0].data[...] = bottom[0].data[...]
# top[0].data[...] = bottom[0].data[...]*bottom[1].data[...] # this was bad, would mess up the gradients going up
def backward(self, top, propagate_down, bottom):
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[0].diff[...] = top[0].diff[...]*bottom[1].data[...]
# print 'Back-propagating class rebalance, %i'%i
class LossMeterLayer(caffe.Layer):
''' Layer acts as a "meter" to track loss values '''
def setup(self,bottom,top):
if(len(bottom)==0):
raise Exception("Layer needs inputs")
self.param_str_split = self.param_str.split(' ')
self.LOSS_DIR = self.param_str_split[0]
self.P = int(self.param_str_split[1])
self.H = int(self.param_str_split[2])
if(len(self.param_str_split)==4):
self.prefix = self.param_str_split[3]
else:
self.prefix = ''
self.cnt = 0 # loss track counter
# self.P = 1 # interval to print losses
self.h = 0 # index into history
self.L = len(bottom)
self.losses = np.zeros((self.L,self.H))
self.ITER_PATH = os.path.join(self.LOSS_DIR,'iter.npy')
self.LOG_PATH = os.path.join(self.LOSS_DIR,'loss_log')
if(not os.path.exists(self.LOSS_DIR)):
os.mkdir(self.LOSS_DIR)
if(os.path.exists(self.ITER_PATH)):
self.iter = np.load(self.ITER_PATH)
else:
self.iter = 0 # iteration counter
print 'Initial iteration: %i'%(self.iter+1)
def reshape(self,bottom,top):
pass
def forward(self,bottom,top):
for ll in range(self.L):
self.losses[ll,self.h] = bottom[ll].data[...]
if(np.mod(self.cnt,self.P)==self.P-1): # print
if(self.cnt >= self.H-1):
tmp_str = 'NumAvg %i, Loss '%(self.H)
for ll in range(self.L):
tmp_str += '%.3f, '%np.mean(self.losses[ll,:])
else:
tmp_str = 'NumAvg %i, Loss '%(self.h)
for ll in range(self.L):
tmp_str += '%.3f, '%np.mean(self.losses[ll,:self.cnt+1])
print_str = '%s: Iter %i, %s'%(self.prefix,self.iter+1,tmp_str)
print print_str
self.f = open(self.LOG_PATH,'a')
self.f.write(print_str)
self.f.write('\n')
self.f.close()
np.save(self.ITER_PATH,self.iter)
self.h = np.mod(self.h+1,self.H) # roll through history
self.cnt = self.cnt+1
self.iter = self.iter+1
def backward(self,top,propagate_down,bottom):
for ll in range(self.L):
continue
# ***************************
# ***** SUPPORT CLASSES *****
# ***************************
class PriorFactor():
''' Class handles prior factor '''
def __init__(self,alpha,gamma=0,verbose=True,priorFile=''):
# INPUTS
# alpha scalar prior correction factor, 0 to ignore prior, 1 to divide by prior, alpha to divide by prior**alpha
# gamma scalar fraction in [0,1] used to mix the uniform prior into the empirical prior
# priorFile file file which contains prior probabilities across classes
# settings
self.alpha = alpha
self.gamma = gamma
self.verbose = verbose
# empirical prior probability
self.prior_probs = np.load(priorFile)
# define uniform probability
self.uni_probs = np.zeros_like(self.prior_probs)
self.uni_probs[self.prior_probs!=0] = 1.
self.uni_probs = self.uni_probs/np.sum(self.uni_probs)
# convex combination of empirical prior and uniform distribution
self.prior_mix = (1-self.gamma)*self.prior_probs + self.gamma*self.uni_probs
# set prior factor
self.prior_factor = self.prior_mix**-self.alpha
self.prior_factor = self.prior_factor/np.sum(self.prior_probs*self.prior_factor) # re-normalize
# implied empirical prior
self.implied_prior = self.prior_probs*self.prior_factor
self.implied_prior = self.implied_prior/np.sum(self.implied_prior) # re-normalize
if(self.verbose):
self.print_correction_stats()
def print_correction_stats(self):
print 'Prior factor correction:'
print ' (alpha,gamma) = (%.2f, %.2f)'%(self.alpha,self.gamma)
print ' (min,max,mean,med,exp) = (%.2f, %.2f, %.2f, %.2f, %.2f)'%(np.min(self.prior_factor),np.max(self.prior_factor),np.mean(self.prior_factor),np.median(self.prior_factor),np.sum(self.prior_factor*self.prior_probs))
def forward(self,data_ab_quant,axis=1):
data_ab_maxind = np.argmax(data_ab_quant,axis=axis)
corr_factor = self.prior_factor[data_ab_maxind]
if(axis==0):
return corr_factor[na(),:]
elif(axis==1):
return corr_factor[:,na(),:]
elif(axis==2):
return corr_factor[:,:,na(),:]
elif(axis==3):
return corr_factor[:,:,:,na()]
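# Worked example (toy prior, alpha=1, gamma=0): prior_probs = [0.8, 0.2] gives
# prior_factor proportional to [1.25, 5.0]; re-normalizing so that
# sum(prior_probs * prior_factor) == 1 yields [0.625, 2.5]. The rare class is
# boosted 4x relative to the common one, while the expected correction under
# the empirical prior stays exactly 1.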
class NNEncode():
''' Encode points using NN search and Gaussian kernel '''
def __init__(self,NN,sigma,km_filepath='',cc=-1):
if(check_value(cc,-1)):
self.cc = np.load(km_filepath)
else:
self.cc = cc
self.K = self.cc.shape[0]
self.NN = int(NN)
self.sigma = sigma
self.nbrs = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(self.cc)
self.alreadyUsed = False
def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False,sameBlock=True):
pts_flt = flatten_nd_array(pts_nd,axis=axis)
P = pts_flt.shape[0]
if(sameBlock and self.alreadyUsed):
self.pts_enc_flt[...] = 0 # already pre-allocated
else:
self.alreadyUsed = True
self.pts_enc_flt = np.zeros((P,self.K))
self.p_inds = np.arange(0,P,dtype='int')[:,na()]
P = pts_flt.shape[0]
(dists,inds) = self.nbrs.kneighbors(pts_flt)
wts = np.exp(-dists**2/(2*self.sigma**2))
wts = wts/np.sum(wts,axis=1)[:,na()]
self.pts_enc_flt[self.p_inds,inds] = wts
pts_enc_nd = unflatten_2d_array(self.pts_enc_flt,pts_nd,axis=axis)
return pts_enc_nd
def decode_points_mtx_nd(self,pts_enc_nd,axis=1):
pts_enc_flt = flatten_nd_array(pts_enc_nd,axis=axis)
pts_dec_flt = np.dot(pts_enc_flt,self.cc)
pts_dec_nd = unflatten_2d_array(pts_dec_flt,pts_enc_nd,axis=axis)
return pts_dec_nd
def decode_1hot_mtx_nd(self,pts_enc_nd,axis=1,returnEncode=False):
pts_1hot_nd = nd_argmax_1hot(pts_enc_nd,axis=axis)
pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd,axis=axis)
if(returnEncode):
return (pts_dec_nd,pts_1hot_nd)
else:
return pts_dec_nd
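# Usage sketch (hypothetical shapes; pts_in_hull.npy ships in ./resources/):
#   nnenc = NNEncode(10, 5., km_filepath='./resources/pts_in_hull.npy')
#   ab = np.random.uniform(-90, 90, (2, 2, 8, 8))  # Nx2xXxY ab map
#   enc = nnenc.encode_points_mtx_nd(ab, axis=1)   # Nx313xXxY; per-pixel weights sum to 1
#   dec = nnenc.decode_points_mtx_nd(enc, axis=1)  # Nx2xXxY approximate ab reconstruction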
# *****************************
# ***** Utility functions *****
# *****************************
def check_value(inds, val):
''' Check to see if an array is a single element equaling a particular value
for pre-processing inputs in a function '''
if(np.array(inds).size==1):
if(inds==val):
return True
return False
def na(): # shorthand for new axis
return np.newaxis
def flatten_nd_array(pts_nd,axis=1):
''' Flatten an nd array into a 2d array with a certain axis
INPUTS
pts_nd N0xN1x...xNd array
axis integer
OUTPUTS
pts_flt prod(N \ N_axis) x N_axis array '''
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
pts_flt = pts_nd.transpose((axorder))
pts_flt = pts_flt.reshape(NPTS,SHP[axis])
return pts_flt
def unflatten_2d_array(pts_flt,pts_nd,axis=1,squeeze=False):
''' Unflatten a 2d array with a certain axis
INPUTS
pts_flt prod(N \ N_axis) x M array
pts_nd N0xN1x...xNd array
axis integer
squeeze bool if true, M=1, squeeze it out
OUTPUTS
pts_out N0xN1x...xNd array '''
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
if(squeeze):
axorder = nax
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
else:
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
NEW_SHP.append(M)
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
return pts_out
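# Round-trip example: flattening an NxCxXxY array along axis=1 gives an
# (N*X*Y) x C matrix, and unflatten_2d_array inverts it exactly:
#   pts = np.random.randn(4, 3, 8, 8)
#   flt = flatten_nd_array(pts, axis=1)          # shape (256, 3)
#   rec = unflatten_2d_array(flt, pts, axis=1)   # shape (4, 3, 8, 8)
#   assert np.allclose(pts, rec)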
|
colorization-master
|
colorization/resources/caffe_traininglayers.py
|
# **************************************
# ***** Richard Zhang / 2016.06.04 *****
# **************************************
# Absorb batch norm into convolution layers
# This script only supports the conv-batchnorm configuration
# Currently unsupported:
# - deconv layers
# - fc layers
# - batchnorm before linear layer
import caffe
import os
import string
import numpy as np
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser(description='BatchNorm absorption')
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--prototxt_in',dest='prototxt_in',help='prototxt with batchnorm', type=str, default='')
parser.add_argument('--prototxt_out',dest='prototxt_out',help='prototxt without batchnorm', type=str, default='')
parser.add_argument('--caffemodel_in',dest='caffemodel_in',help='caffemodel with batchnorm', type=str, default='')
parser.add_argument('--caffemodel_out',dest='caffemodel_out',help='caffemodel without batchnorm, to be saved', type=str, default='')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
gpu_id = args.gpu
PROTOTXT1_PATH = args.prototxt_in
PROTOTXT2_PATH = args.prototxt_out # no batch norm
MODEL_PATH = args.caffemodel_in
MODEL2_PATH = args.caffemodel_out # to be saved off
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net1 = caffe.Net(PROTOTXT1_PATH, MODEL_PATH, caffe.TEST)
net2 = caffe.Net(PROTOTXT2_PATH, MODEL_PATH, caffe.TEST)
# call forward on net1, net2
net1.forward()
net2.forward()
# identify batch norms and paired linear layers
BN_INDS = np.where(np.array([layer.type for layer in net1.layers])=='BatchNorm')[0]
BN_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # batch norm layer names
LIN_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # linear layer names
PRE_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # blob right before
POST_NAMES = np.zeros(BN_INDS.shape,dtype='S50') # blob right after
PRE_POST = -1+np.zeros(BN_INDS.shape) # 0 - pre, 1 - post
CONV_DECONV = -1+np.zeros(BN_INDS.shape) # 0 - conv, 1 - deconv
# identify layers which are paired with batch norms (only supporting convolution)
for (ll,bn_ind) in enumerate(BN_INDS):
BN_NAMES[ll] = net1._layer_names[bn_ind]
if(net1.layers[bn_ind-1].type=='Convolution' or net1.layers[bn_ind-1].type=='Deconvolution'):
PRE_POST[ll] = 0
LIN_NAMES[ll] = net1._layer_names[bn_ind-1]
POST_NAMES[ll] = net1._layer_names[bn_ind+1]
if(net1.layers[bn_ind-1].type=='Convolution'):
CONV_DECONV[ll] = 0
elif(net1.layers[bn_ind-1].type=='Deconvolution'):
CONV_DECONV[ll] = 1
elif(net1.layers[bn_ind+1].type=='Convolution' or net1.layers[bn_ind+1].type=='Deconvolution'):
PRE_POST[ll] = 1
LIN_NAMES[ll] = net1._layer_names[bn_ind+1]
POST_NAMES[ll] = net1._layer_names[bn_ind+3]
if(net1.layers[bn_ind+1].type=='Convolution'):
CONV_DECONV[ll] = 0
elif(net1.layers[bn_ind+1].type=='Deconvolution'):
CONV_DECONV[ll] = 1
else:
PRE_POST[ll] = -1
PRE_NAMES[ll] = net1.bottom_names[BN_NAMES[ll]][0]
LIN_INDS = BN_INDS+PRE_POST # linear layer indices
ALL_SLOPES = {}
# compute batch norm parameters on net1 in first layer
# absorb into weights in first layer
for ll in range(BN_INDS.size):
bn_ind = BN_INDS[ll]
BN_NAME = BN_NAMES[ll]
PRE_NAME = PRE_NAMES[ll]
POST_NAME = POST_NAMES[ll]
LIN_NAME = LIN_NAMES[ll]
print 'LAYERS %s, %s'%(PRE_NAME,BN_NAME)
# print net1.blobs[BN_NAME].data.shape
# print net1.blobs[PRE_NAME].data.shape
C = net1.blobs[BN_NAME].data.shape[1]
in_blob = net1.blobs[PRE_NAME].data
bn_blob = net1.blobs[BN_NAME].data
scale_factor = 1./net1.params[BN_NAME][2].data[...]
mean = scale_factor * net1.params[BN_NAME][0].data[...]
scale = scale_factor * net1.params[BN_NAME][1].data[...]
slopes = np.sqrt(1./scale)
offs = -mean*slopes
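# Folding identity (conv followed by BN): BN(W*x + b) = slopes*(W*x + b) + offs
# = (slopes*W)*x + (slopes*b + offs), so the per-channel slope rescales the
# conv weights and the offset is absorbed into the bias (applied below).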
print ' Computing error on data...'
bn_blob_rep = in_blob*slopes[np.newaxis,:,np.newaxis,np.newaxis]+offs[np.newaxis,:,np.newaxis,np.newaxis]
# Visually verify that factors are correct
print ' Maximum error: %.3e'%np.max(np.abs(bn_blob_rep[bn_blob>0] - bn_blob[bn_blob>0]))
print ' RMS error: %.3e'%np.linalg.norm(bn_blob_rep[bn_blob>0] - bn_blob[bn_blob>0])
print ' RMS signal: %.3e'%np.linalg.norm(bn_blob_rep[bn_blob>0])
print ' Absorbing slope and offset...'
# absorb slope and offset into appropriate parameter
if(PRE_POST[ll]==0): # linear layer is before
if(CONV_DECONV[ll]==0): # convolution
net2.params[LIN_NAME][0].data[...] = net1.params[LIN_NAME][0].data[...]*slopes[:,np.newaxis,np.newaxis,np.newaxis]
net2.params[LIN_NAME][1].data[...] = offs + (slopes*net1.params[LIN_NAME][1].data)
elif(CONV_DECONV[ll]==1): # deconvolution
print '*** Deconvolution not implemented ***'
elif(PRE_POST[ll]==1): # batchnorm is BEFORE linear layer
print '*** Not implemented ***'
net2.save(MODEL2_PATH)
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
print 'Saving model into: %s'%MODEL2_PATH
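# Example invocation (paths are placeholders):
#   python batch_norm_absorb.py --prototxt_in bn.prototxt --prototxt_out nobn.prototxt \
#       --caffemodel_in bn.caffemodel --caffemodel_out nobn.caffemodel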
|
colorization-master
|
colorization/resources/batch_norm_absorb.py
|
from __future__ import print_function, division
INPUT_LAYERS = ['Data', 'ImageData']
# Layers that only support elwise
ELWISE_LAYERS = ['Deconvolution']
# Layers that support parameters
PARAMETER_LAYERS = ['Convolution', 'InnerProduct']+ELWISE_LAYERS
# All supported layers
SUPPORTED_LAYERS = ['ReLU', 'Sigmoid', 'LRN', 'Pooling', 'Eltwise'] + PARAMETER_LAYERS + INPUT_LAYERS
STRIP_LAYER = ['Softmax', 'SoftmaxWithLoss', 'SigmoidCrossEntropyLoss']
# Use 'Dropout' at your own risk
# Unless Jon merges #2865, 'Split' cannot be supported
UNSUPPORTED_LAYERS = ['Split', 'BatchNorm', 'Reshape', 'Scale']
def forward(net, i, NIT, data, output_names):
n = net._layer_names[i]
# Create the top data if needed
output = {t: [None]*NIT for t in output_names}
for it in range(NIT):
for b in data:
net.blobs[b].data[...] = data[b][it]
net._forward(i, i)
for t in output_names:
output[t][it] = 1*net.blobs[t].data
return output
def flattenData(data):
import numpy as np
return np.concatenate([d.swapaxes(0, 1).reshape((d.shape[1],-1)) for d in data], axis=1).T
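# flattenData stacks a list of NxCx... blobs into one (samples x C) matrix:
# the channel axis becomes the columns and every other axis is folded into rows.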
def gatherInputData(net, layer_id, bottom_data, top_name, fast=False, max_data=None):
# This function gathers all input data.
# In order to not replicate all the internal functionality of convolutions (eg. padding ...)
# we gather the data in the output space and use random gaussian weights. The output of this
# function is W and D, where the input data I = D * W^-1 [with some abuse of tensor notation]
# If we now compute an initialization A for D, we then simply multiply A by W to obtain the
# proper initialization in the input space
import numpy as np
l = net.layers[layer_id]
NIT = len(list(bottom_data.values())[0])
# How many times do we need to over-sample to get a full basis (out of random projections)
OS = int(np.ceil( np.prod(l.blobs[0].data.shape[1:]) / l.blobs[0].data.shape[0] ))
if fast: OS = 1
# If we are over sampling we might run out of memory at some point, especially for filters higher up
# To avoid any issues we never return more than max_data number of elements
subsample = None
# Note this could cause some memory issues in the FC layers
W, D = [], []
for i in range(OS):
d = l.blobs[0].data
d[...] = np.random.normal(0, 1, d.shape)
W.append(1*d)
# Collect the data and flatten out the convs
data = np.concatenate([i.swapaxes(0, 1).reshape((i.shape[1],-1)).T for i in forward(net, layer_id, NIT, bottom_data, [top_name])[top_name]], axis=0)
# Do we need to subsample the data to save memory?
if subsample is None and max_data is not None:
# Randomly select n data representative samples
N = int(max_data / (data.shape[1]*OS))
subsample = np.arange(data.shape[0])
if N < data.shape[0]:
np.random.shuffle(subsample)
subsample = subsample[:N]
if subsample is not None:
data = data[subsample]
D.append(data)
# In order to handle any sort of groups we want to have the samples packed in the following order:
# a1 a2 a3 a4 b1 b2 b3 b4 c1 ... (where the original data was a b c and OS=4)
W, D = np.concatenate([w[:,None] for w in W], axis=1), np.concatenate([d[:,:,None] for d in D], axis=2)
return W.reshape((-1,)+W.shape[2:]), D.reshape((D.shape[0], -1)+D.shape[3:])
def initializeWeight(D, type, N_OUT):
# Here we first whiten the data (PCA or ZCA) and then optionally run k-means
# on this whitened data.
import numpy as np
if D.shape[0] < N_OUT:
print( " Not enough data for '%s' estimation, using elwise"%type )
return np.random.normal(0, 1, (N_OUT,D.shape[1]))
D = D - np.mean(D, axis=0, keepdims=True)
# PCA, ZCA, K-Means
assert type in ['pca', 'zca', 'kmeans', 'rand'], "Unknown initialization type '%s'"%type
C = D.T.dot(D)
s, V = np.linalg.eigh(C)
# order the eigenvalues
ids = np.argsort(s)[-N_OUT:]
s = s[ids]
V = V[:,ids]
s[s<1e-6] = 0
s[s>=1e-6] = 1. / np.sqrt(s[s>=1e-6]+1e-3)
S = np.diag(s)
if type == 'pca':
return S.dot(V.T)
elif type == 'zca':
return V.dot(S.dot(V.T))
# Whiten the data
wD = D.dot(V.dot(S))
wD /= np.linalg.norm(wD, axis=1)[:,None]
if type == 'kmeans':
# Run k-means
from sklearn.cluster import MiniBatchKMeans
km = MiniBatchKMeans(n_clusters = wD.shape[1], batch_size=10*wD.shape[1]).fit(wD).cluster_centers_
elif type == 'rand':
km = wD[np.random.choice(wD.shape[0], wD.shape[1], False)]
C = km.dot(S.dot(V.T))
C /= np.std(D.dot(C.T), axis=0, keepdims=True).T
return C
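# Sanity-check sketch (hypothetical data): for type 'pca' or 'zca' the returned
# matrix approximately whitens the (internally centered) data, so each output
# dimension of D.dot(C.T) has roughly unit standard deviation:
#   D = np.random.randn(1000, 16) * np.random.uniform(0.1, 10, 16)
#   C = initializeWeight(D, 'zca', N_OUT=16)
#   print(np.std(D.dot(C.T), axis=0))   # ~1 everywhere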
def initializeLayer(net, layer_id, bottom_data, top_name, bias=0, type='elwise', max_data=None):
import numpy as np
l = net.layers[layer_id]
NIT = len(list(bottom_data.values())[0])
if type!='elwise' and l.type in ELWISE_LAYERS:
print( "Only 'elwise' supported for layer '%s'. Falling back."%net._layer_names[layer_id] )
type = 'elwise'
for p in l.blobs: p.data[...] = 0
fast = 'fast_' in type
if fast:
type = type.replace('fast_', '')
# Initialize the weights [k-means, ...]
if type == 'elwise':
d = l.blobs[0].data
d[...] = np.random.normal(0, 1, d.shape)
else: # Use the input data
# Are there any groups?
G = 1
bottom_names = net.bottom_names[net._layer_names[layer_id]]
if len(bottom_names) == 1:
N1 = net.blobs[bottom_names[0]].shape[1]
N2 = l.blobs[0].shape[1]
G = N1 // N2
# Gather the input data
print( " Gathering input data")
T, D = gatherInputData(net, layer_id, bottom_data, top_name, fast, max_data=max_data)
# Figure out the output dimensionality of d
d = l.blobs[0].data
print( " Initializing weights" )
# Loop over groups
for g in range(G):
dg, Dg = d[g*(d.shape[0]//G):(g+1)*(d.shape[0]//G)], D[:,g*(D.shape[1]//G):(g+1)*(D.shape[1]//G):]
Tg = T[g*(T.shape[0]//G):(g+1)*(T.shape[0]//G)]
# Compute the weights
W = initializeWeight(Dg, type, N_OUT=dg.shape[0])
# Multiply the weights by the random basis
# NOTE: This matrix multiplication is a bit large, if it's too slow,
# reduce the oversampling in gatherInputData
dg[...] = np.dot(W, Tg.reshape((Tg.shape[0],-1))).reshape(dg.shape)
# Scale the mean and initialize the bias
print( " Scale the mean and initialize the bias" )
top_data = forward(net, layer_id, NIT, bottom_data, [top_name])[top_name]
flat_data = flattenData(top_data)
mu = flat_data.mean(axis=0)
std = flat_data.std(axis=0)
for ii in range(np.minimum(mu.size,5)):
print(" mu+/-std: (%.3f,%.3f)"%(mu[ii],std[ii]))
if l.type == 'Deconvolution':
l.blobs[0].data[...] /= std.reshape((1,-1,)+(1,)*(len(l.blobs[0].data.shape)-2))
else:
l.blobs[0].data[...] /= std.reshape((-1,)+(1,)*(len(l.blobs[0].data.shape)-1))
for b in l.blobs[1:]:
b.data[...] = -mu / std + bias
def magicInitialize(net, bias=0, NIT=10, type='elwise', max_data=None):
import numpy as np
# When was a blob last used
last_used = {}
# Make sure all layers are supported, and compute the last time each blob is used
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if l.type in UNSUPPORTED_LAYERS:
print( "WARNING: Layer type '%s' not supported! Things might go very wrong..."%l.type )
elif l.type not in SUPPORTED_LAYERS+STRIP_LAYER:
print( "Unknown layer type '%s'. double check if it is supported"%l.type )
for b in net.bottom_names[n]:
last_used[b] = i
active_data = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
# Initialize the layer
if (len(l.blobs) > 0) and (l.type not in UNSUPPORTED_LAYERS):
# if len(l.blobs) > 0:
if np.sum(np.abs(l.blobs[0].data)) <= 1e-10:
print( "Initializing layer '%s'"%n )
assert l.type in PARAMETER_LAYERS, "Unsupported parameter layer"
assert len(net.top_names[n]) == 1, "Exactly one output supported"
# Fill the parameters
initializeLayer(net, i, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n][0], bias, type, max_data=max_data)
else:
print( "Skipping layer '%s'"%n )
else:
print( "Skipping layer '%s'"%n )
# TODO: Estimate and rescale the values [TODO: Record and undo this scaling above]
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
active_data.update(new_data)
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
def load(net, blobs):
for l,n in zip(net.layers, net._layer_names):
if n in blobs:
for b, sb in zip(l.blobs, blobs[n]):
b.data[...] = sb
def save(net):
import numpy as np
r = {}
for l,n in zip(net.layers, net._layer_names):
if len(l.blobs) > 0:
r[n] = [np.copy(b.data) for b in l.blobs]
return r
def estimateHomogenety(net):
# Estimate if a certain layer is homogeneous and if yes return the degree k
# by which the output is scaled (if input is scaled by alpha then the output
# is scaled by alpha^k). Return None if the layer is not homogeneous.
import numpy as np
print("Estimating homogenety")
# When was a blob last used
last_used = {}
# Make sure all layers are supported, and compute the range each blob is used in
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
for b in net.bottom_names[n]:
last_used[b] = i
active_data = {}
homogenety = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
# Run the network forward
new_data1 = forward(net, i, 1, {b: [1*d for d in active_data[b]] for b in net.bottom_names[n]}, net.top_names[n])
new_data2 = forward(net, i, 1, {b: [2*d for d in active_data[b]] for b in net.bottom_names[n]}, net.top_names[n])
active_data.update(new_data1)
if len(new_data1) == 1:
m = list(new_data1.keys())[0]
d1, d2 = flattenData(new_data1[m]), flattenData(new_data2[m])
f = np.mean(np.abs(d1), axis=0) / np.mean(np.abs(d2), axis=0)
if 1e-3*np.mean(f) < np.std(f):
# Not homogeneous
homogenety[n] = None
else:
# Compute the degree of the homogeneous transformation
homogenety[n] = (np.log(np.mean(np.abs(d2))) - np.log(np.mean(np.abs(d1)))) / np.log(2)
else:
homogenety[n] = None
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
return homogenety
def calibrateGradientRatio(net, NIT=1):
print('Calibrate gradient ratio')
import numpy as np
# When was a blob last used
last_used = {}
# Find the last layer to use
last_layer = 0
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
print('\tLayer %s'%n)
if l.type not in STRIP_LAYER:
last_layer = i
for b in net.bottom_names[n]:
last_used[b] = i
# Figure out which tops are involved
last_tops = net.top_names[net._layer_names[last_layer]]
for t in last_tops:
last_used[t] = len(net.layers)
# Call forward and store the data of all data layers
print('Call forward and store the data of all data layers')
active_data, input_data, bottom_scale = {}, {}, {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
print('\tLayer %s'%n)
if i > last_layer: break
# Compute the input scale for parameter layers
if len(l.blobs) > 0:
bottom_scale[n] = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
if l.type in INPUT_LAYERS:
input_data.update(new_data)
active_data.update(new_data)
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
for it in range(10):
# for it in range(1):
print('Iteration %i'%it)
# Reset the diffs
for l in net.layers:
for b in l.blobs:
b.diff[...] = 0
# Set the top diffs
print('Last layer')
print(last_tops)
print(last_layer)
for t in last_tops:
print(t)
net.blobs[t].diff[...] = np.random.normal(0, 1, net.blobs[t].shape)
# Compute all gradients
# print(np.mean(net.blobs[t].diff[...]**2))
# print(np.mean(net.blobs[t].data[...]**2))
net._backward(last_layer, 0)
# # net.backward()
# print(np.mean(net.blobs[t].diff[...]**2))
# print(np.mean(net.blobs[t].data[...]**2))
# print(np.mean(net.blobs['da_conv1'].data[...]**2))
# Compute the gradient ratio
ratio={}
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
print('layer index %i, layer name %s'%(i,n))
if len(l.blobs) > 0:
# if (len(l.blobs) > 0) and (l.type in PARAMETER_LAYERS):
assert l.type in PARAMETER_LAYERS, "Parameter layer '%s' currently not supported"%l.type
b = l.blobs[0]
ratio[n] = np.sqrt(np.mean(b.diff**2) / np.mean(b.data**2))
print('Ratio = sqrt(diff/data), %.0f=sqrt(%.3e/%.3e)'%(ratio[n],np.mean(b.diff**2),np.mean(b.data**2)))
# print(ratio)
# If all layers are homogeneous, then the target ratio is the geometric mean of all ratios
# (assuming we want the same output)
# To deal with non-homogeneous layers we scale by output_std in the hope of correcting the
# estimation over time.
# NOTE: for non feed-forward networks the geometric mean might not be the right scaling factor
target_ratio = np.exp(np.mean(np.log(np.array(list(ratio.values()))))) * (output_std)**(1. / len(ratio))
for val in np.array(list(ratio.values())):
print(val)
# np.exp(np.mean(np.log(np.array(list(ratio.values())))))
# (output_std)**(1. / len(ratio))
# print(len(ratio))
print('Num ratios: %i'%len(ratio))
print('Target ratio: %.0f'%target_ratio)
print('Current ratios (mean/std): %.0f+/-%.0f'%(np.mean(np.array(list(ratio.values()))),np.std(np.array(list(ratio.values())))))
# Terminate if the relative change is less than 1% for all values
log_ratio = np.log( np.array(list(ratio.values())) )
print('Max relative change: %.3f'%np.max(np.abs(log_ratio/np.log(target_ratio)-1)))
if np.all( np.abs(log_ratio/np.log(target_ratio) - 1) < 0.01 ):
break
# Update all the weights and biases
active_data = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if i > last_layer: break
# Use the stored input
if l.type in INPUT_LAYERS:
active_data.update({b: input_data[b] for b in net.top_names[n]})
else:
if len(l.blobs) > 0:
# if (len(l.blobs) > 0) and (l.type in PARAMETER_LAYERS):
# Add the scaling from the bottom to the biases
current_scale = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
adj = current_scale / bottom_scale[n]
for b in list(l.blobs)[1:]:
b.data[...] *= adj
bottom_scale[n] = current_scale
# Scale to obtain the target ratio
scale = np.sqrt(ratio[n] / target_ratio)
for b in l.blobs:
b.data[...] *= scale
active_data.update(forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n]))
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
new_output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
print(np.linalg.norm(active_data[last_tops[0]]))
print(last_tops[0])
print(new_output_std)
if np.abs(np.log(output_std) - np.log(new_output_std)) > 0.25:
# If we diverge by a factor of exp(0.25) = ~1.3, then we should check if the network is really
# homogeneous
print( "WARNING: It looks like one or more layers are not homogeneous! Trying to correct for this..." )
print( " Output std = %f" % new_output_std )
output_std = new_output_std
print('')
def netFromString(s, t=None):
import caffe
from tempfile import NamedTemporaryFile
if t is None: t = caffe.TEST
f = NamedTemporaryFile('w')
f.write(s)
f.flush()
r = caffe.Net(f.name, t)
f.close()
return r
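# Usage sketch: build a throw-away network from an in-memory prototxt string,
# exactly how main() below instantiates the NetSpec with force_backward on:
#   n = netFromString('force_backward:true\n' + str(net.to_proto()), caffe.TRAIN)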
def getFileList(f):
from glob import glob
from os import path
return [f for f in glob(f) if path.isfile(f)]
def main():
from argparse import ArgumentParser
from os import path
import numpy as np
parser = ArgumentParser()
parser.add_argument('prototxt')
parser.add_argument('output_caffemodel')
parser.add_argument('-l', '--load', help='Load a pretrained model and rescale it [bias and type are not supported]')
parser.add_argument('-d', '--data', default=None, help='Image list to use [default prototxt data]')
parser.add_argument('-b', '--bias', type=float, default=0.1, help='Bias')
parser.add_argument('-t', '--type', default='elwise', help='Type: elwise, pca, zca, kmeans, rand (random input patches). Add fast_ to speed up the initialization, but you might lose in precision.')
parser.add_argument('-z', action='store_true', help='Zero all weights and reinitialize')
parser.add_argument('-cs', action='store_true', help='Correct for scaling')
parser.add_argument('-q', action='store_true', help='Quiet execution')
parser.add_argument('-s', type=float, default=1.0, help='Scale the input [only custom data "-d"]')
parser.add_argument('-bs', type=int, default=16, help='Batch size [only custom data "-d"]')
parser.add_argument('-nit', type=int, default=10, help='Number of iterations')
parser.add_argument('--mem-limit', type=int, default=500, help='How much memory should we use for the data buffer (MB)?')
parser.add_argument('--gpu', type=int, default=0, help='What gpu to run it on?')
args = parser.parse_args()
if args.q:
from os import environ
environ['GLOG_minloglevel'] = '2'
import caffe, load
from caffe import NetSpec, layers as L
caffe.set_mode_gpu()
if args.gpu is not None:
caffe.set_device(args.gpu)
if args.data is not None:
model = load.ProtoDesc(args.prototxt)
net = NetSpec()
fl = getFileList(args.data)
if len(fl) == 0:
print("Unknown data type for '%s'"%args.data)
exit(1)
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile('w')
f.write('\n'.join([path.abspath(i)+' 0' for i in fl]))
f.flush()
net.data, net.label = L.ImageData(source=f.name, batch_size=args.bs, new_width=model.input_dim[-1], new_height=model.input_dim[-1], transform_param=dict(mean_value=[104,117,123], scale=args.s),ntop=2)
net.out = model(data=net.data, label=net.label)
n = netFromString('force_backward:true\n'+str(net.to_proto()), caffe.TRAIN )
else:
n = caffe.Net(args.prototxt, caffe.TRAIN)
# forward call on network
n.forward()
if args.load is not None:
n.copy_from(args.load)
# Rescale existing layers?
#if args.fix:
#magicFix(n, args.nit)
if args.z:
# Zero out all layers
for l in n.layers:
for b in l.blobs:
b.data[...] = 0
if any([np.abs(l.blobs[0].data).sum() < 1e-10 for l in n.layers if len(l.blobs) > 0]):
print( [m for l,m in zip(n.layers, n._layer_names) if len(l.blobs) > 0 and np.abs(l.blobs[0].data).sum() < 1e-10] )
magicInitialize(n, args.bias, NIT=args.nit, type=args.type, max_data=args.mem_limit*1024*1024/4)
else:
print( "Network already initialized, skipping magic init" )
if args.cs:
# A simple helper function that lets you figure out which layers are not
# homogeneous
# print( estimateHomogenety(n) )
calibrateGradientRatio(n)
n.save(args.output_caffemodel)
if __name__ == "__main__":
main()
|
colorization-master
|
colorization/resources/magic_init/magic_init_mod.py
|
from __future__ import print_function
from magic_init import *
class BCOLORS:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class NOCOLORS:
HEADER = ''
OKBLUE = ''
OKGREEN = ''
WARNING = ''
FAIL = ''
ENDC = ''
BOLD = ''
UNDERLINE = ''
def coloredNumbers(v, color=None, fmt='%6.2f', max_display=300, bcolors=BCOLORS):
import numpy as np
    # Display a numpy array and highlight the min and max values [requires a
    # terminal with ANSI color support]
r = ""
mn, mx = np.min(v), np.max(v)
for k,i in enumerate(v):
if len(v) > max_display and k > max_display/2 and k < len(v) - max_display/2:
if r[-1] != '.':
r += '...'
continue
if i <= mn + 1e-3:
r += bcolors.BOLD+bcolors.FAIL
elif i + 1e-3 >= mx:
r += bcolors.BOLD+bcolors.FAIL
elif color is not None:
r += color
r += (fmt+' ')%i
r += bcolors.ENDC
r += bcolors.ENDC
return r
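# Usage sketch (assumes an ANSI-color terminal):
#   >>> coloredNumbers(np.array([0.1, 2.0, 0.5]))
# returns the formatted string with the minimum (0.1) and maximum (2.0)
# rendered in bold red escape codes.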
def computeGradientRatio(net, NIT=1):
import numpy as np
last_layer = 0
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if l.type not in STRIP_LAYER:
last_layer = i
last_tops = net.top_names[net._layer_names[last_layer]]
var = {}
for it in range(NIT):
net._forward(0, last_layer)
# Reset the diffs
for l in net.layers:
for b in l.blobs:
b.diff[...] = 0
# Set the top diffs
for t in last_tops:
net.blobs[t].diff[...] = np.random.normal(0, 1, net.blobs[t].shape)
net._backward(last_layer, 0)
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if len(l.blobs) > 0:
assert l.type in PARAMETER_LAYERS, "Parameter layer '%s' currently not supported"%l.type
b = l.blobs[0]
r = np.mean(b.diff.swapaxes(0,1).reshape((b.diff.shape[1],-1))**2, axis=1) / np.mean(b.data**2)
if n in var: var[n] += r / NIT
else: var[n] = r / NIT
std = {n: np.sqrt(var[n]) for n in var}
return {n: np.std(s) / np.mean(s) for n,s in std.items()}, {n: np.mean(s) for n,s in std.items()}
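# What computeGradientRatio measures, per parameter blob: for each channel
# along axis 1 of the weight blob it estimates
#   s = sqrt( E[dW^2] / E[W^2] )
# (gradient magnitude relative to weight magnitude, under random top
# gradients). The first returned dict is std(s)/mean(s) across channels, the
# second is mean(s); these feed the "rate cvar" and "rate mean" summary columns.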
def printMeanStddev(net, NIT=10, show_all=False, show_color=True, quiet=False):
import numpy as np
bcolors = NOCOLORS
if show_color: bcolors = BCOLORS
layer_names = list(net._layer_names)
if not show_all:
layer_names = [n for n, l in zip(net._layer_names, net.layers) if len(l.blobs)>0]
if 'data' in net._layer_names:
layer_names.append('data')
# When was a blob last used
last_used = {}
# Make sure all layers are supported, and compute the range each blob is used in
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
for b in net.bottom_names[n]:
last_used[b] = i
active_data, cvar = {}, {}
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
active_data.update(new_data)
if len(net.top_names[n]) > 0 and n in layer_names:
m = net.top_names[n][0]
D = flattenData(new_data[m])
mean = np.mean(D, axis=0)
stddev = np.std(D, axis=0)
if not quiet:
print( bcolors.BOLD, ' '*5, n, ':', m, ' '*5, bcolors.ENDC )
print( 'mean ', coloredNumbers(mean, bcolors.OKGREEN, bcolors=bcolors) )
print( 'stddev', coloredNumbers(stddev, bcolors.OKBLUE, bcolors=bcolors) )
print( 'coef of variation ', bcolors.OKGREEN, stddev.std() / stddev.mean(), bcolors.ENDC )
print()
cvar[n] = stddev.std() / stddev.mean()
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
return cvar
def main():
from argparse import ArgumentParser
from os import path
parser = ArgumentParser()
parser.add_argument('prototxt')
parser.add_argument('-l', '--load', help='Load a caffemodel')
parser.add_argument('-d', '--data', default=None, help='Image list to use [default prototxt data]')
#parser.add_argument('-q', action='store_true', help='Quiet execution')
parser.add_argument('-sm', action='store_true', help='Summary only')
parser.add_argument('-q', action='store_true', help='Quiet execution')
parser.add_argument('-a', '--all', action='store_true', help='Show the statistic for all layers')
parser.add_argument('-nc', action='store_true', help='Do not use color')
parser.add_argument('-s', type=float, default=1.0, help='Scale the input [only custom data "-d"]')
parser.add_argument('-bs', type=int, default=16, help='Batch size [only custom data "-d"]')
parser.add_argument('-nit', type=int, default=10, help='Number of iterations')
parser.add_argument('--gpu', type=int, default=0, help='What gpu to run it on?')
args = parser.parse_args()
if args.q:
from os import environ
environ['GLOG_minloglevel'] = '2'
import caffe, load
from caffe import NetSpec, layers as L
caffe.set_mode_gpu()
if args.gpu is not None:
caffe.set_device(args.gpu)
if args.data is not None:
model = load.ProtoDesc(args.prototxt)
net = NetSpec()
fl = getFileList(args.data)
if len(fl) == 0:
print("Unknown data type for '%s'"%args.data)
exit(1)
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile('w')
f.write('\n'.join([path.abspath(i)+' 0' for i in fl]))
f.flush()
net.data, net.label = L.ImageData(source=f.name, batch_size=args.bs, new_width=model.input_dim[-1], new_height=model.input_dim[-1], transform_param=dict(mean_value=[104,117,123], scale=args.s),ntop=2)
net.out = model(data=net.data, label=net.label)
n = netFromString('force_backward:true\n'+str(net.to_proto()), caffe.TRAIN )
else:
n = caffe.Net(args.prototxt, caffe.TRAIN)
if args.load is not None:
n.copy_from(args.load)
cvar = printMeanStddev(n, NIT=args.nit, show_all=args.all, show_color=not args.nc, quiet=args.sm)
cv, gr = computeGradientRatio(n, NIT=args.nit)
print()
print(' Summary ')
print('-----------')
print()
    print('%-30s %10s %10s %10s' % ('layer name', 'out cvar', 'rate cvar', 'rate mean'))
for l in n._layer_names:
if l in cvar and l in cv and l in gr:
print('%-30s %10.2f %10.2f %10.2f'%(l, cvar[l], cv[l], gr[l]) )
if __name__ == "__main__":
main()
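# Command-line sketch (paths hypothetical):
#   python measure_stat.py deploy.prototxt -l weights.caffemodel -nit 10
# prints per-layer activation mean/stddev followed by the summary table
# produced in main() above.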
|
colorization-master
|
colorization/resources/magic_init/measure_stat.py
|
import caffe
def parseProtoString(s):
from google.protobuf import text_format
from caffe.proto import caffe_pb2 as pb
proto_net = pb.NetParameter()
text_format.Merge(s, proto_net)
return proto_net
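# Usage sketch: parseProtoString(open('deploy.prototxt').read()) returns the
# parsed caffe_pb2.NetParameter message, so e.g. proto.layer[0].type gives
# the type string of the first layer.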
def get_param(l, exclude=set(['top', 'bottom', 'name', 'type'])):
if not hasattr(l,'ListFields'):
if hasattr(l,'__delitem__'):
return list(l)
return l
r = dict()
for f, v in l.ListFields():
if f.name not in exclude:
r[f.name] = get_param(v, [])
return r
class ProtoDesc:
def __init__(self, prototxt):
from os import path
self.prototxt = prototxt
self.parsed_proto = parseProtoString(open(self.prototxt, 'r').read())
# Guess the input dimension
self.input_dim = (3, 227, 227)
net = self.parsed_proto
if len(net.input_dim) > 0:
self.input_dim = net.input_dim[1:]
else:
lrs = net.layer
cs = [l.transform_param.crop_size for l in lrs
if l.HasField('transform_param')]
if len(cs):
self.input_dim = (3, cs[0], cs[0])
def __call__(self, clip=None, **inputs):
from caffe import layers as L
from collections import OrderedDict
net = self.parsed_proto
blobs = OrderedDict(inputs)
for l in net.layer:
if l.name not in inputs:
in_place = l.top == l.bottom
param = get_param(l)
                assert all([b in blobs for b in l.bottom]), "Some bottoms not found: " + ', '.join([b for b in l.bottom if b not in blobs])
tops = getattr(L, l.type)(*[blobs[b] for b in l.bottom],
ntop=len(l.top), in_place=in_place,
name=l.name,
**param)
if len(l.top) <= 1:
tops = [tops]
for i, t in enumerate(l.top):
blobs[t] = tops[i]
if l.name == clip:
break
return list(blobs.values())[-1]
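# Usage sketch (hypothetical blob names): replay a prototxt on new inputs with
#   from caffe import NetSpec, layers as L
#   desc = ProtoDesc('deploy.prototxt')
#   ns = NetSpec()
#   ns.data, ns.label = L.ImageData(source='list.txt', ntop=2)
#   ns.out = desc(data=ns.data, label=ns.label)
# which rebuilds every layer of the prototxt on top of the given blobs and
# returns the last top produced.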
|
colorization-master
|
colorization/resources/magic_init/load.py
|
from __future__ import print_function
import platform
import sys
import argparse
import qdarkstyle
from PyQt4.QtGui import QApplication, QIcon
from PyQt4.QtCore import Qt
from ui import gui_design
from data import colorize_image as CI
sys.path.append('./caffe_files')
def parse_args():
parser = argparse.ArgumentParser(description='iDeepColor: deep interactive colorization')
# basic parameters
parser.add_argument('--win_size', dest='win_size', help='the size of the main window', type=int, default=512)
parser.add_argument('--image_file', dest='image_file', help='input image', type=str, default='test_imgs/mortar_pestle.jpg')
parser.add_argument('--gpu', dest='gpu', help='gpu id', type=int, default=0)
parser.add_argument('--cpu_mode', dest='cpu_mode', help='do not use gpu', action='store_true')
# Main colorization model
parser.add_argument('--color_prototxt', dest='color_prototxt', help='colorization caffe prototxt', type=str,
default='./models/reference_model/deploy_nodist.prototxt')
    parser.add_argument('--color_caffemodel', dest='color_caffemodel', help='colorization caffe model', type=str,
default='./models/reference_model/model.caffemodel')
# Distribution prediction model
parser.add_argument('--dist_prototxt', dest='dist_prototxt', type=str, help='distribution net prototxt',
default='./models/reference_model/deploy_nopred.prototxt')
parser.add_argument('--dist_caffemodel', dest='dist_caffemodel', type=str, help='distribution net caffemodel',
default='./models/reference_model/model.caffemodel')
# ***** DEPRECATED *****
parser.add_argument('--load_size', dest='load_size', help='image size', type=int, default=256)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
if(args.cpu_mode):
args.gpu = -1
args.win_size = int(args.win_size / 4.0) * 4 # make sure the width of the image can be divided by 4
# initialize the colorization model
colorModel = CI.ColorizeImageCaffe(Xd=args.load_size)
colorModel.prep_net(args.gpu, args.color_prototxt, args.color_caffemodel)
distModel = CI.ColorizeImageCaffeDist(Xd=args.load_size)
distModel.prep_net(args.gpu, args.dist_prototxt, args.dist_caffemodel)
# initialize application
app = QApplication(sys.argv)
window = gui_design.GUIDesign(color_model=colorModel, dist_model=distModel,
img_file=args.image_file, load_size=args.load_size, win_size=args.win_size)
    app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))  # comment this out if you do not like the dark stylesheet
app.setWindowIcon(QIcon('imgs/logo.png')) # load logo
window.setWindowTitle('iColor')
    window.setWindowFlags(window.windowFlags() & ~Qt.WindowMaximizeButtonHint)  # fix the window size
window.show()
app.exec_()
|
colorization-master
|
interactive-deep-colorization/ideepcolor.py
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import cv2
import numpy as np
class GUI_VIS(QWidget):
def __init__(self, win_size=256, scale=2.0):
QWidget.__init__(self)
self.result = None
self.win_width = win_size
self.win_height = win_size
self.scale = scale
self.setFixedSize(self.win_width, self.win_height)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QColor(49, 54, 49))
if self.result is not None:
h, w, c = self.result.shape
qImg = QImage(self.result.tostring(), w, h, QImage.Format_RGB888)
dw = int((self.win_width - w) / 2)
dh = int((self.win_height - h) / 2)
painter.drawImage(dw, dh, qImg)
painter.end()
def update_result(self, result):
self.result = result
self.update()
def sizeHint(self):
return QSize(self.win_width, self.win_height)
    def reset(self):
        self.result = None
        self.update()
def is_valid_point(self, pos):
if pos is None:
return False
else:
x = pos.x()
y = pos.y()
return x >= 0 and y >= 0 and x < self.win_width and y < self.win_height
def scale_point(self, pnt):
x = int(pnt.x() / self.scale)
y = int(pnt.y() / self.scale)
return x, y
def mousePressEvent(self, event):
pos = event.pos()
x, y = self.scale_point(pos)
if event.button() == Qt.LeftButton and self.is_valid_point(pos): # click the point
if self.result is not None:
                color = self.result[y, x, :]
print('color', color)
def mouseMoveEvent(self, event):
pass
def mouseReleaseEvent(self, event):
pass
|
colorization-master
|
interactive-deep-colorization/ui/gui_vis.py
|
colorization-master
|
interactive-deep-colorization/ui/__init__.py
|
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
class GUIPalette(QWidget):
def __init__(self, grid_sz=(6, 3)):
QWidget.__init__(self)
self.color_width = 25
self.border = 6
self.win_width = grid_sz[0] * self.color_width + (grid_sz[0]+1) * self.border
self.win_height = grid_sz[1] * self.color_width + (grid_sz[1]+1) * self.border
self.setFixedSize(self.win_width, self.win_height)
self.num_colors = grid_sz[0] * grid_sz[1]
self.grid_sz = grid_sz
self.colors = None
self.color_id = -1
self.reset()
def set_colors(self, colors):
if colors is not None:
self.colors = (colors[:min(colors.shape[0], self.num_colors), :] * 255).astype(np.uint8)
self.color_id = -1
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), Qt.white)
if self.colors is not None:
for n, c in enumerate(self.colors):
ca = QColor(c[0], c[1], c[2], 255)
painter.setPen(QPen(Qt.black, 1))
painter.setBrush(ca)
grid_x = n % self.grid_sz[0]
                grid_y = (n - grid_x) // self.grid_sz[0]
x = grid_x * (self.color_width + self.border) + self.border
y = grid_y * (self.color_width + self.border) + self.border
if n == self.color_id:
painter.drawEllipse(x, y, self.color_width, self.color_width)
else:
painter.drawRoundedRect(x, y, self.color_width, self.color_width, 2, 2)
painter.end()
def sizeHint(self):
return QSize(self.win_width, self.win_height)
def reset(self):
self.colors = None
self.mouseClicked = False
self.color_id = -1
self.update()
def selected_color(self, pos):
width = self.color_width + self.border
dx = pos.x() % width
dy = pos.y() % width
if dx >= self.border and dy >= self.border:
            x_id = (pos.x() - dx) // width
            y_id = (pos.y() - dy) // width
color_id = x_id + y_id * self.grid_sz[0]
return color_id
else:
return -1
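    # Cell geometry sketch: each palette cell occupies (color_width + border)
    # pixels; dx/dy locate the click inside its cell, and clicks landing on
    # the border strip (dx or dy below self.border) return -1, i.e. no
    # selection.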
def update_ui(self, color_id):
self.color_id = color_id
self.update()
if color_id >= 0:
color = self.colors[color_id]
self.emit(SIGNAL('update_color'), color)
self.update()
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton: # click the point
color_id = self.selected_color(event.pos())
self.update_ui(color_id)
self.mouseClicked = True
def mouseMoveEvent(self, event):
if self.mouseClicked:
color_id = self.selected_color(event.pos())
self.update_ui(color_id)
def mouseReleaseEvent(self, event):
self.mouseClicked = False
|
colorization-master
|
interactive-deep-colorization/ui/gui_palette.py
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from . import gui_draw
from . import gui_vis
from . import gui_gamut
from . import gui_palette
import time
class GUIDesign(QWidget):
def __init__(self, color_model, dist_model=None, img_file=None, load_size=256,
win_size=256, save_all=True):
# draw the layout
QWidget.__init__(self)
# main layout
mainLayout = QHBoxLayout()
self.setLayout(mainLayout)
# gamut layout
self.gamutWidget = gui_gamut.GUIGamut(gamut_size=160)
gamutLayout = self.AddWidget(self.gamutWidget, 'ab Color Gamut')
colorLayout = QVBoxLayout()
colorLayout.addLayout(gamutLayout)
mainLayout.addLayout(colorLayout)
# palette
self.customPalette = gui_palette.GUIPalette(grid_sz=(10, 1))
self.usedPalette = gui_palette.GUIPalette(grid_sz=(10, 1))
cpLayout = self.AddWidget(self.customPalette, 'Suggested colors')
colorLayout.addLayout(cpLayout)
upLayout = self.AddWidget(self.usedPalette, 'Recently used colors')
colorLayout.addLayout(upLayout)
self.colorPush = QPushButton() # to visualize the selected color
self.colorPush.setFixedWidth(self.customPalette.width())
self.colorPush.setFixedHeight(25)
self.colorPush.setStyleSheet("background-color: grey")
colorPushLayout = self.AddWidget(self.colorPush, 'Color')
colorLayout.addLayout(colorPushLayout)
colorLayout.setAlignment(Qt.AlignTop)
# drawPad layout
self.drawWidget = gui_draw.GUIDraw(color_model, dist_model, load_size=load_size, win_size=win_size)
drawPadLayout = self.AddWidget(self.drawWidget, 'Drawing Pad')
mainLayout.addLayout(drawPadLayout)
drawPadMenu = QHBoxLayout()
self.bGray = QCheckBox("&Gray")
self.bGray.setToolTip('show gray-scale image')
self.bLoad = QPushButton('&Load')
self.bLoad.setToolTip('load an input image')
self.bSave = QPushButton("&Save")
self.bSave.setToolTip('Save the current result.')
drawPadMenu.addWidget(self.bGray)
drawPadMenu.addWidget(self.bLoad)
drawPadMenu.addWidget(self.bSave)
drawPadLayout.addLayout(drawPadMenu)
        self.visWidget = gui_vis.GUI_VIS(win_size=win_size, scale=win_size / float(load_size))
visWidgetLayout = self.AddWidget(self.visWidget, 'Result')
mainLayout.addLayout(visWidgetLayout)
self.bRestart = QPushButton("&Restart")
self.bRestart.setToolTip('Restart the system')
self.bQuit = QPushButton("&Quit")
self.bQuit.setToolTip('Quit the system.')
visWidgetMenu = QHBoxLayout()
visWidgetMenu.addWidget(self.bRestart)
visWidgetMenu.addWidget(self.bQuit)
visWidgetLayout.addLayout(visWidgetMenu)
self.drawWidget.update()
self.visWidget.update()
self.colorPush.clicked.connect(self.drawWidget.change_color)
# color indicator
self.connect(self.drawWidget, SIGNAL('update_color'), self.colorPush.setStyleSheet)
# update result
self.connect(self.drawWidget, SIGNAL('update_result'), self.visWidget.update_result)
self.connect(self.visWidget, SIGNAL('update_color'), self.gamutWidget.set_ab)
self.connect(self.visWidget, SIGNAL('update_color'), self.drawWidget.set_color)
# update gamut
self.connect(self.drawWidget, SIGNAL('update_gamut'), self.gamutWidget.set_gamut)
self.connect(self.drawWidget, SIGNAL('update_ab'), self.gamutWidget.set_ab)
self.connect(self.gamutWidget, SIGNAL('update_color'), self.drawWidget.set_color)
# connect palette
self.connect(self.drawWidget, SIGNAL('suggest_colors'), self.customPalette.set_colors)
# self.connect(self.drawWidget, SIGNAL('change_color_id'), self.customPalette.update_color_id)
self.connect(self.customPalette, SIGNAL('update_color'), self.drawWidget.set_color)
self.connect(self.customPalette, SIGNAL('update_color'), self.gamutWidget.set_ab)
self.connect(self.drawWidget, SIGNAL('used_colors'), self.usedPalette.set_colors)
self.connect(self.usedPalette, SIGNAL('update_color'), self.drawWidget.set_color)
self.connect(self.usedPalette, SIGNAL('update_color'), self.gamutWidget.set_ab)
# menu events
self.bGray.setChecked(True)
self.bRestart.clicked.connect(self.reset)
self.bQuit.clicked.connect(self.quit)
self.bGray.toggled.connect(self.enable_gray)
self.bSave.clicked.connect(self.save)
self.bLoad.clicked.connect(self.load)
self.start_t = time.time()
if img_file is not None:
self.drawWidget.init_result(img_file)
def AddWidget(self, widget, title):
widgetLayout = QVBoxLayout()
widgetBox = QGroupBox()
widgetBox.setTitle(title)
vbox_t = QVBoxLayout()
vbox_t.addWidget(widget)
widgetBox.setLayout(vbox_t)
widgetLayout.addWidget(widgetBox)
return widgetLayout
def nextImage(self):
self.drawWidget.nextImage()
def reset(self):
# self.start_t = time.time()
print('============================reset all=========================================')
self.visWidget.reset()
self.gamutWidget.reset()
self.customPalette.reset()
self.usedPalette.reset()
self.drawWidget.reset()
self.update()
self.colorPush.setStyleSheet("background-color: grey")
def enable_gray(self):
self.drawWidget.enable_gray()
def quit(self):
print('time spent = %3.3f' % (time.time() - self.start_t))
self.close()
def save(self):
print('time spent = %3.3f' % (time.time()-self.start_t))
self.drawWidget.save_result()
def load(self):
self.drawWidget.load_image()
def change_color(self):
print('change color')
self.drawWidget.change_color(use_suggest=True)
def keyPressEvent(self, event):
if event.key() == Qt.Key_R:
self.reset()
if event.key() == Qt.Key_Q:
self.save()
self.quit()
if event.key() == Qt.Key_S:
self.save()
if event.key() == Qt.Key_G:
self.bGray.toggle()
if event.key() == Qt.Key_L:
self.load()
|
colorization-master
|
interactive-deep-colorization/ui/gui_design.py
|
import numpy as np
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import cv2
class UserEdit(object):
def __init__(self, mode, win_size, load_size, img_size):
self.mode = mode
self.win_size = win_size
self.img_size = img_size
self.load_size = load_size
print('image_size', self.img_size)
max_width = np.max(self.img_size)
self.scale = float(max_width) / self.load_size
self.dw = int((self.win_size - img_size[0]) / 2)
self.dh = int((self.win_size - img_size[1]) / 2)
self.img_w = img_size[0]
self.img_h = img_size[1]
self.ui_count = 0
print(self)
def scale_point(self, in_x, in_y, w):
x = int((in_x - self.dw) / float(self.img_w) * self.load_size) + w
y = int((in_y - self.dh) / float(self.img_h) * self.load_size) + w
return x, y
def __str__(self):
return "add (%s) with win_size %3.3f, load_size %3.3f" % (self.mode, self.win_size, self.load_size)
class PointEdit(UserEdit):
def __init__(self, win_size, load_size, img_size):
UserEdit.__init__(self, 'point', win_size, load_size, img_size)
def add(self, pnt, color, userColor, width, ui_count):
self.pnt = pnt
self.color = color
self.userColor = userColor
self.width = width
self.ui_count = ui_count
def select_old(self, pnt, ui_count):
self.pnt = pnt
self.ui_count = ui_count
return self.userColor, self.width
def update_color(self, color, userColor):
self.color = color
self.userColor = userColor
def updateInput(self, im, mask, vis_im):
w = int(self.width / self.scale)
pnt = self.pnt
x1, y1 = self.scale_point(pnt.x(), pnt.y(), -w)
tl = (x1, y1)
x2, y2 = self.scale_point(pnt.x(), pnt.y(), w)
br = (x2, y2)
c = (self.color.red(), self.color.green(), self.color.blue())
uc = (self.userColor.red(), self.userColor.green(), self.userColor.blue())
cv2.rectangle(mask, tl, br, 255, -1)
cv2.rectangle(im, tl, br, c, -1)
cv2.rectangle(vis_im, tl, br, uc, -1)
def is_same(self, pnt):
dx = abs(self.pnt.x() - pnt.x())
dy = abs(self.pnt.y() - pnt.y())
return dx <= self.width+1 and dy <= self.width+1
def update_painter(self, painter):
w = max(3, self.width)
c = self.color
r = c.red()
g = c.green()
b = c.blue()
ca = QColor(c.red(), c.green(), c.blue(), 255)
d_to_black = r * r + g * g + b * b
        d_to_white = (255-r) * (255-r) + (255-g) * (255-g) + (255-b) * (255-b)
if d_to_black > d_to_white:
painter.setPen(QPen(Qt.black, 1))
else:
painter.setPen(QPen(Qt.white, 1))
painter.setBrush(ca)
painter.drawRoundedRect(self.pnt.x()-w, self.pnt.y()-w, 1+2*w, 1+2*w, 2, 2)
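# Outline contrast sketch: the pen color above is picked by comparing the
# squared RGB distance of the fill to black vs. white, so bright fills get a
# black outline and dark fills a white one.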
class UIControl:
def __init__(self, win_size=256, load_size=512):
self.win_size = win_size
self.load_size = load_size
self.reset()
self.userEdit = None
self.userEdits = []
self.ui_count = 0
def setImageSize(self, img_size):
self.img_size = img_size
def addStroke(self, prevPnt, nextPnt, color, userColor, width):
pass
def erasePoint(self, pnt):
isErase = False
for id, ue in enumerate(self.userEdits):
if ue.is_same(pnt):
self.userEdits.remove(ue)
print('remove user edit %d\n' % id)
isErase = True
break
return isErase
def addPoint(self, pnt, color, userColor, width):
self.ui_count += 1
print('process add Point')
self.userEdit = None
isNew = True
for id, ue in enumerate(self.userEdits):
if ue.is_same(pnt):
self.userEdit = ue
isNew = False
print('select user edit %d\n' % id)
break
if self.userEdit is None:
self.userEdit = PointEdit(self.win_size, self.load_size, self.img_size)
self.userEdits.append(self.userEdit)
print('add user edit %d\n' % len(self.userEdits))
self.userEdit.add(pnt, color, userColor, width, self.ui_count)
return userColor, width, isNew
else:
userColor, width = self.userEdit.select_old(pnt, self.ui_count)
return userColor, width, isNew
def movePoint(self, pnt, color, userColor, width):
self.userEdit.add(pnt, color, userColor, width, self.ui_count)
def update_color(self, color, userColor):
self.userEdit.update_color(color, userColor)
def update_painter(self, painter):
for ue in self.userEdits:
if ue is not None:
ue.update_painter(painter)
def get_stroke_image(self, im):
return im
def used_colors(self): # get recently used colors
if len(self.userEdits) == 0:
return None
nEdits = len(self.userEdits)
ui_counts = np.zeros(nEdits)
ui_colors = np.zeros((nEdits, 3))
for n, ue in enumerate(self.userEdits):
ui_counts[n] = ue.ui_count
c = ue.userColor
ui_colors[n, :] = [c.red(), c.green(), c.blue()]
ui_counts = np.array(ui_counts)
ids = np.argsort(-ui_counts)
ui_colors = ui_colors[ids, :]
unique_colors = []
for ui_color in ui_colors:
            is_duplicate = False
            for u_color in unique_colors:
                d = np.sum(np.abs(u_color - ui_color))
                if d < 0.1:
                    is_duplicate = True
                    break
            if not is_duplicate:
unique_colors.append(ui_color)
unique_colors = np.vstack(unique_colors)
return unique_colors / 255.0
def get_input(self):
h = self.load_size
w = self.load_size
im = np.zeros((h, w, 3), np.uint8)
mask = np.zeros((h, w, 1), np.uint8)
vis_im = np.zeros((h, w, 3), np.uint8)
for ue in self.userEdits:
ue.updateInput(im, mask, vis_im)
        return im, mask
def reset(self):
self.userEdits = []
self.userEdit = None
self.ui_count = 0
|
colorization-master
|
interactive-deep-colorization/ui/ui_control.py
|
from __future__ import print_function
import inspect, re
import numpy as np
import cv2
import os
import collections
try:
    import cPickle as pickle
except ImportError:
    import pickle
def debug_trace():
from PyQt4.QtCore import pyqtRemoveInputHook
from pdb import set_trace
pyqtRemoveInputHook()
set_trace()
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print( "\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]) )
def PickleLoad(file_name):
    try:
        with open(file_name, 'rb') as f:
            data = pickle.load(f)
    except UnicodeDecodeError:
        with open(file_name, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
    return data
def PickleSave(file_name, data):
with open(file_name, "wb") as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def interp_z(z0, z1, ratio, interp='linear'):
    if interp == 'linear':
        z_t = (1 - ratio) * z0 + ratio * z1
    elif interp == 'slerp':
N = len(z0)
z_t = []
for i in range(N):
z0_i = z0[i]
z1_i = z1[i]
z0_n = z0_i / np.linalg.norm(z0_i)
z1_n = z1_i / np.linalg.norm(z1_i)
omega = np.arccos(np.dot(z0_n, z1_n))
sin_omega = np.sin(omega)
if sin_omega == 0:
z_i = interp_z(z0_i, z1_i, ratio, 'linear')
else:
z_i = np.sin((1 - ratio) * omega) / sin_omega * z0_i + np.sin(ratio * omega) / sin_omega * z1_i
z_t.append(z_i[np.newaxis,...])
z_t = np.concatenate(z_t, axis=0)
    else:
        raise ValueError("interp must be 'linear' or 'slerp', got %s" % interp)
    return z_t
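# Slerp sketch: for vectors z0, z1 with angle omega between their directions,
#   z_t = sin((1-t)*omega)/sin(omega) * z0 + sin(t*omega)/sin(omega) * z1
# which falls back to the linear branch above when sin(omega) == 0.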
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def CVShow(im, im_name='', wait=1):
if len(im.shape) >= 3 and im.shape[2] == 3:
im_show = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im_show = im
cv2.imshow(im_name, im_show)
cv2.waitKey(wait)
return im_show
def average_image(imgs, weights):
im_weights = np.tile(weights[:,np.newaxis, np.newaxis, np.newaxis], (1, imgs.shape[1], imgs.shape[2], imgs.shape[3]))
imgs_f = imgs.astype(np.float32)
weights_norm = np.mean(im_weights)
average_f = np.mean(imgs_f * im_weights, axis=0) /weights_norm
average = average_f.astype(np.uint8)
return average
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def grid_vis(X, nh, nw): #[buggy]
if X.shape[0] == 1:
return X[0]
# nc = 3
if X.ndim == 3:
X = X[..., np.newaxis]
if X.shape[-1] == 1:
X = np.tile(X, [1,1,1,3])
h, w = X[0].shape[:2]
if X.dtype == np.uint8:
img = np.ones((h * nh, w * nw, 3), np.uint8) * 255
else:
img = np.ones((h * nh, w * nw, 3), X.dtype)
for n, x in enumerate(X):
j = n // nw
i = n % nw
img[j * h:j * h + h, i * w:i * w + w, :] = x
img = np.squeeze(img)
return img
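# Layout sketch: grid_vis(X, nh=2, nw=3) tiles six images of shape (h, w, 3)
# row by row into a single (2h, 3w, 3) canvas (white background for uint8).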
|
colorization-master
|
interactive-deep-colorization/ui/utils.py
|
import numpy as np
import time
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
try:
from PyQt4.QtCore import QString
except ImportError:
QString = str
from .ui_control import UIControl
from data import lab_gamut
from skimage import color
import os
import datetime
import glob
import sys
import matplotlib as mpl
mpl.use('Qt4Agg')  # backend must be selected before importing pyplot
import matplotlib.pyplot as plt
import skimage
class GUIDraw(QWidget):
def __init__(self, model, dist_model=None, load_size=256, win_size=512):
QWidget.__init__(self)
self.model = None
self.image_file = None
self.pos = None
self.model = model
self.dist_model = dist_model # distribution predictor, could be empty
self.win_size = win_size
self.load_size = load_size
self.setFixedSize(win_size, win_size)
self.uiControl = UIControl(win_size=win_size, load_size=load_size)
self.move(win_size, win_size)
self.movie = True
self.init_color() # initialize color
self.im_gray3 = None
self.eraseMode = False
self.ui_mode = 'none' # stroke or point
self.image_loaded = False
self.use_gray = True
self.total_images = 0
self.image_id = 0
self.method = 'with_dist'
def clock_count(self):
self.count_secs -= 1
self.update()
def init_result(self, image_file):
self.read_image(image_file.encode('utf-8')) # read an image
self.reset()
def get_batches(self, img_dir):
self.img_list = glob.glob(os.path.join(img_dir, '*.JPEG'))
self.total_images = len(self.img_list)
img_first = self.img_list[0]
self.init_result(img_first)
def nextImage(self):
self.save_result()
self.image_id += 1
if self.image_id == self.total_images:
print('you have finished all the results')
sys.exit()
img_current = self.img_list[self.image_id]
# self.reset()
self.init_result(img_current)
self.reset_timer()
def read_image(self, image_file):
# self.result = None
self.image_loaded = True
self.image_file = image_file
print(image_file)
im_bgr = cv2.imread(image_file)
self.im_full = im_bgr.copy()
# get image for display
h, w, c = self.im_full.shape
max_width = max(h, w)
r = self.win_size / float(max_width)
self.scale = float(self.win_size) / self.load_size
print('scale = %f' % self.scale)
rw = int(round(r * w / 4.0) * 4)
rh = int(round(r * h / 4.0) * 4)
self.im_win = cv2.resize(self.im_full, (rw, rh), interpolation=cv2.INTER_CUBIC)
self.dw = int((self.win_size - rw) / 2)
self.dh = int((self.win_size - rh) / 2)
self.win_w = rw
self.win_h = rh
self.uiControl.setImageSize((rw, rh))
im_gray = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2GRAY)
self.im_gray3 = cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR)
self.gray_win = cv2.resize(self.im_gray3, (rw, rh), interpolation=cv2.INTER_CUBIC)
im_bgr = cv2.resize(im_bgr, (self.load_size, self.load_size), interpolation=cv2.INTER_CUBIC)
self.im_rgb = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2RGB)
lab_win = color.rgb2lab(self.im_win[:, :, ::-1])
self.im_lab = color.rgb2lab(im_bgr[:, :, ::-1])
self.im_l = self.im_lab[:, :, 0]
self.l_win = lab_win[:, :, 0]
self.im_ab = self.im_lab[:, :, 1:]
self.im_size = self.im_rgb.shape[0:2]
self.im_ab0 = np.zeros((2, self.load_size, self.load_size))
self.im_mask0 = np.zeros((1, self.load_size, self.load_size))
self.brushWidth = 2 * self.scale
self.model.load_image(image_file)
if (self.dist_model is not None):
self.dist_model.set_image(self.im_rgb)
self.predict_color()
def update_im(self):
self.update()
QApplication.processEvents()
def update_ui(self, move_point=True):
if self.ui_mode == 'none':
return False
is_predict = False
snap_qcolor = self.calibrate_color(self.user_color, self.pos)
self.color = snap_qcolor
self.emit(SIGNAL('update_color'), QString('background-color: %s' % self.color.name()))
if self.ui_mode == 'point':
if move_point:
self.uiControl.movePoint(self.pos, snap_qcolor, self.user_color, self.brushWidth)
else:
self.user_color, self.brushWidth, isNew = self.uiControl.addPoint(self.pos, snap_qcolor, self.user_color, self.brushWidth)
if isNew:
is_predict = True
# self.predict_color()
if self.ui_mode == 'stroke':
self.uiControl.addStroke(self.prev_pos, self.pos, snap_qcolor, self.user_color, self.brushWidth)
if self.ui_mode == 'erase':
isRemoved = self.uiControl.erasePoint(self.pos)
if isRemoved:
is_predict = True
# self.predict_color()
return is_predict
def reset(self):
self.ui_mode = 'none'
self.pos = None
self.result = None
self.user_color = None
self.color = None
self.uiControl.reset()
self.init_color()
self.compute_result()
self.predict_color()
self.update()
def scale_point(self, pnt):
x = int((pnt.x() - self.dw) / float(self.win_w) * self.load_size)
y = int((pnt.y() - self.dh) / float(self.win_h) * self.load_size)
return x, y
def valid_point(self, pnt):
if pnt is None:
print('WARNING: no point\n')
return None
else:
if pnt.x() >= self.dw and pnt.y() >= self.dh and pnt.x() < self.win_size-self.dw and pnt.y() < self.win_size-self.dh:
x = int(np.round(pnt.x()))
y = int(np.round(pnt.y()))
return QPoint(x, y)
else:
print('WARNING: invalid point (%d, %d)\n' % (pnt.x(), pnt.y()))
return None
def init_color(self):
        self.user_color = QColor(128, 128, 128)  # default color: gray
self.color = self.user_color
def change_color(self, pos=None):
if pos is not None:
x, y = self.scale_point(pos)
L = self.im_lab[y, x, 0]
self.emit(SIGNAL('update_gamut'), L)
rgb_colors = self.suggest_color(h=y, w=x, K=9)
rgb_colors[-1, :] = 0.5
self.emit(SIGNAL('suggest_colors'), rgb_colors)
used_colors = self.uiControl.used_colors()
self.emit(SIGNAL('used_colors'), used_colors)
snap_color = self.calibrate_color(self.user_color, pos)
c = np.array((snap_color.red(), snap_color.green(), snap_color.blue()), np.uint8)
self.emit(SIGNAL('update_ab'), c)
def calibrate_color(self, c, pos):
x, y = self.scale_point(pos)
P = int(self.brushWidth / self.scale)
# snap color based on L color
color_array = np.array((c.red(), c.green(), c.blue())).astype(
'uint8')
mean_L = self.im_l[y, x]
snap_color = lab_gamut.snap_ab(mean_L, color_array)
snap_qcolor = QColor(snap_color[0], snap_color[1], snap_color[2])
return snap_qcolor
def set_color(self, c_rgb):
c = QColor(c_rgb[0], c_rgb[1], c_rgb[2])
self.user_color = c
snap_qcolor = self.calibrate_color(c, self.pos)
self.color = snap_qcolor
self.emit(SIGNAL('update_color'), QString('background-color: %s' % self.color.name()))
self.uiControl.update_color(snap_qcolor, self.user_color)
self.compute_result()
def erase(self):
self.eraseMode = not self.eraseMode
def load_image(self):
img_path = unicode(QFileDialog.getOpenFileName(self, 'load an input image'))
self.init_result(img_path)
def save_result(self):
path = os.path.abspath(self.image_file)
path, ext = os.path.splitext(path)
suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
save_path = "_".join([path, self.method, suffix])
print('saving result to <%s>\n' % save_path)
if not os.path.exists(save_path):
os.mkdir(save_path)
np.save(os.path.join(save_path, 'im_l.npy'), self.model.img_l)
np.save(os.path.join(save_path, 'im_ab.npy'), self.im_ab0)
np.save(os.path.join(save_path, 'im_mask.npy'), self.im_mask0)
result_bgr = cv2.cvtColor(self.result, cv2.COLOR_RGB2BGR)
mask = self.im_mask0.transpose((1, 2, 0)).astype(np.uint8)*255
cv2.imwrite(os.path.join(save_path, 'input_mask.png'), mask)
cv2.imwrite(os.path.join(save_path, 'ours.png'), result_bgr)
cv2.imwrite(os.path.join(save_path, 'ours_fullres.png'), self.model.get_img_fullres()[:, :, ::-1])
cv2.imwrite(os.path.join(save_path, 'input_fullres.png'), self.model.get_input_img_fullres()[:, :, ::-1])
cv2.imwrite(os.path.join(save_path, 'input.png'), self.model.get_input_img()[:, :, ::-1])
cv2.imwrite(os.path.join(save_path, 'input_ab.png'), self.model.get_sup_img()[:, :, ::-1])
def enable_gray(self):
self.use_gray = not self.use_gray
self.update()
def predict_color(self):
if self.dist_model is not None and self.image_loaded:
im, mask = self.uiControl.get_input()
im_mask0 = mask > 0.0
self.im_mask0 = im_mask0.transpose((2, 0, 1))
im_lab = color.rgb2lab(im).transpose((2, 0, 1))
self.im_ab0 = im_lab[1:3, :, :]
self.dist_model.net_forward(self.im_ab0, self.im_mask0)
def suggest_color(self, h, w, K=5):
if self.dist_model is not None and self.image_loaded:
ab, conf = self.dist_model.get_ab_reccs(h=h, w=w, K=K, N=25000, return_conf=True)
L = np.tile(self.im_lab[h, w, 0], (K, 1))
colors_lab = np.concatenate((L, ab), axis=1)
colors_lab3 = colors_lab[:, np.newaxis, :]
colors_rgb = np.clip(np.squeeze(color.lab2rgb(colors_lab3)), 0, 1)
colors_rgb_withcurr = np.concatenate((self.model.get_img_forward()[h, w, np.newaxis, :] / 255., colors_rgb), axis=0)
return colors_rgb_withcurr
else:
return None
def compute_result(self):
im, mask = self.uiControl.get_input()
im_mask0 = mask > 0.0
self.im_mask0 = im_mask0.transpose((2, 0, 1))
im_lab = color.rgb2lab(im).transpose((2, 0, 1))
self.im_ab0 = im_lab[1:3, :, :]
self.model.net_forward(self.im_ab0, self.im_mask0)
ab = self.model.output_ab.transpose((1, 2, 0))
ab_win = cv2.resize(ab, (self.win_w, self.win_h), interpolation=cv2.INTER_CUBIC)
pred_lab = np.concatenate((self.l_win[..., np.newaxis], ab_win), axis=2)
pred_rgb = (np.clip(color.lab2rgb(pred_lab), 0, 1) * 255).astype('uint8')
self.result = pred_rgb
self.emit(SIGNAL('update_result'), self.result)
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.fillRect(event.rect(), QColor(49, 54, 49))
painter.setRenderHint(QPainter.Antialiasing)
if self.use_gray or self.result is None:
im = self.gray_win
else:
im = self.result
if im is not None:
qImg = QImage(im.tostring(), im.shape[1], im.shape[0], QImage.Format_RGB888)
painter.drawImage(self.dw, self.dh, qImg)
self.uiControl.update_painter(painter)
painter.end()
def wheelEvent(self, event):
d = event.delta() / 120
self.brushWidth = min(4.05*self.scale, max(0, self.brushWidth + d*self.scale))
print('update brushWidth = %f' % self.brushWidth)
self.update_ui(move_point=True)
self.update()
def is_same_point(self, pos1, pos2):
if pos1 is None or pos2 is None:
return False
dx = pos1.x() - pos2.x()
dy = pos1.y() - pos2.y()
d = dx * dx + dy * dy
# print('distance between points = %f' % d)
return d < 25
def mousePressEvent(self, event):
print('mouse press', event.pos())
pos = self.valid_point(event.pos())
if pos is not None:
if event.button() == Qt.LeftButton:
self.pos = pos
self.ui_mode = 'point'
self.change_color(pos)
self.update_ui(move_point=False)
self.compute_result()
if event.button() == Qt.RightButton:
# draw the stroke
self.pos = pos
self.ui_mode = 'erase'
self.update_ui(move_point=False)
self.compute_result()
def mouseMoveEvent(self, event):
self.pos = self.valid_point(event.pos())
if self.pos is not None:
if self.ui_mode == 'point':
self.update_ui(move_point=True)
self.compute_result()
def mouseReleaseEvent(self, event):
pass
def sizeHint(self):
return QSize(self.win_size, self.win_size) # 28 * 8
|
colorization-master
|
interactive-deep-colorization/ui/gui_draw.py
|
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from data import lab_gamut
import numpy as np
class GUIGamut(QWidget):
def __init__(self, gamut_size=110):
QWidget.__init__(self)
self.gamut_size = gamut_size
        self.win_size = gamut_size * 2  # display window is twice the gamut grid size
self.setFixedSize(self.win_size, self.win_size)
self.ab_grid = lab_gamut.abGrid(gamut_size=gamut_size, D=1)
self.reset()
def set_gamut(self, l_in=50):
self.l_in = l_in
self.ab_map, self.mask = self.ab_grid.update_gamut(l_in=l_in)
self.update()
def set_ab(self, color):
self.color = color
self.lab = lab_gamut.rgb2lab_1d(self.color)
x, y = self.ab_grid.ab2xy(self.lab[1], self.lab[2])
self.pos = QPointF(x, y)
self.update()
def is_valid_point(self, pos):
if pos is None:
return False
else:
x = pos.x()
y = pos.y()
if x >= 0 and y >= 0 and x < self.win_size and y < self.win_size:
return self.mask[y, x]
else:
return False
def update_ui(self, pos):
self.pos = pos
a, b = self.ab_grid.xy2ab(pos.x(), pos.y())
# get color we need L
L = self.l_in
lab = np.array([L, a, b])
color = lab_gamut.lab2rgb_1d(lab, clip=True, dtype='uint8')
self.emit(SIGNAL('update_color'), color)
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), Qt.white)
if self.ab_map is not None:
ab_map = cv2.resize(self.ab_map, (self.win_size, self.win_size))
qImg = QImage(ab_map.tostring(), self.win_size, self.win_size, QImage.Format_RGB888)
painter.drawImage(0, 0, qImg)
painter.setPen(QPen(Qt.gray, 3, Qt.DotLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
            painter.drawLine(self.win_size // 2, 0, self.win_size // 2, self.win_size)
            painter.drawLine(0, self.win_size // 2, self.win_size, self.win_size // 2)
if self.pos is not None:
painter.setPen(QPen(Qt.black, 2, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
w = 5
x = self.pos.x()
y = self.pos.y()
painter.drawLine(x - w, y, x + w, y)
painter.drawLine(x, y - w, x, y + w)
painter.end()
def mousePressEvent(self, event):
pos = event.pos()
if event.button() == Qt.LeftButton and self.is_valid_point(pos): # click the point
self.update_ui(pos)
self.mouseClicked = True
def mouseMoveEvent(self, event):
pos = event.pos()
if self.is_valid_point(pos):
if self.mouseClicked:
self.update_ui(pos)
def mouseReleaseEvent(self, event):
self.mouseClicked = False
def sizeHint(self):
return QSize(self.win_size, self.win_size)
def reset(self):
self.ab_map = None
self.mask = None
self.color = None
self.lab = None
self.pos = None
self.mouseClicked = False
self.update()
|
colorization-master
|
interactive-deep-colorization/ui/gui_gamut.py
|
import numpy as np
import itertools
import time
import datetime
def check_value(inds, val):
# Check to see if an array is a single element equaling a particular value
# Good for pre-processing inputs in a function
if(np.array(inds).size==1):
if(inds==val):
return True
return False
def flatten_nd_array(pts_nd,axis=1):
# Flatten an nd array into a 2d array with a certain axis
# INPUTS
# pts_nd N0xN1x...xNd array
# axis integer
# OUTPUTS
# pts_flt prod(N \ N_axis) x N_axis array
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
pts_flt = pts_nd.transpose((axorder))
pts_flt = pts_flt.reshape(NPTS,SHP[axis])
return pts_flt
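# Shape sketch: for pts_nd of shape (N, 2, H, W) and axis=1,
# flatten_nd_array(pts_nd, axis=1) returns an array of shape (N*H*W, 2).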
def unflatten_2d_array(pts_flt,pts_nd,axis=1,squeeze=False):
# Unflatten a 2d array with a certain axis
# INPUTS
# pts_flt prod(N \ N_axis) x M array
# pts_nd N0xN1x...xNd array
# axis integer
# squeeze bool if true, M=1, squeeze it out
# OUTPUTS
# pts_out N0xN1x...xNd array
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
if(squeeze):
axorder = nax
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
else:
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
NEW_SHP.append(M)
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
return pts_out
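# Round-trip sketch: unflatten_2d_array(flatten_nd_array(x, axis=1), x, axis=1)
# restores the original layout of x, so the two functions are inverses for
# matching shapes.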
def na():
return np.newaxis
class Timer():
def __init__(self):
self.cur_t = time.time()
def tic(self):
self.cur_t = time.time()
def toc(self):
return time.time()-self.cur_t
def tocStr(self, t=-1):
if(t==-1):
return str(datetime.timedelta(seconds=np.round(time.time()-self.cur_t,3)))[:-4]
else:
return str(datetime.timedelta(seconds=np.round(t,3)))[:-4]
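# Usage sketch:
#   t = Timer()
#   ...do some work...
#   print(t.tocStr())  # e.g. '0:00:01.23'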
|
colorization-master
|
interactive-deep-colorization/caffe_files/util.py
|
colorization-master
|
interactive-deep-colorization/caffe_files/__init__.py
|
|
import numpy as np
import warnings
import os
import sklearn.neighbors as nn
import caffe
from skimage import color
import matplotlib.pyplot as plt
import color_quantization as cq
# ***************************************
# ***** LAYERS FOR GLOBAL HISTOGRAM *****
# ***************************************
class SpatialRepLayer(caffe.Layer):
'''
INPUTS
bottom[0].data NxCx1x1
bottom[1].data NxCxXxY
OUTPUTS
top[0].data NxCxXxY repeat 0th input spatially '''
def setup(self,bottom,top):
if(len(bottom)!=2):
raise Exception("Layer needs 2 inputs")
self.param_str_split = self.param_str.split(' ')
# self.keep_ratio = float(self.param_str_split[0]) # frequency keep whole input
self.N = bottom[0].data.shape[0]
self.C = bottom[0].data.shape[1]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
if(self.X!=1 or self.Y!=1):
raise Exception("bottom[0] should have spatial dimensions 1x1")
# self.Nref = bottom[1].data.shape[0]
# self.Cref = bottom[1].data.shape[1]
self.Xref = bottom[1].data.shape[2]
self.Yref = bottom[1].data.shape[3]
def reshape(self,bottom,top):
top[0].reshape(self.N,self.C,self.Xref,self.Yref) # output shape
def forward(self,bottom,top):
top[0].data[...] = bottom[0].data[:,:,:,:] # will do singleton expansion
def backward(self,top,propagate_down,bottom):
bottom[0].diff[:,:,0,0] = np.sum(np.sum(top[0].diff,axis=2),axis=2)
bottom[1].diff[...] = 0
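    # Backward note: since the forward op repeats bottom[0] spatially, its
    # gradient is the spatial sum of the top diffs, which is what the double
    # np.sum above computes.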
class BGR2HSVLayer(caffe.Layer):
''' Layer converts BGR to HSV
INPUTS
bottom[0] Nx3xXxY
OUTPUTS
top[0].data Nx3xXxY
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if(len(bottom)!=1):
raise Exception("Layer should a single input")
if(bottom[0].data.shape[1]!=3):
raise Exception("Input should be 3-channel BGR image")
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,3,self.X,self.Y)
def forward(self, bottom, top):
        for i in range(self.N):  # renamed from 'nn' to avoid shadowing sklearn.neighbors
            top[0].data[i,:,:,:] = color.rgb2hsv(bottom[0].data[i,::-1,:,:].astype('uint8').transpose((1,2,0))).transpose((2,0,1))
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
# bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class BGR2LabLayer(caffe.Layer):
''' Layer converts BGR to Lab
INPUTS
bottom[0] Nx3xXxY
OUTPUTS
top[0].data Nx3xXxY
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if(len(bottom)!=1):
raise Exception("Layer should a single input")
if(bottom[0].data.shape[1]!=3):
raise Exception("Input should be 3-channel BGR image")
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self, bottom, top):
top[0].reshape(self.N,3,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = color.rgb2lab(bottom[0].data[:,::-1,:,:].astype('uint8').transpose((2,3,0,1))).transpose((2,3,0,1))
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
# bottom[i].diff[...] = np.zeros_like(bottom[i].data)
class ColorGlobalDropoutLayer(caffe.Layer):
'''
Inputs
bottom[0].data NxCx1x1
Outputs
top[0].data Nx(C+1)x1x1 last channel is whether or not to keep input
first C channels are copied from bottom (if kept)
'''
def setup(self,bottom,top):
if(len(bottom)==0):
raise Exception("Layer needs inputs")
self.param_str_split = self.param_str.split(' ')
self.keep_ratio = float(self.param_str_split[0]) # frequency keep whole input
self.cnt = 0
self.N = bottom[0].data.shape[0]
self.C = bottom[0].data.shape[1]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
def reshape(self,bottom,top):
top[0].reshape(self.N,self.C+1,self.X,self.Y) # output mask
def forward(self,bottom,top):
top[0].data[...] = 0
# top[0].data[:,:self.C,:,:] = bottom[0].data[...]
# determine which ones are kept
keeps = np.random.binomial(1,self.keep_ratio,size=self.N)
top[0].data[:,-1,:,:] = keeps[:,np.newaxis,np.newaxis]
top[0].data[:,:-1,:,:] = bottom[0].data[...]*keeps[:,np.newaxis,np.newaxis,np.newaxis]
def backward(self,top,propagate_down,bottom):
        pass  # backward not implemented
class NNEncLayer(caffe.Layer):
''' Layer which encodes ab map into Q colors
INPUTS
bottom[0] Nx2xXxY
OUTPUTS
top[0].data NxQ
'''
def setup(self,bottom, top):
warnings.filterwarnings("ignore")
if len(bottom) == 0:
raise Exception("Layer should have inputs")
# self.NN = 10.
self.NN = 1.
self.sigma = 5.
self.ENC_DIR = './data/color_bins'
self.nnenc = cq.NNEncode(self.NN,self.sigma,km_filepath=os.path.join(self.ENC_DIR,'pts_in_hull.npy'))
self.N = bottom[0].data.shape[0]
self.X = bottom[0].data.shape[2]
self.Y = bottom[0].data.shape[3]
self.Q = self.nnenc.K
def reshape(self, bottom, top):
top[0].reshape(self.N,self.Q,self.X,self.Y)
def forward(self, bottom, top):
top[0].data[...] = self.nnenc.encode_points_mtx_nd(bottom[0].data[...],axis=1)
def backward(self, top, propagate_down, bottom):
# no back-prop
for i in range(len(bottom)):
if not propagate_down[i]:
continue
bottom[i].diff[...] = np.zeros_like(bottom[i].data)
|
colorization-master
|
interactive-deep-colorization/caffe_files/caffe_traininglayers.py
|
import numpy as np
from IPython.core.debugger import Pdb as pdb
import sklearn.neighbors as nn
import util
import caffe
class NNEncode():
# Encode points as a linear combination of unordered points
# using NN search and RBF kernel
def __init__(self,NN,sigma,km_filepath='./data/color_bins/pts_in_hull.npy',cc=-1):
if(util.check_value(cc,-1)):
self.cc = np.load(km_filepath)
else:
self.cc = cc
self.K = self.cc.shape[0]
self.NN = int(NN)
self.sigma = sigma
self.nbrs = nn.NearestNeighbors(n_neighbors=self.NN, algorithm='auto').fit(self.cc)
def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False):
t = util.Timer()
pts_flt = util.flatten_nd_array(pts_nd,axis=axis)
P = pts_flt.shape[0]
(dists,inds) = self.nbrs.kneighbors(pts_flt)
pts_enc_flt = np.zeros((P,self.K))
wts = np.exp(-dists**2/(2*self.sigma**2))
wts = wts/np.sum(wts,axis=1)[:,util.na()]
pts_enc_flt[np.arange(0,P,dtype='int')[:,util.na()],inds] = wts
pts_enc_nd = util.unflatten_2d_array(pts_enc_flt,pts_nd,axis=axis)
return pts_enc_nd
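    # Soft-encoding sketch: each ab point is expressed as Gaussian-weighted
    # mass over its NN nearest cluster centers, e.g. (hypothetical shapes)
    #   enc = NNEncode(5, 5.).encode_points_mtx_nd(ab, axis=1)  # (N,2,H,W) -> (N,313,H,W)
    # where each point's K-vector of weights sums to 1.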
def decode_points_mtx_nd(self,pts_enc_nd,axis=1):
pts_enc_flt = util.flatten_nd_array(pts_enc_nd,axis=axis)
pts_dec_flt = np.dot(pts_enc_flt,self.cc)
pts_dec_nd = util.unflatten_2d_array(pts_dec_flt,pts_enc_nd,axis=axis)
return pts_dec_nd
def decode_1hot_mtx_nd(self,pts_enc_nd,axis=1,returnEncode=False):
pts_1hot_nd = nd_argmax_1hot(pts_enc_nd,axis=axis)
pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd,axis=axis)
if(returnEncode):
return (pts_dec_nd,pts_1hot_nd)
else:
return pts_dec_nd
|
colorization-master
|
interactive-deep-colorization/caffe_files/color_quantization.py
|
import numpy as np
import scipy as sp
import cv2
import matplotlib.pyplot as plt
from skimage import color
import caffe
from sklearn.cluster import KMeans
from skimage.io import imread
from skimage.io import imsave
import os
import sys
import ntpath
import datetime
from scipy.ndimage.interpolation import zoom
def create_temp_directory(path_template, N=int(1e8)):
print(path_template)
cur_path = path_template % np.random.randint(0, N)
while(os.path.exists(cur_path)):
cur_path = path_template % np.random.randint(0, N)
print('Creating directory: %s' % cur_path)
os.mkdir(cur_path)
return cur_path
def lab2rgb_transpose(img_l, img_ab):
''' INPUTS
img_l 1xXxX [0,100]
img_ab 2xXxX [-100,100]
OUTPUTS
returned value is XxXx3 '''
pred_lab = np.concatenate((img_l, img_ab), axis=0).transpose((1, 2, 0))
pred_rgb = (np.clip(color.lab2rgb(pred_lab), 0, 1)*255).astype('uint8')
return pred_rgb
def rgb2lab_transpose(img_rgb):
''' INPUTS
img_rgb XxXx3
OUTPUTS
returned value is 3xXxX '''
return color.rgb2lab(img_rgb).transpose((2, 0, 1))
class ColorizeImageBase():
def __init__(self, Xd=256, Xfullres_max=10000):
self.Xd = Xd
self.img_l_set = False
self.net_set = False
self.Xfullres_max = Xfullres_max # maximum size of maximum dimension
self.img_just_set = False # this will be true whenever image is just loaded
# net_forward can set this to False if they want
def prep_net(self):
raise Exception("Should be implemented by base class")
# ***** Image prepping *****
def load_image(self, input_path):
# rgb image [CxXdxXd]
im = cv2.cvtColor(cv2.imread(input_path, 1), cv2.COLOR_BGR2RGB)
self.img_rgb_fullres = im.copy()
self._set_img_lab_fullres_()
im = cv2.resize(im, (self.Xd, self.Xd))
self.img_rgb = im.copy()
# self.img_rgb = sp.misc.imresize(plt.imread(input_path),(self.Xd,self.Xd)).transpose((2,0,1))
self.img_l_set = True
# convert into lab space
self._set_img_lab_()
self._set_img_lab_mc_()
def set_image(self, input_image):
self.img_rgb_fullres = input_image.copy()
self._set_img_lab_fullres_()
self.img_l_set = True
        im = cv2.resize(self.img_rgb_fullres, (self.Xd, self.Xd))
        self.img_rgb = im
# convert into lab space
self._set_img_lab_()
self._set_img_lab_mc_()
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
if(not self.img_l_set):
print('I need to have an image!')
return -1
if(not self.net_set):
print('I need to have a net!')
return -1
self.input_ab = input_ab
self.input_ab_mc = (input_ab-self.ab_mean)/self.ab_norm
self.input_mask = input_mask
self.input_mask_mult = input_mask*self.mask_mult
return 0
def get_result_PSNR(self, result=-1, return_SE_map=False):
if np.array((result)).flatten()[0] == -1:
cur_result = self.get_img_forward()
else:
cur_result = result.copy()
SE_map = (1.*self.img_rgb-cur_result)**2
cur_MSE = np.mean(SE_map)
cur_PSNR = 20*np.log10(255./np.sqrt(cur_MSE))
if return_SE_map:
return(cur_PSNR, SE_map)
else:
return cur_PSNR
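    # PSNR sketch: for 8-bit images PSNR = 20*log10(255/sqrt(MSE)), so e.g.
    # an MSE of 100 corresponds to roughly 28.1 dB.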
def get_img_forward(self):
# get image with point estimate
return self.output_rgb
def get_img_gray(self):
# Get black and white image
return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd)))
def get_img_gray_fullres(self):
# Get black and white image
return lab2rgb_transpose(self.img_l_fullres, np.zeros((2, self.img_l_fullres.shape[1], self.img_l_fullres.shape[2])))
def get_img_fullres(self):
# This assumes self.img_l_fullres, self.output_ab are set.
# Typically, this means that set_image() and net_forward()
# have been called.
# bilinear upsample
zoom_factor = (1, 1.*self.img_l_fullres.shape[1]/self.output_ab.shape[1], 1.*self.img_l_fullres.shape[2]/self.output_ab.shape[2])
output_ab_fullres = zoom(self.output_ab, zoom_factor, order=1)
return lab2rgb_transpose(self.img_l_fullres, output_ab_fullres)
def get_input_img_fullres(self):
zoom_factor = (1, 1.*self.img_l_fullres.shape[1]/self.input_ab.shape[1], 1.*self.img_l_fullres.shape[2]/self.input_ab.shape[2])
input_ab_fullres = zoom(self.input_ab, zoom_factor, order=1)
return lab2rgb_transpose(self.img_l_fullres, input_ab_fullres)
def get_input_img(self):
return lab2rgb_transpose(self.img_l, self.input_ab)
def get_img_mask(self):
# Get black and white image
return lab2rgb_transpose(100.*(1-self.input_mask), np.zeros((2, self.Xd, self.Xd)))
def get_img_mask_fullres(self):
# Get black and white image
zoom_factor = (1, 1.*self.img_l_fullres.shape[1]/self.input_ab.shape[1], 1.*self.img_l_fullres.shape[2]/self.input_ab.shape[2])
input_mask_fullres = zoom(self.input_mask, zoom_factor, order=0)
return lab2rgb_transpose(100.*(1-input_mask_fullres), np.zeros((2, input_mask_fullres.shape[1], input_mask_fullres.shape[2])))
def get_sup_img(self):
return lab2rgb_transpose(50*self.input_mask, self.input_ab)
def get_sup_fullres(self):
zoom_factor = (1, 1.*self.img_l_fullres.shape[1]/self.output_ab.shape[1], 1.*self.img_l_fullres.shape[2]/self.output_ab.shape[2])
input_mask_fullres = zoom(self.input_mask, zoom_factor, order=0)
input_ab_fullres = zoom(self.input_ab, zoom_factor, order=0)
return lab2rgb_transpose(50*input_mask_fullres, input_ab_fullres)
# ***** Private functions *****
def _set_img_lab_fullres_(self):
# adjust full resolution image to be within maximum dimension is within Xfullres_max
Xfullres = self.img_rgb_fullres.shape[0]
Yfullres = self.img_rgb_fullres.shape[1]
if Xfullres > self.Xfullres_max or Yfullres > self.Xfullres_max:
if Xfullres > Yfullres:
zoom_factor = 1.*self.Xfullres_max/Xfullres
else:
zoom_factor = 1.*self.Xfullres_max/Yfullres
self.img_rgb_fullres = zoom(self.img_rgb_fullres, (zoom_factor, zoom_factor, 1), order=1)
self.img_lab_fullres = color.rgb2lab(self.img_rgb_fullres).transpose((2, 0, 1))
self.img_l_fullres = self.img_lab_fullres[[0], :, :]
self.img_ab_fullres = self.img_lab_fullres[1:, :, :]
def _set_img_lab_(self):
# set self.img_lab from self.im_rgb
self.img_lab = color.rgb2lab(self.img_rgb).transpose((2, 0, 1))
self.img_l = self.img_lab[[0], :, :]
self.img_ab = self.img_lab[1:, :, :]
def _set_img_lab_mc_(self):
# set self.img_lab_mc from self.img_lab
        # lab image, mean centered [3xXdxXd]
self.img_lab_mc = self.img_lab / np.array((self.l_norm, self.ab_norm, self.ab_norm))[:, np.newaxis, np.newaxis]-np.array((self.l_mean/self.l_norm, self.ab_mean/self.ab_norm, self.ab_mean/self.ab_norm))[:, np.newaxis, np.newaxis]
self._set_img_l_()
def _set_img_l_(self):
self.img_l_mc = self.img_lab_mc[[0], :, :]
self.img_l_set = True
def _set_img_ab_(self):
self.img_ab_mc = self.img_lab_mc[[1, 2], :, :]
def _set_out_ab_(self):
self.output_lab = rgb2lab_transpose(self.output_rgb)
self.output_ab = self.output_lab[1:, :, :]
class ColorizeImageCaffe(ColorizeImageBase):
def __init__(self, Xd=256):
print('ColorizeImageCaffe instantiated')
ColorizeImageBase.__init__(self, Xd)
self.l_norm = 1.
self.ab_norm = 1.
self.l_mean = 50.
self.ab_mean = 0.
self.mask_mult = 110.
self.pred_ab_layer = 'pred_ab' # predicted ab layer
# Load grid properties
self.pts_in_hull_path = './data/color_bins/pts_in_hull.npy'
self.pts_in_hull = np.load(self.pts_in_hull_path) # 313x2, in-gamut
# ***** Net preparation *****
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
if gpu_id == -1:
caffe.set_mode_cpu()
else:
caffe.set_device(gpu_id)
caffe.set_mode_gpu()
self.gpu_id = gpu_id
self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
self.net_set = True
# automatically set cluster centers
if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313:
print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T
# automatically set upsampling kernel
for layer in self.net._layer_names:
if layer[-3:] == '_us':
print('Setting upsampling layer kernel: %s' % layer)
self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0), (.5, 1., .5, 0), (.25, .5, .25, 0), (0, 0, 0, 0)))[np.newaxis, :, :]
# ***** Call forward *****
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1:
return -1
net_input_prepped = np.concatenate((self.img_l_mc, self.input_ab_mc, self.input_mask_mult), axis=0)
self.net.blobs['data_l_ab_mask'].data[...] = net_input_prepped
self.net.forward()
# return prediction
self.output_rgb = lab2rgb_transpose(self.img_l, self.net.blobs[self.pred_ab_layer].data[0, :, :, :])
self._set_out_ab_()
return self.output_rgb
def get_img_forward(self):
# get image with point estimate
return self.output_rgb
def get_img_gray(self):
# Get black and white image
return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd)))
class ColorizeImageCaffeGlobDist(ColorizeImageCaffe):
# Caffe colorization, with additional global histogram as input
def __init__(self, Xd=256):
ColorizeImageCaffe.__init__(self, Xd)
self.glob_mask_mult = 1.
self.glob_layer = 'glob_ab_313_mask'
def net_forward(self, input_ab, input_mask, glob_dist=-1):
# glob_dist is 313 array, or -1
if np.array(glob_dist).flatten()[0] == -1: # run without this, zero it out
self.net.blobs[self.glob_layer].data[0, :-1, 0, 0] = 0.
self.net.blobs[self.glob_layer].data[0, -1, 0, 0] = 0.
else: # run conditioned on global histogram
self.net.blobs[self.glob_layer].data[0, :-1, 0, 0] = glob_dist
self.net.blobs[self.glob_layer].data[0, -1, 0, 0] = self.glob_mask_mult
self.output_rgb = ColorizeImageCaffe.net_forward(self, input_ab, input_mask)
self._set_out_ab_()
return self.output_rgb
class ColorizeImageCaffeDist(ColorizeImageCaffe):
# caffe model which includes distribution prediction
def __init__(self, Xd=256):
ColorizeImageCaffe.__init__(self, Xd)
self.dist_ab_set = False
self.scale_S_layer = 'scale_S'
self.dist_ab_S_layer = 'dist_ab_S' # softened distribution layer
self.pts_grid = np.load('./data/color_bins/pts_grid.npy') # 529x2, all points
self.in_hull = np.load('./data/color_bins/in_hull.npy') # 529 bool
self.AB = self.pts_grid.shape[0] # 529
self.A = int(np.sqrt(self.AB)) # 23
self.B = int(np.sqrt(self.AB)) # 23
self.dist_ab_full = np.zeros((self.AB, self.Xd, self.Xd))
self.dist_ab_grid = np.zeros((self.A, self.B, self.Xd, self.Xd))
self.dist_entropy = np.zeros((self.Xd, self.Xd))
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path='', S=.2):
ColorizeImageCaffe.prep_net(self, gpu_id, prototxt_path=prototxt_path, caffemodel_path=caffemodel_path)
self.S = S
self.net.params[self.scale_S_layer][0].data[...] = S
def net_forward(self, input_ab, input_mask):
# INPUTS
# ab 2xXxX input color patches (non-normalized)
# mask 1xXxX input mask, indicating which points have been provided
# assumes self.img_l_mc has been set
function_return = ColorizeImageCaffe.net_forward(self, input_ab, input_mask)
if np.array(function_return).flatten()[0] == -1: # errored out
return -1
# set distribution
# in-gamut, CxXxX, C = 313
self.dist_ab = self.net.blobs[self.dist_ab_S_layer].data[0, :, :, :]
self.dist_ab_set = True
# full grid, ABxXxX, AB = 529
self.dist_ab_full[self.in_hull, :, :] = self.dist_ab
# gridded, AxBxXxX, A = 23
self.dist_ab_grid = self.dist_ab_full.reshape((self.A, self.B, self.Xd, self.Xd))
# return
return function_return
def get_ab_reccs(self, h, w, K=5, N=25000, return_conf=False):
''' Recommended colors at point (h,w)
Call this after calling net_forward
'''
if not self.dist_ab_set:
print('Need to set prediction first')
return 0
# randomly sample from pdf
cmf = np.cumsum(self.dist_ab[:, h, w]) # CMF
cmf = cmf/cmf[-1]
cmf_bins = cmf
# randomly sample N points
rnd_pts = np.random.uniform(low=0, high=1.0, size=N)
inds = np.digitize(rnd_pts, bins=cmf_bins)
rnd_pts_ab = self.pts_in_hull[inds, :]
# run k-means
kmeans = KMeans(n_clusters=K).fit(rnd_pts_ab)
# sort by cluster occupancy
k_label_cnt = np.histogram(kmeans.labels_, np.arange(0, K+1))[0]
k_inds = np.argsort(k_label_cnt, axis=0)[::-1]
cluster_per = 1. * k_label_cnt[k_inds]/N # percentage of points within cluster
cluster_centers = kmeans.cluster_centers_[k_inds, :] # cluster centers
# cluster_centers = np.random.uniform(low=-100,high=100,size=(N,2))
if return_conf:
return cluster_centers, cluster_per
else:
return cluster_centers
def compute_entropy(self):
        # compute sum over bins of p*log(p) per pixel, i.e. the negative entropy
        # (plot_dist_entropy negates this for display; really slow right now)
        self.dist_entropy = np.sum(self.dist_ab*np.log(self.dist_ab), axis=0)
def plot_dist_grid(self, h, w):
# Plots distribution at a given point
plt.figure()
plt.imshow(self.dist_ab_grid[:, :, h, w], extent=[-110, 110, 110, -110], interpolation='nearest')
plt.colorbar()
plt.ylabel('a')
plt.xlabel('b')
def plot_dist_entropy(self):
        # Plots the per-pixel entropy of the predicted distribution
plt.figure()
plt.imshow(-self.dist_entropy, interpolation='nearest')
plt.colorbar()
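# ----------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the original file): how the
# distribution model above is typically driven end to end. The model paths
# below are hypothetical placeholders, and load_image() is assumed to be
# defined on ColorizeImageBase earlier in this module.
def _demo_colorize_dist(img_path='imgs/example.jpg',
                        prototxt='./models/reference_model/deploy_nodist.prototxt',
                        caffemodel='./models/reference_model/model.caffemodel'):
    cid = ColorizeImageCaffeDist(Xd=256)
    cid.prep_net(gpu_id=-1, prototxt_path=prototxt, caffemodel_path=caffemodel)  # -1 = CPU mode
    cid.load_image(img_path)                 # sets img_l, img_l_fullres, etc.
    input_ab = np.zeros((2, 256, 256))       # no user color hints
    input_mask = np.zeros((1, 256, 256))     # empty hint mask
    rgb = cid.net_forward(input_ab, input_mask)    # colorized 256x256 result
    centers = cid.get_ab_reccs(h=128, w=128, K=5)  # 5 suggested ab colors at the center pixel
    return rgb, centers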
|
colorization-master
|
interactive-deep-colorization/data/colorize_image.py
|
colorization-master
|
interactive-deep-colorization/data/__init__.py
|
|
import numpy as np
from skimage import color
import warnings
def qcolor2lab_1d(qc):
# take 1d numpy array and do color conversion
c = np.array([qc.red(), qc.green(), qc.blue()], np.uint8)
return rgb2lab_1d(c)
def rgb2lab_1d(in_rgb):
# take 1d numpy array and do color conversion
# print('in_rgb', in_rgb)
return color.rgb2lab(in_rgb[np.newaxis, np.newaxis, :]).flatten()
def lab2rgb_1d(in_lab, clip=True, dtype='uint8'):
warnings.filterwarnings("ignore")
tmp_rgb = color.lab2rgb(in_lab[np.newaxis, np.newaxis, :]).flatten()
if clip:
tmp_rgb = np.clip(tmp_rgb, 0, 1)
if dtype == 'uint8':
tmp_rgb = np.round(tmp_rgb * 255).astype('uint8')
return tmp_rgb
def snap_ab(input_l, input_rgb, return_type='rgb'):
''' given an input lightness and rgb, snap the color into a region where l,a,b is in-gamut
'''
T = 20
warnings.filterwarnings("ignore")
input_lab = rgb2lab_1d(np.array(input_rgb)) # convert input to lab
conv_lab = input_lab.copy() # keep ab from input
for t in range(T):
        conv_lab[0] = input_l  # hold the lightness channel fixed at the requested value
old_lab = conv_lab
tmp_rgb = color.lab2rgb(conv_lab[np.newaxis, np.newaxis, :]).flatten()
tmp_rgb = np.clip(tmp_rgb, 0, 1)
conv_lab = color.rgb2lab(tmp_rgb[np.newaxis, np.newaxis, :]).flatten()
dif_lab = np.sum(np.abs(conv_lab-old_lab))
if dif_lab < 1:
break
# print(conv_lab)
conv_rgb_ingamut = lab2rgb_1d(conv_lab, clip=True, dtype='uint8')
if (return_type == 'rgb'):
return conv_rgb_ingamut
elif(return_type == 'lab'):
conv_lab_ingamut = rgb2lab_1d(conv_rgb_ingamut)
return conv_lab_ingamut
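# Editor's hedged example (addition): snap a fully saturated red onto a very
# bright lightness; the returned Lab color keeps L close to 90 while pulling
# the chroma back inside the sRGB gamut.
def _demo_snap_ab():
    bright_red = np.array([255, 0, 0], dtype='uint8')
    return snap_ab(90., bright_red, return_type='lab')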
class abGrid():
def __init__(self, gamut_size=110, D=1):
self.D = D
self.vals_b, self.vals_a = np.meshgrid(np.arange(-gamut_size, gamut_size+D, D),
np.arange(-gamut_size, gamut_size+D, D))
self.pts_full_grid = np.concatenate((self.vals_a[:, :, np.newaxis], self.vals_b[:, :, np.newaxis]), axis=2)
self.A = self.pts_full_grid.shape[0]
self.B = self.pts_full_grid.shape[1]
self.AB = self.A * self.B
self.gamut_size = gamut_size
def update_gamut(self, l_in):
warnings.filterwarnings("ignore")
thresh = 1.0
pts_lab = np.concatenate((l_in + np.zeros((self.A, self.B, 1)), self.pts_full_grid), axis=2)
self.pts_rgb = (255 * np.clip(color.lab2rgb(pts_lab), 0, 1)).astype('uint8')
pts_lab_back = color.rgb2lab(self.pts_rgb)
pts_lab_diff = np.linalg.norm(pts_lab-pts_lab_back, axis=2)
self.mask = pts_lab_diff < thresh
mask3 = np.tile(self.mask[..., np.newaxis], [1, 1, 3])
self.masked_rgb = self.pts_rgb.copy()
self.masked_rgb[np.invert(mask3)] = 255
return self.masked_rgb, self.mask
def ab2xy(self, a, b):
y = self.gamut_size + a
x = self.gamut_size + b
# print('ab2xy (%d, %d) -> (%d, %d)' % (a, b, x, y))
return x, y
def xy2ab(self, x, y):
a = y - self.gamut_size
b = x - self.gamut_size
# print('xy2ab (%d, %d) -> (%d, %d)' % (x, y, a, b))
return a, b
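# Editor's hedged sketch (addition): the grid's ab <-> widget-pixel mapping is
# a simple translation by gamut_size, so round-trips are exact.
def _demo_ab_grid():
    grid = abGrid(gamut_size=110, D=10)
    masked_rgb, mask = grid.update_gamut(l_in=50.)   # in-gamut colors at L=50
    x, y = grid.ab2xy(20, -40)
    assert grid.xy2ab(x, y) == (20, -40)
    return masked_rgb, mask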
|
colorization-master
|
interactive-deep-colorization/data/lab_gamut.py
|
from setuptools import setup, find_packages
import sys
import os
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp_rc whilst setting up.
VERSION = {}
with open("allennlp_rc/version.py") as version_file:
exec(version_file.read(), VERSION)
# Load requirements.txt with a special case for allennlp so we can handle
# cross-library integration testing.
with open("requirements.txt") as requirements_file:
install_requirements = requirements_file.readlines()
install_requirements = [
r for r in install_requirements if "git+https://github.com/allenai/allennlp" not in r
]
if not os.environ.get("EXCLUDE_ALLENNLP_IN_SETUP"):
# Warning: This will not give you the desired version if you've already
# installed allennlp! See https://github.com/pypa/pip/issues/5898.
#
# There used to be an alternative to this using `dependency_links`
# (https://stackoverflow.com/questions/3472430), but pip decided to
# remove this in version 19 breaking numerous projects in the process.
# See https://github.com/pypa/pip/issues/6162.
#
# As a mitigation, run `pip uninstall allennlp` before installing this
# package.
sha = "4749fc3"
requirement = f"allennlp @ git+https://github.com/allenai/allennlp@{sha}#egg=allennlp"
install_requirements.append(requirement)
# make pytest-runner a conditional requirement,
# per: https://github.com/pytest-dev/pytest-runner#considerations
needs_pytest = {"pytest", "test", "ptr"}.intersection(sys.argv)
pytest_runner = ["pytest-runner"] if needs_pytest else []
setup_requirements = [
# add other setup requirements as necessary
] + pytest_runner
setup(
name="allennlp_reading_comprehension",
version=VERSION["VERSION"],
description=(
"A framework for building reading comprehension models "
"with AllenNLP, built by the authors of AllenNLP"
),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP deep learning machine reading semantic parsing parsers",
url="https://github.com/allenai/allennlp-reading-comprehension",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requirements,
setup_requires=setup_requirements,
tests_require=["pytest", "flaky", "responses>=0.7"],
include_package_data=True,
python_requires=">=3.6.1",
zip_safe=False,
)
|
allennlp-reading-comprehension-master
|
setup.py
|
_MAJOR = "0"
_MINOR = "0"
_REVISION = "1-unreleased"
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}".format(_MAJOR, _MINOR, _REVISION)
|
allennlp-reading-comprehension-master
|
allennlp_rc/version.py
|
import allennlp_rc.dataset_readers
import allennlp_rc.models
import allennlp_rc.predictors
|
allennlp-reading-comprehension-master
|
allennlp_rc/__init__.py
|
import json
import logging
from typing import Dict, List
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, ListField, MetadataField, IndexField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("qangaroo")
class QangarooReader(DatasetReader):
"""
Reads a JSON-formatted Qangaroo file and returns a ``Dataset`` where the ``Instances`` have six
fields: ``candidates``, a ``ListField[TextField]``, ``query``, a ``TextField``, ``supports``, a
    ``ListField[TextField]``, ``answer``, a ``TextField``, and ``answer_index``, an ``IndexField``.
We also add a ``MetadataField`` that stores the instance's ID and annotations if they are present.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
        Default is ``SpacyTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
logger.info("Reading the dataset")
for sample in dataset:
instance = self.text_to_instance(
sample["candidates"],
sample["query"],
sample["supports"],
sample["id"],
sample["answer"],
sample["annotations"] if "annotations" in sample else [[]],
)
yield instance
@overrides
def text_to_instance(
self, # type: ignore
candidates: List[str],
query: str,
supports: List[str],
_id: str = None,
answer: str = None,
annotations: List[List[str]] = None,
) -> Instance:
fields: Dict[str, Field] = {}
candidates_field = ListField(
[
TextField(candidate, self._token_indexers)
for candidate in self._tokenizer.batch_tokenize(candidates)
]
)
fields["query"] = TextField(self._tokenizer.tokenize(query), self._token_indexers)
fields["supports"] = ListField(
[
TextField(support, self._token_indexers)
for support in self._tokenizer.batch_tokenize(supports)
]
)
fields["answer"] = TextField(self._tokenizer.tokenize(answer), self._token_indexers)
fields["answer_index"] = IndexField(candidates.index(answer), candidates_field)
fields["candidates"] = candidates_field
fields["metadata"] = MetadataField({"annotations": annotations, "id": _id})
return Instance(fields)
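# Editor's note (addition): each element of the JSON list consumed by _read()
# above is expected to carry at least these keys (toy placeholder values):
# {
#     "id": "<sample id>",
#     "query": "<relation plus subject entity>",
#     "answer": "<one of the candidates>",
#     "candidates": ["<candidate 1>", "<candidate 2>", "..."],
#     "supports": ["<support document 1>", "..."],
#     "annotations": [["<optional annotation>"]]   # may be absent
# }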
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/qangaroo.py
|
"""
Utilities for reading comprehension dataset readers.
"""
from collections import Counter, defaultdict
import logging
import string
from typing import Any, Dict, List, Tuple, Optional
from allennlp.data.fields import (
Field,
TextField,
IndexField,
MetadataField,
LabelField,
ListField,
SequenceLabelField,
)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
# These are tokens and characters that are stripped by the standard SQuAD and TriviaQA evaluation
# scripts.
IGNORED_TOKENS = {"a", "an", "the"}
STRIPPED_CHARACTERS = string.punctuation + "".join(["‘", "’", "´", "`", "_"])
def normalize_text(text: str) -> str:
"""
Performs a normalization that is very similar to that done by the normalization functions in
SQuAD and TriviaQA.
This involves splitting and rejoining the text, and could be a somewhat expensive operation.
"""
return " ".join(
[
token
for token in text.lower().strip(STRIPPED_CHARACTERS).split()
if token not in IGNORED_TOKENS
]
)
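# Editor's illustration (addition): articles are dropped and outer punctuation
# is stripped, e.g. normalize_text("The Eiffel Tower!") == "eiffel tower".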
def char_span_to_token_span(
token_offsets: List[Optional[Tuple[int, int]]], character_span: Tuple[int, int]
) -> Tuple[Tuple[int, int], bool]:
"""
Converts a character span from a passage into the corresponding token span in the tokenized
version of the passage. If you pass in a character span that does not correspond to complete
tokens in the tokenized version, we'll do our best, but the behavior is officially undefined.
We return an error flag in this case, and have some debug logging so you can figure out the
cause of this issue (in SQuAD, these are mostly either tokenization problems or annotation
problems; there's a fair amount of both).
The basic outline of this method is to find the token span that has the same offsets as the
input character span. If the tokenizer tokenized the passage correctly and has matching
offsets, this is easy. We try to be a little smart about cases where they don't match exactly,
but mostly just find the closest thing we can.
The returned ``(begin, end)`` indices are `inclusive` for both ``begin`` and ``end``.
So, for example, ``(2, 2)`` is the one word span beginning at token index 2, ``(3, 4)`` is the
two-word span beginning at token index 3, and so on.
Returns
-------
token_span : ``Tuple[int, int]``
`Inclusive` span start and end token indices that match as closely as possible to the input
character spans.
error : ``bool``
Whether the token spans match the input character spans exactly. If this is ``False``, it
means there was an error in either the tokenization or the annotated character span.
"""
# We have token offsets into the passage from the tokenizer; we _should_ be able to just find
# the tokens that have the same offsets as our span.
error = False
start_index = 0
while start_index < len(token_offsets) and (
token_offsets[start_index] is None or token_offsets[start_index][0] < character_span[0]
):
start_index += 1
if start_index >= len(token_offsets):
        raise ValueError(f"Character span {character_span!r} outside the range of the given tokens.")
# start_index should now be pointing at the span start index.
if token_offsets[start_index][0] > character_span[0]:
if start_index <= 0:
            raise ValueError(f"Character span {character_span!r} outside the range of the given tokens.")
# In this case, a tokenization or labeling issue made us go too far - the character span we're looking for
# actually starts in the previous token. We'll back up one. Note that this might have us starting at a None
# token.
logger.debug("Bad labelling or tokenization - start offset doesn't match")
start_index -= 1
if token_offsets[start_index] is None or token_offsets[start_index][0] != character_span[0]:
error = True
end_index = start_index
while end_index < len(token_offsets) and (
token_offsets[end_index] is None or token_offsets[end_index][1] < character_span[1]
):
end_index += 1
if end_index >= len(token_offsets):
        raise ValueError(f"Character span {character_span!r} outside the range of the given tokens.")
if end_index == start_index and token_offsets[end_index][1] > character_span[1]:
# Looks like there was a token that should have been split, like "1854-1855", where the
# answer is "1854". We can't do much in this case, except keep the answer as the whole
# token.
logger.debug("Bad tokenization - end offset doesn't match")
elif token_offsets[end_index][1] > character_span[1]:
# This is a case where the given answer span is more than one token, and the last token is
# cut off for some reason, like "split with Luckett and Rober", when the original passage
# said "split with Luckett and Roberson". In this case, we'll just keep the end index
# where it is, and assume the intent was to mark the whole token.
logger.debug("Bad labelling or tokenization - end offset doesn't match")
if token_offsets[end_index][1] != character_span[1]:
error = True
return (start_index, end_index), error
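# Editor's hedged sketch (addition): token offsets for "John Smith lives here"
# and the character span of "Smith lives" map to the inclusive token span (1, 2).
def _demo_char_span_to_token_span():
    offsets = [(0, 4), (5, 10), (11, 16), (17, 21)]
    (start, end), error = char_span_to_token_span(offsets, (5, 16))
    assert (start, end) == (1, 2) and not error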
def find_valid_answer_spans(
passage_tokens: List[Token], answer_texts: List[str]
) -> List[Tuple[int, int]]:
"""
Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This
tries to find all spans that would evaluate to correct given the SQuAD and TriviaQA official
evaluation scripts, which do some normalization of the input text.
Note that this could return duplicate spans! The caller is expected to be able to handle
possible duplicates (as already happens in the SQuAD dev set, for instance).
"""
normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens]
# Because there could be many `answer_texts`, we'll do the most expensive pre-processing
# step once. This gives us a map from tokens to the position in the passage they appear.
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
# For each answer, we'll first find all valid start positions in the passage. Then
# we'll grow each span to the same length as the number of answer tokens, and see if we
# have a match. We're a little tricky as we grow the span, skipping words that are
# already pruned from the normalized answer text, and stopping early if we don't match.
answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
num_answer_tokens = len(answer_tokens)
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index] == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
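# Editor's hedged sketch (addition): matching is case-insensitive and tolerant
# of the ignored articles, so "Quick Brown" is found at inclusive span (1, 2).
def _demo_find_valid_answer_spans():
    tokens = [Token(text=t) for t in "the quick brown fox".split()]
    assert find_valid_answer_spans(tokens, ["Quick Brown"]) == [(1, 2)]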
def make_reading_comprehension_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_spans: List[Tuple[int, int]] = None,
answer_texts: List[str] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_tokens : ``List[Token]``
An already-tokenized question.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_spans : ``List[Tuple[int, int]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list because there might be several possible correct answer spans in the passage.
Currently, we just select the most frequent span in this list (i.e., SQuAD has multiple
annotations on the dev set; this will select the span that the most annotators gave as
correct).
answer_texts : ``List[str]``, optional
All valid answer strings for the given question. In SQuAD, e.g., the training set has
exactly one answer per question, but the dev and test sets have several. TriviaQA has many
possible answers, which are the aliases for the known correct entity. This is put into the
metadata for use with official evaluation scripts, but not used anywhere else.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = TextField(question_tokens, token_indexers)
metadata = {
"original_passage": passage_text,
"token_offsets": passage_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
}
if answer_texts:
metadata["answer_texts"] = answer_texts
if token_spans:
# There may be multiple answer annotations, so we pick the one that occurs the most. This
# only matters on the SQuAD dev set, and it means our computed metrics ("start_acc",
# "end_acc", and "span_acc") aren't quite the same as the official metrics, which look at
# all of the annotations. This is why we have a separate official SQuAD metric calculation
# (the "em" and "f1" metrics use the official script).
candidate_answers: Counter = Counter()
for span_start, span_end in token_spans:
candidate_answers[(span_start, span_end)] += 1
span_start, span_end = candidate_answers.most_common(1)[0][0]
fields["span_start"] = IndexField(span_start, passage_field)
fields["span_end"] = IndexField(span_end, passage_field)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def make_reading_comprehension_instance_quac(
question_list_tokens: List[List[Token]],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_span_lists: List[List[Tuple[int, int]]] = None,
yesno_list: List[int] = None,
followup_list: List[int] = None,
additional_metadata: Dict[str, Any] = None,
num_context_answers: int = 0,
) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
        An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
        a list of lists, first because there are multiple questions per dialog, and second
        because there might be several possible correct answer spans in the passage.
        Currently, we just select the last span in this list (i.e., QuAC has multiple
        annotations on the dev set; this will select the last span, which was given by the original annotator).
    yesno_list : ``List[int]``
        List of the affirmation bit for each question-answer pair.
    followup_list : ``List[int]``
        List of the continuation bit for each question-answer pair.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = ListField(
[TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens]
)
metadata = {
"original_passage": passage_text,
"token_offsets": passage_offsets,
"question_tokens": [
[token.text for token in question_tokens] for question_tokens in question_list_tokens
],
"passage_tokens": [token.text for token in passage_tokens],
}
p1_answer_marker_list: List[Field] = []
p2_answer_marker_list: List[Field] = []
p3_answer_marker_list: List[Field] = []
def get_tag(i, i_name):
# Generate a tag to mark previous answer span in the passage.
return "<{0:d}_{1:s}>".format(i, i_name)
def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
try:
assert span_start >= 0
assert span_end >= 0
except: # noqa
raise ValueError(
"Previous {0:d}th answer span should have been updated!".format(
prev_answer_distance
)
)
# Modify "tags" to mark previous answer span.
if span_start == span_end:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
else:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
for passage_index in range(span_start + 1, span_end):
passage_tags[prev_answer_distance][passage_index] = get_tag(
prev_answer_distance, "in"
)
if token_span_lists:
span_start_list: List[Field] = []
span_end_list: List[Field] = []
p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
# Looping each <<answers>>.
for question_index, answer_span_lists in enumerate(token_span_lists):
span_start, span_end = answer_span_lists[-1] # Last one is the original answer
span_start_list.append(IndexField(span_start, passage_field))
span_end_list.append(IndexField(span_end, passage_field))
prev_answer_marker_lists = [
["O"] * len(passage_tokens),
["O"] * len(passage_tokens),
["O"] * len(passage_tokens),
["O"] * len(passage_tokens),
]
if question_index > 0 and num_context_answers > 0:
mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
if question_index > 1 and num_context_answers > 1:
mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
if question_index > 2 and num_context_answers > 2:
mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
p3_span_start = p2_span_start
p3_span_end = p2_span_end
p2_span_start = p1_span_start
p2_span_end = p1_span_end
p1_span_start = span_start
p1_span_end = span_end
if num_context_answers > 2:
p3_answer_marker_list.append(
SequenceLabelField(
prev_answer_marker_lists[3], passage_field, label_namespace="answer_tags"
)
)
if num_context_answers > 1:
p2_answer_marker_list.append(
SequenceLabelField(
prev_answer_marker_lists[2], passage_field, label_namespace="answer_tags"
)
)
if num_context_answers > 0:
p1_answer_marker_list.append(
SequenceLabelField(
prev_answer_marker_lists[1], passage_field, label_namespace="answer_tags"
)
)
fields["span_start"] = ListField(span_start_list)
fields["span_end"] = ListField(span_end_list)
if num_context_answers > 0:
fields["p1_answer_marker"] = ListField(p1_answer_marker_list)
if num_context_answers > 1:
fields["p2_answer_marker"] = ListField(p2_answer_marker_list)
if num_context_answers > 2:
fields["p3_answer_marker"] = ListField(p3_answer_marker_list)
fields["yesno_list"] = ListField(
[LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list]
)
fields["followup_list"] = ListField(
[LabelField(followup, label_namespace="followup_labels") for followup in followup_list]
)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def handle_cannot(reference_answers: List[str]):
"""
Process a list of reference answers.
If equal or more than half of the reference answers are "CANNOTANSWER", take it as gold.
Otherwise, return answers that are not "CANNOTANSWER".
"""
num_cannot = 0
num_spans = 0
for ref in reference_answers:
if ref == "CANNOTANSWER":
num_cannot += 1
else:
num_spans += 1
if num_cannot >= num_spans:
reference_answers = ["CANNOTANSWER"]
else:
reference_answers = [x for x in reference_answers if x != "CANNOTANSWER"]
return reference_answers
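# Editor's illustration (addition):
#   handle_cannot(["CANNOTANSWER", "span a", "span b"]) -> ["span a", "span b"]
#   handle_cannot(["CANNOTANSWER", "span a"])           -> ["CANNOTANSWER"]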
def split_token_by_delimiter(token: Token, delimiter: str) -> List[Token]:
split_tokens = []
char_offset = token.idx
for sub_str in token.text.split(delimiter):
if sub_str:
split_tokens.append(Token(text=sub_str, idx=char_offset))
char_offset += len(sub_str)
split_tokens.append(Token(text=delimiter, idx=char_offset))
char_offset += len(delimiter)
if split_tokens:
split_tokens.pop(-1)
char_offset -= len(delimiter)
return split_tokens
else:
return [token]
def split_tokens_by_hyphen(tokens: List[Token]) -> List[Token]:
hyphens = ["-", "–", "~"]
new_tokens: List[Token] = []
for token in tokens:
if any(hyphen in token.text for hyphen in hyphens):
unsplit_tokens = [token]
split_tokens: List[Token] = []
for hyphen in hyphens:
for unsplit_token in unsplit_tokens:
                    if hyphen in unsplit_token.text:  # check the piece being split, not the original token
split_tokens += split_token_by_delimiter(unsplit_token, hyphen)
else:
split_tokens.append(unsplit_token)
unsplit_tokens, split_tokens = split_tokens, []
new_tokens += unsplit_tokens
else:
new_tokens.append(token)
return new_tokens
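# Editor's hedged sketch (addition): hyphen splitting preserves character
# offsets, so "1854-1855" starting at idx 10 becomes three tokens.
def _demo_split_tokens_by_hyphen():
    tokens = split_tokens_by_hyphen([Token(text="1854-1855", idx=10)])
    assert [(t.text, t.idx) for t in tokens] == [("1854", 10), ("-", 14), ("1855", 15)]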
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/util.py
|
import json
import logging
from typing import Any, Dict, List, Tuple, Optional, Iterable
from allennlp.common.util import sanitize_wordpiece
from allennlp.data.fields import MetadataField, TextField, SpanField
from overrides import overrides
from allennlp.common.file_utils import cached_path, open_compressed
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
from allennlp_rc.dataset_readers.util import char_span_to_token_span
logger = logging.getLogger(__name__)
@DatasetReader.register("transformer_squad")
class TransformerSquadReader(DatasetReader):
"""
Reads a JSON-formatted SQuAD file and returns a ``Dataset`` where the ``Instances`` have four
fields:
* ``question_with_context``, a ``TextField`` that contains the concatenation of question and context,
* ``answer_span``, a ``SpanField`` into the ``question`` ``TextField`` denoting the answer.
    * ``context_span``, a ``SpanField`` into the ``question`` ``TextField`` denoting the context, i.e., the part of
the text that potential answers can come from.
* A ``MetadataField`` that stores the instance's ID, the original question, the original passage text, both of
these in tokenized form, and the gold answer strings, accessible as ``metadata['id']``,
``metadata['question']``, ``metadata['context']``, ``metadata['question_tokens']``,
      ``metadata['context_tokens']``, and ``metadata['answers']``. This is so that we can more easily use the
official SQuAD evaluation script to get metrics.
We also support limiting the maximum length for the question. When the context+question is too long, we run a
sliding window over the context and emit multiple instances for a single question. At training time, we only
emit instances that contain a gold answer. At test time, we emit all instances. As a result, the per-instance
metrics you get during training and evaluation don't correspond 100% to the SQuAD task. To get a final number,
you have to run the script in scripts/transformer_qa_eval.py.
Parameters
----------
transformer_model_name : ``str``, optional (default=``bert-base-cased``)
This reader chooses tokenizer and token indexer according to this setting.
length_limit : ``int``, optional (default=384)
We will make sure that the length of context+question never exceeds this many word pieces.
stride : ``int``, optional (default=128)
When context+question are too long for the length limit, we emit multiple instances for one question,
        where the context is shifted. This parameter specifies the overlap between the shifted context windows. It
is called "stride" instead of "overlap" because that's what it's called in the original huggingface
implementation.
    skip_invalid_examples : ``bool``, optional (default=False)
If this is true, we will skip examples that don't have a gold answer. You should set this to True during
training, and False any other time.
max_query_length : ``int``, optional (default=64)
The maximum number of wordpieces dedicated to the question. If the question is longer than this, it will be
truncated.
"""
def __init__(
self,
transformer_model_name: str = "bert-base-cased",
length_limit: int = 384,
stride: int = 128,
skip_invalid_examples: bool = False,
max_query_length: int = 64,
**kwargs
) -> None:
super().__init__(**kwargs)
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name, add_special_tokens=False, calculate_character_offsets=True
)
self._token_indexers = {"tokens": PretrainedTransformerIndexer(transformer_model_name)}
self.length_limit = length_limit
self.stride = stride
self.skip_invalid_examples = skip_invalid_examples
self.max_query_length = max_query_length
self.non_content_type_id = max(
self._tokenizer.tokenizer.encode_plus("left", "right")["token_type_ids"]
)
# workaround for a bug in the transformers library
if "distilbert" in transformer_model_name:
self.non_content_type_id = 0
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open_compressed(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json["data"]
logger.info("Reading the dataset")
yielded_question_count = 0
questions_with_more_than_one_instance = 0
for article in dataset:
for paragraph_json in article["paragraphs"]:
context = paragraph_json["context"]
for question_answer in paragraph_json["qas"]:
answers = [answer_json["text"] for answer_json in question_answer["answers"]]
# Just like huggingface, we only use the first answer for training.
if len(answers) > 0:
first_answer_offset = int(question_answer["answers"][0]["answer_start"])
else:
first_answer_offset = None
instances = self.make_instances(
question_answer.get("id", None),
question_answer["question"],
answers,
context,
first_answer_offset,
)
instances_yielded = 0
for instance in instances:
yield instance
instances_yielded += 1
if instances_yielded > 1:
questions_with_more_than_one_instance += 1
yielded_question_count += 1
if questions_with_more_than_one_instance > 0:
logger.info(
"%d (%.2f%%) questions have more than one instance",
questions_with_more_than_one_instance,
100 * questions_with_more_than_one_instance / yielded_question_count,
)
def make_instances(
self,
qid: str,
question: str,
answers: List[str],
context: str,
first_answer_offset: Optional[int],
) -> Iterable[Instance]:
# tokenize context by spaces first, and then with the wordpiece tokenizer
# For RoBERTa, this produces a bug where every token is marked as beginning-of-sentence. To fix it, we
# detect whether a space comes before a word, and if so, add "a " in front of the word.
def tokenize_slice(start: int, end: int) -> Iterable[Token]:
text_to_tokenize = context[start:end]
if start - 1 >= 0 and context[start - 1].isspace():
prefix = "a " # must end in a space, and be short so we can be sure it becomes only one token
wordpieces = self._tokenizer.tokenize(prefix + text_to_tokenize)
for wordpiece in wordpieces:
if wordpiece.idx is not None:
wordpiece.idx -= len(prefix)
return wordpieces[1:]
else:
return self._tokenizer.tokenize(text_to_tokenize)
tokenized_context = []
token_start = 0
for i, c in enumerate(context):
if c.isspace():
for wordpiece in tokenize_slice(token_start, i):
if wordpiece.idx is not None:
wordpiece.idx += token_start
tokenized_context.append(wordpiece)
token_start = i + 1
for wordpiece in tokenize_slice(token_start, len(context)):
if wordpiece.idx is not None:
wordpiece.idx += token_start
tokenized_context.append(wordpiece)
if first_answer_offset is None:
(token_answer_span_start, token_answer_span_end) = (-1, -1)
else:
(token_answer_span_start, token_answer_span_end), _ = char_span_to_token_span(
[
(t.idx, t.idx + len(sanitize_wordpiece(t.text))) if t.idx is not None else None
for t in tokenized_context
],
(first_answer_offset, first_answer_offset + len(answers[0])),
)
# Tokenize the question
tokenized_question = self._tokenizer.tokenize(question)
tokenized_question = tokenized_question[: self.max_query_length]
for token in tokenized_question:
token.type_id = self.non_content_type_id
token.idx = None
# Stride over the context, making instances
# Sequences are [CLS] question [SEP] [SEP] context [SEP], hence the - 4 for four special tokens.
# This is technically not correct for anything but RoBERTa, but it does not affect the scores.
space_for_context = self.length_limit - len(tokenized_question) - 4
stride_start = 0
while True:
tokenized_context_window = tokenized_context[stride_start:]
tokenized_context_window = tokenized_context_window[:space_for_context]
window_token_answer_span = (
token_answer_span_start - stride_start,
token_answer_span_end - stride_start,
)
if any(i < 0 or i >= len(tokenized_context_window) for i in window_token_answer_span):
# The answer is not contained in the window.
window_token_answer_span = None
if not self.skip_invalid_examples or window_token_answer_span is not None:
additional_metadata = {"id": qid}
instance = self.text_to_instance(
question,
tokenized_question,
context,
tokenized_context_window,
answers,
window_token_answer_span,
additional_metadata,
)
yield instance
stride_start += space_for_context
if stride_start >= len(tokenized_context):
break
stride_start -= self.stride
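    # Editor's worked example (addition) of the striding arithmetic above: with
    # the defaults (length_limit=384, stride=128) and a 20-wordpiece question,
    # space_for_context = 384 - 20 - 4 = 360, so successive windows start at
    # token 0, then 360 - 128 = 232, then 232 + 360 - 128 = 464, and so on,
    # each overlapping its predecessor by 128 wordpieces until the context is
    # exhausted.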
@overrides
def text_to_instance(
self, # type: ignore
question: str,
tokenized_question: List[Token],
context: str,
tokenized_context: List[Token],
answers: List[str],
token_answer_span: Optional[Tuple[int, int]],
additional_metadata: Dict[str, Any] = None,
) -> Instance:
fields = {}
# make the question field
cls_token = Token(
self._tokenizer.tokenizer.cls_token,
text_id=self._tokenizer.tokenizer.cls_token_id,
type_id=self.non_content_type_id,
)
sep_token = Token(
self._tokenizer.tokenizer.sep_token,
text_id=self._tokenizer.tokenizer.sep_token_id,
type_id=self.non_content_type_id,
)
question_field = TextField(
(
[cls_token]
+ tokenized_question
+ [sep_token, sep_token]
+ tokenized_context
+ [sep_token]
),
self._token_indexers,
)
fields["question_with_context"] = question_field
start_of_context = 1 + len(tokenized_question) + 2
# make the answer span
if token_answer_span is not None:
assert all(i >= 0 for i in token_answer_span)
assert token_answer_span[0] <= token_answer_span[1]
fields["answer_span"] = SpanField(
token_answer_span[0] + start_of_context,
token_answer_span[1] + start_of_context,
question_field,
)
else:
# We have to put in something even when we don't have an answer, so that this instance can be batched
# together with other instances that have answers.
fields["answer_span"] = SpanField(-1, -1, question_field)
# make the context span, i.e., the span of text from which possible answers should be drawn
fields["context_span"] = SpanField(
start_of_context, start_of_context + len(tokenized_context) - 1, question_field
)
# make the metadata
metadata = {
"question": question,
"question_tokens": tokenized_question,
"context": context,
"context_tokens": tokenized_context,
"answers": answers,
}
if additional_metadata is not None:
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/transformer_squad.py
|
from allennlp_rc.dataset_readers.drop import DropReader
from allennlp_rc.dataset_readers.qangaroo import QangarooReader
from allennlp_rc.dataset_readers.quac import QuACReader
from allennlp_rc.dataset_readers.squad import SquadReader
from allennlp_rc.dataset_readers.triviaqa import TriviaQaReader
from allennlp_rc.dataset_readers.transformer_squad import TransformerSquadReader
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/__init__.py
|
import json
import logging
from typing import Any, Dict, List, Tuple, Optional
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_rc.dataset_readers import util
logger = logging.getLogger(__name__)
@DatasetReader.register("squad")
class SquadReader(DatasetReader):
"""
Reads a JSON-formatted SQuAD file and returns a ``Dataset`` where the ``Instances`` have four
fields: ``question``, a ``TextField``, ``passage``, another ``TextField``, and ``span_start``
and ``span_end``, both ``IndexFields`` into the ``passage`` ``TextField``. We also add a
``MetadataField`` that stores the instance's ID, the original passage text, gold answer strings,
and token offsets into the original passage, accessible as ``metadata['id']``,
``metadata['original_passage']``, ``metadata['answer_texts']`` and
``metadata['token_offsets']``. This is so that we can more easily use the official SQuAD
evaluation script to get metrics.
    We also support limiting the maximum length for both passage and question. However, some gold
    answer spans may exceed the maximum passage length, which would cause errors when making
    instances, so we simply skip these spans. If all of the gold answer spans of an example
    are skipped, during training, we will skip this example. During validation or testing, since
    we cannot skip examples, we use the last token as the pseudo gold answer span instead. The
    computed loss will not be accurate as a result, but this will not affect the answer evaluation,
    because we keep all the original gold answer texts.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
        Default is ``SpacyTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
lazy : ``bool``, optional (default=False)
If this is true, ``instances()`` will return an object whose ``__iter__`` method
reloads the dataset each time it's called. Otherwise, ``instances()`` returns a list.
passage_length_limit : ``int``, optional (default=None)
if specified, we will cut the passage if the length of passage exceeds this limit.
    question_length_limit : ``int``, optional (default=None)
        if specified, we will cut the question if the length of the question exceeds this limit.
    skip_invalid_examples : ``bool``, optional (default=False)
        if this is true, we will skip those invalid examples (ones whose gold answer spans
        have all been filtered out)
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_invalid_examples: bool = False,
) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.passage_length_limit = passage_length_limit
self.question_length_limit = question_length_limit
self.skip_invalid_examples = skip_invalid_examples
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json["data"]
logger.info("Reading the dataset")
for article in dataset:
for paragraph_json in article["paragraphs"]:
paragraph = paragraph_json["context"]
tokenized_paragraph = self._tokenizer.tokenize(paragraph)
for question_answer in paragraph_json["qas"]:
question_text = question_answer["question"].strip().replace("\n", "")
answer_texts = [answer["text"] for answer in question_answer["answers"]]
span_starts = [answer["answer_start"] for answer in question_answer["answers"]]
span_ends = [
start + len(answer) for start, answer in zip(span_starts, answer_texts)
]
additional_metadata = {"id": question_answer.get("id", None)}
instance = self.text_to_instance(
question_text,
paragraph,
zip(span_starts, span_ends),
answer_texts,
tokenized_paragraph,
additional_metadata,
)
if instance is not None:
yield instance
@overrides
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
char_spans: List[Tuple[int, int]] = None,
answer_texts: List[str] = None,
passage_tokens: List[Token] = None,
additional_metadata: Dict[str, Any] = None,
) -> Optional[Instance]:
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
question_tokens = self._tokenizer.tokenize(question_text)
if self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit]
if self.question_length_limit is not None:
question_tokens = question_tokens[: self.question_length_limit]
char_spans = char_spans or []
# We need to convert character indices in `passage_text` to token indices in
# `passage_tokens`, as the latter is what we'll actually use for supervision.
token_spans: List[Tuple[int, int]] = []
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
for char_span_start, char_span_end in char_spans:
if char_span_end > passage_offsets[-1][1]:
continue
(span_start, span_end), error = util.char_span_to_token_span(
passage_offsets, (char_span_start, char_span_end)
)
if error:
logger.debug("Passage: %s", passage_text)
logger.debug("Passage tokens: %s", passage_tokens)
logger.debug("Question text: %s", question_text)
logger.debug("Answer span: (%d, %d)", char_span_start, char_span_end)
logger.debug("Token span: (%d, %d)", span_start, span_end)
logger.debug("Tokens in answer: %s", passage_tokens[span_start : span_end + 1])
logger.debug("Answer: %s", passage_text[char_span_start:char_span_end])
token_spans.append((span_start, span_end))
# The original answer is filtered out
if char_spans and not token_spans:
if self.skip_invalid_examples:
return None
else:
token_spans.append((len(passage_tokens) - 1, len(passage_tokens) - 1))
return util.make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
token_spans,
answer_texts,
additional_metadata,
)
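# Editor's note (addition): the SQuAD JSON consumed by _read() above follows the
# standard v1.1 layout (toy placeholder values):
# {"data": [{"paragraphs": [{"context": "<passage text>",
#                            "qas": [{"id": "<qid>",
#                                     "question": "<question text>",
#                                     "answers": [{"text": "<answer>",
#                                                  "answer_start": 0}]}]}]}]}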
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/squad.py
|
import json
import logging
from typing import Any, Dict, List, Tuple
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_rc.dataset_readers import util
logger = logging.getLogger(__name__)
@DatasetReader.register("quac")
class QuACReader(DatasetReader):
"""
Reads a JSON-formatted Question Answering in Context (QuAC) data file
    and returns a ``Dataset`` where the ``Instances`` have four fields: ``question``, a ``ListField``,
    ``passage``, another ``TextField``, and ``span_start`` and ``span_end``, both ``ListField``s composed of
    ``IndexField``s into the ``passage`` ``TextField``.
    Two ``ListField``s composed of ``LabelField``s, ``yesno_list`` and ``followup_list``, are also added.
    We also add a ``MetadataField`` that stores the instance's ID, the original passage text, gold answer
    strings, and token offsets into the original passage, accessible as ``metadata['id']``,
    ``metadata['original_passage']``, ``metadata['answer_text_lists']`` and ``metadata['token_offsets']``.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
        Default is ``SpacyTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
num_context_answers : ``int``, optional
How many previous question answers to consider in a context.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
num_context_answers: int = 0,
) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._num_context_answers = num_context_answers
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json["data"]
logger.info("Reading the dataset")
for article in dataset:
for paragraph_json in article["paragraphs"]:
paragraph = paragraph_json["context"]
tokenized_paragraph = self._tokenizer.tokenize(paragraph)
qas = paragraph_json["qas"]
metadata = {}
metadata["instance_id"] = [qa["id"] for qa in qas]
question_text_list = [qa["question"].strip().replace("\n", "") for qa in qas]
answer_texts_list = [[answer["text"] for answer in qa["answers"]] for qa in qas]
metadata["question"] = question_text_list
metadata["answer_texts_list"] = answer_texts_list
span_starts_list = [
[answer["answer_start"] for answer in qa["answers"]] for qa in qas
]
span_ends_list = []
for answer_starts, an_list in zip(span_starts_list, answer_texts_list):
span_ends = [
start + len(answer) for start, answer in zip(answer_starts, an_list)
]
span_ends_list.append(span_ends)
yesno_list = [str(qa["yesno"]) for qa in qas]
followup_list = [str(qa["followup"]) for qa in qas]
instance = self.text_to_instance(
question_text_list,
paragraph,
span_starts_list,
span_ends_list,
tokenized_paragraph,
yesno_list,
followup_list,
metadata,
)
yield instance
@overrides
def text_to_instance(
self, # type: ignore
question_text_list: List[str],
passage_text: str,
start_span_list: List[List[int]] = None,
end_span_list: List[List[int]] = None,
passage_tokens: List[Token] = None,
yesno_list: List[int] = None,
followup_list: List[int] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
# We need to convert character indices in `passage_text` to token indices in
# `passage_tokens`, as the latter is what we'll actually use for supervision.
answer_token_span_list = []
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
for start_list, end_list in zip(start_span_list, end_span_list):
token_spans: List[Tuple[int, int]] = []
for char_span_start, char_span_end in zip(start_list, end_list):
(span_start, span_end), error = util.char_span_to_token_span(
passage_offsets, (char_span_start, char_span_end)
)
if error:
logger.debug("Passage: %s", passage_text)
logger.debug("Passage tokens: %s", passage_tokens)
logger.debug("Answer span: (%d, %d)", char_span_start, char_span_end)
logger.debug("Token span: (%d, %d)", span_start, span_end)
logger.debug("Tokens in answer: %s", passage_tokens[span_start : span_end + 1])
logger.debug("Answer: %s", passage_text[char_span_start:char_span_end])
token_spans.append((span_start, span_end))
answer_token_span_list.append(token_spans)
question_list_tokens = [self._tokenizer.tokenize(q) for q in question_text_list]
        # Map answer texts to "CANNOTANSWER" if more than half of them are marked as such.
additional_metadata["answer_texts_list"] = [
util.handle_cannot(ans_list) for ans_list in additional_metadata["answer_texts_list"]
]
return util.make_reading_comprehension_instance_quac(
question_list_tokens,
passage_tokens,
self._token_indexers,
passage_text,
answer_token_span_list,
yesno_list,
followup_list,
additional_metadata,
self._num_context_answers,
)
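# Hypothetical usage sketch (not part of the original file; the registered
# reader name and the data path below are assumptions):
#
#     reader = DatasetReader.by_name("quac")(num_context_answers=2)
#     for instance in reader.read("quac_train.json"):
#         pass  # one Instance per dialog: question list, answer spans,
#               # yesno labels, and followup labels over a shared passage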
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/quac.py
|
import itertools
import json
import logging
import string
from collections import defaultdict
from typing import Dict, List, Union, Tuple, Any
from overrides import overrides
from word2number.w2n import word_to_num
from allennlp.common.file_utils import cached_path
from allennlp.data.fields import (
Field,
TextField,
MetadataField,
LabelField,
ListField,
SequenceLabelField,
SpanField,
IndexField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_rc.dataset_readers.util import (
IGNORED_TOKENS,
STRIPPED_CHARACTERS,
make_reading_comprehension_instance,
split_tokens_by_hyphen,
)
logger = logging.getLogger(__name__)
WORD_NUMBER_MAP = {
"zero": 0,
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
"ten": 10,
"eleven": 11,
"twelve": 12,
"thirteen": 13,
"fourteen": 14,
"fifteen": 15,
"sixteen": 16,
"seventeen": 17,
"eighteen": 18,
"nineteen": 19,
}
@DatasetReader.register("drop")
class DropReader(DatasetReader):
"""
Reads a JSON-formatted DROP dataset file and returns instances in a few different possible
formats. The input format is complicated; see the test fixture for an example of what it looks
like. The output formats all contain a question ``TextField``, a passage ``TextField``, and
some kind of answer representation. Because DROP has instances with several different kinds of
answers, this dataset reader allows you to filter out questions that do not have answers of a
    particular type (e.g., remove questions that have numbers as answers, if your model can only
give passage spans as answers). We typically return all possible ways of arriving at a given
answer string, and expect models to marginalize over these possibilities.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
        Default is ``SpacyTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
passage_length_limit : ``int``, optional (default=None)
If specified, we will cut the passage if the length of passage exceeds this limit.
question_length_limit : ``int``, optional (default=None)
        If specified, we will cut the question if the length of the question exceeds this limit.
skip_when_all_empty: ``List[str]``, optional (default=None)
        In some cases, such as preparing training examples, you may want to skip some examples
        when there are no gold labels. You can specify the conditions under which examples should
        be skipped. Currently, you can put "passage_span", "question_span", "addition_subtraction",
        or "counting" in this list to tell the reader to skip an example when no such label is
        found. If not specified, we will keep all the examples.
instance_format: ``str``, optional (default="drop")
We try to be generous in providing a few different formats for the instances in DROP,
in terms of the ``Fields`` that we return for each ``Instance``, to allow for several
different kinds of models. "drop" format will do processing to detect numbers and
various ways those numbers can be arrived at from the passage, and return ``Fields``
related to that. "bert" format only allows passage spans as answers, and provides a
"question_and_passage" field with the two pieces of text joined as BERT expects.
"squad" format provides the same fields that our BiDAF and other SQuAD models expect.
relaxed_span_match_for_finding_labels : ``bool``, optional (default=True)
        The DROP dataset contains multi-span answers, and it is also usually hard to find exact
        span matches for the date-type answers. In order to use as many examples as possible
to train the model, we may not want a strict match for such cases when finding the gold
span labels. If this argument is true, we will treat every span in the multi-span
answers as correct, and every token in the date answer as correct, too. Because models
trained on DROP typically marginalize over all possible answer positions, this is just
being a little more generous in what is being marginalized. Note that this will not
affect evaluation.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_when_all_empty: List[str] = None,
instance_format: str = "drop",
relaxed_span_match_for_finding_labels: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.passage_length_limit = passage_length_limit
self.question_length_limit = question_length_limit
self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
for item in self.skip_when_all_empty:
assert item in [
"passage_span",
"question_span",
"addition_subtraction",
"counting",
], f"Unsupported skip type: {item}"
self.instance_format = instance_format
self.relaxed_span_match_for_finding_labels = relaxed_span_match_for_finding_labels
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
logger.info("Reading the dataset")
kept_count, skip_count = 0, 0
for passage_id, passage_info in dataset.items():
passage_text = passage_info["passage"]
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
for question_answer in passage_info["qa_pairs"]:
question_id = question_answer["query_id"]
question_text = question_answer["question"].strip()
answer_annotations = []
if "answer" in question_answer:
answer_annotations.append(question_answer["answer"])
if "validated_answers" in question_answer:
answer_annotations += question_answer["validated_answers"]
instance = self.text_to_instance(
question_text,
passage_text,
question_id,
passage_id,
answer_annotations,
passage_tokens,
)
if instance is not None:
kept_count += 1
yield instance
else:
skip_count += 1
logger.info(f"Skipped {skip_count} questions, kept {kept_count} questions.")
@overrides
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
question_id: str = None,
passage_id: str = None,
answer_annotations: List[Dict] = None,
passage_tokens: List[Token] = None,
) -> Union[Instance, None]:
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
question_tokens = self._tokenizer.tokenize(question_text)
question_tokens = split_tokens_by_hyphen(question_tokens)
if self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit]
if self.question_length_limit is not None:
question_tokens = question_tokens[: self.question_length_limit]
answer_type: str = None
answer_texts: List[str] = []
if answer_annotations:
# Currently we only use the first annotated answer here, but actually this doesn't affect
# the training, because we only have one annotation for the train set.
answer_type, answer_texts = self.extract_answer_info_from_annotation(
answer_annotations[0]
)
# Tokenize the answer text in order to find the matched span based on token
tokenized_answer_texts = []
for answer_text in answer_texts:
answer_tokens = self._tokenizer.tokenize(answer_text)
answer_tokens = split_tokens_by_hyphen(answer_tokens)
tokenized_answer_texts.append(" ".join(token.text for token in answer_tokens))
if self.instance_format == "squad":
valid_passage_spans = (
self.find_valid_spans(passage_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append((len(passage_tokens) - 1, len(passage_tokens) - 1))
return make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
valid_passage_spans,
# this `answer_texts` will not be used for evaluation
answer_texts,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"valid_passage_spans": valid_passage_spans,
"answer_annotations": answer_annotations,
},
)
elif self.instance_format == "bert":
question_concat_passage_tokens = question_tokens + [Token("[SEP]")] + passage_tokens
valid_passage_spans = []
for span in self.find_valid_spans(passage_tokens, tokenized_answer_texts):
# This span is for `question + [SEP] + passage`.
valid_passage_spans.append(
(span[0] + len(question_tokens) + 1, span[1] + len(question_tokens) + 1)
)
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append(
(
len(question_concat_passage_tokens) - 1,
len(question_concat_passage_tokens) - 1,
)
)
answer_info = {
"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
}
return self.make_bert_drop_instance(
question_tokens,
passage_tokens,
question_concat_passage_tokens,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"answer_annotations": answer_annotations,
},
)
elif self.instance_format == "drop":
numbers_in_passage = []
number_indices = []
for token_index, token in enumerate(passage_tokens):
number = self.convert_word_to_number(token.text)
if number is not None:
numbers_in_passage.append(number)
number_indices.append(token_index)
            # Hack: append a dummy number so the padded number list is never empty.
numbers_in_passage.append(0)
number_indices.append(-1)
numbers_as_tokens = [Token(str(number)) for number in numbers_in_passage]
valid_passage_spans = (
self.find_valid_spans(passage_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
valid_question_spans = (
self.find_valid_spans(question_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
target_numbers = []
# `answer_texts` is a list of valid answers.
for answer_text in answer_texts:
number = self.convert_word_to_number(answer_text)
if number is not None:
target_numbers.append(number)
valid_signs_for_add_sub_expressions: List[List[int]] = []
valid_counts: List[int] = []
if answer_type in ["number", "date"]:
valid_signs_for_add_sub_expressions = self.find_valid_add_sub_expressions(
numbers_in_passage, target_numbers
)
if answer_type in ["number"]:
# Currently we only support count number 0 ~ 9
numbers_for_count = list(range(10))
valid_counts = self.find_valid_counts(numbers_for_count, target_numbers)
type_to_answer_map = {
"passage_span": valid_passage_spans,
"question_span": valid_question_spans,
"addition_subtraction": valid_signs_for_add_sub_expressions,
"counting": valid_counts,
}
if self.skip_when_all_empty and not any(
type_to_answer_map[skip_type] for skip_type in self.skip_when_all_empty
):
return None
answer_info = {
"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
"answer_question_spans": valid_question_spans,
"signs_for_add_sub_expressions": valid_signs_for_add_sub_expressions,
"counts": valid_counts,
}
return self.make_marginal_drop_instance(
question_tokens,
passage_tokens,
numbers_as_tokens,
number_indices,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"original_numbers": numbers_in_passage,
"passage_id": passage_id,
"question_id": question_id,
"answer_info": answer_info,
"answer_annotations": answer_annotations,
},
)
else:
raise ValueError(
f'Expect the instance format to be "drop", "squad" or "bert", '
f"but got {self.instance_format}"
)
@staticmethod
def make_marginal_drop_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
number_tokens: List[Token],
number_indices: List[int],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
question_offsets = [(token.idx, token.idx + len(token.text)) for token in question_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
question_field = TextField(question_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = question_field
number_index_fields: List[Field] = [
IndexField(index, passage_field) for index in number_indices
]
fields["number_indices"] = ListField(number_index_fields)
# This field is actually not required in the model,
# it is used to create the `answer_as_plus_minus_combinations` field, which is a `SequenceLabelField`.
# We cannot use `number_indices` field for creating that, because the `ListField` will not be empty
# when we want to create a new empty field. That will lead to error.
numbers_in_passage_field = TextField(number_tokens, token_indexers)
metadata = {
"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_token_offsets": question_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
"number_tokens": [token.text for token in number_tokens],
"number_indices": number_indices,
}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields: List[Field] = [
SpanField(span[0], span[1], passage_field)
for span in answer_info["answer_passage_spans"]
]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, passage_field))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
question_span_fields: List[Field] = [
SpanField(span[0], span[1], question_field)
for span in answer_info["answer_question_spans"]
]
if not question_span_fields:
question_span_fields.append(SpanField(-1, -1, question_field))
fields["answer_as_question_spans"] = ListField(question_span_fields)
add_sub_signs_field: List[Field] = []
for signs_for_one_add_sub_expression in answer_info["signs_for_add_sub_expressions"]:
add_sub_signs_field.append(
SequenceLabelField(signs_for_one_add_sub_expression, numbers_in_passage_field)
)
if not add_sub_signs_field:
add_sub_signs_field.append(
SequenceLabelField([0] * len(number_tokens), numbers_in_passage_field)
)
fields["answer_as_add_sub_expressions"] = ListField(add_sub_signs_field)
count_fields: List[Field] = [
LabelField(count_label, skip_indexing=True) for count_label in answer_info["counts"]
]
if not count_fields:
count_fields.append(LabelField(-1, skip_indexing=True))
fields["answer_as_counts"] = ListField(count_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def make_bert_drop_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
question_concat_passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
question_field = TextField(question_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = question_field
question_and_passage_field = TextField(question_concat_passage_tokens, token_indexers)
fields["question_and_passage"] = question_and_passage_field
metadata = {
"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields: List[Field] = [
SpanField(span[0], span[1], question_and_passage_field)
for span in answer_info["answer_passage_spans"]
]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, question_and_passage_field))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def extract_answer_info_from_annotation(
answer_annotation: Dict[str, Any]
) -> Tuple[str, List[str]]:
answer_type = None
if answer_annotation["spans"]:
answer_type = "spans"
elif answer_annotation["number"]:
answer_type = "number"
elif any(answer_annotation["date"].values()):
answer_type = "date"
answer_content = answer_annotation[answer_type] if answer_type is not None else None
answer_texts: List[str] = []
if answer_type is None: # No answer
pass
elif answer_type == "spans":
# answer_content is a list of string in this case
answer_texts = answer_content
elif answer_type == "date":
# answer_content is a dict with "month", "day", "year" as the keys
date_tokens = [
answer_content[key]
for key in ["month", "day", "year"]
if key in answer_content and answer_content[key]
]
answer_texts = date_tokens
elif answer_type == "number":
# answer_content is a string of number
answer_texts = [answer_content]
return answer_type, answer_texts
@staticmethod
def convert_word_to_number(word: str, try_to_include_more_numbers=False):
"""
Currently we only support limited types of conversion.
"""
if try_to_include_more_numbers:
            # strip all punctuation from the sides of the word, except for the negative sign
            punctuations = string.punctuation.replace("-", "")
            word = word.strip(punctuations)
            # some words may contain a comma as a delimiter
word = word.replace(",", "")
            # word_to_num will convert hundred, thousand, ... to a number, but we skip those here.
if word in ["hundred", "thousand", "million", "billion", "trillion"]:
return None
try:
number = word_to_num(word)
except ValueError:
try:
number = int(word)
except ValueError:
try:
number = float(word)
except ValueError:
number = None
return number
else:
no_comma_word = word.replace(",", "")
if no_comma_word in WORD_NUMBER_MAP:
number = WORD_NUMBER_MAP[no_comma_word]
else:
try:
number = int(no_comma_word)
except ValueError:
number = None
return number
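    # Illustrative behavior sketch (comments only; expected values assume the
    # WORD_NUMBER_MAP above and standard int()/float() parsing):
    #   convert_word_to_number("seven")  -> 7
    #   convert_word_to_number("1,234")  -> 1234
    #   convert_word_to_number("dog")    -> None
    #   convert_word_to_number("-1.5", try_to_include_more_numbers=True) -> -1.5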
@staticmethod
def find_valid_spans(
passage_tokens: List[Token], answer_texts: List[str]
) -> List[Tuple[int, int]]:
normalized_tokens = [
token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens
]
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
num_answer_tokens = len(answer_tokens)
if answer_tokens[0] not in word_positions:
continue
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index].strip(STRIPPED_CHARACTERS) == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
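    # Example: with passage tokens ["The", "cat", "sat", "."] and answer text
    # "the cat", the matching above is case-insensitive (and skips
    # IGNORED_TOKENS), so it yields the single inclusive token span (0, 1).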
@staticmethod
def find_valid_add_sub_expressions(
numbers: List[int], targets: List[int], max_number_of_numbers_to_consider: int = 2
) -> List[List[int]]:
valid_signs_for_add_sub_expressions = []
# TODO: Try smaller numbers?
for number_of_numbers_to_consider in range(2, max_number_of_numbers_to_consider + 1):
possible_signs = list(itertools.product((-1, 1), repeat=number_of_numbers_to_consider))
for number_combination in itertools.combinations(
enumerate(numbers), number_of_numbers_to_consider
):
indices = [it[0] for it in number_combination]
values = [it[1] for it in number_combination]
for signs in possible_signs:
eval_value = sum(sign * value for sign, value in zip(signs, values))
if eval_value in targets:
labels_for_numbers = [0] * len(numbers) # 0 represents ``not included''.
for index, sign in zip(indices, signs):
labels_for_numbers[index] = (
1 if sign == 1 else 2
) # 1 for positive, 2 for negative
valid_signs_for_add_sub_expressions.append(labels_for_numbers)
return valid_signs_for_add_sub_expressions
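    # Example: numbers=[2, 5, 3] and targets=[7] enumerate all signed pairs;
    # only +2 +5 = 7 hits a target, producing the label list [1, 1, 0]
    # (1: positive sign, 2: negative sign, 0: number not used).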
@staticmethod
def find_valid_counts(count_numbers: List[int], targets: List[int]) -> List[int]:
valid_indices = []
for index, number in enumerate(count_numbers):
if number in targets:
valid_indices.append(index)
return valid_indices
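    # Example: find_valid_counts(list(range(10)), targets=[3, 12]) -> [3];
    # only counts 0-9 are considered, so 12 can never be a valid count label.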
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/drop.py
|
import json
import logging
import os
import tarfile
from typing import Dict, List, Tuple
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp_rc.dataset_readers import util
logger = logging.getLogger(__name__)
@DatasetReader.register("triviaqa")
class TriviaQaReader(DatasetReader):
"""
Reads the TriviaQA dataset into a ``Dataset`` containing ``Instances`` with four fields:
``question`` (a ``TextField``), ``passage`` (another ``TextField``), ``span_start``, and
``span_end`` (both ``IndexFields``).
TriviaQA is split up into several JSON files defining the questions, and a lot of text files
    containing crawled web documents. We read these from a gzipped tarball, to avoid having
    millions of individual files on a filesystem.
Because we need to read both train and validation files from the same tarball, we take the
tarball itself as a constructor parameter, and take the question file as the argument to
``read``. This means that you should give the path to the tarball in the ``dataset_reader``
parameters in your experiment configuration file, and something like ``"wikipedia-train.json"``
for the ``train_data_path`` and ``validation_data_path``.
Parameters
----------
base_tarball_path : ``str``
This is the path to the main ``tar.gz`` file you can download from the TriviaQA website,
with directories ``evidence`` and ``qa``.
unfiltered_tarball_path : ``str``, optional
This is the path to the "unfiltered" TriviaQA data that you can download from the TriviaQA
website, containing just question JSON files that point to evidence files in the base
tarball.
tokenizer : ``Tokenizer``, optional
We'll use this tokenizer on questions and evidence passages, defaulting to
``SpacyTokenizer`` if none is provided.
token_indexers : ``Dict[str, TokenIndexer]``, optional
Determines how both the question and the evidence passages are represented as arrays. See
:class:`TokenIndexer`. Default is to have a single word ID for every token.
"""
def __init__(
self,
base_tarball_path: str,
unfiltered_tarball_path: str = None,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
) -> None:
super().__init__(lazy)
self._base_tarball_path = base_tarball_path
self._unfiltered_tarball_path = unfiltered_tarball_path
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
logger.info("Opening base tarball file at %s", self._base_tarball_path)
base_tarball = tarfile.open(cached_path(self._base_tarball_path), "r")
if "unfiltered" in file_path:
logger.info("Opening unfiltered tarball file at %s", self._unfiltered_tarball_path)
unfiltered_tarball = tarfile.open(cached_path(self._unfiltered_tarball_path), "r")
logger.info("Loading question file from tarball")
data_json = json.loads(unfiltered_tarball.extractfile(file_path).read().decode("utf-8"))
else:
logger.info("Loading question file from tarball")
path = os.path.join("qa", file_path)
data_json = json.loads(base_tarball.extractfile(path).read().decode("utf-8"))
logger.info("Reading the dataset")
for question_json in data_json["Data"]:
question_text = question_json["Question"]
question_tokens = self._tokenizer.tokenize(question_text)
evidence_files: List[List[str]] = [] # contains lines from each evidence file
if "web" in file_path:
for result in question_json["SearchResults"]:
filename = result["Filename"]
evidence_file = base_tarball.extractfile(
os.path.join("evidence", "web", filename)
)
evidence_files.append(
[line.decode("utf-8") for line in evidence_file.readlines()]
)
else:
for result in question_json["EntityPages"]:
filename = result["Filename"]
evidence_file = base_tarball.extractfile(
os.path.join("evidence", "wikipedia", filename)
)
evidence_files.append(
[line.decode("utf-8") for line in evidence_file.readlines()]
)
answer_json = question_json["Answer"]
human_answers = [
util.normalize_text(answer) for answer in answer_json.get("HumanAnswers", [])
]
answer_texts = answer_json["NormalizedAliases"] + human_answers
for paragraph in self.pick_paragraphs(evidence_files, question_text, answer_texts):
paragraph_tokens = self._tokenizer.tokenize(paragraph)
token_spans = util.find_valid_answer_spans(paragraph_tokens, answer_texts)
if not token_spans:
# For now, we'll just ignore instances that we can't find answer spans for.
# Maybe we can do something smarter here later, but this will do for now.
continue
instance = self.text_to_instance(
question_text,
paragraph,
token_spans,
answer_texts,
question_tokens,
paragraph_tokens,
)
yield instance
def pick_paragraphs(
self, evidence_files: List[List[str]], question: str = None, answer_texts: List[str] = None
) -> List[str]:
"""
Given a list of evidence documents, return a list of paragraphs to use as training
examples. Each paragraph returned will be made into one training example.
To aid in picking the best paragraph, you can also optionally pass the question text or the
answer strings. Note, though, that if you actually use the answer strings for picking the
paragraph on the dev or test sets, that's likely cheating, depending on how you've defined
the task.
"""
paragraphs = []
for evidence_file in evidence_files:
whole_document = " ".join(evidence_file)
tokens = whole_document.split(" ")
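            # Baseline paragraph picking: ignore the question and answers and
            # keep only the first 400 whitespace-separated tokens per document.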
paragraph = " ".join(tokens[:400])
paragraphs.append(paragraph)
return paragraphs
@overrides
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
token_spans: List[Tuple[int, int]] = None,
answer_texts: List[str] = None,
question_tokens: List[Token] = None,
passage_tokens: List[Token] = None,
) -> Instance:
if not question_tokens:
question_tokens = self._tokenizer.tokenize(question_text)
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
return util.make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
token_spans,
answer_texts,
)
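# Hypothetical usage sketch (paths are placeholders): the tarball goes to the
# constructor and the question file name to ``read``, as the docstring above
# describes:
#
#     reader = TriviaQaReader(base_tarball_path="triviaqa-rc.tar.gz")
#     instances = reader.read("wikipedia-train.json")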
|
allennlp-reading-comprehension-master
|
allennlp_rc/dataset_readers/triviaqa.py
|
import json
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.predictors.predictor import Predictor
from allennlp.models import Model
@Predictor.register("dialog_qa")
class DialogQAPredictor(Predictor):
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyTokenizer(language=language)
def predict(self, jsonline: str) -> JsonDict:
"""
Make a dialog-style question answering prediction on the supplied input.
        The supplied input json must contain a list of
        question-answer pairs (each containing question, answer, yesno, followup, and id),
        as well as the context (passage).
Parameters
----------
jsonline : ``str``
A json line that has the same format as the quac data file.
Returns
        -------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json(json.loads(jsonline))
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects json that looks like the original quac data file.
"""
paragraph_json = json_dict["paragraphs"][0]
paragraph = paragraph_json["context"]
tokenized_paragraph = self._tokenizer.tokenize(paragraph)
qas = paragraph_json["qas"]
metadata = {}
metadata["instance_id"] = [qa["id"] for qa in qas]
question_text_list = [qa["question"].strip().replace("\n", "") for qa in qas]
answer_texts_list = [[answer["text"] for answer in qa["answers"]] for qa in qas]
metadata["answer_texts_list"] = answer_texts_list
metadata["question"] = question_text_list
span_starts_list = [[answer["answer_start"] for answer in qa["answers"]] for qa in qas]
span_ends_list = []
for st_list, an_list in zip(span_starts_list, answer_texts_list):
span_ends = [start + len(answer) for start, answer in zip(st_list, an_list)]
span_ends_list.append(span_ends)
yesno_list = [str(qa["yesno"]) for qa in qas]
followup_list = [str(qa["followup"]) for qa in qas]
instance = self._dataset_reader.text_to_instance(
question_text_list,
paragraph,
span_starts_list,
span_ends_list,
tokenized_paragraph,
yesno_list,
followup_list,
metadata,
)
return instance
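# Hypothetical usage sketch (archive path and JSON are placeholders; a real
# QuAC line carries a full paragraph plus qas with answers, yesno, followup,
# and id fields):
#
#     predictor = Predictor.from_path("dialog_qa_model.tar.gz", "dialog_qa")
#     result = predictor.predict('{"paragraphs": [{"context": "...", "qas": [...]}]}')
#     print(result["best_span_str"])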
|
allennlp-reading-comprehension-master
|
allennlp_rc/predictors/dialog_qa.py
|
from allennlp_rc.predictors.reading_comprehension import ReadingComprehensionPredictor
from allennlp_rc.predictors.dialog_qa import DialogQAPredictor
from allennlp_rc.predictors.transformer_qa import TransformerQAPredictor
|
allennlp-reading-comprehension-master
|
allennlp_rc/predictors/__init__.py
|
from copy import deepcopy
from typing import Dict, List
from overrides import overrides
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import (
IndexField,
ListField,
LabelField,
SpanField,
SequenceLabelField,
SequenceField,
)
@Predictor.register("reading-comprehension")
class ReadingComprehensionPredictor(Predictor):
"""
Predictor for the :class:`~allennlp_rc.models.bidaf.BidirectionalAttentionFlow` model, and any
other model that takes a question and passage as input.
"""
def predict(self, question: str, passage: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.
Parameters
----------
question : ``str``
A question about the content in the supplied paragraph. The question must be answerable by a
span in the paragraph.
passage : ``str``
A paragraph of information relevant to the question.
Returns
-------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json({"passage": passage, "question": question})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like ``{"question": "...", "passage": "..."}``.
"""
question_text = json_dict["question"]
passage_text = json_dict["passage"]
return self._dataset_reader.text_to_instance(question_text, passage_text)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = deepcopy(instance)
# For BiDAF
if "best_span" in outputs:
span_start_label = outputs["best_span"][0]
span_end_label = outputs["best_span"][1]
passage_field: SequenceField = new_instance["passage"] # type: ignore
new_instance.add_field("span_start", IndexField(int(span_start_label), passage_field))
new_instance.add_field("span_end", IndexField(int(span_end_label), passage_field))
# For NAQANet model. It has the fields: answer_as_passage_spans, answer_as_question_spans,
# answer_as_add_sub_expressions, answer_as_counts. We need labels for all.
elif "answer" in outputs:
answer_type = outputs["answer"]["answer_type"]
# When the problem is a counting problem
if answer_type == "count":
field = ListField([LabelField(int(outputs["answer"]["count"]), skip_indexing=True)])
new_instance.add_field("answer_as_counts", field)
# When the answer is in the passage
elif answer_type == "passage_span":
# TODO(mattg): Currently we only handle one predicted span.
span = outputs["answer"]["spans"][0]
# Convert character span indices into word span indices
word_span_start = None
word_span_end = None
offsets = new_instance["metadata"].metadata["passage_token_offsets"] # type: ignore
for index, offset in enumerate(offsets):
if offset[0] == span[0]:
word_span_start = index
if offset[1] == span[1]:
word_span_end = index
passage_field: SequenceField = new_instance["passage"] # type: ignore
field = ListField([SpanField(word_span_start, word_span_end, passage_field)])
new_instance.add_field("answer_as_passage_spans", field)
# When the answer is an arithmetic calculation
elif answer_type == "arithmetic":
# The different numbers in the passage that the model encounters
sequence_labels = outputs["answer"]["numbers"]
numbers_field: ListField = instance["number_indices"] # type: ignore
# The numbers in the passage are given signs, that's what we are labeling here.
# Negative signs are given the class label 2 (for 0 and 1, the sign matches the
# label).
labels = []
for label in sequence_labels:
if label["sign"] == -1:
labels.append(2)
else:
labels.append(label["sign"])
# There's a dummy number added in the dataset reader to handle passages with no
# numbers; it has a label of 0 (not included).
labels.append(0)
field = ListField([SequenceLabelField(labels, numbers_field)])
new_instance.add_field("answer_as_add_sub_expressions", field)
# When the answer is in the question
elif answer_type == "question_span":
span = outputs["answer"]["spans"][0]
# Convert character span indices into word span indices
word_span_start = None
word_span_end = None
question_offsets = new_instance["metadata"].metadata[ # type: ignore
"question_token_offsets"
]
for index, offset in enumerate(question_offsets):
if offset[0] == span[0]:
word_span_start = index
if offset[1] == span[1]:
word_span_end = index
question_field: SequenceField = new_instance["question"] # type: ignore
field = ListField([SpanField(word_span_start, word_span_end, question_field)])
new_instance.add_field("answer_as_question_spans", field)
return [new_instance]
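# Hypothetical usage sketch (archive path is a placeholder):
#
#     predictor = Predictor.from_path("bidaf_model.tar.gz", "reading-comprehension")
#     result = predictor.predict(
#         question="Who portrayed Neo?",
#         passage="The Matrix is a 1999 film in which Keanu Reeves portrayed Neo.",
#     )
#     print(result["best_span_str"])  # ideally "Keanu Reeves"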
|
allennlp-reading-comprehension-master
|
allennlp_rc/predictors/reading_comprehension.py
|
from typing import List, Dict, Any
from allennlp.models import Model
from overrides import overrides
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance, DatasetReader
from allennlp.predictors.predictor import Predictor
@Predictor.register("transformer_qa")
class TransformerQAPredictor(Predictor):
"""
Predictor for the :class:`~allennlp_rc.models.TransformerQA` model, and any
other model that takes a question and passage as input.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super(TransformerQAPredictor, self).__init__(model, dataset_reader)
self._next_qid = 1
def predict(self, question: str, passage: str) -> JsonDict:
"""
Make a machine comprehension prediction on the supplied input.
See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task.
Parameters
----------
question : ``str``
A question about the content in the supplied paragraph. The question must be answerable by a
span in the paragraph.
passage : ``str``
A paragraph of information relevant to the question.
Returns
-------
A dictionary that represents the prediction made by the system. The answer string will be under the
"best_span_str" key.
"""
return self.predict_json({"context": passage, "question": question})
def predict_json(self, inputs: JsonDict) -> JsonDict:
results = self.predict_batch_json([inputs])
assert len(results) == 1
return results[0]
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
raise NotImplementedError(
"This predictor maps a question to multiple instances. "
"Please use _json_to_instances instead."
)
def _json_to_instances(self, json_dict: JsonDict) -> List[Instance]:
result = list(
self._dataset_reader.make_instances(
qid=str(self._next_qid),
question=json_dict["question"],
answers=[],
context=json_dict["context"],
first_answer_offset=None,
)
)
self._next_qid += 1
return result
@overrides
def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:
instances = []
for json_dict in json_dicts:
instances.extend(self._json_to_instances(json_dict))
return instances
@overrides
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
instances = self._batch_json_to_instances(inputs)
result = self.predict_batch_instance(instances)
assert len(result) == len(inputs)
return result
@overrides
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
# group outputs with the same question id
qid_to_output: Dict[str, Dict[str, Any]] = {}
for instance, output in zip(instances, outputs):
qid = instance["metadata"]["id"]
output["id"] = qid
output["answers"] = instance["metadata"]["answers"]
if qid in qid_to_output:
old_output = qid_to_output[qid]
if old_output["best_span_scores"] < output["best_span_scores"]:
qid_to_output[qid] = output
else:
qid_to_output[qid] = output
return [sanitize(o) for o in qid_to_output.values()]
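# Hypothetical usage sketch (archive path is a placeholder). A long context
# may be split into several instances by the dataset reader; the loop above
# then keeps, per question id, the output with the highest best_span_scores:
#
#     predictor = Predictor.from_path("transformer_qa_model.tar.gz", "transformer_qa")
#     result = predictor.predict(question="...", passage="...")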
|
allennlp-reading-comprehension-master
|
allennlp_rc/predictors/transformer_qa.py
|
import logging
from typing import Any, Dict, List, Optional
import numpy as np
from overrides import overrides
import torch
import torch.nn.functional as F
from torch.nn.functional import nll_loss
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.matrix_attention.linear_matrix_attention import LinearMatrixAttention
from allennlp.nn import InitializerApplicator, util
from allennlp.training.metrics import Average, BooleanAccuracy, CategoricalAccuracy
from allennlp_rc.eval import squad_eval
logger = logging.getLogger(__name__)
@Model.register("dialog_qa")
class DialogQA(Model):
"""
This class implements modified version of BiDAF
(with self attention and residual layer, from Clark and Gardner ACL 17 paper) model as used in
Question Answering in Context (EMNLP 2018) paper [https://arxiv.org/pdf/1808.07036.pdf].
In this set-up, a single instance is a dialog, list of question answer pairs.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
span_start_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
span_end_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span end predictions into the passage state.
dropout : ``float``, optional (default=0.2)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
num_context_answers : ``int``, optional (default=0)
If greater than 0, the model will consider previous question answering context.
    max_span_length: ``int``, optional (default=30)
Maximum token length of the output span.
max_turn_length: ``int``, optional (default=12)
Maximum length of an interaction.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
phrase_layer: Seq2SeqEncoder,
residual_encoder: Seq2SeqEncoder,
span_start_encoder: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
initializer: Optional[InitializerApplicator] = None,
dropout: float = 0.2,
num_context_answers: int = 0,
marker_embedding_dim: int = 10,
max_span_length: int = 30,
max_turn_length: int = 12,
) -> None:
super().__init__(vocab)
self._num_context_answers = num_context_answers
self._max_span_length = max_span_length
self._text_field_embedder = text_field_embedder
self._phrase_layer = phrase_layer
self._marker_embedding_dim = marker_embedding_dim
self._encoding_dim = phrase_layer.get_output_dim()
self._matrix_attention = LinearMatrixAttention(
self._encoding_dim, self._encoding_dim, "x,y,x*y"
)
self._merge_atten = TimeDistributed(
torch.nn.Linear(self._encoding_dim * 4, self._encoding_dim)
)
self._residual_encoder = residual_encoder
if num_context_answers > 0:
self._question_num_marker = torch.nn.Embedding(
max_turn_length, marker_embedding_dim * num_context_answers
)
self._prev_ans_marker = torch.nn.Embedding(
(num_context_answers * 4) + 1, marker_embedding_dim
)
self._self_attention = LinearMatrixAttention(
self._encoding_dim, self._encoding_dim, "x,y,x*y"
)
self._followup_lin = torch.nn.Linear(self._encoding_dim, 3)
self._merge_self_attention = TimeDistributed(
torch.nn.Linear(self._encoding_dim * 3, self._encoding_dim)
)
self._span_start_encoder = span_start_encoder
self._span_end_encoder = span_end_encoder
self._span_start_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 1))
self._span_end_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 1))
self._span_yesno_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 3))
self._span_followup_predictor = TimeDistributed(self._followup_lin)
check_dimensions_match(
phrase_layer.get_input_dim(),
text_field_embedder.get_output_dim() + marker_embedding_dim * num_context_answers,
"phrase layer input dim",
"embedding dim + marker dim * num context answers",
)
if initializer is not None:
initializer(self)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_yesno_accuracy = CategoricalAccuracy()
self._span_followup_accuracy = CategoricalAccuracy()
self._span_gt_yesno_accuracy = CategoricalAccuracy()
self._span_gt_followup_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._official_f1 = Average()
self._variational_dropout = InputVariationalDropout(dropout)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
p1_answer_marker: torch.IntTensor = None,
p2_answer_marker: torch.IntTensor = None,
p3_answer_marker: torch.IntTensor = None,
yesno_list: torch.IntTensor = None,
followup_list: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
p1_answer_marker : ``torch.IntTensor``, optional
This is one of the inputs, but only when num_context_answers > 0.
This is a tensor that has a shape [batch_size, max_qa_count, max_passage_length].
            Most passage tokens will be assigned 'O', except the passage tokens that belong to the
            previous answer in the dialog, which will be assigned labels such as <1_start>, <1_in>, <1_end>.
For more details, look into dataset_readers/util/make_reading_comprehension_instance_quac
p2_answer_marker : ``torch.IntTensor``, optional
This is one of the inputs, but only when num_context_answers > 1.
            It is similar to p1_answer_marker, but marks the second most recent answer in the passage.
p3_answer_marker : ``torch.IntTensor``, optional
This is one of the inputs, but only when num_context_answers > 2.
            It is similar to p1_answer_marker, but marks the third most recent answer in the passage.
yesno_list : ``torch.IntTensor``, optional
This is one of the outputs that we are trying to predict.
Three way classification (the yes/no/not a yes no question).
followup_list : ``torch.IntTensor``, optional
This is one of the outputs that we are trying to predict.
Three way classification (followup / maybe followup / don't followup).
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, original passage text, and token
offsets into the passage for each instance in the batch. We use this for computing
official metrics using the official SQuAD evaluation script. The length of this list
should be the batch size, and each dictionary should have the keys ``id``,
``original_passage``, and ``token_offsets``. If you only want the best span string and
don't care about official metrics, you can omit the ``id`` key.
Returns
-------
        An output dictionary consisting of the following entries. Each entry is a nested list:
        the outer list iterates over dialogs, the inner list over the questions in a dialog.
qid : List[List[str]]
A list of list, consisting of question ids.
        followup : List[List[int]]
            A list of lists of continuation marker prediction indices.
            (y: yes, m: maybe follow up, n: don't follow up)
        yesno : List[List[int]]
            A list of lists of affirmation marker prediction indices.
            (y: yes, x: not a yes/no question, n: no)
best_span_str : List[List[str]]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
token_character_ids = question["token_characters"]["token_characters"]
batch_size, max_qa_count, max_q_len, _ = token_character_ids.size()
total_qa_count = batch_size * max_qa_count
qa_mask = torch.ge(followup_list, 0).view(total_qa_count)
embedded_question = self._text_field_embedder(question, num_wrapping_dims=1)
embedded_question = embedded_question.reshape(
total_qa_count, max_q_len, self._text_field_embedder.get_output_dim()
)
embedded_question = self._variational_dropout(embedded_question)
embedded_passage = self._variational_dropout(self._text_field_embedder(passage))
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question, num_wrapping_dims=1)
question_mask = question_mask.reshape(total_qa_count, max_q_len)
passage_mask = util.get_text_field_mask(passage)
repeated_passage_mask = passage_mask.unsqueeze(1).repeat(1, max_qa_count, 1)
repeated_passage_mask = repeated_passage_mask.view(total_qa_count, passage_length)
if self._num_context_answers > 0:
# Encode question turn number inside the dialog into question embedding.
question_num_ind = util.get_range_vector(
max_qa_count, util.get_device_of(embedded_question)
)
question_num_ind = question_num_ind.unsqueeze(-1).repeat(1, max_q_len)
question_num_ind = question_num_ind.unsqueeze(0).repeat(batch_size, 1, 1)
question_num_ind = question_num_ind.reshape(total_qa_count, max_q_len)
question_num_marker_emb = self._question_num_marker(question_num_ind)
embedded_question = torch.cat([embedded_question, question_num_marker_emb], dim=-1)
# Encode the previous answers in passage embedding.
repeated_embedded_passage = (
embedded_passage.unsqueeze(1)
.repeat(1, max_qa_count, 1, 1)
.view(total_qa_count, passage_length, self._text_field_embedder.get_output_dim())
)
# batch_size * max_qa_count, passage_length, word_embed_dim
p1_answer_marker = p1_answer_marker.view(total_qa_count, passage_length)
p1_answer_marker_emb = self._prev_ans_marker(p1_answer_marker)
repeated_embedded_passage = torch.cat(
[repeated_embedded_passage, p1_answer_marker_emb], dim=-1
)
if self._num_context_answers > 1:
p2_answer_marker = p2_answer_marker.view(total_qa_count, passage_length)
p2_answer_marker_emb = self._prev_ans_marker(p2_answer_marker)
repeated_embedded_passage = torch.cat(
[repeated_embedded_passage, p2_answer_marker_emb], dim=-1
)
if self._num_context_answers > 2:
p3_answer_marker = p3_answer_marker.view(total_qa_count, passage_length)
p3_answer_marker_emb = self._prev_ans_marker(p3_answer_marker)
repeated_embedded_passage = torch.cat(
[repeated_embedded_passage, p3_answer_marker_emb], dim=-1
)
repeated_encoded_passage = self._variational_dropout(
self._phrase_layer(repeated_embedded_passage, repeated_passage_mask)
)
else:
encoded_passage = self._variational_dropout(
self._phrase_layer(embedded_passage, passage_mask)
)
repeated_encoded_passage = encoded_passage.unsqueeze(1).repeat(1, max_qa_count, 1, 1)
repeated_encoded_passage = repeated_encoded_passage.view(
total_qa_count, passage_length, self._encoding_dim
)
encoded_question = self._variational_dropout(
self._phrase_layer(embedded_question, question_mask)
)
# Shape: (batch_size * max_qa_count, passage_length, question_length)
passage_question_similarity = self._matrix_attention(
repeated_encoded_passage, encoded_question
)
# Shape: (batch_size * max_qa_count, passage_length, question_length)
passage_question_attention = util.masked_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size * max_qa_count, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = util.replace_masked_values(
passage_question_similarity, question_mask.unsqueeze(1), -1e7
)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
question_passage_attention = util.masked_softmax(
question_passage_similarity, repeated_passage_mask
)
# Shape: (batch_size * max_qa_count, encoding_dim)
question_passage_vector = util.weighted_sum(
repeated_encoded_passage, question_passage_attention
)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(
total_qa_count, passage_length, self._encoding_dim
)
# Shape: (batch_size * max_qa_count, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat(
[
repeated_encoded_passage,
passage_question_vectors,
repeated_encoded_passage * passage_question_vectors,
repeated_encoded_passage * tiled_question_passage_vector,
],
dim=-1,
)
final_merged_passage = F.relu(self._merge_atten(final_merged_passage))
residual_layer = self._variational_dropout(
self._residual_encoder(final_merged_passage, repeated_passage_mask)
)
self_attention_matrix = self._self_attention(residual_layer, residual_layer)
mask = repeated_passage_mask.reshape(
total_qa_count, passage_length, 1
) * repeated_passage_mask.reshape(total_qa_count, 1, passage_length)
self_mask = torch.eye(
passage_length, passage_length, dtype=torch.bool, device=self_attention_matrix.device
)
self_mask = self_mask.reshape(1, passage_length, passage_length)
mask = mask & ~self_mask
self_attention_probs = util.masked_softmax(self_attention_matrix, mask)
# (batch, passage_len, passage_len) * (batch, passage_len, dim) -> (batch, passage_len, dim)
self_attention_vecs = torch.matmul(self_attention_probs, residual_layer)
self_attention_vecs = torch.cat(
[self_attention_vecs, residual_layer, residual_layer * self_attention_vecs], dim=-1
)
residual_layer = F.relu(self._merge_self_attention(self_attention_vecs))
final_merged_passage = final_merged_passage + residual_layer
# batch_size * maxqa_pair_len * max_passage_len * 200
final_merged_passage = self._variational_dropout(final_merged_passage)
start_rep = self._span_start_encoder(final_merged_passage, repeated_passage_mask)
span_start_logits = self._span_start_predictor(start_rep).squeeze(-1)
end_rep = self._span_end_encoder(
torch.cat([final_merged_passage, start_rep], dim=-1), repeated_passage_mask
)
span_end_logits = self._span_end_predictor(end_rep).squeeze(-1)
span_yesno_logits = self._span_yesno_predictor(end_rep).squeeze(-1)
span_followup_logits = self._span_followup_predictor(end_rep).squeeze(-1)
span_start_logits = util.replace_masked_values(
span_start_logits, repeated_passage_mask, -1e7
)
# batch_size * maxqa_len_pair, max_document_len
span_end_logits = util.replace_masked_values(span_end_logits, repeated_passage_mask, -1e7)
best_span = self._get_best_span_yesno_followup(
span_start_logits,
span_end_logits,
span_yesno_logits,
span_followup_logits,
self._max_span_length,
)
output_dict: Dict[str, Any] = {}
# Compute the loss.
if span_start is not None:
loss = nll_loss(
util.masked_log_softmax(span_start_logits, repeated_passage_mask),
span_start.view(-1),
ignore_index=-1,
)
self._span_start_accuracy(span_start_logits, span_start.view(-1), mask=qa_mask)
loss += nll_loss(
util.masked_log_softmax(span_end_logits, repeated_passage_mask),
span_end.view(-1),
ignore_index=-1,
)
self._span_end_accuracy(span_end_logits, span_end.view(-1), mask=qa_mask)
self._span_accuracy(
best_span[:, 0:2],
torch.stack([span_start, span_end], -1).view(total_qa_count, 2),
mask=qa_mask.unsqueeze(1).expand(-1, 2),
)
            # Gather the yesno/followup logits at the span-end positions to compute their losses.
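            # Note: ``span_yesno_logits`` has shape (total_qa_count, passage_length, 3);
            # after ``.view(-1)``, the logit for (qa i, token t, class c) sits at flat
            # index i * passage_length * 3 + t * 3 + c, which is what the offset
            # arithmetic below computes for t = gold (and later predicted) span end.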
gold_span_end_loc = []
span_end = span_end.view(total_qa_count).squeeze().data.cpu().numpy()
for i in range(0, total_qa_count):
gold_span_end_loc.append(max(span_end[i] * 3 + i * passage_length * 3, 0))
gold_span_end_loc.append(max(span_end[i] * 3 + i * passage_length * 3 + 1, 0))
gold_span_end_loc.append(max(span_end[i] * 3 + i * passage_length * 3 + 2, 0))
gold_span_end_loc = span_start.new(gold_span_end_loc)
pred_span_end_loc = []
for i in range(0, total_qa_count):
pred_span_end_loc.append(max(best_span[i][1] * 3 + i * passage_length * 3, 0))
pred_span_end_loc.append(max(best_span[i][1] * 3 + i * passage_length * 3 + 1, 0))
pred_span_end_loc.append(max(best_span[i][1] * 3 + i * passage_length * 3 + 2, 0))
predicted_end = span_start.new(pred_span_end_loc)
_yesno = span_yesno_logits.view(-1).index_select(0, gold_span_end_loc).view(-1, 3)
_followup = span_followup_logits.view(-1).index_select(0, gold_span_end_loc).view(-1, 3)
loss += nll_loss(F.log_softmax(_yesno, dim=-1), yesno_list.view(-1), ignore_index=-1)
loss += nll_loss(
F.log_softmax(_followup, dim=-1), followup_list.view(-1), ignore_index=-1
)
_yesno = span_yesno_logits.view(-1).index_select(0, predicted_end).view(-1, 3)
_followup = span_followup_logits.view(-1).index_select(0, predicted_end).view(-1, 3)
self._span_yesno_accuracy(_yesno, yesno_list.view(-1), mask=qa_mask)
self._span_followup_accuracy(_followup, followup_list.view(-1), mask=qa_mask)
output_dict["loss"] = loss
# Compute F1 and preparing the output dictionary.
output_dict["best_span_str"] = []
output_dict["qid"] = []
output_dict["followup"] = []
output_dict["yesno"] = []
best_span_cpu = best_span.detach().cpu().numpy()
for i in range(batch_size):
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["token_offsets"]
f1_score = 0.0
per_dialog_best_span_list = []
per_dialog_yesno_list = []
per_dialog_followup_list = []
per_dialog_query_id_list = []
for per_dialog_query_index, (iid, answer_texts) in enumerate(
zip(metadata[i]["instance_id"], metadata[i]["answer_texts_list"])
):
predicted_span = tuple(best_span_cpu[i * max_qa_count + per_dialog_query_index])
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
yesno_pred = predicted_span[2]
followup_pred = predicted_span[3]
per_dialog_yesno_list.append(yesno_pred)
per_dialog_followup_list.append(followup_pred)
per_dialog_query_id_list.append(iid)
best_span_string = passage_str[start_offset:end_offset]
per_dialog_best_span_list.append(best_span_string)
if answer_texts:
if len(answer_texts) > 1:
t_f1 = []
                        # Compute F1 against each leave-one-out set of N-1 human references and average the scores.
for answer_index in range(len(answer_texts)):
idxes = list(range(len(answer_texts)))
idxes.pop(answer_index)
refs = [answer_texts[z] for z in idxes]
t_f1.append(
squad_eval.metric_max_over_ground_truths(
squad_eval.f1_score, best_span_string, refs
)
)
f1_score = 1.0 * sum(t_f1) / len(t_f1)
else:
f1_score = squad_eval.metric_max_over_ground_truths(
squad_eval.f1_score, best_span_string, answer_texts
)
self._official_f1(100 * f1_score)
output_dict["qid"].append(per_dialog_query_id_list)
output_dict["best_span_str"].append(per_dialog_best_span_list)
output_dict["yesno"].append(per_dialog_yesno_list)
output_dict["followup"].append(per_dialog_followup_list)
return output_dict
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
yesno_tags = [
[self.vocab.get_token_from_index(x, namespace="yesno_labels") for x in yn_list]
for yn_list in output_dict.pop("yesno")
]
followup_tags = [
[self.vocab.get_token_from_index(x, namespace="followup_labels") for x in followup_list]
for followup_list in output_dict.pop("followup")
]
output_dict["yesno"] = yesno_tags
output_dict["followup"] = followup_tags
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"yesno": self._span_yesno_accuracy.get_metric(reset),
"followup": self._span_followup_accuracy.get_metric(reset),
"f1": self._official_f1.get_metric(reset),
}
@staticmethod
def _get_best_span_yesno_followup(
span_start_logits: torch.Tensor,
span_end_logits: torch.Tensor,
span_yesno_logits: torch.Tensor,
span_followup_logits: torch.Tensor,
max_span_length: int,
) -> torch.Tensor:
        # Returns the index of the highest-scoring span that is no longer than
        # `max_span_length` tokens, as well as the yesno prediction bit and followup
        # prediction bit from the predicted span end token.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
max_span_log_prob = [-1e20] * batch_size
span_start_argmax = [0] * batch_size
best_word_span = span_start_logits.new_zeros((batch_size, 4), dtype=torch.long)
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
span_yesno_logits = span_yesno_logits.data.cpu().numpy()
span_followup_logits = span_followup_logits.data.cpu().numpy()
for b_i in range(batch_size):
for j in range(passage_length):
val1 = span_start_logits[b_i, span_start_argmax[b_i]]
if val1 < span_start_logits[b_i, j]:
span_start_argmax[b_i] = j
val1 = span_start_logits[b_i, j]
val2 = span_end_logits[b_i, j]
if val1 + val2 > max_span_log_prob[b_i]:
if j - span_start_argmax[b_i] > max_span_length:
continue
best_word_span[b_i, 0] = span_start_argmax[b_i]
best_word_span[b_i, 1] = j
max_span_log_prob[b_i] = val1 + val2
for b_i in range(batch_size):
j = best_word_span[b_i, 1]
yesno_pred = np.argmax(span_yesno_logits[b_i, j])
followup_pred = np.argmax(span_followup_logits[b_i, j])
best_word_span[b_i, 2] = int(yesno_pred)
best_word_span[b_i, 3] = int(followup_pred)
return best_word_span
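# A minimal sanity-check sketch (illustrative only, not part of the model): run
# the constrained decoding above on toy logits, assuming the surrounding class
# is `DialogQA` as imported elsewhere in this repo.
if __name__ == "__main__":
    _start = torch.tensor([[0.1, 3.0, 0.2, 0.1]])
    _end = torch.tensor([[0.0, 0.1, 2.0, 0.3]])
    _yesno = torch.zeros(1, 4, 3)
    _followup = torch.zeros(1, 4, 3)
    # Expected: tensor([[1, 2, 0, 0]]) - the span (1, 2) scores 3.0 + 2.0, and the
    # all-zero yesno/followup logits argmax to class 0.
    print(DialogQA._get_best_span_yesno_followup(_start, _end, _yesno, _followup, 2))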
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/dialog_qa.py
|
import torch
def get_best_span(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor) -> torch.Tensor:
"""
This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()``
in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can
directly import this function without the class.
We call the inputs "logits" - they could either be unnormalized logits or normalized log
probabilities. A log_softmax operation is a constant shifting of the entire logit
vector, so taking an argmax over either one gives the same result.
"""
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log()
valid_span_log_probs = span_log_probs + span_log_mask
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return torch.stack([span_start_indices, span_end_indices], dim=-1)
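# A minimal usage sketch (illustrative only, not part of the original module):
# decode the best span from toy logits with the function above.
if __name__ == "__main__":
    _start_logits = torch.tensor([[0.1, 2.0, 0.3]])
    _end_logits = torch.tensor([[0.2, 0.1, 1.5]])
    # Start index 1 and end index 2 maximize the summed logits among valid
    # (start <= end) spans, so this prints tensor([[1, 2]]).
    print(get_best_span(_start_logits, _end_logits))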
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/util.py
|
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from allennlp.nn.util import masked_softmax
from allennlp_rc.models.util import get_best_span
from allennlp_rc.eval import SquadEmAndF1
@Model.register("qanet")
class QaNet(Model):
"""
This class implements Adams Wei Yu's `QANet Model <https://openreview.net/forum?id=B14TlG-RW>`_
for machine reading comprehension published at ICLR 2018.
The overall architecture of QANet is very similar to BiDAF. The main difference is that QANet
replaces the RNN encoder with CNN + self-attention. There are also some minor differences in the
modeling layer and output layer.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : ``int``
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the passage-question attention.
matrix_attention_layer : ``MatrixAttention``
The matrix attention function that we will use when comparing encoded passage and question
representations.
modeling_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
dropout_prob : ``float``, optional (default=0.1)
If greater than 0, we will apply dropout with this probability between layers.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention_layer: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
dropout_prob: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
text_embed_dim = text_field_embedder.get_output_dim()
encoding_in_dim = phrase_layer.get_input_dim()
encoding_out_dim = phrase_layer.get_output_dim()
modeling_in_dim = modeling_layer.get_input_dim()
modeling_out_dim = modeling_layer.get_output_dim()
self._text_field_embedder = text_field_embedder
self._embedding_proj_layer = torch.nn.Linear(text_embed_dim, encoding_in_dim)
self._highway_layer = Highway(encoding_in_dim, num_highway_layers)
self._encoding_proj_layer = torch.nn.Linear(encoding_in_dim, encoding_in_dim)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention_layer
self._modeling_proj_layer = torch.nn.Linear(encoding_out_dim * 4, modeling_in_dim)
self._modeling_layer = modeling_layer
self._span_start_predictor = torch.nn.Linear(modeling_out_dim * 2, 1)
self._span_end_predictor = torch.nn.Linear(modeling_out_dim * 2, 1)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._metrics = SquadEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout_prob) if dropout_prob > 0 else lambda x: x
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question tokens, passage tokens, original passage
text, and token offsets into the passage for each instance in the batch. The length
of this list should be the batch size, and each dictionary should have the keys
``question_tokens``, ``passage_tokens``, ``original_passage``, and ``token_offsets``.
Returns
-------
An output dictionary consisting of:
span_start_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span start position.
span_start_probs : torch.FloatTensor
The result of ``softmax(span_start_logits)``.
span_end_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span end position (inclusive).
span_end_probs : torch.FloatTensor
The result of ``softmax(span_end_logits)``.
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
question_mask = util.get_text_field_mask(question)
passage_mask = util.get_text_field_mask(passage)
embedded_question = self._dropout(self._text_field_embedder(question))
embedded_passage = self._dropout(self._text_field_embedder(passage))
embedded_question = self._highway_layer(self._embedding_proj_layer(embedded_question))
embedded_passage = self._highway_layer(self._embedding_proj_layer(embedded_passage))
batch_size = embedded_question.size(0)
projected_embedded_question = self._encoding_proj_layer(embedded_question)
projected_embedded_passage = self._encoding_proj_layer(embedded_passage)
encoded_question = self._dropout(
self._phrase_layer(projected_embedded_question, question_mask)
)
encoded_passage = self._dropout(
self._phrase_layer(projected_embedded_passage, passage_mask)
)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = masked_softmax(
passage_question_similarity, question_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# Shape: (batch_size, question_length, passage_length)
question_passage_attention = masked_softmax(
passage_question_similarity.transpose(1, 2), passage_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, passage_length)
attention_over_attention = torch.bmm(passage_question_attention, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
passage_passage_vectors = util.weighted_sum(encoded_passage, attention_over_attention)
# Shape: (batch_size, passage_length, encoding_dim * 4)
merged_passage_attention_vectors = self._dropout(
torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * passage_passage_vectors,
],
dim=-1,
)
)
modeled_passage_list = [self._modeling_proj_layer(merged_passage_attention_vectors)]
for _ in range(3):
modeled_passage = self._dropout(
self._modeling_layer(modeled_passage_list[-1], passage_mask)
)
modeled_passage_list.append(modeled_passage)
        # Shape: (batch_size, passage_length, modeling_dim * 2)
span_start_input = torch.cat([modeled_passage_list[-3], modeled_passage_list[-2]], dim=-1)
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length, modeling_dim * 2)
span_end_input = torch.cat([modeled_passage_list[-3], modeled_passage_list[-1]], dim=-1)
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e32)
span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e32)
# Shape: (batch_size, passage_length)
span_start_probs = torch.nn.functional.softmax(span_start_logits, dim=-1)
span_end_probs = torch.nn.functional.softmax(span_end_logits, dim=-1)
best_span = get_best_span(span_start_logits, span_end_logits)
output_dict = {
"passage_question_attention": passage_question_attention,
"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_span,
}
# Compute the loss for training.
if span_start is not None:
loss = nll_loss(
util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1)
)
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(
util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1)
)
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.cat([span_start, span_end], -1))
output_dict["loss"] = loss
# Compute the EM and F1 on SQuAD and add the tokenized input to the output.
if metadata is not None:
output_dict["best_span_str"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]["question_tokens"])
passage_tokens.append(metadata[i]["passage_tokens"])
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["token_offsets"]
predicted_span = tuple(best_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict["best_span_str"].append(best_span_string)
answer_texts = metadata[i].get("answer_texts", [])
if answer_texts:
self._metrics(best_span_string, answer_texts)
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._metrics.get_metric(reset)
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"em": exact_match,
"f1": f1_score,
}
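# An illustrative sketch (not part of the model): the "attention over attention"
# trick used in `forward` above, on unmasked toy tensors. Composing the
# passage-to-question attention with the question-to-passage attention via a
# batched matrix product yields a passage-to-passage attention whose rows are
# still probability distributions.
if __name__ == "__main__":
    _similarity = torch.randn(2, 5, 3)  # (batch, passage_len, question_len)
    _p2q = torch.softmax(_similarity, dim=-1)  # rows sum to 1 over the question
    _q2p = torch.softmax(_similarity.transpose(1, 2), dim=-1)  # over the passage
    _p2p = torch.bmm(_p2q, _q2p)  # (batch, passage_len, passage_len)
    print(_p2p.sum(-1))  # every entry is ~1.0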
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/qanet.py
|
from allennlp_rc.models.bidaf import BidirectionalAttentionFlow
from allennlp_rc.models.bidaf_ensemble import BidafEnsemble
from allennlp_rc.models.dialog_qa import DialogQA
from allennlp_rc.models.naqanet import NumericallyAugmentedQaNet
from allennlp_rc.models.qanet import QaNet
from allennlp_rc.models.transformer_qa import TransformerQA
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/__init__.py
|
import logging
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from allennlp_rc.models.util import get_best_span
from allennlp_rc.eval import SquadEmAndF1
logger = logging.getLogger(__name__)
@Model.register("bidaf")
class BidirectionalAttentionFlow(Model):
"""
This class implements Minjoon Seo's `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_
for answering reading comprehension questions (ICLR 2017).
The basic layout is pretty simple: encode words as a combination of word embeddings and a
character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of
attentions to put question information into the passage word representations (this is the only
part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and
do a softmax over span start and span end.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : ``int``
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
    matrix_attention : ``MatrixAttention``
        The matrix attention function that we will use when comparing encoded passage and question
        representations.
modeling_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
span_end_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
dropout : ``float``, optional (default=0.2)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
mask_lstms : ``bool``, optional (default=True)
If ``False``, we will skip passing the mask to the LSTM layers. This gives a ~2x speedup,
with only a slight performance decrease, if any. We haven't experimented much with this
yet, but have confirmed that we still get very similar performance with much faster
training times. We still use the mask for all softmaxes, but avoid the shuffling that's
required when using masking with pytorch LSTMs.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(
Highway(text_field_embedder.get_output_dim(), num_highway_layers)
)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
span_start_input_dim = encoding_dim * 4 + modeling_dim
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
# Bidaf has lots of layer dimensions which need to match up - these aren't necessarily
# obvious from the configuration files, so we check here.
check_dimensions_match(
modeling_layer.get_input_dim(),
4 * encoding_dim,
"modeling layer input dim",
"4 * encoding dim",
)
check_dimensions_match(
text_field_embedder.get_output_dim(),
phrase_layer.get_input_dim(),
"text field embedder output dim",
"phrase layer input dim",
)
check_dimensions_match(
span_end_encoder.get_input_dim(),
4 * encoding_dim + 3 * modeling_dim,
"span end encoder input dim",
"4 * encoding dim + 3 * modeling dim",
)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._mask_lstms = mask_lstms
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
        metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question tokens, passage tokens, original passage
text, and token offsets into the passage for each instance in the batch. The length
of this list should be the batch size, and each dictionary should have the keys
``question_tokens``, ``passage_tokens``, ``original_passage``, and ``token_offsets``.
Returns
-------
An output dictionary consisting of:
span_start_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span start position.
span_start_probs : torch.FloatTensor
The result of ``softmax(span_start_logits)``.
span_end_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span end position (inclusive).
span_end_probs : torch.FloatTensor
The result of ``softmax(span_end_logits)``.
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question)
passage_mask = util.get_text_field_mask(passage)
question_lstm_mask = question_mask if self._mask_lstms else None
passage_lstm_mask = passage_mask if self._mask_lstms else None
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size(-1)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = util.masked_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = util.replace_masked_values(
passage_question_similarity, question_mask.unsqueeze(1), -1e7
)
# Shape: (batch_size, passage_length)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
# Shape: (batch_size, passage_length)
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
# Shape: (batch_size, encoding_dim)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(
batch_size, passage_length, encoding_dim
)
# Shape: (batch_size, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * tiled_question_passage_vector,
],
dim=-1,
)
modeled_passage = self._dropout(
self._modeling_layer(final_merged_passage, passage_lstm_mask)
)
modeling_dim = modeled_passage.size(-1)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim))
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=-1))
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length)
span_start_probs = util.masked_softmax(span_start_logits, passage_mask)
# Shape: (batch_size, modeling_dim)
span_start_representation = util.weighted_sum(modeled_passage, span_start_probs)
# Shape: (batch_size, passage_length, modeling_dim)
tiled_start_representation = span_start_representation.unsqueeze(1).expand(
batch_size, passage_length, modeling_dim
)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim * 3)
span_end_representation = torch.cat(
[
final_merged_passage,
modeled_passage,
tiled_start_representation,
modeled_passage * tiled_start_representation,
],
dim=-1,
)
        # Shape: (batch_size, passage_length, span_end_encoding_dim)
encoded_span_end = self._dropout(
self._span_end_encoder(span_end_representation, passage_lstm_mask)
)
# Shape: (batch_size, passage_length, encoding_dim * 4 + span_end_encoding_dim)
span_end_input = self._dropout(torch.cat([final_merged_passage, encoded_span_end], dim=-1))
span_end_logits = self._span_end_predictor(span_end_input).squeeze(-1)
span_end_probs = util.masked_softmax(span_end_logits, passage_mask)
span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e7)
span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e7)
best_span = get_best_span(span_start_logits, span_end_logits)
output_dict = {
"passage_question_attention": passage_question_attention,
"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_span,
}
# Compute the loss for training.
if span_start is not None:
loss = nll_loss(
util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze(-1)
)
self._span_start_accuracy(span_start_logits, span_start.squeeze(-1))
loss += nll_loss(
util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze(-1)
)
self._span_end_accuracy(span_end_logits, span_end.squeeze(-1))
self._span_accuracy(best_span, torch.cat([span_start, span_end], -1))
output_dict["loss"] = loss
# Compute the EM and F1 on SQuAD and add the tokenized input to the output.
if metadata is not None:
output_dict["best_span_str"] = []
question_tokens = []
passage_tokens = []
token_offsets = []
for i in range(batch_size):
question_tokens.append(metadata[i]["question_tokens"])
passage_tokens.append(metadata[i]["passage_tokens"])
token_offsets.append(metadata[i]["token_offsets"])
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["token_offsets"]
predicted_span = tuple(best_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict["best_span_str"].append(best_span_string)
answer_texts = metadata[i].get("answer_texts", [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
output_dict["token_offsets"] = token_offsets
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._squad_metrics.get_metric(reset)
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"em": exact_match,
"f1": f1_score,
}
@staticmethod
def get_best_span(
span_start_logits: torch.Tensor, span_end_logits: torch.Tensor
) -> torch.Tensor:
# We call the inputs "logits" - they could either be unnormalized logits or normalized log
# probabilities. A log_softmax operation is a constant shifting of the entire logit
# vector, so taking an argmax over either one gives the same result.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = (
torch.triu(torch.ones((passage_length, passage_length), device=device))
.log()
.unsqueeze(0)
)
valid_span_log_probs = span_log_probs + span_log_mask
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return torch.stack([span_start_indices, span_end_indices], dim=-1)
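# A quick check of the claim in the comment above (illustrative only):
# log_softmax shifts each row of the logits by a constant, so the argmax - and
# hence the decoded span - is unchanged.
if __name__ == "__main__":
    _logits = torch.randn(3, 7)
    _log_probs = torch.nn.functional.log_softmax(_logits, dim=-1)
    assert torch.equal(_logits.argmax(-1), _log_probs.argmax(-1))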
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/bidaf.py
|
from typing import Dict, List, Any
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.models.ensemble import Ensemble
from allennlp.models.archival import load_archive
from allennlp.models.model import Model
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp_rc.models.bidaf import BidirectionalAttentionFlow
from allennlp_rc.models.util import get_best_span
from allennlp_rc.eval import SquadEmAndF1
@Model.register("bidaf-ensemble")
class BidafEnsemble(Ensemble):
"""
This class ensembles the output from multiple BiDAF models.
It combines results from the submodels by averaging the start and end span probabilities.
"""
def __init__(self, submodels: List[BidirectionalAttentionFlow]) -> None:
super().__init__(submodels)
self._squad_metrics = SquadEmAndF1()
@overrides
def forward(
self, # type: ignore
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
span_start: torch.IntTensor = None,
span_end: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
The forward method runs each of the submodels, then selects the best span from the subresults.
The best span is determined by averaging the probabilities for the start and end of the spans.
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
span_start : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            beginning position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
span_end : ``torch.IntTensor``, optional
From an ``IndexField``. This is one of the things we are trying to predict - the
            ending position of the answer within the passage. This is an `inclusive` token index.
If this is given, we will compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question ID, original passage text, and token
offsets into the passage for each instance in the batch. We use this for computing
official metrics using the official SQuAD evaluation script. The length of this list
should be the batch size, and each dictionary should have the keys ``id``,
``original_passage``, and ``token_offsets``. If you only want the best span string and
don't care about official metrics, you can omit the ``id`` key.
Returns
-------
An output dictionary consisting of:
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
subresults = [
submodel(question, passage, span_start, span_end, metadata)
for submodel in self.submodels
]
batch_size = len(subresults[0]["best_span"])
best_span = ensemble(subresults)
output = {"best_span": best_span, "best_span_str": []}
for index in range(batch_size):
if metadata is not None:
passage_str = metadata[index]["original_passage"]
offsets = metadata[index]["token_offsets"]
predicted_span = tuple(best_span[index].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output["best_span_str"].append(best_span_string)
answer_texts = metadata[index].get("answer_texts", [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._squad_metrics.get_metric(reset)
return {"em": exact_match, "f1": f1_score}
# The logic here requires a custom from_params.
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> "BidafEnsemble": # type: ignore
if vocab:
raise ConfigurationError("vocab should be None")
submodels = []
paths = params.pop("submodels")
for path in paths:
submodels.append(load_archive(path).model)
return cls(submodels=submodels)
def ensemble(subresults: List[Dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Identifies the best prediction given the results from the submodels.
Parameters
----------
subresults : List[Dict[str, torch.Tensor]]
Results of each submodel.
Returns
-------
    The best span for each instance, as a tensor of shape ``(batch_size, 2)``.
"""
# Choose the highest average confidence span.
span_start_probs = sum(subresult["span_start_probs"] for subresult in subresults) / len(
subresults
)
span_end_probs = sum(subresult["span_end_probs"] for subresult in subresults) / len(subresults)
return get_best_span(span_start_probs.log(), span_end_probs.log()) # type: ignore
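# A minimal sketch (illustrative only, not part of the original module):
# ensembling two fake submodel outputs. `ensemble` only reads the
# `span_start_probs` / `span_end_probs` entries of each subresult.
if __name__ == "__main__":
    _a = {"span_start_probs": torch.tensor([[0.7, 0.3]]), "span_end_probs": torch.tensor([[0.2, 0.8]])}
    _b = {"span_start_probs": torch.tensor([[0.6, 0.4]]), "span_end_probs": torch.tensor([[0.1, 0.9]])}
    # Averaged probabilities favor start 0 and end 1, so this prints tensor([[0, 1]]).
    print(ensemble([_a, _b]))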
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/bidaf_ensemble.py
|
from typing import Any, Dict, List, Optional
import logging
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Highway
from allennlp.nn.activations import Activation
from allennlp.modules.feedforward import FeedForward
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import masked_softmax
from allennlp_rc.models.util import get_best_span
from allennlp_rc.eval import DropEmAndF1
logger = logging.getLogger(__name__)
@Model.register("naqanet")
class NumericallyAugmentedQaNet(Model):
"""
This class augments the QANet model with some rudimentary numerical reasoning abilities, as
published in the original DROP paper.
The main idea here is that instead of just predicting a passage span after doing all of the
QANet modeling stuff, we add several different "answer abilities": predicting a span from the
question, predicting a count, or predicting an arithmetic expression. Near the end of the
QANet model, we have a variable that predicts what kind of answer type we need, and each branch
has separate modeling logic to predict that answer type. We then marginalize over all possible
ways of getting to the right answer through each of these answer types.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
matrix_attention_layer: MatrixAttention,
modeling_layer: Seq2SeqEncoder,
dropout_prob: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
answering_abilities: List[str] = None,
) -> None:
super().__init__(vocab, regularizer)
if answering_abilities is None:
self.answering_abilities = [
"passage_span_extraction",
"question_span_extraction",
"addition_subtraction",
"counting",
]
else:
self.answering_abilities = answering_abilities
text_embed_dim = text_field_embedder.get_output_dim()
encoding_in_dim = phrase_layer.get_input_dim()
encoding_out_dim = phrase_layer.get_output_dim()
modeling_in_dim = modeling_layer.get_input_dim()
modeling_out_dim = modeling_layer.get_output_dim()
self._text_field_embedder = text_field_embedder
self._embedding_proj_layer = torch.nn.Linear(text_embed_dim, encoding_in_dim)
self._highway_layer = Highway(encoding_in_dim, num_highway_layers)
self._encoding_proj_layer = torch.nn.Linear(encoding_in_dim, encoding_in_dim)
self._phrase_layer = phrase_layer
self._matrix_attention = matrix_attention_layer
self._modeling_proj_layer = torch.nn.Linear(encoding_out_dim * 4, modeling_in_dim)
self._modeling_layer = modeling_layer
self._passage_weights_predictor = torch.nn.Linear(modeling_out_dim, 1)
self._question_weights_predictor = torch.nn.Linear(encoding_out_dim, 1)
if len(self.answering_abilities) > 1:
self._answer_ability_predictor = FeedForward(
modeling_out_dim + encoding_out_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, len(self.answering_abilities)],
num_layers=2,
dropout=dropout_prob,
)
if "passage_span_extraction" in self.answering_abilities:
self._passage_span_extraction_index = self.answering_abilities.index(
"passage_span_extraction"
)
self._passage_span_start_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
self._passage_span_end_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
if "question_span_extraction" in self.answering_abilities:
self._question_span_extraction_index = self.answering_abilities.index(
"question_span_extraction"
)
self._question_span_start_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
self._question_span_end_predictor = FeedForward(
modeling_out_dim * 2,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 1],
num_layers=2,
)
if "addition_subtraction" in self.answering_abilities:
self._addition_subtraction_index = self.answering_abilities.index(
"addition_subtraction"
)
self._number_sign_predictor = FeedForward(
modeling_out_dim * 3,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 3],
num_layers=2,
)
if "counting" in self.answering_abilities:
self._counting_index = self.answering_abilities.index("counting")
self._count_number_predictor = FeedForward(
modeling_out_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[modeling_out_dim, 10],
num_layers=2,
)
self._drop_metrics = DropEmAndF1()
self._dropout = torch.nn.Dropout(p=dropout_prob)
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
number_indices: torch.LongTensor,
answer_as_passage_spans: torch.LongTensor = None,
answer_as_question_spans: torch.LongTensor = None,
answer_as_add_sub_expressions: torch.LongTensor = None,
answer_as_counts: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
question_mask = util.get_text_field_mask(question)
passage_mask = util.get_text_field_mask(passage)
embedded_question = self._dropout(self._text_field_embedder(question))
embedded_passage = self._dropout(self._text_field_embedder(passage))
embedded_question = self._highway_layer(self._embedding_proj_layer(embedded_question))
embedded_passage = self._highway_layer(self._embedding_proj_layer(embedded_passage))
batch_size = embedded_question.size(0)
projected_embedded_question = self._encoding_proj_layer(embedded_question)
projected_embedded_passage = self._encoding_proj_layer(embedded_passage)
encoded_question = self._dropout(
self._phrase_layer(projected_embedded_question, question_mask)
)
encoded_passage = self._dropout(
self._phrase_layer(projected_embedded_passage, passage_mask)
)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = masked_softmax(
passage_question_similarity, question_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# Shape: (batch_size, question_length, passage_length)
question_passage_attention = masked_softmax(
passage_question_similarity.transpose(1, 2), passage_mask, memory_efficient=True
)
# Shape: (batch_size, passage_length, passage_length)
        passage_attention_over_attention = torch.bmm(
passage_question_attention, question_passage_attention
)
# Shape: (batch_size, passage_length, encoding_dim)
passage_passage_vectors = util.weighted_sum(
            encoded_passage, passage_attention_over_attention
)
# Shape: (batch_size, passage_length, encoding_dim * 4)
merged_passage_attention_vectors = self._dropout(
torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * passage_passage_vectors,
],
dim=-1,
)
)
# The recurrent modeling layers. Since these layers share the same parameters,
# we don't construct them conditioned on answering abilities.
modeled_passage_list = [self._modeling_proj_layer(merged_passage_attention_vectors)]
for _ in range(4):
modeled_passage = self._dropout(
self._modeling_layer(modeled_passage_list[-1], passage_mask)
)
modeled_passage_list.append(modeled_passage)
# Pop the first one, which is input
modeled_passage_list.pop(0)
# The first modeling layer is used to calculate the vector representation of passage
passage_weights = self._passage_weights_predictor(modeled_passage_list[0]).squeeze(-1)
passage_weights = masked_softmax(passage_weights, passage_mask)
passage_vector = util.weighted_sum(modeled_passage_list[0], passage_weights)
# The vector representation of question is calculated based on the unmatched encoding,
# because we may want to infer the answer ability only based on the question words.
question_weights = self._question_weights_predictor(encoded_question).squeeze(-1)
question_weights = masked_softmax(question_weights, question_mask)
question_vector = util.weighted_sum(encoded_question, question_weights)
if len(self.answering_abilities) > 1:
# Shape: (batch_size, number_of_abilities)
answer_ability_logits = self._answer_ability_predictor(
torch.cat([passage_vector, question_vector], -1)
)
answer_ability_log_probs = torch.nn.functional.log_softmax(answer_ability_logits, -1)
best_answer_ability = torch.argmax(answer_ability_log_probs, 1)
if "counting" in self.answering_abilities:
# Shape: (batch_size, 10)
count_number_logits = self._count_number_predictor(passage_vector)
count_number_log_probs = torch.nn.functional.log_softmax(count_number_logits, -1)
# Info about the best count number prediction
# Shape: (batch_size,)
best_count_number = torch.argmax(count_number_log_probs, -1)
best_count_log_prob = torch.gather(
count_number_log_probs, 1, best_count_number.unsqueeze(-1)
).squeeze(-1)
if len(self.answering_abilities) > 1:
best_count_log_prob += answer_ability_log_probs[:, self._counting_index]
if "passage_span_extraction" in self.answering_abilities:
            # Shape: (batch_size, passage_length, modeling_dim * 2)
passage_for_span_start = torch.cat(
[modeled_passage_list[0], modeled_passage_list[1]], dim=-1
)
# Shape: (batch_size, passage_length)
passage_span_start_logits = self._passage_span_start_predictor(
passage_for_span_start
).squeeze(-1)
# Shape: (batch_size, passage_length, modeling_dim * 2)
passage_for_span_end = torch.cat(
[modeled_passage_list[0], modeled_passage_list[2]], dim=-1
)
# Shape: (batch_size, passage_length)
passage_span_end_logits = self._passage_span_end_predictor(
passage_for_span_end
).squeeze(-1)
# Shape: (batch_size, passage_length)
passage_span_start_log_probs = util.masked_log_softmax(
passage_span_start_logits, passage_mask
)
passage_span_end_log_probs = util.masked_log_softmax(
passage_span_end_logits, passage_mask
)
# Info about the best passage span prediction
passage_span_start_logits = util.replace_masked_values(
passage_span_start_logits, passage_mask, -1e7
)
passage_span_end_logits = util.replace_masked_values(
passage_span_end_logits, passage_mask, -1e7
)
# Shape: (batch_size, 2)
best_passage_span = get_best_span(passage_span_start_logits, passage_span_end_logits)
# Shape: (batch_size, 2)
best_passage_start_log_probs = torch.gather(
passage_span_start_log_probs, 1, best_passage_span[:, 0].unsqueeze(-1)
).squeeze(-1)
best_passage_end_log_probs = torch.gather(
passage_span_end_log_probs, 1, best_passage_span[:, 1].unsqueeze(-1)
).squeeze(-1)
# Shape: (batch_size,)
best_passage_span_log_prob = best_passage_start_log_probs + best_passage_end_log_probs
if len(self.answering_abilities) > 1:
best_passage_span_log_prob += answer_ability_log_probs[
:, self._passage_span_extraction_index
]
if "question_span_extraction" in self.answering_abilities:
            # Shape: (batch_size, question_length, encoding_dim + modeling_dim)
encoded_question_for_span_prediction = torch.cat(
[
encoded_question,
passage_vector.unsqueeze(1).repeat(1, encoded_question.size(1), 1),
],
-1,
)
question_span_start_logits = self._question_span_start_predictor(
encoded_question_for_span_prediction
).squeeze(-1)
# Shape: (batch_size, question_length)
question_span_end_logits = self._question_span_end_predictor(
encoded_question_for_span_prediction
).squeeze(-1)
question_span_start_log_probs = util.masked_log_softmax(
question_span_start_logits, question_mask
)
question_span_end_log_probs = util.masked_log_softmax(
question_span_end_logits, question_mask
)
# Info about the best question span prediction
question_span_start_logits = util.replace_masked_values(
question_span_start_logits, question_mask, -1e7
)
question_span_end_logits = util.replace_masked_values(
question_span_end_logits, question_mask, -1e7
)
# Shape: (batch_size, 2)
best_question_span = get_best_span(question_span_start_logits, question_span_end_logits)
# Shape: (batch_size, 2)
best_question_start_log_probs = torch.gather(
question_span_start_log_probs, 1, best_question_span[:, 0].unsqueeze(-1)
).squeeze(-1)
best_question_end_log_probs = torch.gather(
question_span_end_log_probs, 1, best_question_span[:, 1].unsqueeze(-1)
).squeeze(-1)
# Shape: (batch_size,)
best_question_span_log_prob = (
best_question_start_log_probs + best_question_end_log_probs
)
if len(self.answering_abilities) > 1:
best_question_span_log_prob += answer_ability_log_probs[
:, self._question_span_extraction_index
]
if "addition_subtraction" in self.answering_abilities:
# Shape: (batch_size, # of numbers in the passage)
number_indices = number_indices.squeeze(-1)
number_mask = number_indices != -1
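            # `torch.gather` cannot take the -1 padding indices, so we clamp them to 0
            # here and rely on `number_mask` to cancel their contribution afterwards.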
clamped_number_indices = util.replace_masked_values(number_indices, number_mask, 0)
encoded_passage_for_numbers = torch.cat(
[modeled_passage_list[0], modeled_passage_list[3]], dim=-1
)
            # Shape: (batch_size, # of numbers in the passage, modeling_dim * 2)
encoded_numbers = torch.gather(
encoded_passage_for_numbers,
1,
clamped_number_indices.unsqueeze(-1).expand(
-1, -1, encoded_passage_for_numbers.size(-1)
),
)
            # Shape: (batch_size, # of numbers in the passage, modeling_dim * 3)
encoded_numbers = torch.cat(
[
encoded_numbers,
passage_vector.unsqueeze(1).repeat(1, encoded_numbers.size(1), 1),
],
-1,
)
# Shape: (batch_size, # of numbers in the passage, 3)
number_sign_logits = self._number_sign_predictor(encoded_numbers)
number_sign_log_probs = torch.nn.functional.log_softmax(number_sign_logits, -1)
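            # The three sign classes follow the DROP paper's scheme: 0 = the number is
            # not used, 1 = add it, 2 = subtract it.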
# Shape: (batch_size, # of numbers in passage).
best_signs_for_numbers = torch.argmax(number_sign_log_probs, -1)
            # For padding numbers, the best sign is masked to 0 (not included).
best_signs_for_numbers = util.replace_masked_values(
best_signs_for_numbers, number_mask, 0
)
# Shape: (batch_size, # of numbers in passage)
best_signs_log_probs = torch.gather(
number_sign_log_probs, 2, best_signs_for_numbers.unsqueeze(-1)
).squeeze(-1)
            # The log-probs of the masked positions are set to 0 (probability 1) so
            # that they do not affect the joint probability.
# TODO: this is not quite right, since if there are many numbers in the passage,
# TODO: the joint probability would be very small.
best_signs_log_probs = util.replace_masked_values(best_signs_log_probs, number_mask, 0)
# Shape: (batch_size,)
best_combination_log_prob = best_signs_log_probs.sum(-1)
if len(self.answering_abilities) > 1:
best_combination_log_prob += answer_ability_log_probs[
:, self._addition_subtraction_index
]
output_dict = {}
# If answer is given, compute the loss.
if (
answer_as_passage_spans is not None
or answer_as_question_spans is not None
or answer_as_add_sub_expressions is not None
or answer_as_counts is not None
):
log_marginal_likelihood_list = []
for answering_ability in self.answering_abilities:
if answering_ability == "passage_span_extraction":
# Shape: (batch_size, # of answer spans)
gold_passage_span_starts = answer_as_passage_spans[:, :, 0]
gold_passage_span_ends = answer_as_passage_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_passage_span_mask = gold_passage_span_starts != -1
clamped_gold_passage_span_starts = util.replace_masked_values(
gold_passage_span_starts, gold_passage_span_mask, 0
)
clamped_gold_passage_span_ends = util.replace_masked_values(
gold_passage_span_ends, gold_passage_span_mask, 0
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_span_starts = torch.gather(
passage_span_start_log_probs, 1, clamped_gold_passage_span_starts
)
log_likelihood_for_passage_span_ends = torch.gather(
passage_span_end_log_probs, 1, clamped_gold_passage_span_ends
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_passage_spans = (
log_likelihood_for_passage_span_starts
+ log_likelihood_for_passage_span_ends
)
                    # For those padded spans, we set their log probabilities to a large negative value.
log_likelihood_for_passage_spans = util.replace_masked_values(
log_likelihood_for_passage_spans, gold_passage_span_mask, -1e7
)
# Shape: (batch_size, )
log_marginal_likelihood_for_passage_span = util.logsumexp(
log_likelihood_for_passage_spans
)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_passage_span)
elif answering_ability == "question_span_extraction":
# Shape: (batch_size, # of answer spans)
gold_question_span_starts = answer_as_question_spans[:, :, 0]
gold_question_span_ends = answer_as_question_spans[:, :, 1]
# Some spans are padded with index -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
gold_question_span_mask = gold_question_span_starts != -1
clamped_gold_question_span_starts = util.replace_masked_values(
gold_question_span_starts, gold_question_span_mask, 0
)
clamped_gold_question_span_ends = util.replace_masked_values(
gold_question_span_ends, gold_question_span_mask, 0
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_question_span_starts = torch.gather(
question_span_start_log_probs, 1, clamped_gold_question_span_starts
)
log_likelihood_for_question_span_ends = torch.gather(
question_span_end_log_probs, 1, clamped_gold_question_span_ends
)
# Shape: (batch_size, # of answer spans)
log_likelihood_for_question_spans = (
log_likelihood_for_question_span_starts
+ log_likelihood_for_question_span_ends
)
                    # For those padded spans, we set their log probabilities to a large negative value.
log_likelihood_for_question_spans = util.replace_masked_values(
log_likelihood_for_question_spans, gold_question_span_mask, -1e7
)
# Shape: (batch_size, )
log_marginal_likelihood_for_question_span = util.logsumexp(
log_likelihood_for_question_spans
)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_question_span)
elif answering_ability == "addition_subtraction":
# The padded add-sub combinations use 0 as the signs for all numbers, and we mask them here.
# Shape: (batch_size, # of combinations)
gold_add_sub_mask = answer_as_add_sub_expressions.sum(-1) > 0
# Shape: (batch_size, # of numbers in the passage, # of combinations)
gold_add_sub_signs = answer_as_add_sub_expressions.transpose(1, 2)
# Shape: (batch_size, # of numbers in the passage, # of combinations)
log_likelihood_for_number_signs = torch.gather(
number_sign_log_probs, 2, gold_add_sub_signs
)
                    # The log likelihood of the masked positions should be 0
                    # so that it does not affect the joint probability.
log_likelihood_for_number_signs = util.replace_masked_values(
log_likelihood_for_number_signs, number_mask.unsqueeze(-1), 0
)
# Shape: (batch_size, # of combinations)
log_likelihood_for_add_subs = log_likelihood_for_number_signs.sum(1)
                    # For the padded combinations, we set their log probabilities to a very small negative value.
log_likelihood_for_add_subs = util.replace_masked_values(
log_likelihood_for_add_subs, gold_add_sub_mask, -1e7
)
# Shape: (batch_size, )
log_marginal_likelihood_for_add_sub = util.logsumexp(
log_likelihood_for_add_subs
)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_add_sub)
elif answering_ability == "counting":
# Count answers are padded with label -1,
# so we clamp those paddings to 0 and then mask after `torch.gather()`.
# Shape: (batch_size, # of count answers)
gold_count_mask = answer_as_counts != -1
# Shape: (batch_size, # of count answers)
clamped_gold_counts = util.replace_masked_values(
answer_as_counts, gold_count_mask, 0
)
log_likelihood_for_counts = torch.gather(
count_number_log_probs, 1, clamped_gold_counts
)
                    # For the padded counts, we set their log probabilities to a very small negative value.
log_likelihood_for_counts = util.replace_masked_values(
log_likelihood_for_counts, gold_count_mask, -1e7
)
# Shape: (batch_size, )
log_marginal_likelihood_for_count = util.logsumexp(log_likelihood_for_counts)
log_marginal_likelihood_list.append(log_marginal_likelihood_for_count)
else:
raise ValueError(f"Unsupported answering ability: {answering_ability}")
if len(self.answering_abilities) > 1:
                # Add the ability log probabilities if there is more than one answering ability.
all_log_marginal_likelihoods = torch.stack(log_marginal_likelihood_list, dim=-1)
all_log_marginal_likelihoods = (
all_log_marginal_likelihoods + answer_ability_log_probs
)
marginal_log_likelihood = util.logsumexp(all_log_marginal_likelihoods)
else:
marginal_log_likelihood = log_marginal_likelihood_list[0]
output_dict["loss"] = -marginal_log_likelihood.mean()
# Compute the metrics and add the tokenized input to the output.
if metadata is not None:
output_dict["question_id"] = []
output_dict["answer"] = []
question_tokens = []
passage_tokens = []
for i in range(batch_size):
question_tokens.append(metadata[i]["question_tokens"])
passage_tokens.append(metadata[i]["passage_tokens"])
if len(self.answering_abilities) > 1:
predicted_ability_str = self.answering_abilities[
best_answer_ability[i].detach().cpu().numpy()
]
else:
predicted_ability_str = self.answering_abilities[0]
answer_json: Dict[str, Any] = {}
                # We do not consider multi-mention answers here.
if predicted_ability_str == "passage_span_extraction":
answer_json["answer_type"] = "passage_span"
passage_str = metadata[i]["original_passage"]
offsets = metadata[i]["passage_token_offsets"]
predicted_span = tuple(best_passage_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
predicted_answer = passage_str[start_offset:end_offset]
answer_json["value"] = predicted_answer
answer_json["spans"] = [(start_offset, end_offset)]
elif predicted_ability_str == "question_span_extraction":
answer_json["answer_type"] = "question_span"
question_str = metadata[i]["original_question"]
offsets = metadata[i]["question_token_offsets"]
predicted_span = tuple(best_question_span[i].detach().cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
predicted_answer = question_str[start_offset:end_offset]
answer_json["value"] = predicted_answer
answer_json["spans"] = [(start_offset, end_offset)]
elif (
predicted_ability_str == "addition_subtraction"
): # plus_minus combination answer
answer_json["answer_type"] = "arithmetic"
original_numbers = metadata[i]["original_numbers"]
sign_remap = {0: 0, 1: 1, 2: -1}
predicted_signs = [
sign_remap[it] for it in best_signs_for_numbers[i].detach().cpu().numpy()
]
result = sum(
[sign * number for sign, number in zip(predicted_signs, original_numbers)]
)
predicted_answer = str(result)
offsets = metadata[i]["passage_token_offsets"]
number_indices = metadata[i]["number_indices"]
number_positions = [offsets[index] for index in number_indices]
answer_json["numbers"] = []
for offset, value, sign in zip(
number_positions, original_numbers, predicted_signs
):
answer_json["numbers"].append(
{"span": offset, "value": value, "sign": sign}
)
if number_indices[-1] == -1:
# There is a dummy 0 number at position -1 added in some cases; we are
# removing that here.
answer_json["numbers"].pop()
answer_json["value"] = result
elif predicted_ability_str == "counting":
answer_json["answer_type"] = "count"
predicted_count = best_count_number[i].detach().cpu().numpy()
predicted_answer = str(predicted_count)
answer_json["count"] = predicted_count
else:
raise ValueError(f"Unsupported answer ability: {predicted_ability_str}")
output_dict["question_id"].append(metadata[i]["question_id"])
output_dict["answer"].append(answer_json)
answer_annotations = metadata[i].get("answer_annotations", [])
if answer_annotations:
self._drop_metrics(predicted_answer, answer_annotations)
# This is used for the demo.
output_dict["passage_question_attention"] = passage_question_attention
output_dict["question_tokens"] = question_tokens
output_dict["passage_tokens"] = passage_tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._drop_metrics.get_metric(reset)
return {"em": exact_match, "f1": f1_score}
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/naqanet.py
|
import logging
from typing import Any, Dict, List, Optional
import numpy as np
import torch
from allennlp.common.util import sanitize_wordpiece
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.nn.util import get_token_ids_from_text_field_tensors
from torch import nn
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.nn import util
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy
from torch.nn.functional import cross_entropy
from allennlp_rc.models.util import get_best_span
from allennlp_rc.eval import SquadEmAndF1
logger = logging.getLogger(__name__)
@Model.register("transformer_qa")
class TransformerQA(Model):
"""
This class implements a reading comprehension model patterned after the proposed model in
    https://arxiv.org/abs/1810.04805 (Devlin et al.), with improvements borrowed from the SQuAD model in the
transformers project.
It predicts start tokens and end tokens with a linear layer on top of word piece embeddings.
Note that the metrics that the model produces are calculated on a per-instance basis only. Since there could
be more than one instance per question, these metrics are not the official numbers on the SQuAD task. To get
official numbers, run the script in scripts/transformer_qa_eval.py.
Parameters
----------
vocab : ``Vocabulary``
transformer_model_name : ``str``, optional (default=``bert-base-cased``)
This model chooses the embedder according to this setting. You probably want to make sure this is set to
the same thing as the reader.
"""
def __init__(
self, vocab: Vocabulary, transformer_model_name: str = "bert-base-cased", **kwargs
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = BasicTextFieldEmbedder(
{"tokens": PretrainedTransformerEmbedder(transformer_model_name)}
)
self._linear_layer = nn.Linear(self._text_field_embedder.get_output_dim(), 2)
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._per_instance_metrics = SquadEmAndF1()
def forward( # type: ignore
self,
question_with_context: Dict[str, Dict[str, torch.LongTensor]],
context_span: torch.IntTensor,
answer_span: Optional[torch.IntTensor] = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question_with_context : Dict[str, torch.LongTensor]
            From a ``TextField``. The model assumes that this text field contains the question followed by the
            context. It further assumes that the tokens have type ids set such that any token that can be part of
the answer (i.e., tokens from the context) has type id 0, and any other token (including [CLS] and
[SEP]) has type id 1.
context_span : ``torch.IntTensor``
            From a ``SpanField``. This marks the span of word pieces in ``question_with_context`` from which
            answers can come.
answer_span : ``torch.IntTensor``, optional
From a ``SpanField``. This is the thing we are trying to predict - the span of text that marks the
            answer. If given, we compute a loss that gets included in the output dictionary.
metadata : ``List[Dict[str, Any]]``, optional
If present, this should contain the question id, and the original texts of context, question, tokenized
version of both, and a list of possible answers. The length of the ``metadata`` list should be the
batch size, and each dictionary should have the keys ``id``, ``question``, ``context``,
``question_tokens``, ``context_tokens``, and ``answers``.
Returns
-------
An output dictionary consisting of:
span_start_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span start position.
span_start_probs : torch.FloatTensor
The result of ``softmax(span_start_logits)``.
span_end_logits : torch.FloatTensor
A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log
probabilities of the span end position (inclusive).
span_end_probs : torch.FloatTensor
The result of ``softmax(span_end_logits)``.
best_span : torch.IntTensor
The result of a constrained inference over ``span_start_logits`` and
``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)``
and each offset is a token index.
best_span_scores : torch.FloatTensor
The score for each of the best spans.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
best_span_str : List[str]
If sufficient metadata was provided for the instances in the batch, we also return the
string from the original passage that the model thinks is the best answer to the
question.
"""
embedded_question = self._text_field_embedder(question_with_context)
logits = self._linear_layer(embedded_question)
span_start_logits, span_end_logits = logits.split(1, dim=-1)
span_start_logits = span_start_logits.squeeze(-1)
span_end_logits = span_end_logits.squeeze(-1)
possible_answer_mask = torch.zeros_like(
get_token_ids_from_text_field_tensors(question_with_context), dtype=torch.bool
)
for i, (start, end) in enumerate(context_span):
possible_answer_mask[i, start : end + 1] = True
span_start_logits = util.replace_masked_values(
span_start_logits, possible_answer_mask, -1e32
)
span_end_logits = util.replace_masked_values(span_end_logits, possible_answer_mask, -1e32)
span_start_probs = torch.nn.functional.softmax(span_start_logits, dim=-1)
span_end_probs = torch.nn.functional.softmax(span_end_logits, dim=-1)
best_spans = get_best_span(span_start_logits, span_end_logits)
best_span_scores = torch.gather(
span_start_logits, 1, best_spans[:, 0].unsqueeze(1)
) + torch.gather(span_end_logits, 1, best_spans[:, 1].unsqueeze(1))
best_span_scores = best_span_scores.squeeze(1)
output_dict = {
"span_start_logits": span_start_logits,
"span_start_probs": span_start_probs,
"span_end_logits": span_end_logits,
"span_end_probs": span_end_probs,
"best_span": best_spans,
"best_span_scores": best_span_scores,
}
# Compute the loss for training.
if answer_span is not None:
span_start = answer_span[:, 0]
span_end = answer_span[:, 1]
span_mask = span_start != -1
self._span_accuracy(
best_spans, answer_span, span_mask.unsqueeze(-1).expand_as(best_spans)
)
start_loss = cross_entropy(span_start_logits, span_start, ignore_index=-1)
if torch.any(start_loss > 1e9):
logger.critical("Start loss too high (%r)", start_loss)
logger.critical("span_start_logits: %r", span_start_logits)
logger.critical("span_start: %r", span_start)
assert False
end_loss = cross_entropy(span_end_logits, span_end, ignore_index=-1)
if torch.any(end_loss > 1e9):
logger.critical("End loss too high (%r)", end_loss)
logger.critical("span_end_logits: %r", span_end_logits)
logger.critical("span_end: %r", span_end)
assert False
loss = (start_loss + end_loss) / 2
self._span_start_accuracy(span_start_logits, span_start, span_mask)
self._span_end_accuracy(span_end_logits, span_end, span_mask)
output_dict["loss"] = loss
# Compute the EM and F1 on SQuAD and add the tokenized input to the output.
if metadata is not None:
best_spans = best_spans.detach().cpu().numpy()
output_dict["best_span_str"] = []
context_tokens = []
for metadata_entry, best_span in zip(metadata, best_spans):
context_tokens_for_question = metadata_entry["context_tokens"]
context_tokens.append(context_tokens_for_question)
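                # Shift the predicted span from positions in the concatenated
                # input ([CLS], question tokens, two separators, then context)
                # to positions within the context alone.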
best_span -= 1 + len(metadata_entry["question_tokens"]) + 2
assert np.all(best_span >= 0)
predicted_start, predicted_end = tuple(best_span)
while (
predicted_start >= 0
and context_tokens_for_question[predicted_start].idx is None
):
predicted_start -= 1
if predicted_start < 0:
logger.warning(
f"Could not map the token '{context_tokens_for_question[best_span[0]].text}' at index "
f"'{best_span[0]}' to an offset in the original text."
)
character_start = 0
else:
character_start = context_tokens_for_question[predicted_start].idx
while (
predicted_end < len(context_tokens_for_question)
and context_tokens_for_question[predicted_end].idx is None
):
predicted_end += 1
if predicted_end >= len(context_tokens_for_question):
logger.warning(
f"Could not map the token '{context_tokens_for_question[best_span[1]].text}' at index "
f"'{best_span[1]}' to an offset in the original text."
)
character_end = len(metadata_entry["context"])
else:
end_token = context_tokens_for_question[predicted_end]
character_end = end_token.idx + len(sanitize_wordpiece(end_token.text))
best_span_string = metadata_entry["context"][character_start:character_end]
output_dict["best_span_str"].append(best_span_string)
answers = metadata_entry.get("answers")
                if answers:  # guard against missing or empty gold answers
self._per_instance_metrics(best_span_string, answers)
output_dict["context_tokens"] = context_tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
exact_match, f1_score = self._per_instance_metrics.get_metric(reset)
return {
"start_acc": self._span_start_accuracy.get_metric(reset),
"end_acc": self._span_end_accuracy.get_metric(reset),
"span_acc": self._span_accuracy.get_metric(reset),
"per_instance_em": exact_match,
"per_instance_f1": f1_score,
}
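# A minimal stand-alone sketch (not part of the original model) of the
# constrained inference `get_best_span` performs above: each (start, end) pair
# is scored with start_logit + end_logit, pairs with end < start are masked
# out, and the argmax is taken over what remains.
if __name__ == "__main__":
    demo_start_logits = torch.tensor([[0.1, 2.0, 0.3]])
    demo_end_logits = torch.tensor([[0.2, 0.1, 1.5]])
    pair_scores = demo_start_logits.unsqueeze(2) + demo_end_logits.unsqueeze(1)
    valid = torch.triu(torch.ones(3, 3, dtype=torch.bool))
    pair_scores = pair_scores.masked_fill(~valid, float("-inf"))
    flat_index = pair_scores.view(1, -1).argmax(dim=-1).item()
    print(divmod(flat_index, 3))  # (1, 2): start at token 1, end at token 2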
|
allennlp-reading-comprehension-master
|
allennlp_rc/models/transformer_qa.py
|
""" Evaluation script for NarrativeQA dataset. """
import nltk
try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    nltk.download("punkt")
try:
    nltk.data.find("corpora/wordnet")
except LookupError:
    nltk.download("wordnet")
import rouge
from nltk.translate.bleu_score import sentence_bleu
from nltk.tokenize import word_tokenize
from nltk.translate.meteor_score import meteor_score
import copy
rouge_l_evaluator = rouge.Rouge(
metrics=["rouge-l"],
max_n=4,
limit_length=True,
length_limit=100,
length_limit_type="words",
apply_avg=True,
apply_best=True,
alpha=0.5,
weight_factor=1.2,
stemming=True,
)
def bleu_1(p, g):
return sentence_bleu(g, p, weights=(1, 0, 0, 0))
def bleu_4(p, g):
return sentence_bleu(g, p, weights=(0, 0, 0, 1))
def meteor(p, g):
return meteor_score(g, p)
def rouge_l(p, g):
return rouge_l_evaluator.get_scores(p, g)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, tokenize=False):
scores_for_ground_truths = []
for ground_truth in ground_truths:
if tokenize:
score = metric_fn(word_tokenize(prediction), [word_tokenize(ground_truth)])
else:
score = metric_fn(prediction, [ground_truth])
scores_for_ground_truths.append(score)
if isinstance(score, dict) and "rouge-l" in score:
max_score = copy.deepcopy(score)
max_score["rouge-l"]["f"] = round(
max([score["rouge-l"]["f"] for score in scores_for_ground_truths]), 2
)
max_score["rouge-l"]["p"] = round(
max([score["rouge-l"]["p"] for score in scores_for_ground_truths]), 2
)
max_score["rouge-l"]["r"] = round(
max([score["rouge-l"]["r"] for score in scores_for_ground_truths]), 2
)
return max_score
else:
return round(max(scores_for_ground_truths), 2)
def get_metric_score(prediction, ground_truths):
bleu_1_score = metric_max_over_ground_truths(bleu_1, prediction, ground_truths, tokenize=True)
bleu_4_score = metric_max_over_ground_truths(bleu_4, prediction, ground_truths, tokenize=True)
    # Named meteor_result to avoid shadowing nltk's imported meteor_score.
    meteor_result = metric_max_over_ground_truths(meteor, prediction, ground_truths, tokenize=False)
rouge_l_score = metric_max_over_ground_truths(
rouge_l, prediction, ground_truths, tokenize=False
)
return (
bleu_1_score,
bleu_4_score,
        meteor_result,
rouge_l_score["rouge-l"]["f"],
rouge_l_score["rouge-l"]["p"],
rouge_l_score["rouge-l"]["r"],
)
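# Hypothetical usage sketch (not part of the original script); the first call
# may trigger the nltk resource downloads at the top of this file.
if __name__ == "__main__":
    scores = get_metric_score("the boy ran home", ["the boy went home quickly"])
    print(scores)  # (bleu_1, bleu_4, meteor, rouge_f, rouge_p, rouge_r)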
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/narrativeqa_eval.py
|
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
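# Worked example (illustrative): prediction "the cat sat" and ground truth
# "cat sat down" normalize to ["cat", "sat"] and ["cat", "sat", "down"]; the
# token overlap is 2, so precision is 1.0, recall is 2/3, and F1 is 0.8.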
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths
)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-"
+ expected_version
+ ", but got dataset with v-"
+ dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/squad_eval.py
|
from allennlp_rc.eval.drop_em_and_f1 import DropEmAndF1
from allennlp_rc.eval.squad_em_and_f1 import SquadEmAndF1
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/__init__.py
|
"""Official evaluation script for ORB.
Usage:
python evaluation_script.py
--dataset_file <file_path>
--prediction_file <file_path>
--metrics_output_file <file_path>
"""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
from allennlp_rc.eval.orb_utils import evaluate_dataset
def read_predictions(json_file):
return json.load(open(json_file))
def read_labels(jsonl_file):
qid_answer_map = {}
with open(jsonl_file) as f:
for line in f:
data = json.loads(line)
for qa_pair in data["qa_pairs"]:
qid_answer_map[str(qa_pair["qid"])] = {
"dataset": qa_pair["dataset"],
"answers": qa_pair["answers"],
}
return qid_answer_map
def compute_averages(all_metrics):
for dataset, dataset_metric in all_metrics.items():
if len(dataset_metric) > 0:
total = dataset_metric["total"]
for metric, value in dataset_metric.items():
if metric != "total":
dataset_metric[metric] = value / float(total)
return all_metrics
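# Example (illustrative): {"squad1": {"exact_match": 1.0, "f1": 1.5, "total": 2}}
# averages to {"squad1": {"exact_match": 0.5, "f1": 0.75, "total": 2}}.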
def evaluate(answers, predictions):
metrics = {
"drop": {},
"squad1": {},
"squad2": {},
"newsqa": {},
"quoref": {},
"ropes": {},
"narrativeqa": {},
"duorc": {},
"drop_syn": {},
"squad1_syn": {},
"quoref_syn": {},
"newsqa_syn": {},
"ropes_syn": {},
"duorc_syn": {},
}
for qid, ground_truth_dict in answers.items():
if qid in predictions:
predicted_answer = predictions[qid]
dataset_name = ground_truth_dict["dataset"].lower()
try:
metrics = evaluate_dataset(
dataset_name, predicted_answer, ground_truth_dict["answers"], metrics
)
except KeyError:
print("Incorrect dataset name at : {0}.".format(dataset_name))
exit(0)
except Exception as err:
print(str(err))
metrics = compute_averages(metrics)
return metrics
def process_for_output(metrics):
processed_metrics = {}
average_f1 = 0
f1_instance_count = 0
for dataset, metric_dict in metrics.items():
for metric_name, metric_value in metric_dict.items():
if metric_name != "total":
processed_metrics["{0}_{1}".format(dataset, metric_name)] = round(metric_value, 4)
if metric_name in ["f1", "rouge_f"] and dataset != "ropes":
average_f1 += metric_value
f1_instance_count += 1
elif metric_name == "exact_match" and dataset == "ropes":
average_f1 += metric_value
f1_instance_count += 1
processed_metrics["average_f1"] = round(average_f1 / f1_instance_count, 4)
return processed_metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluation for ORB")
parser.add_argument("--dataset_file", type=str, help="Dataset File")
parser.add_argument("--prediction_file", type=str, help="Prediction File")
parser.add_argument("--metrics_output_file", type=str, help="Metrics File")
args = parser.parse_args()
answers = read_labels(args.dataset_file)
predictions = read_predictions(args.prediction_file)
metrics = evaluate(answers, predictions)
processed_metrics = process_for_output(metrics)
json.dump(processed_metrics, open(args.metrics_output_file, "w"), indent=2)
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/orb_eval.py
|
from typing import List, Tuple
from allennlp_rc.eval.squad_eval import exact_match_score, f1_score
from allennlp_rc.eval.drop_eval import get_metrics as drop_metrics
from allennlp_rc.eval.narrativeqa_eval import get_metric_score as get_metric_narrativeqa
from allennlp_rc.eval.squad2_eval import get_metric_score as get_metric_squad2
def get_metric_squad(prediction, ground_truths):
em_scores = []
f1_scores = []
for ground_truth in ground_truths:
em = exact_match_score(prediction, ground_truth)
f1 = f1_score(prediction, ground_truth)
em_scores.append(em)
f1_scores.append(f1)
return max(em_scores), max(f1_scores)
def get_metric_drop(predicted: str, ground_truths: List[str]) -> Tuple[float, float]:
em_scores = []
f1_scores = []
for ground_truth in ground_truths:
exact_match, f1 = drop_metrics(predicted, ground_truth)
em_scores.append(exact_match)
f1_scores.append(f1)
return max(em_scores), max(f1_scores)
def update_extractive_metrics(metrics, dataset_name, exact_match, f1):
metrics[dataset_name]["exact_match"] = (
metrics[dataset_name]["exact_match"] + exact_match
if "exact_match" in metrics[dataset_name]
else exact_match
)
metrics[dataset_name]["f1"] = (
metrics[dataset_name]["f1"] + f1 if "f1" in metrics[dataset_name] else f1
)
metrics[dataset_name]["total"] = (
metrics[dataset_name]["total"] + 1 if "total" in metrics[dataset_name] else 1
)
return metrics
def update_abstractive_metrics(
metrics, bleu_1_score, bleu_4_score, meteor_score, rouge_f, rouge_p, rouge_r
):
metrics["narrativeqa"]["bleu_1"] = (
metrics["narrativeqa"]["bleu_1"] + bleu_1_score
if "bleu_1" in metrics["narrativeqa"]
else bleu_1_score
)
metrics["narrativeqa"]["bleu_4"] = (
metrics["narrativeqa"]["bleu_4"] + bleu_4_score
if "bleu_4" in metrics["narrativeqa"]
else bleu_4_score
)
metrics["narrativeqa"]["meteor"] = (
metrics["narrativeqa"]["meteor"] + meteor_score
if "meteor" in metrics["narrativeqa"]
else meteor_score
)
metrics["narrativeqa"]["rouge_f"] = (
metrics["narrativeqa"]["rouge_f"] + rouge_f
if "rouge_f" in metrics["narrativeqa"]
else rouge_f
)
metrics["narrativeqa"]["rouge_p"] = (
metrics["narrativeqa"]["rouge_p"] + rouge_p
if "rouge_p" in metrics["narrativeqa"]
else rouge_p
)
metrics["narrativeqa"]["rouge_r"] = (
metrics["narrativeqa"]["rouge_r"] + rouge_r
if "rouge_r" in metrics["narrativeqa"]
else rouge_r
)
metrics["narrativeqa"]["total"] = (
metrics["narrativeqa"]["total"] + 1 if "total" in metrics["narrativeqa"] else 1
)
return metrics
def evaluate_dataset(dataset_name, prediction, ground_truths, metrics):
prediction = prediction[0] if isinstance(prediction, list) else prediction
if dataset_name in [
"squad1",
"ropes",
"newsqa",
"duorc",
"squad1_syn",
"ropes_syn",
"newsqa_syn",
"duorc_syn",
]:
exact_match, f1 = get_metric_squad(prediction, [truth[0] for truth in ground_truths])
metrics = update_extractive_metrics(metrics, dataset_name, exact_match, f1)
elif dataset_name in ["squad2"]:
exact_match, f1 = get_metric_squad2(prediction, [truth[0] for truth in ground_truths])
metrics = update_extractive_metrics(metrics, dataset_name, exact_match, f1)
elif dataset_name in ["drop", "quoref", "drop_syn", "quoref_syn"]:
exact_match, f1 = get_metric_drop(prediction, [truth[0] for truth in ground_truths])
metrics = update_extractive_metrics(metrics, dataset_name, exact_match, f1)
elif dataset_name == "narrativeqa":
prediction = prediction[0] if isinstance(prediction, list) else prediction
ground_truths = [truth[0] for truth in ground_truths]
bleu1, bleu4, meteor, rouge_f, rouge_p, rouge_r = get_metric_narrativeqa(
prediction, ground_truths
)
metrics = update_abstractive_metrics(
metrics, bleu1, bleu4, meteor, rouge_f, rouge_p, rouge_r
)
    else:
        raise ValueError("Incorrect dataset name: {0}".format(dataset_name))
return metrics
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/orb_utils.py
|
"""
This evaluation script relies heavily on the one for DROP (``allennlp/tools/drop_eval.py``). We need a separate
script for Quoref only because the data formats are slightly different.
"""
import json
from typing import Dict, Tuple, List, Any, Optional
import argparse
import numpy as np
from allennlp_rc.eval import drop_eval
def _get_answers_from_data(annotations: Dict[str, Any]) -> Dict[str, List[str]]:
"""
If the annotations file is in the same format as the original data files, this method can be used to extract a
dict of query ids and answers.
"""
answers_dict: Dict[str, List[str]] = {}
for article_info in annotations["data"]:
for paragraph_info in article_info["paragraphs"]:
for qa_pair in paragraph_info["qas"]:
query_id = qa_pair["id"]
candidate_answers = [answer["text"] for answer in qa_pair["answers"]]
answers_dict[query_id] = candidate_answers
return answers_dict
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any]
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a
list of strings (or just one string) that is the answer.
The ``annotations`` are assumed to have either the format of the dev set in the Quoref data release, or the
same format as the predicted answers file.
"""
instance_exact_match = []
instance_f1 = []
if "data" in annotations:
# We're looking at annotations in the original data format. Let's extract the answers.
annotated_answers = _get_answers_from_data(annotations)
else:
annotated_answers = annotations
for query_id, candidate_answers in annotated_answers.items():
max_em_score = 0.0
max_f1_score = 0.0
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
gold_answer = tuple(candidate_answers)
em_score, f1_score = drop_eval.get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
else:
print("Missing prediction for question: {}".format(query_id))
max_em_score = 0.0
max_f1_score = 0.0
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
return global_em, global_f1
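# Illustrative input shapes: ``annotations`` is either the raw Quoref release
# ({"data": [...]}) or a flat {query_id: [answer, ...]} dict, and
# ``predicted_answers`` maps each query id to an answer string or a list of
# answer strings.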
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the gold file. Both
files must be json formatted and must have query_id keys, which are used to match predictions to gold
annotations. Writes a json with global_em and global_f1 metrics to file at the specified output
path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate Quoref predictions")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="quoref-test-v0.1.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
args = parser.parse_args()
evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path)
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/quoref_eval.py
|
"""Official evaluation script for SQuAD version 2.0.
"""
import collections
import re
import string
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid_to_has_ans[qa["id"]] = bool(qa["answers"])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
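# Note the no-answer convention above: when either answer normalizes to an
# empty token list, F1 is 1 only if both are empty, e.g. compute_f1("", "") == 1
# while compute_f1("the", "cat") == 0 (the article normalizes away).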
def get_metric_score(prediction, gold_answers):
    exact_score = max(compute_exact(a, prediction) for a in gold_answers)
    f1 = max(compute_f1(a, prediction) for a in gold_answers)
    return exact_score, f1
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/squad2_eval.py
|
from typing import Tuple
from allennlp.training.metrics.metric import Metric
from overrides import overrides
from allennlp_rc.eval import squad_eval
@Metric.register("squad")
class SquadEmAndF1(Metric):
"""
This :class:`Metric` takes the best span string computed by a model, along with the answer
strings labeled in the data, and computed exact match and F1 score using the official SQuAD
evaluation script.
"""
def __init__(self) -> None:
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
@overrides
def __call__(self, best_span_string, answer_strings):
"""
Parameters
----------
        best_span_string : ``str``
            The best answer span string predicted by the model.
        answer_strings : ``List[str]``
            The gold answer strings labeled in the data.
"""
exact_match = squad_eval.metric_max_over_ground_truths(
squad_eval.exact_match_score, best_span_string, answer_strings
)
f1_score = squad_eval.metric_max_over_ground_truths(
squad_eval.f1_score, best_span_string, answer_strings
)
self._total_em += exact_match
self._total_f1 += f1_score
self._count += 1
@overrides
def get_metric(self, reset: bool = False) -> Tuple[float, float]:
"""
Returns
-------
Average exact match and F1 score (in that order) as computed by the official SQuAD script
over all inputs.
"""
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return exact_match, f1_score
@overrides
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"SquadEmAndF1(em={self._total_em}, f1={self._total_f1})"
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/squad_em_and_f1.py
|
#!/usr/bin/python
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union, Optional
import json
import argparse
import string
import re
import numpy as np
from scipy.optimize import linear_sum_assignment
# From here through _normalize_answer was originally copied from:
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
# Then cleaned up and modified a bit.
def _remove_articles(text: str) -> str:
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def _white_space_fix(text: str) -> str:
return " ".join(text.split())
EXCLUDE = set(string.punctuation)
def _remove_punc(text: str) -> str:
if not _is_number(text):
return "".join(ch for ch in text if ch not in EXCLUDE)
else:
return text
def _lower(text: str) -> str:
return text.lower()
def _tokenize(text: str) -> List[str]:
return re.split(" |-", text)
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token)))))
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = " ".join(parts).strip()
return normalized
def _is_number(text: str) -> bool:
try:
float(text)
return True
except ValueError:
return False
def _normalize_number(text: str) -> str:
if _is_number(text):
return str(float(text))
else:
return text
def _answer_to_bags(
answer: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[List[str], List[Set[str]]]:
if isinstance(answer, (list, tuple)):
raw_spans = answer
else:
raw_spans = [answer]
normalized_spans: List[str] = []
token_bags = []
for raw_span in raw_spans:
normalized_span = _normalize_answer(raw_span)
normalized_spans.append(normalized_span)
token_bags.append(set(normalized_span.split()))
return normalized_spans, token_bags
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
"""
Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
between them and gets maximum metric values over all the answers.
"""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
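# Illustrative sketch: with gold bags [{"touchdown"}, {"field", "goal"}] and
# predicted bags [{"field", "goal"}, {"touchdown"}], the assignment above pairs
# each gold bag with its best-matching prediction, so both rows score F1 = 1.0.
# Pairs that disagree on the numbers they contain are zeroed out by
# `_match_numbers_if_present` (defined below).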
def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float:
intersection = len(gold_bag.intersection(predicted_bag))
if not predicted_bag:
precision = 1.0
else:
precision = intersection / float(len(predicted_bag))
if not gold_bag:
recall = 1.0
else:
recall = intersection / float(len(gold_bag))
f1 = (
(2 * precision * recall) / (precision + recall)
if not (precision == 0.0 and recall == 0.0)
else 0.0
)
return f1
def _match_numbers_if_present(gold_bag: Set[str], predicted_bag: Set[str]) -> bool:
gold_numbers = set()
predicted_numbers = set()
for word in gold_bag:
if _is_number(word):
gold_numbers.add(word)
for word in predicted_bag:
if _is_number(word):
predicted_numbers.add(word)
if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
return True
return False
def get_metrics(
predicted: Union[str, List[str], Tuple[str, ...]], gold: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[float, float]:
"""
Takes a predicted answer and a gold answer (that are both either a string or a list of
strings), and returns exact match and the DROP F1 metric for the prediction. If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(gold_bags[0]):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
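# Doctest-style example (illustrative): numbers are normalized before
# comparison, so
# >>> get_metrics("12 yards", ["12.0 yards"])
# (1.0, 1.0)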
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""
Takes an answer JSON blob from the DROP data release and converts it into strings used for
evaluation.
"""
if "number" in answer and answer["number"]:
return tuple([str(answer["number"])]), "number"
elif "spans" in answer and answer["spans"]:
return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans"
elif "date" in answer:
return (
tuple(
[
"{0} {1} {2}".format(
answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]
)
]
),
"date",
)
else:
raise ValueError(
f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}"
)
def evaluate_json(
annotations: Dict[str, Any], predicted_answers: Dict[str, Any]
) -> Tuple[float, float]:
"""
Takes gold annotations and predicted answers and evaluates the predictions for each question
in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to
match predictions to gold annotations (note that these are somewhat deep in the JSON for the
gold annotations, but must be top-level keys in the predicted answers).
The ``annotations`` are assumed to have the format of the dev set in the DROP data release.
The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string
(or list of strings) that is the answer.
"""
instance_exact_match = []
instance_f1 = []
    # Track metrics broken down by answer type as well.
type_to_em: Dict[str, List[float]] = defaultdict(list)
type_to_f1: Dict[str, List[float]] = defaultdict(list)
for _, annotation in annotations.items():
for qa_pair in annotation["qa_pairs"]:
query_id = qa_pair["query_id"]
max_em_score = 0.0
max_f1_score = 0.0
max_type = None
if query_id in predicted_answers:
predicted = predicted_answers[query_id]
candidate_answers = [qa_pair["answer"]]
if "validated_answers" in qa_pair and qa_pair["validated_answers"]:
candidate_answers += qa_pair["validated_answers"]
for answer in candidate_answers:
gold_answer, gold_type = answer_json_to_strings(answer)
em_score, f1_score = get_metrics(predicted, gold_answer)
if gold_answer[0].strip() != "":
max_em_score = max(max_em_score, em_score)
max_f1_score = max(max_f1_score, f1_score)
if max_em_score == em_score and max_f1_score == f1_score:
max_type = gold_type
else:
print("Missing prediction for question: {}".format(query_id))
if qa_pair and qa_pair["answer"]:
max_type = answer_json_to_strings(qa_pair["answer"])[1]
else:
max_type = "number"
max_em_score = 0.0
max_f1_score = 0.0
instance_exact_match.append(max_em_score)
instance_f1.append(max_f1_score)
type_to_em[max_type].append(max_em_score)
type_to_f1[max_type].append(max_f1_score)
global_em = np.mean(instance_exact_match)
global_f1 = np.mean(instance_f1)
print("Exact-match accuracy {0:.2f}".format(global_em * 100))
print("F1 score {0:.2f}".format(global_f1 * 100))
print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100))
print("----")
total = np.sum([len(v) for v in type_to_em.values()])
for typ in sorted(type_to_em.keys()):
print(
"{0}: {1} ({2:.2f}%)".format(
typ, len(type_to_em[typ]), 100.0 * len(type_to_em[typ]) / total
)
)
print(" Exact-match accuracy {0:.3f}".format(100.0 * np.mean(type_to_em[typ])))
print(" F1 score {0:.3f}".format(100.0 * np.mean(type_to_f1[typ])))
return global_em, global_f1
def evaluate_prediction_file(
prediction_path: str, gold_path: str, output_path: Optional[str] = None
) -> Tuple[float, float]:
"""
Takes a prediction file and a gold file and evaluates the predictions for each question in the
gold file. Both files must be json formatted and must have query_id keys, which are used to
match predictions to gold annotations. The gold file is assumed to have the format of the dev
set in the DROP data release. The prediction file must be a JSON dictionary keyed by query id,
where the value is either a JSON dictionary with an "answer" key, or just a string (or list of
strings) that is the answer. Writes a json with global_em and global_f1 metrics to file at
the specified output path, unless None is passed as output path.
"""
predicted_answers = json.load(open(prediction_path, encoding="utf-8"))
annotations = json.load(open(gold_path, encoding="utf-8"))
global_em, global_f1 = evaluate_json(annotations, predicted_answers)
# Output predictions to file if an output path is given
if output_path is not None:
output_dict = {"global_em": global_em, "global_f1": global_f1}
with open(output_path, "w", encoding="utf8") as outfile:
json.dump(output_dict, outfile)
return (global_em, global_f1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="evaluate on drop dataset")
parser.add_argument(
"--gold_path",
type=str,
required=False,
default="drop_dataset_test.gold.json",
help="location of the gold file",
)
parser.add_argument(
"--prediction_path",
type=str,
required=False,
default="sample_predictions.json",
help="location of the prediction file",
)
parser.add_argument(
"--output_path",
type=str,
required=False,
default=None,
help="location of the output metrics file",
)
args = parser.parse_args()
evaluate_prediction_file(args.prediction_path, args.gold_path, args.output_path)
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/drop_eval.py
|
from typing import Tuple, List, Union
from allennlp.training.metrics.metric import Metric
from overrides import overrides
from allennlp_rc.eval.drop_eval import get_metrics as drop_em_and_f1, answer_json_to_strings
from allennlp_rc.eval.squad_eval import metric_max_over_ground_truths
@Metric.register("drop")
class DropEmAndF1(Metric):
"""
This :class:`Metric` takes the best span string computed by a model, along with the answer
strings labeled in the data, and computes exact match and F1 score using the official DROP
evaluator (which has special handling for numbers and for questions with multiple answer spans,
among other things).
"""
def __init__(self) -> None:
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
@overrides
def __call__(self, prediction: Union[str, List], ground_truths: List): # type: ignore
"""
Parameters
----------
prediction: ``Union[str, List]``
The predicted answer from the model evaluated. This could be a string, or a list of string
when multiple spans are predicted as answer.
ground_truths: ``List``
All the ground truth answer annotations.
"""
# If you wanted to split this out by answer type, you could look at [1] here and group by
# that, instead of only keeping [0].
ground_truth_answer_strings = [
answer_json_to_strings(annotation)[0] for annotation in ground_truths
]
exact_match, f1_score = metric_max_over_ground_truths(
drop_em_and_f1, prediction, ground_truth_answer_strings
)
self._total_em += exact_match
self._total_f1 += f1_score
self._count += 1
@overrides
def get_metric(self, reset: bool = False) -> Tuple[float, float]:
"""
Returns
-------
Average exact match and F1 score (in that order) as computed by the official DROP script
over all inputs.
"""
exact_match = self._total_em / self._count if self._count > 0 else 0
f1_score = self._total_f1 / self._count if self._count > 0 else 0
if reset:
self.reset()
return exact_match, f1_score
@overrides
def reset(self):
self._total_em = 0.0
self._total_f1 = 0.0
self._count = 0
def __str__(self):
return f"DropEmAndF1(em={self._total_em}, f1={self._total_f1})"
|
allennlp-reading-comprehension-master
|
allennlp_rc/eval/drop_em_and_f1.py
|
import pathlib
PROJECT_ROOT = (pathlib.Path(__file__).parent / "..").resolve() # pylint: disable=no-member
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
|
allennlp-reading-comprehension-master
|
tests/__init__.py
|
from allennlp_rc import version
class TestVersion:
def test_version_exists(self):
assert version.VERSION.startswith(version.VERSION_SHORT)
|
allennlp-reading-comprehension-master
|
tests/version_test.py
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_rc.dataset_readers import TriviaQaReader
from tests import FIXTURES_ROOT
class TestTriviaQaReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_read(self, lazy):
params = Params(
{"base_tarball_path": str(FIXTURES_ROOT / "data" / "triviaqa-sample.tgz"), "lazy": lazy}
)
reader = TriviaQaReader.from_params(params)
instances = reader.read("web-train.json")
instances = ensure_list(instances)
assert len(instances) == 3
assert [t.text for t in instances[0].fields["question"].tokens[:3]] == [
"Which",
"American",
"-",
]
assert [t.text for t in instances[0].fields["passage"].tokens[:3]] == [
"The",
"Nobel",
"Prize",
]
url = "http://www.nobelprize.org/nobel_prizes/literature/laureates/1930/"
assert [t.text for t in instances[0].fields["passage"].tokens[-3:]] == ["<", url, ">"]
assert instances[0].fields["span_start"].sequence_index == 12
assert instances[0].fields["span_end"].sequence_index == 13
assert [t.text for t in instances[1].fields["question"].tokens[:3]] == [
"Which",
"American",
"-",
]
assert [t.text for t in instances[1].fields["passage"].tokens[:3]] == ["Why", "Do", "n’t"]
assert [t.text for t in instances[1].fields["passage"].tokens[-3:]] == [
"adults",
",",
"and",
]
assert instances[1].fields["span_start"].sequence_index == 38
assert instances[1].fields["span_end"].sequence_index == 39
assert [t.text for t in instances[2].fields["question"].tokens[:3]] == [
"Where",
"in",
"England",
]
assert [t.text for t in instances[2].fields["passage"].tokens[:3]] == ["Judi", "Dench", "-"]
assert [t.text for t in instances[2].fields["passage"].tokens[-3:]] == [")", "(", "special"]
assert instances[2].fields["span_start"].sequence_index == 16
assert instances[2].fields["span_end"].sequence_index == 16
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/triviaqa_test.py
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_rc.dataset_readers import TransformerSquadReader
from tests import FIXTURES_ROOT
class TestTransformerSquadReader:
def test_read_from_file(self):
reader = TransformerSquadReader()
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "squad.json"))
assert len(instances) == 5
token_text = [t.text for t in instances[0].fields["question_with_context"].tokens]
token_type_ids = [t.type_id for t in instances[0].fields["question_with_context"].tokens]
assert token_text[:3] == ["[CLS]", "To", "whom"]
assert token_type_ids[:3] == [1, 1, 1]
assert token_text[-3:] == ["Mary", ".", "[SEP]"]
assert token_type_ids[-3:] == [0, 0, 1]
assert token_text[instances[0].fields["context_span"].span_start] == "Architectural"
assert token_type_ids[instances[0].fields["context_span"].span_start] == 0
assert token_text[instances[0].fields["context_span"].span_end + 1] == "[SEP]"
assert token_type_ids[instances[0].fields["context_span"].span_end + 1] == 1
assert token_text[instances[0].fields["context_span"].span_end] == "."
assert token_type_ids[instances[0].fields["context_span"].span_end] == 0
assert token_text[
instances[0]
.fields["answer_span"]
.span_start : instances[0]
.fields["answer_span"]
.span_end
+ 1
] == ["Saint", "Bern", "##ade", "##tte", "So", "##ubi", "##rous"]
for instance in instances:
token_type_ids = [t.type_id for t in instance.fields["question_with_context"].tokens]
context_start = instance.fields["context_span"].span_start
context_end = instance.fields["context_span"].span_end + 1
assert all(id != 0 for id in token_type_ids[:context_start])
assert all(id == 0 for id in token_type_ids[context_start:context_end])
assert all(id != 0 for id in token_type_ids[context_end:])
def test_length_limit_works(self):
max_query_length = 10
stride = 20
reader = TransformerSquadReader(
length_limit=100,
max_query_length=max_query_length,
stride=stride,
skip_invalid_examples=False,
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "squad.json"))
assert len(instances) == 12
# The sequence is "<s> question </s> </s> context".
assert instances[0].fields["context_span"].span_start == 1 + max_query_length + 2
instance_0_text = [t.text for t in instances[0].fields["question_with_context"].tokens]
instance_1_text = [t.text for t in instances[1].fields["question_with_context"].tokens]
assert instance_0_text[: max_query_length + 2] == instance_1_text[: max_query_length + 2]
assert instance_0_text[max_query_length + 3] != instance_1_text[max_query_length + 3]
assert instance_0_text[-1] == "[SEP]"
assert instance_0_text[-2] == "G"
assert instance_1_text[instances[1].fields["context_span"].span_start + stride - 1] == "G"
def test_roberta_bug(self):
"""This reader tokenizes first by spaces, and then re-tokenizes using the wordpiece tokenizer that comes
with the transformer model. For RoBERTa, this produces a bug, since RoBERTa tokens are different depending
on whether they are preceded by a space, and the first round of tokenization cuts off the spaces. The
reader has a workaround for this case. This tests that workaround."""
reader = TransformerSquadReader(transformer_model_name="roberta-base")
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "squad.json"))
assert instances
assert len(instances) == 5
token_text = [t.text for t in instances[1].fields["question_with_context"].tokens]
token_ids = [t.text_id for t in instances[1].fields["question_with_context"].tokens]
assert token_text[:3] == ["<s>", "What", "Ġsits"]
assert token_ids[:3] == [0, 2264, 6476]
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/transformer_squad_test.py
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/__init__.py
|
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_rc.dataset_readers import DropReader
from tests import FIXTURES_ROOT
class TestDropReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file(self, lazy):
reader = DropReader(lazy=lazy)
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "drop.json"))
assert len(instances) == 19
instance = instances[0]
assert set(instance.fields.keys()) == {
"question",
"passage",
"number_indices",
"answer_as_passage_spans",
"answer_as_question_spans",
"answer_as_add_sub_expressions",
"answer_as_counts",
"metadata",
}
assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
# Note that the last number in here is added as padding in case we don't find any numbers
# in a particular passage.
# Just FYI, these are the actual numbers that the indices correspond to:
# [ "1", "25", "2014", "5", "2018", "1", "2", "1", "54", "52", "6", "60", "58", "2010",
# "67", "2010", "1996", "3", "1", "6", "1", "0"]
assert [f.sequence_index for f in instance["number_indices"]] == [
16,
30,
36,
41,
52,
64,
80,
89,
147,
153,
166,
174,
177,
206,
245,
252,
267,
279,
283,
288,
296,
-1,
]
assert len(instance["answer_as_passage_spans"]) == 1
assert instance["answer_as_passage_spans"][0] == (46, 47)
assert len(instance["answer_as_question_spans"]) == 1
assert instance["answer_as_question_spans"][0] == (5, 6)
assert len(instance["answer_as_add_sub_expressions"]) == 1
assert instance["answer_as_add_sub_expressions"][0].labels == [0] * 22
assert len(instance["answer_as_counts"]) == 1
assert instance["answer_as_counts"][0].label == -1
assert set(instance["metadata"].metadata.keys()) == {
"answer_annotations",
"answer_info",
"answer_texts",
"number_indices",
"number_tokens",
"original_numbers",
"original_passage",
"original_question",
"passage_id",
"passage_token_offsets",
"passage_tokens",
"question_id",
"question_token_offsets",
"question_tokens",
}
def test_read_in_bert_format(self):
reader = DropReader(instance_format="bert")
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "drop.json"))
assert len(instances) == 19
print(instances[0])
instance = instances[0]
assert set(instance.fields.keys()) == {
"answer_as_passage_spans",
"metadata",
"passage",
"question",
"question_and_passage",
}
assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
question_length = len(instance["question"])
passage_length = len(instance["passage"])
assert len(instance["question_and_passage"]) == question_length + passage_length + 1
assert len(instance["answer_as_passage_spans"]) == 1
assert instance["answer_as_passage_spans"][0] == (
question_length + 1 + 46,
question_length + 1 + 47,
)
assert set(instance["metadata"].metadata.keys()) == {
"answer_annotations",
"answer_texts",
"original_passage",
"original_question",
"passage_id",
"passage_token_offsets",
"passage_tokens",
"question_id",
"question_tokens",
}
def test_read_in_squad_format(self):
reader = DropReader(instance_format="squad")
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "drop.json"))
assert len(instances) == 19
print(instances[0])
instance = instances[0]
assert set(instance.fields.keys()) == {
"question",
"passage",
"span_start",
"span_end",
"metadata",
}
assert [t.text for t in instance["question"][:3]] == ["What", "happened", "second"]
assert [t.text for t in instance["passage"][:3]] == ["The", "Port", "of"]
assert [t.text for t in instance["passage"][-3:]] == ["cruise", "ships", "."]
assert instance["span_start"] == 46
assert instance["span_end"] == 47
assert set(instance["metadata"].metadata.keys()) == {
"answer_annotations",
"answer_texts",
"original_passage",
"original_question",
"passage_id",
"token_offsets",
"passage_tokens",
"question_id",
"question_tokens",
"valid_passage_spans",
}
def test_can_build_from_params(self):
reader = DropReader.from_params(Params({}))
assert reader._tokenizer.__class__.__name__ == "SpacyTokenizer"
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/drop_test.py
|
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp_rc.dataset_readers import util
class TestReadingComprehensionUtil(AllenNlpTestCase):
def test_char_span_to_token_span_handles_easy_cases(self):
# These are _inclusive_ spans, on both sides. (An illustrative sketch of the
# conversion appears at the end of this file.)
tokenizer = SpacyTokenizer()
passage = (
"On January 7, 2012, Beyoncé gave birth to her first child, a daughter, Blue Ivy "
+ "Carter, at Lenox Hill Hospital in New York. Five months later, she performed for four "
+ "nights at Revel Atlantic City's Ovation Hall to celebrate the resort's opening, her "
+ "first performances since giving birth to Blue Ivy."
)
tokens = tokenizer.tokenize(passage)
offsets = [(t.idx, t.idx + len(t.text)) for t in tokens]
# "January 7, 2012"
token_span = util.char_span_to_token_span(offsets, (3, 18))[0]
assert token_span == (1, 4)
# "Lenox Hill Hospital"
token_span = util.char_span_to_token_span(offsets, (91, 110))[0]
assert token_span == (22, 24)
# "Lenox Hill Hospital in New York."
token_span = util.char_span_to_token_span(offsets, (91, 123))[0]
assert token_span == (22, 28)
def test_char_span_to_token_span_handles_hard_cases(self):
# An earlier version of the code had a hard time when the answer was the last token in the
# passage. This tests that case, on the instance that used to fail.
tokenizer = SpacyTokenizer()
passage = (
"Beyonc\u00e9 is believed to have first started a relationship with Jay Z "
+ 'after a collaboration on "\'03 Bonnie & Clyde", which appeared on his seventh '
+ "album The Blueprint 2: The Gift & The Curse (2002). Beyonc\u00e9 appeared as Jay "
+ "Z's girlfriend in the music video for the song, which would further fuel "
+ "speculation of their relationship. On April 4, 2008, Beyonc\u00e9 and Jay Z were "
+ "married without publicity. As of April 2014, the couple have sold a combined 300 "
+ "million records together. The couple are known for their private relationship, "
+ "although they have appeared to become more relaxed in recent years. Beyonc\u00e9 "
+ 'suffered a miscarriage in 2010 or 2011, describing it as "the saddest thing" '
+ "she had ever endured. She returned to the studio and wrote music in order to cope "
+ "with the loss. In April 2011, Beyonc\u00e9 and Jay Z traveled to Paris in order "
+ "to shoot the album cover for her 4, and unexpectedly became pregnant in Paris."
)
start = 912
end = 912 + len("Paris.")
tokens = tokenizer.tokenize(passage)
offsets = [(t.idx, t.idx + len(t.text)) for t in tokens]
token_span = util.char_span_to_token_span(offsets, (start, end))[0]
assert token_span == (184, 185)
def test_char_span_to_token_span_handles_none_cases(self):
# base case
offsets = [(0, 8), (10, 18), (20, 28), (30, 38), (40, 48)]
token_span, error = util.char_span_to_token_span(offsets, (10, 38))
assert token_span == (1, 3) and not error
# None in the middle
offsets = [(0, 8), (10, 18), None, (30, 38), (40, 48)]
token_span, error = util.char_span_to_token_span(offsets, (10, 38))
assert token_span == (1, 3) and not error
# None before
offsets = [None, (10, 18), (20, 28), (30, 38), (40, 48)]
token_span, error = util.char_span_to_token_span(offsets, (10, 38))
assert token_span == (1, 3) and not error
# None after
offsets = [(0, 8), (10, 18), (20, 28), (30, 38), None]
token_span, error = util.char_span_to_token_span(offsets, (10, 38))
assert token_span == (1, 3) and not error
# None after and we're looking for more characters
offsets = [(0, 8), (10, 18), (20, 28), (30, 38), None]
with pytest.raises(ValueError):
util.char_span_to_token_span(offsets, (10, 48))
# Starting at None
offsets = [None, (10, 18), (20, 28), (30, 38), (40, 48)]
token_span, error = util.char_span_to_token_span(offsets, (8, 38))
assert token_span == (0, 3) and error
def test_char_span_to_token_span_handles_out_of_range(self):
offsets = [(10, 18), (20, 28)]
with pytest.raises(ValueError):
util.char_span_to_token_span(offsets, (1, 3))
with pytest.raises(ValueError):
util.char_span_to_token_span(offsets, (1, 15))
with pytest.raises(ValueError):
util.char_span_to_token_span(offsets, (30, 38))
with pytest.raises(ValueError):
util.char_span_to_token_span(offsets, (25, 38))
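# Illustrative sketch, not the library's implementation: a minimal version of
# the char-to-token conversion the tests above exercise, over inclusive token
# spans. The real util.char_span_to_token_span also returns an error flag and
# recovers from None offsets; every name below is hypothetical.
def _naive_char_span_to_token_span(offsets, char_span):
    char_start, char_end = char_span
    start_token = end_token = None
    for i, offset in enumerate(offsets):
        if offset is None:
            continue  # tokens without offsets cannot anchor a span
        token_start, token_end = offset
        if start_token is None and token_start <= char_start < token_end:
            start_token = i
        if token_start < char_end <= token_end:
            end_token = i
    if start_token is None or end_token is None:
        raise ValueError("Character span not found in token offsets")
    return start_token, end_token  # inclusive on both sides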
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/util_test.py
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_rc.dataset_readers import QuACReader
from tests import FIXTURES_ROOT
class TestQuACReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_read(self, lazy):
params = Params({"lazy": lazy, "num_context_answers": 2})
reader = QuACReader.from_params(params)
instances = reader.read(str(FIXTURES_ROOT / "data" / "quac_sample.json"))
instances = ensure_list(instances)
assert instances[0].fields["question"].sequence_length() == 6
assert instances[0].fields["yesno_list"].sequence_length() == 6
assert [t.text for t in instances[0].fields["question"].field_list[0].tokens[:3]] == [
"What",
"was",
"the",
]
assert len(instances) == 2
passage_length = len(instances[0].fields["passage"].tokens)
assert [t.text for t in instances[0].fields["passage"].tokens[:3]] == ["DJ", "Kool", "Herc"]
assert [x.label for x in instances[0].fields["yesno_list"].field_list] == [
"x",
"x",
"y",
"x",
"x",
"x",
]
assert [x.label for x in instances[0].fields["followup_list"].field_list] == [
"y",
"m",
"m",
"n",
"m",
"y",
]
assert (
instances[0].fields["p1_answer_marker"].field_list[0].labels == ["O"] * passage_length
)
# Check the previous answer marking here
prev_1_list = ["O"] * passage_length
prev_2_list = ["O"] * passage_length
q0_span_start = instances[0].fields["span_start"].field_list[0].sequence_index
q0_span_end = instances[0].fields["span_end"].field_list[0].sequence_index
prev_1_list[q0_span_start] = "<{0:d}_{1:s}>".format(1, "start")
prev_1_list[q0_span_end] = "<{0:d}_{1:s}>".format(1, "end")
prev_2_list[q0_span_start] = "<{0:d}_{1:s}>".format(2, "start")
prev_2_list[q0_span_end] = "<{0:d}_{1:s}>".format(2, "end")
for passage_index in range(q0_span_start + 1, q0_span_end):
prev_1_list[passage_index] = "<{0:d}_{1:s}>".format(1, "in")
prev_2_list[passage_index] = "<{0:d}_{1:s}>".format(2, "in")
assert instances[0].fields["p1_answer_marker"].field_list[1].labels == prev_1_list
assert instances[0].fields["p2_answer_marker"].field_list[2].labels == prev_2_list
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/quac_test.py
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_rc.dataset_readers import QangarooReader
from tests import FIXTURES_ROOT
class TestQangarooReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file(self, lazy):
reader = QangarooReader(lazy=lazy)
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "qangaroo.json"))
assert len(instances) == 2
assert [t.text for t in instances[0].fields["candidates"][3]] == ["german", "confederation"]
assert [t.text for t in instances[0].fields["query"]] == ["country", "sms", "braunschweig"]
assert [t.text for t in instances[0].fields["supports"][0][:3]] == [
"The",
"North",
"German",
]
assert [t.text for t in instances[0].fields["answer"]] == ["german", "empire"]
assert instances[0].fields["answer_index"].sequence_index == 4
def test_can_build_from_params(self):
reader = QangarooReader.from_params(Params({}))
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/qangaroo_test.py
|
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp_rc.dataset_readers import SquadReader
from tests import FIXTURES_ROOT
class TestSquadReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file(self, lazy):
reader = SquadReader(lazy=lazy)
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "squad.json"))
assert len(instances) == 5
assert [t.text for t in instances[0].fields["question"].tokens[:3]] == ["To", "whom", "did"]
assert [t.text for t in instances[0].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[0].fields["passage"].tokens[-3:]] == ["of", "Mary", "."]
assert instances[0].fields["span_start"].sequence_index == 102
assert instances[0].fields["span_end"].sequence_index == 104
assert [t.text for t in instances[1].fields["question"].tokens[:3]] == [
"What",
"sits",
"on",
]
assert [t.text for t in instances[1].fields["passage"].tokens[:3]] == [
"Architecturally",
",",
"the",
]
assert [t.text for t in instances[1].fields["passage"].tokens[-3:]] == ["of", "Mary", "."]
assert instances[1].fields["span_start"].sequence_index == 17
assert instances[1].fields["span_end"].sequence_index == 23
# We're checking this case because I changed the answer text to only have a partial
# annotation for the last token, which happens occasionally in the training data. We're
# making sure we get a reasonable output in that case here.
assert [t.text for t in instances[3].fields["question"].tokens[:3]] == [
"Which",
"individual",
"worked",
]
assert [t.text for t in instances[3].fields["passage"].tokens[:3]] == ["In", "1882", ","]
assert [t.text for t in instances[3].fields["passage"].tokens[-3:]] == [
"Nuclear",
"Astrophysics",
".",
]
span_start = instances[3].fields["span_start"].sequence_index
span_end = instances[3].fields["span_end"].sequence_index
answer_tokens = instances[3].fields["passage"].tokens[span_start : (span_end + 1)]
expected_answer_tokens = ["Father", "Julius", "Nieuwland"]
assert [t.text for t in answer_tokens] == expected_answer_tokens
def test_can_build_from_params(self):
reader = SquadReader.from_params(Params({}))
assert reader._tokenizer.__class__.__name__ == "SpacyTokenizer"
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
def test_length_limit_works(self):
# We're making sure the length of the text is correct if a length limit is provided.
# (An illustrative truncate-or-skip sketch appears at the end of this file.)
reader = SquadReader(
passage_length_limit=30, question_length_limit=10, skip_invalid_examples=True
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "squad.json"))
assert len(instances[0].fields["question"].tokens) == 10
assert len(instances[0].fields["passage"].tokens) == 30
# invalid examples where all the answers exceed the passage length should be skipped.
assert len(instances) == 3
# Length limit still works if we do not skip the invalid examples
reader = SquadReader(
passage_length_limit=30, question_length_limit=10, skip_invalid_examples=False
)
instances = ensure_list(reader.read(FIXTURES_ROOT / "data" / "squad.json"))
assert len(instances[0].fields["question"].tokens) == 10
assert len(instances[0].fields["passage"].tokens) == 30
# invalid examples should not be skipped.
assert len(instances) == 5
# Make sure the answer texts do not change, so that the evaluation will not be affected
reader_unlimited = SquadReader(
passage_length_limit=30, question_length_limit=10, skip_invalid_examples=False
)
instances_unlimited = ensure_list(
reader_unlimited.read(FIXTURES_ROOT / "data" / "squad.json")
)
for instance_x, instance_y in zip(instances, instances_unlimited):
print(instance_x.fields["metadata"]["answer_texts"])
assert set(instance_x.fields["metadata"]["answer_texts"]) == set(
instance_y.fields["metadata"]["answer_texts"]
)
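# Illustrative sketch, assumptions labeled: the truncate-or-skip decision the
# length-limit tests above contrast. The span-validity criterion and all names
# are reconstructed for illustration, not copied from SquadReader.
def _truncate_or_skip(passage_tokens, answer_spans, limit, skip_invalid):
    truncated = passage_tokens[:limit]
    in_range = [(start, end) for start, end in answer_spans if end < limit]
    if not in_range and skip_invalid:
        return None  # drop examples whose every answer falls past the limit
    return truncated, in_range  # otherwise keep the (possibly spanless) example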
|
allennlp-reading-comprehension-master
|
tests/dataset_readers/squad_test.py
|
allennlp-reading-comprehension-master
|
tests/predictors/__init__.py
|
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp_rc.dataset_readers import TransformerSquadReader
from allennlp_rc.models import TransformerQA
from allennlp_rc.predictors import TransformerQAPredictor
class TestTransformerQAPredictor(AllenNlpTestCase):
def setUp(self):
super().setUp()
self.reader = TransformerSquadReader(length_limit=50, stride=10)
self.vocab = Vocabulary()
self.model = TransformerQA(self.vocab)
self.predictor = TransformerQAPredictor(self.model, self.reader)
# We're running an untrained model, so the answers will be random.
def test_predict_single_instance(self):
prediction = self.predictor.predict(
"What is love?", "Baby don't hurt me, don't hurt me, no more."
)
assert all(0 <= i < len(prediction["context_tokens"]) for i in prediction["best_span"])
def test_predict_long_instance(self):
# We use a short context and a long context, so that the long context has to be broken
# into multiple instances and re-assembled into a single answer. (An illustrative
# windowing sketch appears at the end of this file.)
questions = [
{
"question": "Do fish drink water?",
"context": """
A freshwater fish's insides have a higher salt content than the exterior water, so their bodies
are constantly absorbing water through osmosis via their permeable gills.
""",
},
{
"question": "Why don't animals have wheels?",
"context": """
The worlds of fiction and myth are full of wheeled creatures, so why not the real world? After
all, the wheel is an efficient design, and it seems like there would be obvious advantages to
quickly moving around while consuming little energy.
The key is to remember that evolution is a process, not something that happens overnight. A
giraffe with just a little bit longer neck than the others will be able to reach slightly
higher trees, which will ultimately lead to the species' neck length getting longer and longer
over generations. In the meantime, those other giraffes can still eat, just not quite as well.
But a wheel either works or it doesn't. A somewhat circular semi-wheelish thing would only be a
hindrance, and evolution can't produce a trait that's perfect from the get-go.
""",
},
]
predictions = self.predictor.predict_batch_json(questions)
assert len(predictions) == 2
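# Illustrative sketch, not the reader's actual code: how a length limit plus a
# stride produce overlapping windows over a long context, which is what forces
# the predictor above to merge answers across instances. The window arithmetic
# and names are assumptions for illustration only.
def _window_token_spans(num_tokens, length_limit, stride):
    spans = []
    start = 0
    while True:
        end = min(start + length_limit, num_tokens)
        spans.append((start, end))
        if end == num_tokens:
            break
        start = end - stride  # overlap consecutive windows by `stride` tokens
    return spans

# e.g. _window_token_spans(120, 50, 10) -> [(0, 50), (40, 90), (80, 120)]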
|
allennlp-reading-comprehension-master
|
tests/predictors/transformer_qa_test.py
|
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from tests import FIXTURES_ROOT
class TestDialogQAPredictor:
def test_uses_named_inputs(self):
inputs = {
"paragraphs": [
{
"qas": [
{
"followup": "y",
"yesno": "x",
"question": "When was the first one?",
"answers": [{"answer_start": 0, "text": "One time"}],
"id": "C_q#0",
},
{
"followup": "n",
"yesno": "x",
"question": "What were you doing?",
"answers": [{"answer_start": 15, "text": "writing a"}],
"id": "C_q#1",
},
{
"followup": "m",
"yesno": "y",
"question": "How often?",
"answers": [{"answer_start": 4, "text": "time I"}],
"id": "C_q#2",
},
],
"context": "One time I was writing a unit test,\
and it succeeded on the first attempt.",
}
]
}
archive = load_archive(FIXTURES_ROOT / "dialog_qa" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "dialog_qa")
result = predictor.predict_json(inputs)
best_span_str_list = result.get("best_span_str")
for best_span_str in best_span_str_list:
assert isinstance(best_span_str, str)
assert best_span_str != ""
def test_batch_prediction(self):
inputs = [
{
"paragraphs": [
{
"qas": [
{
"followup": "y",
"yesno": "x",
"question": "When was the first one?",
"answers": [{"answer_start": 0, "text": "One time"}],
"id": "C_q#0",
},
{
"followup": "n",
"yesno": "x",
"question": "What were you doing?",
"answers": [{"answer_start": 15, "text": "writing a"}],
"id": "C_q#1",
},
{
"followup": "m",
"yesno": "y",
"question": "How often?",
"answers": [{"answer_start": 4, "text": "time I"}],
"id": "C_q#2",
},
],
"context": "One time I was writing a unit test,\
and it succeeded on the first attempt.",
}
]
},
{
"paragraphs": [
{
"qas": [
{
"followup": "y",
"yesno": "x",
"question": "When was the first one?",
"answers": [{"answer_start": 0, "text": "One time"}],
"id": "C_q#0",
},
{
"followup": "n",
"yesno": "x",
"question": "What were you doing?",
"answers": [{"answer_start": 15, "text": "writing a"}],
"id": "C_q#1",
},
{
"followup": "m",
"yesno": "y",
"question": "How often?",
"answers": [{"answer_start": 4, "text": "time I"}],
"id": "C_q#2",
},
],
"context": "One time I was writing a unit test,\
and it succeeded on the first attempt.",
}
]
},
]
archive = load_archive(FIXTURES_ROOT / "dialog_qa" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "dialog_qa")
results = predictor.predict_batch_json(inputs)
assert len(results) == 2
|
allennlp-reading-comprehension-master
|
tests/predictors/dialog_qa_test.py
|
from pytest import approx
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp_rc import predictors
from tests import FIXTURES_ROOT
class TestBidafPredictor(AllenNlpTestCase):
def test_uses_named_inputs(self):
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
}
archive = load_archive(FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading-comprehension")
result = predictor.predict_json(inputs)
best_span = result.get("best_span")
assert best_span is not None
assert isinstance(best_span, list)
assert len(best_span) == 2
assert all(isinstance(x, int) for x in best_span)
assert best_span[0] <= best_span[1]
best_span_str = result.get("best_span_str")
assert isinstance(best_span_str, str)
assert best_span_str != ""
for probs_key in ("span_start_probs", "span_end_probs"):
probs = result.get(probs_key)
assert probs is not None
assert all(isinstance(x, float) for x in probs)
assert sum(probs) == approx(1.0)
def test_batch_prediction(self):
inputs = [
{
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
},
{
"question": "What kind of test succeeded on its first attempt at batch processing?",
"passage": "One time I was writing a unit test, and it always failed!",
},
]
archive = load_archive(FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading-comprehension")
results = predictor.predict_batch_json(inputs)
assert len(results) == 2
for result in results:
best_span = result.get("best_span")
best_span_str = result.get("best_span_str")
start_probs = result.get("span_start_probs")
end_probs = result.get("span_end_probs")
assert best_span is not None
assert isinstance(best_span, list)
assert len(best_span) == 2
assert all(isinstance(x, int) for x in best_span)
assert best_span[0] <= best_span[1]
assert isinstance(best_span_str, str)
assert best_span_str != ""
for probs in (start_probs, end_probs):
assert probs is not None
assert all(isinstance(x, float) for x in probs)
assert sum(probs) == approx(1.0)
def test_model_internals(self):
archive = load_archive(FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading-comprehension")
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
}
# Context manager to capture model internals (an illustrative sketch of the
# hook mechanics appears at the end of this file)
with predictor.capture_model_internals() as internals:
predictor.predict_json(inputs)
assert internals is not None
assert len(internals) == 24
linear_50_1 = internals[23]
print(linear_50_1)
assert "Linear(in_features=50, out_features=1, bias=True)" in linear_50_1["name"]
assert len(linear_50_1["output"][0]) == 17
assert all(len(a) == 1 for a in linear_50_1["output"][0])
# hooks should be gone
for module in predictor._model.modules():
assert not module._forward_hooks
def test_predictions_to_labeled_instances(self):
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing a unit test, and it succeeded on the first attempt.",
}
archive = load_archive(FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading-comprehension")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert "span_start" in new_instances[0].fields
assert "span_end" in new_instances[0].fields
assert new_instances[0].fields["span_start"] is not None
assert new_instances[0].fields["span_end"] is not None
assert len(new_instances) == 1
def test_predictions_to_labeled_instances_with_naqanet(self):
inputs = {
"question": "What kind of test succeeded on its first attempt?",
"passage": "One time I was writing 2 unit tests, and 1 succeeded on the first attempt.",
}
archive = load_archive(FIXTURES_ROOT / "naqanet" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading-comprehension")
predictor._dataset_reader.skip_when_all_empty = False
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert "number_indices" in new_instances[0].fields
assert "answer_as_passage_spans" in new_instances[0].fields
assert "answer_as_question_spans" in new_instances[0].fields
assert "answer_as_add_sub_expressions" in new_instances[0].fields
assert "answer_as_counts" in new_instances[0].fields
assert "metadata" in new_instances[0].fields
assert len(new_instances) == 1
outputs["answer"]["answer_type"] = "count"
outputs["answer"]["count"] = 2
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_counts"][0].label == 2
outputs["answer"]["answer_type"] = "passage_span"
outputs["answer"]["spans"] = [[0, 8]] # character offsets
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_passage_spans"][0] == (0, 1) # token indices
outputs["answer"]["answer_type"] = "arithmetic"
outputs["answer"]["numbers"] = [{"sign": 2}, {"sign": 0}]
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_add_sub_expressions"][0].labels == [2, 0, 0]
outputs["answer"]["answer_type"] = "question_span"
outputs["answer"]["spans"] = [[0, 9]] # character offsets
new_instances = predictor.predictions_to_labeled_instances(instance, outputs)
assert new_instances[0]["answer_as_question_spans"][0] == (0, 1) # token indices
|
allennlp-reading-comprehension-master
|
tests/predictors/reading_comprehension_test.py
|
from flaky import flaky
import pytest
import numpy
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import ModelTestCase
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data.dataset import Batch
from allennlp.models import Model
from allennlp_rc.models import BidirectionalAttentionFlow
from tests import FIXTURES_ROOT
class BidirectionalAttentionFlowTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "bidaf" / "experiment.json", FIXTURES_ROOT / "data" / "squad.json"
)
@flaky
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["f1"] > 0
span_start_probs = output_dict["span_start_probs"][0].data.numpy()
span_end_probs = output_dict["span_end_probs"][0].data.numpy()
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
span_start, span_end = tuple(output_dict["best_span"][0].data.numpy())
assert span_start >= 0
assert span_start <= span_end
assert span_end < self.instances[0].fields["passage"].sequence_length()
assert isinstance(output_dict["best_span_str"][0], str)
# Some recent efficiency changes (using bmm for `weighted_sum`, the more efficient
# `masked_softmax`...) have made this _very_ flaky...
@flaky(max_runs=5)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-4)
@flaky
def test_batch_predictions_are_consistent(self):
# The CNN encoder has problems with this kind of test - it's not properly masked yet, so
# changing the amount of padding in the batch will result in small differences in the
# output of the encoder. Because BiDAF is so deep, these differences get magnified through
# the network and make this test impossible. So, we'll remove the CNN encoder entirely
# from the model for this test. If/when we fix the CNN encoder to work correctly with
# masking, we can change this back to how the other models run this test, with just a
# single line.
# Save some state.
saved_model = self.model
saved_instances = self.instances
# Modify the state, run the test with modified state.
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(params["dataset_reader"])
reader._token_indexers = {"tokens": reader._token_indexers["tokens"]}
self.instances = reader.read(FIXTURES_ROOT / "data" / "squad.json")
vocab = Vocabulary.from_instances(self.instances)
for instance in self.instances:
instance.index_fields(vocab)
del params["model"]["text_field_embedder"]["token_embedders"]["token_characters"]
params["model"]["phrase_layer"]["input_size"] = 2
self.model = Model.from_params(vocab=vocab, params=params["model"])
self.ensure_batch_predictions_are_consistent()
# Restore the state.
self.model = saved_model
self.instances = saved_instances
def test_get_best_span(self):
span_begin_probs = torch.FloatTensor([[0.1, 0.3, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.65, 0.05, 0.2, 0.05, 0.05]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
# When we were using exclusive span ends, this was an edge case of the dynamic program.
# We're keeping the test to make sure we still get it right after the switch to
# inclusive span ends. The best answer is (1, 1).
span_begin_probs = torch.FloatTensor([[0.4, 0.5, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.3, 0.6, 0.1]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 1]])
# Another instance that used to be an edge case.
span_begin_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
span_begin_probs = torch.FloatTensor([[0.1, 0.2, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.1, 0.2, 0.5, 0.05, 0.15]]).log()
begin_end_idxs = BidirectionalAttentionFlow.get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 2]])
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
# Make the phrase layer wrong - it should be 10 to match
# the embedding + char cnn dimensions.
params["model"]["phrase_layer"]["input_size"] = 12
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
params = Params.from_file(self.param_file)
# Make the modeling layer input_dimension wrong - it should be 40 to match
# 4 * output_dim of the phrase_layer.
params["model"]["phrase_layer"]["input_size"] = 30
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
params = Params.from_file(self.param_file)
# Make the modeling layer input_dimension wrong - it should be 70 to match
# 4 * phrase_layer.output_dim + 3 * modeling_layer.output_dim.
params["model"]["span_end_encoder"]["input_size"] = 50
with pytest.raises(ConfigurationError):
Model.from_params(vocab=self.vocab, params=params.pop("model"))
|
allennlp-reading-comprehension-master
|
tests/models/bidaf_test.py
|
import numpy
import torch
from flaky import flaky
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp_rc.models.bidaf_ensemble import BidafEnsemble, ensemble
from tests import FIXTURES_ROOT
class BidafEnsembleTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "bidaf" / "experiment.json", FIXTURES_ROOT / "data" / "squad.json"
)
self.model.eval()
def test_ensemble_chooses_highest_average_confidence_2(self):
subresults = [
{
"span_start_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"span_end_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"best_span": torch.LongTensor([[0, 0]]),
"best_span_str": "What",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
{
"span_start_probs": torch.FloatTensor([[0.0, 0.0, 1.0, 0.0]]),
"span_end_probs": torch.FloatTensor([[0.0, 0.0, 1.0, 0.0]]),
"best_span": torch.LongTensor([[2, 2]]),
"best_span_str": "cheese",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
]
numpy.testing.assert_almost_equal(
ensemble(subresults).data[0].cpu().numpy(), torch.LongTensor([2, 2]).cpu().numpy()
)
def test_ensemble_chooses_highest_average_confidence_3(self):
subresults = [
{
"span_start_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"span_end_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"best_span": torch.LongTensor([[2, 2]]),
"best_span_str": "cheese",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
{
"span_start_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"span_end_probs": torch.FloatTensor([[0.0, 0.0, 0.9, 0.1]]),
"best_span": torch.LongTensor([[2, 2]]),
"best_span_str": "cheese",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
{
"span_start_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"span_end_probs": torch.FloatTensor([[0.9, 0.0, 0.0, 0.0]]),
"best_span": torch.LongTensor([[0, 0]]),
"best_span_str": "What",
"question_tokens": ["What", "did", "Michael", "eat", "?"],
"passage_tokens": ["Michael", "ate", "cheese", "."],
},
]
numpy.testing.assert_almost_equal(
ensemble(subresults).data[0].cpu().numpy(), torch.LongTensor([2, 2]).numpy()
)
@flaky
def test_forward_pass_runs_correctly(self):
"""
Check to make sure a forward pass on an ensemble of two identical copies of a model yields the same
results as the model itself.
"""
bidaf_ensemble = BidafEnsemble([self.model, self.model])
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
bidaf_output_dict = self.model(**training_tensors)
ensemble_output_dict = bidaf_ensemble(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["f1"] > 0
assert torch.equal(ensemble_output_dict["best_span"], bidaf_output_dict["best_span"])
assert ensemble_output_dict["best_span_str"] == bidaf_output_dict["best_span_str"]
|
allennlp-reading-comprehension-master
|
tests/models/bidaf_ensemble_test.py
|
from flaky import flaky
from allennlp.common.testing import ModelTestCase
from tests import FIXTURES_ROOT
import allennlp_rc
class NumericallyAugmentedQaNetTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "naqanet" / "experiment.json", FIXTURES_ROOT / "data" / "drop.json"
)
@flaky(max_runs=3, min_passes=1)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
|
allennlp-reading-comprehension-master
|
tests/models/naqanet_test.py
|
import torch
import pytest
from flaky import flaky
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common import Params
from allennlp.common.testing import ModelTestCase
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data.dataset import Batch
from allennlp.data import DataLoader
from allennlp.models import Model
from allennlp.training import Trainer
from tests import FIXTURES_ROOT
class QaNetTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "qanet" / "experiment.json", FIXTURES_ROOT / "data" / "squad.json"
)
@flaky
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["f1"] > 0
span_start_probs = output_dict["span_start_probs"][0].data.numpy()
span_end_probs = output_dict["span_end_probs"][0].data.numpy()
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
span_start, span_end = tuple(output_dict["best_span"][0].data.numpy())
assert span_start >= 0
assert span_start <= span_end
assert span_end < self.instances[0].fields["passage"].sequence_length()
assert isinstance(output_dict["best_span_str"][0], str)
@flaky
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-4)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need multiple GPUs.")
def test_multigpu_qanet(self):
params = Params.from_file(self.param_file)
vocab = Vocabulary.from_instances(self.instances)
model = Model.from_params(vocab=vocab, params=params["model"]).cuda()
optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum=0.9)
self.instances.index_with(model.vocab)
loader = DataLoader(self.instances, batch_size=4)
trainer = Trainer(model, optimizer, loader, num_epochs=2, cuda_device=[0, 1])
trainer.train()
def test_batch_predictions_are_consistent(self):
# The same issue as the bidaf test case.
# The CNN encoder has problems with this kind of test - it's not properly masked yet, so
# changing the amount of padding in the batch will result in small differences in the
# output of the encoder. So, we'll remove the CNN encoder entirely from the model for this test.
# Save some state.
saved_model = self.model
saved_instances = self.instances
# Modify the state, run the test with modified state.
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(params["dataset_reader"])
reader._token_indexers = {"tokens": reader._token_indexers["tokens"]}
self.instances = reader.read(FIXTURES_ROOT / "data" / "squad.json")
vocab = Vocabulary.from_instances(self.instances)
for instance in self.instances:
instance.index_fields(vocab)
del params["model"]["text_field_embedder"]["token_embedders"]["token_characters"]
params["model"]["phrase_layer"]["num_convs_per_block"] = 0
params["model"]["modeling_layer"]["num_convs_per_block"] = 0
self.model = Model.from_params(vocab=vocab, params=params["model"])
self.ensure_batch_predictions_are_consistent()
# Restore the state.
self.model = saved_model
self.instances = saved_instances
|
allennlp-reading-comprehension-master
|
tests/models/qanet_test.py
|
allennlp-reading-comprehension-master
|
tests/models/__init__.py
|
|
from flaky import flaky
import numpy
from numpy.testing import assert_almost_equal
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from tests import FIXTURES_ROOT
import allennlp_rc # noqa F401: Needed to register the registrables.
class TransformerQaTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "transformer_qa" / "experiment.jsonnet",
FIXTURES_ROOT / "data" / "squad.json",
)
def test_model_can_train_save_and_load(self):
# Huggingface transformer models come with pooler weights, but this model doesn't use the pooler.
self.ensure_model_can_train_save_and_load(
self.param_file,
gradients_to_ignore={
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.weight",
"_text_field_embedder.token_embedder_tokens.transformer_model.pooler.bias",
},
)
def test_forward_pass_runs_correctly(self):
batch = Batch(self.instances)
batch.index_instances(self.vocab)
training_tensors = batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
metrics = self.model.get_metrics(reset=True)
# We've set up the data such that there's a fake answer that consists of the whole
# paragraph. _Any_ valid prediction for that question should produce an F1 of greater than
# zero, while if we somehow haven't been able to load the evaluation data, or there was an
# error with using the evaluation script, this will fail. This makes sure that we've
# loaded the evaluation data correctly and have hooked things up to the official evaluation
# script.
assert metrics["per_instance_f1"] > 0
span_start_probs = output_dict["span_start_probs"][0].data.numpy()
span_end_probs = output_dict["span_end_probs"][0].data.numpy()
assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)
assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)
span_start, span_end = tuple(output_dict["best_span"][0].data.numpy())
assert span_start >= 0
assert span_start <= span_end
assert span_end < self.instances[0].fields["question_with_context"].sequence_length()
assert isinstance(output_dict["best_span_str"][0], str)
|
allennlp-reading-comprehension-master
|
tests/models/transformer_qa_test.py
|
from numpy.testing import assert_almost_equal
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp_rc.models.util import get_best_span
from tests import FIXTURES_ROOT
class TestRcUtil(AllenNlpTestCase):
def test_get_best_span(self):
span_begin_probs = torch.FloatTensor([[0.1, 0.3, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.65, 0.05, 0.2, 0.05, 0.05]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
# When we were using exclusive span ends, this was an edge case of the dynamic program.
# We're keeping the test to make sure we still get it right after the switch to
# inclusive span ends. The best answer is (1, 1). (An illustrative sketch of
# this search appears at the end of this file.)
span_begin_probs = torch.FloatTensor([[0.4, 0.5, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.3, 0.6, 0.1]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 1]])
# Another instance that used to be an edge case.
span_begin_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
span_end_probs = torch.FloatTensor([[0.8, 0.1, 0.1]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])
span_begin_probs = torch.FloatTensor([[0.1, 0.2, 0.05, 0.3, 0.25]]).log()
span_end_probs = torch.FloatTensor([[0.1, 0.2, 0.5, 0.05, 0.15]]).log()
begin_end_idxs = get_best_span(span_begin_probs, span_end_probs)
assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 2]])
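# Illustrative sketch, not the library's get_best_span: a direct implementation
# of the inclusive best-span search the cases above check. For each end index
# we track the best start seen so far, which guarantees start <= end. Uses the
# `torch` imported at the top of this file; the name is hypothetical.
def _naive_get_best_span(span_start_logits, span_end_logits):
    batch_size, passage_length = span_start_logits.size()
    best_spans = torch.zeros(batch_size, 2, dtype=torch.long)
    for b in range(batch_size):
        best_score = float("-inf")
        best_start = 0
        for end in range(passage_length):
            if span_start_logits[b, end] > span_start_logits[b, best_start]:
                best_start = end  # a later start with a higher logit wins
            score = span_start_logits[b, best_start] + span_end_logits[b, end]
            if score > best_score:
                best_score = score
                best_spans[b, 0], best_spans[b, 1] = best_start, end
    return best_spans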
|
allennlp-reading-comprehension-master
|
tests/models/util_test.py
|
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from tests import FIXTURES_ROOT
import allennlp_rc
class DialogQATest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model(
FIXTURES_ROOT / "dialog_qa" / "experiment.json",
FIXTURES_ROOT / "data" / "quac_sample.json",
)
self.batch = Batch(self.instances)
self.batch.index_instances(self.vocab)
def test_forward_pass_runs_correctly(self):
training_tensors = self.batch.as_tensor_dict()
output_dict = self.model(**training_tensors)
assert "best_span_str" in output_dict and "loss" in output_dict
assert "followup" in output_dict and "yesno" in output_dict
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-4)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
|
allennlp-reading-comprehension-master
|
tests/models/dialog_qa_test.py
|
from allennlp.interpret.attackers import Hotflip
from allennlp.interpret.attackers.hotflip import DEFAULT_IGNORE_TOKENS
from allennlp.models import load_archive
from allennlp.predictors import Predictor
import allennlp_rc
from tests import FIXTURES_ROOT
class TestHotflip:
def test_using_squad_model(self):
inputs = {
"question": "OMG, I heard you coded a test that succeeded on its first attempt, is that true?",
"passage": "Bro, never doubt a coding wizard! I am the king of software, MWAHAHAHA",
}
archive = load_archive(FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
predictor = Predictor.from_archive(archive, "reading-comprehension")
hotflipper = Hotflip(predictor)
hotflipper.initialize()
attack = hotflipper.attack_from_json(inputs, "question", "grad_input_2")
print(attack)
assert attack is not None
assert "final" in attack
assert "original" in attack
assert "outputs" in attack
assert len(attack["final"][0]) == len(
attack["original"]
) # hotflip replaces words without removing
instance = predictor._json_to_instance(inputs)
assert instance["question"] != attack["final"][0] # check that the input has changed.
outputs = predictor._model.forward_on_instance(instance)
original_labeled_instance = predictor.predictions_to_labeled_instances(instance, outputs)[0]
original_span_start = original_labeled_instance["span_start"].sequence_index
original_span_end = original_labeled_instance["span_end"].sequence_index
flipped_span_start = attack["outputs"]["best_span"][0]
flipped_span_end = attack["outputs"]["best_span"][1]
for i, token in enumerate(instance["question"]):
token = str(token)
if token in DEFAULT_IGNORE_TOKENS:
assert token in attack["final"][0] # ignore tokens should not be changed
# HotFlip keeps changing tokens until either the prediction changes or all tokens have
# been changed. If there are tokens in the HotFlip final result that were in the
# original (i.e., not all tokens were flipped), then the prediction should be
# different.
else:
if token == attack["final"][0][i]:
assert (
original_span_start != flipped_span_start
or original_span_end != flipped_span_end
)
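# Illustrative sketch, assumptions labeled: the loop structure the comments
# above describe. The real Hotflip attacker chooses replacements from gradient
# information; here candidate selection is a stub, and every name is
# hypothetical.
def _hotflip_loop(tokens, ignore_tokens, predict, pick_replacement):
    original_prediction = predict(tokens)
    flipped = list(tokens)
    for i, token in enumerate(flipped):
        if token in ignore_tokens:
            continue  # ignore tokens are never changed
        flipped[i] = pick_replacement(flipped, i)  # gradient-guided in Hotflip
        if predict(flipped) != original_prediction:
            break  # stop as soon as the prediction flips
    return flipped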
|
allennlp-reading-comprehension-master
|
tests/interpret/hotflip_test.py
|
allennlp-reading-comprehension-master
|
tests/interpret/__init__.py
|