## @package normalization
# Module caffe2.python.helpers.normalization
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import scope
from caffe2.python.modeling.parameter_info import ParameterTags
from caffe2.proto import caffe2_pb2
from caffe2.python.modeling import initializers
def lrn(model, blob_in, blob_out, order="NCHW", use_cudnn=False, **kwargs):
"""LRN"""
dev = kwargs['device_option'] if 'device_option' in kwargs \
else scope.CurrentDeviceScope()
is_cpu = dev is None or dev.device_type == caffe2_pb2.CPU
if use_cudnn and (not is_cpu):
kwargs['engine'] = 'CUDNN'
blobs_out = blob_out
else:
blobs_out = [blob_out, "_" + blob_out + "_scale"]
lrn = model.net.LRN(
blob_in,
blobs_out,
order=order,
**kwargs
)
if use_cudnn and (not is_cpu):
return lrn
else:
return lrn[0]
def softmax(model, blob_in, blob_out=None, use_cudnn=False, **kwargs):
"""Softmax."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
if blob_out is not None:
return model.net.Softmax(blob_in, blob_out, **kwargs)
else:
return model.net.Softmax(blob_in, **kwargs)
def instance_norm(model, blob_in, blob_out, dim_in, order="NCHW", **kwargs):
blob_out = blob_out or model.net.NextName()
# Input: input, scale, bias
# Output: output, saved_mean, saved_inv_std
# scale: initialize with ones
# bias: initialize with zeros
def init_blob(value, suffix):
return model.param_init_net.ConstantFill(
[], blob_out + "_" + suffix, shape=[dim_in], value=value)
scale, bias = init_blob(1.0, "s"), init_blob(0.0, "b")
model.AddParameter(scale, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
blob_outs = [blob_out, blob_out + "_sm", blob_out + "_siv"]
if 'is_test' in kwargs and kwargs['is_test']:
blob_outputs = model.net.InstanceNorm(
[blob_in, scale, bias], [blob_out],
order=order, **kwargs)
return blob_outputs
else:
blob_outputs = model.net.InstanceNorm(
[blob_in, scale, bias], blob_outs,
order=order, **kwargs)
# Return the output
return blob_outputs[0]
def spatial_bn(model, blob_in, blob_out, dim_in,
init_scale=1., init_bias=0.,
ScaleInitializer=None, BiasInitializer=None,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
blob_out = blob_out or model.net.NextName()
# Input: input, scale, bias, est_mean, est_inv_var
# Output: output, running_mean, running_inv_var, saved_mean,
# saved_inv_var
# scale: initialize with init_scale (default 1.)
# bias: initialize with init_bias (default 0.)
# est mean: zero
# est var: ones
if model.init_params:
scale_init = ("ConstantFill", {'value': init_scale})
bias_init = ("ConstantFill", {'value': init_bias})
rm_init = ("ConstantFill", {'value': 0.0})
riv_init = ("ConstantFill", {'value': 1.0})
ScaleInitializer = initializers.update_initializer(
ScaleInitializer, scale_init, ("ConstantFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
RunningMeanInitializer = initializers.update_initializer(
RunningMeanInitializer, rm_init, ("ConstantFill", {})
)
RunningVarianceInitializer = initializers.update_initializer(
RunningVarianceInitializer, riv_init, ("ConstantFill", {})
)
else:
ScaleInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
RunningMeanInitializer = initializers.ExternalInitializer()
RunningVarianceInitializer = initializers.ExternalInitializer()
scale = model.create_param(
param_name=blob_out + '_s',
shape=[dim_in],
initializer=ScaleInitializer,
tags=ParameterTags.WEIGHT
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_in],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
running_mean = model.create_param(
param_name=blob_out + '_rm',
shape=[dim_in],
initializer=RunningMeanInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
running_inv_var = model.create_param(
param_name=blob_out + '_riv',
shape=[dim_in],
initializer=RunningVarianceInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
blob_outs = [blob_out, running_mean, running_inv_var,
blob_out + "_sm", blob_out + "_siv"]
if 'is_test' in kwargs and kwargs['is_test']:
blob_outputs = model.net.SpatialBN(
[blob_in, scale, bias, blob_outs[1], blob_outs[2]], [blob_out],
order=order, **kwargs)
return blob_outputs
else:
blob_outputs = model.net.SpatialBN(
[blob_in, scale, bias, blob_outs[1], blob_outs[2]], blob_outs,
order=order, **kwargs)
# Return the output
return blob_outputs[0]
def layer_norm(
model,
blob_in,
blob_out,
dim_in,
axis=1,
epsilon=1e-4,
initial_scale=1.0,
initial_bias=0.0,
):
'''
Layer normalizes the input, cf. https://arxiv.org/pdf/1607.06450.pdf.
Args:
blob_in: The input blob to layer normalize.
blob_out: The layer normalized output blob.
dim_in: The dimension of the scale and bias. For example, if blob_in is
a 2D design matrix and axis is 1, this would be the number of
columns.
axis: (optional) The axis to normalize. Typically the feature axis.
Defaults to 1.
epsilon: (optional) A small value used for numerical stability in
calculation. Defaults to 1e-4.
        initial_scale: (optional) The initial value for the learned scale
            parameter. Defaults to 1.0.
        initial_bias: (optional) The initial value for the learned additive
            bias parameter. Defaults to 0.0.
Returns:
A 3-tuple consisting of:
- The layer normalized input blob.
- The mean of the input blob across the given axis.
        - The standard deviation of the input blob across the given axis.
'''
# The LayerNorm operator only performs the layerwise z-shift, without
# scaling and shifting by the learned scale and bias parameters. We have
# to do that separately below.
normalized, mean, stdev = model.net.LayerNorm(
[blob_in],
[blob_out, blob_out + "_mean", blob_out + "_stdev"],
axis=axis,
epsilon=epsilon,
)
# The learned multiplicative scale or "gain".
scale = model.create_param(
param_name='{}_scale'.format(blob_out),
shape=[dim_in],
initializer=initializers.Initializer(
'ConstantFill',
value=initial_scale,
),
tags=ParameterTags.WEIGHT,
)
# The learned additive bias or "shift".
bias = model.create_param(
param_name='{}_bias'.format(blob_out),
shape=[dim_in],
initializer=initializers.Initializer(
'ConstantFill',
value=initial_bias,
),
tags=ParameterTags.BIAS,
)
scaled = model.net.Mul(
[normalized, scale],
['{}_scaled'.format(blob_out)],
broadcast=1,
axis=axis,
)
biased = model.net.Add(
[scaled, bias],
['{}_biased'.format(blob_out)],
broadcast=1,
axis=axis,
)
return biased, mean, stdev
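# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of chaining these helpers, assuming a ModelHelper
# instance and an NCHW input blob named "data" with 64 channels; the blob
# names are hypothetical and the function is never called here.
def _example_normalization_usage(model):
    bn = spatial_bn(model, "data", "data_bn", dim_in=64, is_test=False)
    return lrn(model, bn, "data_lrn", order="NCHW")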
|
## @package db_input
# Module caffe2.python.helpers.db_input
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def db_input(model, blobs_out, batch_size, db, db_type):
dbreader_name = "dbreader_" + db
dbreader = model.param_init_net.CreateDB(
[],
dbreader_name,
db=db,
db_type=db_type,
)
return model.net.TensorProtosDBInput(
dbreader, blobs_out, batch_size=batch_size)
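# --- Illustrative usage sketch (not part of the original module) ---
# Reads (data, label) batches out of an LMDB; the database path and blob
# names below are hypothetical, and the function is never called here.
def _example_db_input(model):
    data, label = db_input(
        model, ["data", "label"], batch_size=64,
        db="/path/to/train.lmdb", db_type="lmdb")
    return data, label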
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace, scope
from caffe2.python.model_helper import ModelHelper
import numpy as np
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def tanh(x):
return 2.0 * sigmoid(2.0 * x) - 1
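# The identity behind tanh() above:
#   tanh(x) = (e^x - e^-x) / (e^x + e^-x)
#           = (1 - e^-2x) / (1 + e^-2x)
#           = 2 / (1 + e^-2x) - 1
#           = 2 * sigmoid(2x) - 1
# A quick numerical sanity check (illustrative, never called here):
def _check_tanh_identity():
    x = np.linspace(-3.0, 3.0, 7)
    np.testing.assert_allclose(tanh(x), np.tanh(x), rtol=1e-6)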
def _prepare_rnn(
t, n, dim_in, create_rnn, outputs_with_grads,
forget_bias, memory_optim=False,
forward_only=False, drop_states=False, T=None,
two_d_initial_states=None, dim_out=None,
num_states=2,
**kwargs
):
if dim_out is None:
dim_out = [dim_in]
print("Dims: ", t, n, dim_in, dim_out)
model = ModelHelper(name='external')
if two_d_initial_states is None:
two_d_initial_states = np.random.randint(2)
def generate_input_state(n, d):
if two_d_initial_states:
return np.random.randn(n, d).astype(np.float32)
else:
return np.random.randn(1, n, d).astype(np.float32)
states = []
for layer_id, d in enumerate(dim_out):
for i in range(num_states):
state_name = "state_{}/layer_{}".format(i, layer_id)
states.append(model.net.AddExternalInput(state_name))
workspace.FeedBlob(
states[-1], generate_input_state(n, d).astype(np.float32))
# Due to convoluted RNN scoping logic we make sure that things
# work from a namescope
with scope.NameScope("test_name_scope"):
input_blob, seq_lengths = model.net.AddScopedExternalInputs(
'input_blob', 'seq_lengths')
outputs = create_rnn(
model, input_blob, seq_lengths, states,
dim_in=dim_in, dim_out=dim_out, scope="external/recurrent",
outputs_with_grads=outputs_with_grads,
memory_optimization=memory_optim,
forget_bias=forget_bias,
forward_only=forward_only,
drop_states=drop_states,
static_rnn_unroll_size=T,
**kwargs
)
workspace.RunNetOnce(model.param_init_net)
workspace.FeedBlob(
seq_lengths,
np.random.randint(1, t + 1, size=(n,)).astype(np.int32)
)
return outputs, model.net, states + [input_blob]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, lstm_benchmark, utils
from copy import copy
@utils.debug
def Compare(args):
results = []
num_iters = 1000
args.gpu = True
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
for batch_size in [64, 128, 256]:
for seq_length in [20, 100]:
for hidden_dim in [40, 100, 400, 800]:
args.batch_size = batch_size
args.seq_length = seq_length
args.hidden_dim = hidden_dim
args.data_size = batch_size * seq_length * num_iters
args.iters_to_report = num_iters // 3
args.implementation = 'own'
t_own = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
args.implementation = 'cudnn'
t_cudnn = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
results.append((copy(args), float(t_own), float(t_cudnn)))
print(args)
print("t_cudnn / t_own: {}".format(t_cudnn / t_own))
for args, t_own, t_cudnn in results:
print("{}: cudnn time: {}, own time: {}, ratio: {}".format(
str(args), t_cudnn, t_own, t_cudnn / t_own))
ratio_sum = 0
for args, t_own, t_cudnn in results:
ratio = float(t_cudnn) / t_own
ratio_sum += ratio
print("hidden_dim: {}, seq_lengths: {}, batch_size: {}, num_layers: {}:"
" cudnn time: {}, own time: {}, ratio: {}".format(
args.hidden_dim, args.seq_length, args.batch_size,
args.num_layers, t_cudnn, t_own, ratio))
print("Ratio average: {}".format(ratio_sum / len(results)))
if __name__ == '__main__':
args = lstm_benchmark.GetArgumentParser().parse_args()
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
Compare(args)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import uuid
from caffe2.distributed.python import StoreHandlerTimeoutError
from caffe2.distributed.store_ops_test_util import StoreOpsTests
from caffe2.python import core, workspace, dyndep
from caffe2.python.test_util import TestCase
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:redis_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops")
class TestRedisStoreHandlerOp(TestCase):
def setUp(self):
super(TestRedisStoreHandlerOp, self).setUp()
self.uuid = str(uuid.uuid4()) + "/"
def tearDown(self):
super(TestRedisStoreHandlerOp, self).tearDown()
def create_store_handler(self):
store_handler = "store_handler"
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate",
[],
[store_handler],
prefix=self.uuid,
host=os.getenv("REDIS_HOST", "localhost"),
port=int(os.getenv("REDIS_PORT", 6379))))
return store_handler
def test_set_get(self):
StoreOpsTests.test_set_get(self.create_store_handler)
def test_get_timeout(self):
with self.assertRaises(StoreHandlerTimeoutError):
StoreOpsTests.test_get_timeout(self.create_store_handler)
|
## @package store_ops_test_util
# Module caffe2.distributed.store_ops_test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from multiprocessing import Process, Queue
import numpy as np
from caffe2.python import core, workspace
class StoreOpsTests(object):
@classmethod
def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs):
store_handler = create_store_handler_fn()
blob = "blob"
value = np.full(1, 1, np.float32)
# Use last process to set blob to make sure other processes
# are waiting for the blob before it is set.
if index == (num_procs - 1):
workspace.FeedBlob(blob, value)
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreSet",
[store_handler, blob],
[],
blob_name=blob))
output_blob = "output_blob"
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreGet",
[store_handler],
[output_blob],
blob_name=blob))
try:
np.testing.assert_array_equal(workspace.FetchBlob(output_blob), 1)
except AssertionError as err:
queue.put(err)
workspace.ResetWorkspace()
@classmethod
def test_set_get(cls, create_store_handler_fn):
# Queue for assertion errors on subprocesses
queue = Queue()
# Start N processes in the background
num_procs = 4
procs = []
for index in range(num_procs):
proc = Process(
target=cls._test_set_get,
args=(queue, create_store_handler_fn, index, num_procs, ))
proc.start()
procs.append(proc)
# Test complete, join background processes
for proc in procs:
proc.join()
# Raise first error we find, if any
if not queue.empty():
raise queue.get()
@classmethod
def test_get_timeout(cls, create_store_handler_fn):
store_handler = create_store_handler_fn()
net = core.Net('get_missing_blob')
net.StoreGet([store_handler], 1, blob_name='blob')
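        # 'blob' was never set via StoreSet, so this run is expected to
        # block until the store handler times out and raise
        # StoreHandlerTimeoutError for the calling test to assert on.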
workspace.RunNetOnce(net)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import os
import tempfile
import shutil
from caffe2.distributed.python import StoreHandlerTimeoutError
from caffe2.distributed.store_ops_test_util import StoreOpsTests
from caffe2.python import core, workspace, dyndep
from caffe2.python.test_util import TestCase
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops")
class TestFileStoreHandlerOp(TestCase):
testCounter = 0
def setUp(self):
super(TestFileStoreHandlerOp, self).setUp()
self.tmpdir = tempfile.mkdtemp()
# Use counter to tell test cases apart
TestFileStoreHandlerOp.testCounter += 1
def tearDown(self):
shutil.rmtree(self.tmpdir)
super(TestFileStoreHandlerOp, self).tearDown()
def create_store_handler(self):
# Use new path for every test so they are isolated
path = self.tmpdir + "/" + str(TestFileStoreHandlerOp.testCounter)
# Ensure path exists (including counter)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
store_handler = "store_handler"
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate",
[],
[store_handler],
path=path))
return store_handler
def test_set_get(self):
StoreOpsTests.test_set_get(self.create_store_handler)
def test_get_timeout(self):
with self.assertRaises(StoreHandlerTimeoutError):
StoreOpsTests.test_get_timeout(self.create_store_handler)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
sizeof = {'float': 4, 'float16': 2, 'uint8_t': 1}
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused):
def compute(regid, InType, use_weights, isa, prefetch):
code = []
if InType == "float":
code.append(
"vop%d = _mm256_fmadd_ps(vwgt, \
_mm256_loadu_ps(ip + (%d)), vop%d);"
% (regid, regid, regid)
)
elif InType == "float16":
code.append(
"vop%d = _mm256_fmadd_ps(vwgt, \
_mm256_cvtph_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ip + (%d)))), \
vop%d);"
% (regid, regid, regid)
)
elif InType == "uint8_t":
code.append(
"vop%d = _mm256_fmadd_ps(vwgt, \
_mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ip + (%d))))), \
_mm256_add_ps(vop%d, vbio));"
% (regid, regid, regid)
)
else:
assert False
if prefetch:
code.append("_mm_prefetch((&ip_next_T0[%d]), _MM_HINT_T0);" % (regid))
else:
code.append("// skip unnecessary prefetch of (&ip_next_T0[%d])" % (regid))
return code
code = []
code.append("// unrolling " + str(uf) + " times")
code.append(IndexType + " dataInd = 0;")
code.append("for (" + IndexType +
" rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {")
code.append(OutType + " *op = &out[rangeIndex * block_size];")
for i in range(0, uf):
j = 8 * i
code.append("__m256 vop" + str(j) + " = _mm256_setzero_ps();")
# inner loop
code.append("for (" + IndexType +
" start = dataInd; dataInd < start + lengths[rangeIndex]; ++dataInd) {")
code.append("const " + IndexType + " idx = indices[dataInd];")
code.append(
'CAFFE_ENFORCE(idx >=0 && idx < data_size, "Index ", dataInd, "'
' is out of bounds: ", idx, ", range 0 to ", data_size);')
if InType == "uint8_t":
code.append(OutType + " wgt = 1.f;")
code.append(OutType + " bio;")
code.append("if (weights) {")
code.append(
"wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];")
code.append("}")
if fused:
code.append(
'const float* scale_bias = reinterpret_cast<'
'const float*>(&input[idx * fused_block_size + block_size]);'
)
code.append("bio = wgt * scale_bias[1];")
code.append("wgt = wgt * scale_bias[0];")
else:
code.append("bio = wgt * scale_bias[2 * idx + 1];")
code.append("wgt = wgt * scale_bias[2 * idx];")
code.append("__m256 vbio = _mm256_set1_ps(bio);")
else:
code.append(OutType + " wgt = 1.f;")
code.append("if (weights) {")
code.append(
"wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];")
code.append("}")
code.append("__m256 vwgt = _mm256_set1_ps(wgt);")
code.append("const {} *ip = &input[idx * fused_block_size];".format(InType))
code.append(
'const {} next_T0 = (dataInd < index_size - prefdist_T0)'
' ? (dataInd + prefdist_T0) : dataInd;'.format(IndexType)
)
code.append("const " + IndexType + " idx_pref_T0 = indices[next_T0];")
code.append(
"CAFFE_ENFORCE(idx_pref_T0 >= 0 && idx_pref_T0 < data_size);")
code.append(
'const {} *ip_next_T0 = &input[idx_pref_T0'
' * fused_block_size];'.format(InType)
)
for i in range(0, uf):
j = 8 * i
cachelinesize = 64
byteoffset = sizeof[InType] * j
prefetch = (byteoffset % cachelinesize) == 0
code.extend(compute(j, InType, use_weights, isa, prefetch))
code.append("}")
code.append("if (normalize_by_lengths == false) {")
for i in range(0, uf):
j = 8 * i
code.append(
"_mm256_storeu_ps(&op[" + str(j) + "], vop" + str(j) + ");")
code.append("} else if (lengths[rangeIndex]) {")
# inv of length
code.append(
"__m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]);")
for i in range(0, uf):
j = 8 * i
code.append(
"_mm256_storeu_ps(&op[" + str(j) + "], _mm256_mul_ps(" + "vop" + str(j) + ", vlen_inv));")
code.append("}")
code.append("}")
return code
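# --- Illustrative usage sketch (not part of the original script) ---
# unroll() only returns a list of C++ source lines; for example, this
# would print the inner loop unrolled over two 8-float AVX2 registers
# (block_size 16) for the non-fused float path. Never called during
# generation.
def _print_unroll_example():
    for line in unroll(2, "int64_t", "float", "float", True, "AVX2", False):
        print(line)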
def generic(IndexType, InType, OutType, use_weights, isa, fused):
def compute(InType, use_weights, isa):
code = []
if InType == "float":
code.append(
"_mm256_storeu_ps(&op[j], \
_mm256_fmadd_ps(vwgt,_mm256_loadu_ps(&ip[j]), _mm256_loadu_ps(&op[j])) \
);"
)
elif InType == "float16":
code.append(
"_mm256_storeu_ps(&op[j], \
_mm256_fmadd_ps(vwgt, \
_mm256_cvtph_ps(_mm_loadu_si128(reinterpret_cast<const __m128i*>(&ip[j]))), _mm256_loadu_ps(&op[j])) \
);"
)
elif InType == "uint8_t":
code.append(
"_mm256_storeu_ps(&op[j], \
_mm256_fmadd_ps(vwgt, \
_mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(&ip[j])))), \
_mm256_add_ps(_mm256_loadu_ps(&op[j]), vbio) ) \
);"
)
else:
assert False
code.append("_mm_prefetch((&ip_next_T0[j]), _MM_HINT_T0);")
return code
code = []
code.append(IndexType + " dataInd = 0;")
code.append("for (" + IndexType +
" rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {")
code.append(OutType + " *op = &out[rangeIndex * block_size];")
# initialize to 0
code.append("TIndex j = 0;")
code.append("for(; j + 8 <= block_size; j += 8) {")
code.append("_mm256_storeu_ps(op + j, _mm256_setzero_ps());")
code.append("}")
code.append("for(; j < block_size; j++) {")
code.append("op[j] = 0.0f;")
code.append("}")
# inner loop
code.append("for (" + IndexType +
" start = dataInd; dataInd < start + lengths[rangeIndex]; ++dataInd) {")
code.append("const " + IndexType + " idx = indices[dataInd];")
code.append(
'CAFFE_ENFORCE(idx >=0 && idx < data_size, "Index ", dataInd, "' +
' is out of bounds: ", idx, ", range 0 to ", data_size);')
if InType == "uint8_t":
code.append(OutType + " wgt = 1.f;")
code.append(OutType + " bio;")
code.append("if (weights) {")
code.append(
"wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];")
code.append("}")
if fused:
code.append(
'const float* scale_bias = reinterpret_cast<'
'const float*>(&input[idx * fused_block_size + block_size]);'
)
code.append("bio = wgt * scale_bias[1];")
code.append("wgt = wgt * scale_bias[0];")
else:
code.append("assert (scale_bias);")
code.append("bio = wgt * scale_bias[2 * idx + 1];")
code.append("wgt = wgt * scale_bias[2 * idx];")
code.append("__m256 vbio = _mm256_set1_ps(bio);")
else:
code.append(OutType + " wgt = 1.f;")
code.append("if (weights) {")
code.append(
"wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];")
code.append("}")
code.append("__m256 vwgt = _mm256_set1_ps(wgt);")
code.append("const {} *ip = &input[idx * fused_block_size];".format(InType))
code.append(
'const {} next_T0 = (dataInd < index_size - prefdist_T0)'
' ? (dataInd + prefdist_T0) : dataInd;'.format(IndexType)
)
code.append("const " + IndexType + " idx_pref_T0 = indices[next_T0];")
code.append(
"CAFFE_ENFORCE(idx_pref_T0 >= 0 && idx_pref_T0 < data_size);")
code.append(
"const {} *ip_next_T0 = &input[idx_pref_T0 * fused_block_size];".
format(InType)
)
# compute and store main loop
code.append("j = 0;")
code.append("for(; j + 8 <= block_size; j += 8) {")
code.extend(compute(InType, use_weights, isa))
code.append("}")
# leftover
if InType == "float16":
code.append("float16 vtmp1[8] CAFFE2_ALIGNED(64);")
code.append("for(; j < block_size; j++) {")
if InType == "float":
code.append("op[j] += wgt * ip[j];")
elif InType == "float16":
code.append("vtmp1[0] = ip[j];")
code.append("__m256 vtmp2 = _mm256_cvtph_ps(*((__m128i*)vtmp1));")
code.append("op[j] += wgt * ((float*)(&vtmp2))[0];")
elif InType == "uint8_t":
code.append("op[j] += wgt * ((float)ip[j]) + bio;")
else:
assert False
code.append("}")
code.append("}")
code.append("if (normalize_by_lengths && lengths[rangeIndex]) {")
code.append("float len_inv = 1.0f / lengths[rangeIndex];")
code.append("__m256 vlen_inv = _mm256_set1_ps(len_inv);")
code.append("j = 0;")
code.append("for(; j + 8 <= block_size; j += 8) {")
code.append(
"_mm256_storeu_ps(&op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv));")
code.append("}")
code.append("for(; j < block_size; j++) {")
code.append("op[j] = len_inv * op[j];")
code.append("}")
code.append("}")
code.append("}")
return code
# start main code
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', help="file name")
parser.add_argument('--fused', action='store_true')
opts = parser.parse_args()
if opts.filename:
filename = opts.filename
elif opts.fused:
filename = "embedding_lookup_fused_8bit_rowwise_avx2.cc"
else:
filename = "embedding_lookup_avx2.cc"
fout = open(filename, 'w')
options = [["int32_t", "float", "float"],
["int64_t", "float", "float"],
["int32_t", "float16", "float"],
["int64_t", "float16", "float"],
["int32_t", "uint8_t", "float"],
["int64_t", "uint8_t", "float"]]
code = []
# includes
code.append("//// --------------------------")
code.append("//// ATTENTION:")
code.append("//// THIS CODE IS AUTOGENERATED")
code.append("//// BY {}".format(sys.argv[0]))
code.append("//// DO NOT MODIFY!!!")
code.append("//// --------------------------\n\n")
code.append("#include <caffe2/core/types.h>")
code.append("#include <caffe2/core/common.h>")
code.append("#include <immintrin.h>")
code.append("\n")
code.append("namespace caffe2 {\n")
for o in options:
[IndexType, InType, OutType] = o
prefix = 'Fused8BitRowwise' if opts.fused else ''
code.append('template <bool IS_WEIGHT_POSITIONAL>')
fn_base = '{}EmbeddingLookup_{}_{}_{}'.format(
prefix, IndexType, InType, OutType
)
suffix = '__avx2_fma'
fn = "static void " + fn_base + suffix
code.append(fn + "(")
args = []
args.append("const TIndex block_size,")
args.append("const TIndex output_size,")
args.append("const TIndex index_size,")
args.append("const TIndex data_size,")
args.append("const " + InType + "* input,")
args.append("const " + IndexType + "* indices,")
args.append("const int* lengths,")
args.append("const float* weights,")
if not opts.fused:
args.append("const float* scale_bias,")
args.append("bool normalize_by_lengths,")
args.append(OutType + "* out)")
code += args
code.append("{")
code.append("const " + IndexType + " prefdist_T0 = 16;")
# block_size is the number of elements and fused_block_size is the size of
# an entire row, including scale and bias.
offset = (8 // sizeof[InType]) if opts.fused else 0
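    # e.g. for the fused uint8_t layout each row carries 8 extra bytes
    # (one float scale plus one float bias), i.e. 8 // sizeof[InType] = 8
    # extra uint8_t elements, so fused_block_size = block_size + 8.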
code.append(
"const {} fused_block_size = block_size + {};".
format(IndexType, offset)
)
#code.append("printf(\"calling " + fn + "\\n\");");
if not opts.fused:
if InType != "uint8_t":
code.append(
'CAFFE_ENFORCE(scale_bias == nullptr,'
' "scale_bias must be nullptr");'
)
else:
code.append(
'CAFFE_ENFORCE(scale_bias != nullptr,'
' "scale_bias must not be nullptr");'
)
code.append("if (block_size == 128) {")
code += unroll(16, IndexType, InType, OutType, True, "AVX2", opts.fused)
code.append("} else if (block_size == 64) {")
code += unroll(8, IndexType, InType, OutType, True, "AVX2", opts.fused)
code.append("} else if (block_size == 32) {")
code += unroll(4, IndexType, InType, OutType, True, "AVX2", opts.fused)
code.append("} else if (block_size == 16) {")
code += unroll(2, IndexType, InType, OutType, True, "AVX2", opts.fused)
code.append("} else {")
code.append("// generic code")
code += generic(IndexType, InType, OutType, True, "AVX2", opts.fused)
code.append("}")
code.append("}")
for is_weight_positional in ['false', 'true']:
code.append(
"void " + fn_base + "_" + is_weight_positional + suffix + "(")
code += args
code.append("{")
code.append(fn_base + suffix + "<" + is_weight_positional + ">(")
code.append("block_size,")
code.append("output_size,")
code.append("index_size,")
code.append("data_size,")
code.append("input,")
code.append("indices,")
code.append("lengths,")
code.append("weights,")
if not opts.fused:
code.append("scale_bias,")
code.append("normalize_by_lengths,")
code.append("out);")
code.append("}")
code.append("\n")
code.append("} // namespace caffe2")
for c in code:
#print(c, file = fout)
fout.write(c + "\n")
fout.close()
print("Created " + filename)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
weight = np.random.rand(n_weight).astype(np.float32)
alpha = np.random.rand(n_alpha).astype(np.float32)
val = val.astype(np.float32)
key = key.astype(np.int64)
seg = seg.astype(np.int32)
op = core.CreateOperator(
'SparseFunHash',
['val', 'key', 'seg', 'weight', 'alpha'],
['out'],
num_outputs=n_out)
# Gradient check wrt weight
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 3, [0])
# Gradient check wrt alpha
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 4, [0])
op2 = core.CreateOperator(
'SparseFunHash',
['val', 'key', 'seg', 'weight'],
['out'],
num_outputs=n_out)
# Gradient check wrt weight
self.assertGradientChecks(
gc, op2, [val, key, seg, weight], 3, [0])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestTTPad(hu.HypothesisTestCase):
@given(K=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
A = np.random.rand(M, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.RunOperatorOnce(op)
def tt_pad_ref(A_):
M_ = A_.shape[0]
if M_ % K == 0:
new_dim0 = M_
else:
new_dim0 = (M_ // K + 1) * K
            return (np.vstack((A_, np.zeros((new_dim0 - M_, A_.shape[1])))),
                    np.array([A_.shape[0]]))
# Check against numpy reference
self.assertReferenceChecks(gc, op, [A], tt_pad_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [A], [0])
# Gradient check wrt A
self.assertGradientChecks(gc, op, [A], 0, [0])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
weight = np.random.rand(n_weight).astype(np.float32)
alpha = np.random.rand(n_alpha).astype(np.float32)
val = val.astype(np.float32)
key = key.astype(np.int64)
seg = seg.astype(np.int32)
op = core.CreateOperator(
'FunHash',
['val', 'key', 'seg', 'weight', 'alpha'],
['out'],
num_outputs=n_out)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, [val, key, seg, weight, alpha], [0])
# Gradient check wrt weight
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 3, [0])
# Gradient check wrt alpha
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 4, [0])
op2 = core.CreateOperator(
'FunHash',
['val', 'key', 'seg', 'weight'],
['out'],
num_outputs=n_out)
# Check over multiple devices
self.assertDeviceChecks(
dc, op2, [val, key, seg, weight], [0])
# Gradient check wrt weight
self.assertGradientChecks(
gc, op2, [val, key, seg, weight], 3, [0])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.sparse import coo_matrix
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
def test_reshape(old_shape, new_shape, stride_only=False):
blob_in0 = 'col'
blob_out0 = 'col_out'
blob_in1 = 'row'
blob_out1 = 'row_out'
old_shape_for_op = (-1, old_shape[1]) if stride_only else old_shape
op = core.CreateOperator('SparseMatrixReshape',
[blob_in0, blob_in1],
[blob_out0, blob_out1],
old_shape=old_shape_for_op,
new_shape=new_shape)
A = np.random.random_sample(old_shape)
A[np.random.random_sample(old_shape) > .5] = 0
A_coo = coo_matrix(A)
old_row, old_col = A_coo.row, A_coo.col
workspace.FeedBlob(blob_in0, old_col.astype(np.int64))
workspace.FeedBlob(blob_in1, old_row.astype(np.int32))
workspace.RunOperatorOnce(op)
A_new_coo = coo_matrix(A.reshape(new_shape))
new_row, new_col = A_new_coo.row, A_new_coo.col
col_out = workspace.FetchBlob(blob_out0)
row_out = workspace.FetchBlob(blob_out1)
np.testing.assert_array_equal(col_out, new_col)
np.testing.assert_array_equal(row_out, new_row)
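# A reference sketch (illustrative, never called here) of the index
# arithmetic being tested: each nonzero's (row, col) pair maps through its
# row-major linear offset, so only the column counts of the two shapes
# matter -- which is why the stride_only case can pass (-1, old_ncols).
def _reshape_indices_ref(old_row, old_col, old_ncols, new_ncols):
    linear = old_row.astype(np.int64) * old_ncols + old_col
    return linear // new_ncols, linear % new_ncols  # (new_row, new_col)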
class TestSparseMatrixReshapeOp(TestCase):
def test_basic_reshape(self):
test_reshape(old_shape=(3, 4), new_shape=(4, 3))
def test_missing_dim(self):
test_reshape(old_shape=(2, 8), new_shape=(-1, 4))
def test_stride_only(self):
test_reshape(old_shape=(2, 8), new_shape=(-1, 4), stride_only=True)
def test_sparse_reshape_mm(self):
M, N, K = 300, 400, 500
A = np.random.rand(M, K).astype(np.float32)
A_sparse = A * (np.random.rand(*A.shape) > .5)
A_sparse = A_sparse.reshape((K, M))
A_coo = coo_matrix(A_sparse)
idx0, idx1, a = A_coo.row, A_coo.col, A_coo.data
B = np.random.rand(K, N).astype(np.float32)
workspace.FeedBlob('col', idx1.astype(np.int64))
workspace.FeedBlob('row', idx0.astype(np.int32))
workspace.FeedBlob('B', B)
workspace.FeedBlob('a', a)
reshape_op = core.CreateOperator(
'SparseMatrixReshape',
['col', 'row'],
['new_col', 'new_row'],
old_shape=(K, M),
new_shape=(M, K))
mm_op = core.CreateOperator(
'SparseUnsortedSegmentWeightedSum',
['B', 'a', 'new_col', 'new_row'],
['Y'])
workspace.RunOperatorOnce(reshape_op)
workspace.RunOperatorOnce(mm_op)
Y = workspace.FetchBlob('Y')
np.testing.assert_allclose(A_sparse.reshape(M, K).dot(B), Y,
rtol=1e-4)
|
## @package convnet_benchmarks
# Module caffe2.experiments.python.convnet_benchmarks
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Benchmark for common convnets.
(NOTE: the numbers below were collected before the parameter update step
was added; TODO: update them.)
Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, are as follows (time reported below is per-batch time,
forward / forward+backward):
                       CuDNN V3        CuDNN v4
    AlexNet            32.5 / 108.0    27.4 / 90.1
    OverFeat           113.0 / 342.3   91.7 / 276.5
    Inception          134.5 / 485.8   125.7 / 450.6
    VGG (batch 64)     200.8 / 650.0   164.1 / 551.7
Speed on Inception with varied batch sizes and CuDNN v4 is as follows:
    Batch Size    Speed per batch    Speed per image
        16         22.8 /  72.7        1.43 / 4.54
        32         38.0 / 127.5        1.19 / 3.98
        64         67.2 / 233.6        1.05 / 3.65
       128        125.7 / 450.6        0.98 / 3.52
Speed on Tesla M40, with 10 warmup steps and 10 main steps and with cudnn
v4, is as follows:
    AlexNet            68.4 / 218.1
    OverFeat           210.5 / 630.3
    Inception          300.2 / 1122.2
    VGG (batch 64)     405.8 / 1327.7
(Note that these numbers involve a "full" backprop, i.e. the gradient
with respect to the input image is also computed.)
To get the numbers, simply run:
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA
for BS in 16 32 64 128; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception
done
Note that VGG needs to be run at batch 64 due to memory limit on the backward
pass.
"""
import argparse
import time
from caffe2.python import cnn, workspace, core
import caffe2.python.SparseTransformer as SparseTransformer
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
for j in range(width):
current = "fc_{}_{}".format(i, j) if i > 0 else "data"
next_ = "fc_{}_{}".format(i + 1, j)
model.FC(
current, next_,
dim_in=d, dim_out=d,
weight_init=model.XavierInit,
bias_init=model.XavierInit)
model.Sum(["fc_{}_{}".format(depth, j)
for j in range(width)], ["sum"])
model.FC("sum", "last",
dim_in=d, dim_out=1000,
weight_init=model.XavierInit,
bias_init=model.XavierInit)
xent = model.LabelCrossEntropy(["last", "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, d
def AlexNet(order):
model = cnn.CNNModelHelper(order, name="alexnet",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4,
pad=2
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2)
conv2 = model.Conv(
pool1,
"conv2",
64,
192,
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
192,
384,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
384,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
conv5 = model.Conv(
relu4,
"conv5",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2)
fc6 = model.FC(
pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = model.Relu(fc6, "fc6")
fc7 = model.FC(
relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = model.Relu(fc7, "fc7")
fc8 = model.FC(
relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 224
def OverFeat(order):
model = cnn.CNNModelHelper(order, name="overfeat",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
96,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
conv2 = model.Conv(
pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {})
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
512,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
conv5 = model.Conv(
relu4,
"conv5",
1024,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
pool5 = model.MaxPool(relu5, "pool5", kernel=2, stride=2)
fc6 = model.FC(
pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = model.Relu(fc6, "fc6")
fc7 = model.FC(
relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = model.Relu(fc7, "fc7")
fc8 = model.FC(
relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 231
def VGGA(order):
model = cnn.CNNModelHelper(order, name='vgg-a',
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
conv2 = model.Conv(
pool1,
"conv2",
64,
128,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
128,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
pool4 = model.MaxPool(relu4, "pool4", kernel=2, stride=2)
conv5 = model.Conv(
pool4,
"conv5",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
conv6 = model.Conv(
relu5,
"conv6",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu6 = model.Relu(conv6, "conv6")
pool6 = model.MaxPool(relu6, "pool6", kernel=2, stride=2)
conv7 = model.Conv(
pool6,
"conv7",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu7 = model.Relu(conv7, "conv7")
conv8 = model.Conv(
relu7,
"conv8",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu8 = model.Relu(conv8, "conv8")
pool8 = model.MaxPool(relu8, "pool8", kernel=2, stride=2)
fcix = model.FC(
pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
reluix = model.Relu(fcix, "fcix")
fcx = model.FC(
reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relux = model.Relu(fcx, "fcx")
fcxi = model.FC(
relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fcxi, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 231
def net_DAG_Builder(model):
print("====================================================")
print(" Start Building DAG ")
print("====================================================")
net_root = SparseTransformer.netbuilder(model)
return net_root
def _InceptionModule(
model, input_blob, input_depth, output_name, conv1_depth, conv3_depths,
conv5_depths, pool_depth
):
# path 1: 1x1 conv
conv1 = model.Conv(
input_blob, output_name + ":conv1", input_depth, conv1_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
conv1 = model.Relu(conv1, conv1)
# path 2: 1x1 conv + 3x3 conv
conv3_reduce = model.Conv(
input_blob, output_name +
":conv3_reduce", input_depth, conv3_depths[0],
1, ('XavierFill', {}), ('ConstantFill', {})
)
conv3_reduce = model.Relu(conv3_reduce, conv3_reduce)
conv3 = model.Conv(
conv3_reduce,
output_name + ":conv3",
conv3_depths[0],
conv3_depths[1],
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
conv3 = model.Relu(conv3, conv3)
# path 3: 1x1 conv + 5x5 conv
conv5_reduce = model.Conv(
input_blob, output_name +
":conv5_reduce", input_depth, conv5_depths[0],
1, ('XavierFill', {}), ('ConstantFill', {})
)
conv5_reduce = model.Relu(conv5_reduce, conv5_reduce)
conv5 = model.Conv(
conv5_reduce,
output_name + ":conv5",
conv5_depths[0],
conv5_depths[1],
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
conv5 = model.Relu(conv5, conv5)
# path 4: pool + 1x1 conv
pool = model.MaxPool(
input_blob,
output_name + ":pool",
kernel=3,
stride=1,
pad=1
)
pool_proj = model.Conv(
pool, output_name + ":pool_proj", input_depth, pool_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
pool_proj = model.Relu(pool_proj, pool_proj)
output = model.Concat([conv1, conv3, conv5, pool_proj], output_name)
return output
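# Note: the channel-wise Concat makes each module's output depth
# conv1_depth + conv3_depths[1] + conv5_depths[1] + pool_depth; e.g. for
# inc3 below that is 64 + 128 + 32 + 32 = 256, matching the input_depth
# passed to inc4.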
def Inception(order):
model = cnn.CNNModelHelper(order, name="inception",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
7,
('XavierFill', {}),
('ConstantFill', {}),
stride=2,
pad=3
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2, pad=1)
conv2a = model.Conv(
pool1, "conv2a", 64, 64, 1, ('XavierFill', {}), ('ConstantFill', {})
)
conv2a = model.Relu(conv2a, conv2a)
conv2 = model.Conv(
conv2a,
"conv2",
64,
192,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2, pad=1)
# Inception modules
inc3 = _InceptionModule(
model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32
)
inc4 = _InceptionModule(
model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64
)
pool5 = model.MaxPool(inc4, "pool5", kernel=3, stride=2, pad=1)
inc5 = _InceptionModule(
model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64
)
inc6 = _InceptionModule(
model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64
)
inc7 = _InceptionModule(
model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64
)
inc8 = _InceptionModule(
model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64
)
inc9 = _InceptionModule(
model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128
)
pool9 = model.MaxPool(inc9, "pool9", kernel=3, stride=2, pad=1)
inc10 = _InceptionModule(
model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128
)
inc11 = _InceptionModule(
model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128
)
pool11 = model.AveragePool(inc11, "pool11", kernel=7, stride=1)
fc = model.FC(
pool11, "fc", 1024, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
# It seems that Soumith's benchmark does not have softmax on top
# for Inception. We will add it anyway so we can have a proper
# backward pass.
pred = model.Softmax(fc, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 224
def AddInput(model, batch_size, db, db_type):
"""Adds the data input part."""
data_uint8, label = model.TensorProtosDBInput(
[], ["data_uint8", "label"], batch_size=batch_size,
db=db, db_type=db_type
)
data = model.Cast(data_uint8, "data_nhwc", to=core.DataType.FLOAT)
data = model.NHWC2NCHW(data, "data")
data = model.Scale(data, data, scale=float(1. / 256))
data = model.StopGradient(data, data)
return data, label
def AddParameterUpdate(model):
""" Simple plain SGD update -- not tuned to actually train the models """
ITER = model.Iter("iter")
LR = model.LearningRate(
ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
for param in model.params:
param_grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, param_grad, LR], param)
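        # WeightedSum computes param <- 1.0 * param + LR * param_grad in
        # place; base_lr above is negative, so this is a descent step.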
def Benchmark(model_gen, arg):
model, input_size = model_gen(arg.order)
model.Proto().type = arg.net_type
model.Proto().num_workers = arg.num_workers
# In order to be able to run everything without feeding more stuff, let's
# add the data and label blobs to the parameter initialization net as well.
if arg.order == "NCHW":
input_shape = [arg.batch_size, 3, input_size, input_size]
else:
input_shape = [arg.batch_size, input_size, input_size, 3]
if arg.model == "MLP":
input_shape = [arg.batch_size, input_size]
model.param_init_net.GaussianFill(
[],
"data",
shape=input_shape,
mean=0.0,
std=1.0
)
model.param_init_net.UniformIntFill(
[],
"label",
shape=[arg.batch_size, ],
min=0,
max=999
)
if arg.forward_only:
print('{}: running forward only.'.format(arg.model))
else:
print('{}: running forward-backward.'.format(arg.model))
model.AddGradientOperators(["loss"])
AddParameterUpdate(model)
if arg.order == 'NHWC':
print(
'==WARNING==\n'
'NHWC order with CuDNN may not be supported yet, so I might\n'
'exit suddenly.'
)
if not arg.cpu:
model.param_init_net.RunAllOnGPU()
model.net.RunAllOnGPU()
if arg.dump_model:
# Writes out the pbtxt for benchmarks on e.g. Android
with open(
"{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
) as fid:
fid.write(str(model.param_init_net.Proto()))
with open("{0}.pbtxt".format(arg.model,
arg.batch_size), "w") as fid:
fid.write(str(model.net.Proto()))
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for i in range(arg.warmup_iterations):
workspace.RunNet(model.net.Proto().name)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("run", model.net, arg.iterations))
start = time.time()
workspace.RunPlan(plan)
print('Spent: {}'.format((time.time() - start) / arg.iterations))
if arg.layer_wise_benchmark:
print('Layer-wise benchmark.')
workspace.BenchmarkNet(model.net.Proto().name, 1, arg.iterations, True)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument("--model", type=str, help="The model to benchmark.")
parser.add_argument(
"--order",
type=str,
default="NCHW",
help="The order to evaluate."
)
parser.add_argument(
"--cudnn_ws",
type=int,
default=-1,
help="The cudnn workspace size."
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of iterations to run the network."
)
parser.add_argument(
"--warmup_iterations",
type=int,
default=10,
help="Number of warm-up iterations before benchmarking."
)
parser.add_argument(
"--forward_only",
action='store_true',
help="If set, only run the forward pass."
)
parser.add_argument(
"--layer_wise_benchmark",
action='store_true',
help="If True, run the layer-wise benchmark as well."
)
parser.add_argument(
"--cpu",
action='store_true',
help="If True, run testing on CPU instead of GPU."
)
parser.add_argument(
"--dump_model",
action='store_true',
help="If True, dump the model prototxts to disk."
)
parser.add_argument("--net_type", type=str, default="dag")
parser.add_argument("--num_workers", type=int, default=2)
return parser
if __name__ == '__main__':
args = GetArgumentParser().parse_args()
if (
not args.batch_size or not args.model or not args.order or
not args.cudnn_ws
):
GetArgumentParser().print_help()
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
model_map = {
'AlexNet': AlexNet,
'OverFeat': OverFeat,
'VGGA': VGGA,
'Inception': Inception,
'MLP': MLP,
}
Benchmark(model_map[args.model], args)
|
## @package device_reduce_sum_bench
# Module caffe2.experiments.python.device_reduce_sum_bench
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
from six import add_metaclass
import numpy as np
from caffe2.python import workspace, core
from caffe2.python.hypothesis_test_util import runOpBenchmark, gpu_do
logging.basicConfig()
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.INFO)
ALL_BENCHMARKS = {}
class BenchmarkMeta(type):
def __new__(metacls, name, bases, class_dict):
cls = type.__new__(metacls, name, bases, class_dict)
if name != 'Benchmark':
ALL_BENCHMARKS[name] = cls
return cls
@add_metaclass(BenchmarkMeta)
class Benchmark(object):
def __init__(self):
self.results = []
def display(self):
print('Results ({}):'.format(type(self).__name__))
print('input size ms/iter')
print('------------------------------ -----------')
for size, ms in self.results:
print('{!s:<30} {:.4f}'.format(size, ms))
class SumElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
        for n in (10 ** i for i in range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SumSqrElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
        for n in (10 ** i for i in range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SoftMaxWithLoss(Benchmark):
def run(self):
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
)
        for n in (10 ** i for i in range(8)):
            for D in (10 ** j for j in range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
self.results.append(((n, D), ret[1]))
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(__file__))
parser.add_argument('-b', '--benchmarks', nargs='+',
default=ALL_BENCHMARKS.keys(),
                        help='benchmarks to run (default: %(default)s)')
return parser.parse_args()
def main():
args = parse_args()
benchmarks = [ALL_BENCHMARKS[name]() for name in args.benchmarks]
for bench in benchmarks:
bench.run()
for bench in benchmarks:
bench.display()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestTTContraction(hu.HypothesisTestCase):
@given(D=st.integers(min_value=5, max_value=20),
K=st.integers(min_value=5, max_value=20),
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
op = core.CreateOperator(
'TTContraction',
['A', 'B'],
['C'],
K=K,
M=M,
N=N)
workspace.RunOperatorOnce(op)
def tt_contraction_ref(A_, B_):
return ((A_[:, :, np.newaxis] * B_[:, :, np.newaxis, :])
.sum(axis=1).flatten()),
# Check against numpy reference
self.assertReferenceChecks(gc, op, [A, B], tt_contraction_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [A, B], [0])
# Gradient check wrt A
self.assertGradientChecks(gc, op, [A, B], 0, [0])
# Gradient check wrt B
self.assertGradientChecks(gc, op, [A, B], 1, [0])
|
## @package SparseTransformer
# Module caffe2.experiments.python.SparseTransformer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
import scipy.sparse
class NetDefNode():
def __init__(self, name, optype, p=None, op=None):
self.name = name
self.optype = optype
self.ops = {}
self.prev = {}
self.insertInput(p)
self.visited = False
self.op = op
def insertInput(self, p):
"""
        Insert an input to this op and maintain the output
        links of the previous op(s).
        p: a node or a list of nodes
"""
if isinstance(p, list):
for i in p:
self.prev[i.name] = i
i.ops[self.name] = self
elif isinstance(p, NetDefNode):
self.prev[p.name] = p
p.ops[self.name] = self
def deleteInput(self, p):
if isinstance(p, NetDefNode):
del self.prev[p.name]
del p.ops[self.name]
def maskNallocate(weight_name):
"""
    Combine mask and weights into a CSR representation:
    create the wcsr (data), iw (indptr) and jw (indices) blobs
    and return their names.
"""
w = workspace.FetchBlob(weight_name)
w_csr = scipy.sparse.csr_matrix(w)
wcsr = w_csr.data
iw = w_csr.indptr
jw = w_csr.indices
workspace.FeedBlob(weight_name + "wcsr", wcsr)
workspace.FeedBlob(weight_name + "iw", iw)
workspace.FeedBlob(weight_name + "jw", jw)
return weight_name + "wcsr", weight_name + "iw", weight_name + "jw"
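# Illustrative example of the CSR split above: for
#     w = [[1., 0., 2.],
#          [0., 0., 3.]]
# scipy.sparse.csr_matrix(w) gives data [1., 2., 3.], indptr [0, 2, 3]
# and indices [0, 2, 2], which become the "...wcsr", "...iw" and "...jw"
# blobs respectively.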
def transFCRelu(cur, id2node, name2id, ops, model):
"""
Add trans before and after this FC_Prune->(Relu)->FC_Prune chain.
"""
# 1. add trans before the start of this chain
# assuming that cur is a FC_Prune, and it has only one input
    pre = next(iter(cur.prev.values()))
# Create a node /op and insert it.
# TODO(wyiming): check whether it is correct here
current_blob = model.Transpose(cur.op.input[0], cur.op.input[0] + "_trans")
# print model.net.Proto()
trans_op = model.net.Proto().op[-1]
trans_node = NetDefNode(trans_op.output[0], "Transpose", pre, trans_op)
trans_node.visited = True
pre_new = trans_node
# 2. use while loop to visit the chain
while True:
# breakup with the parent
cur.deleteInput(pre)
if not (cur.optype == "FC_Prune" or cur.optype == "Relu"):
print("Reaching the end of the chain")
break
if len(cur.ops) > 1:
print("A FC/Relu giving more than 1 useful outputs")
if cur.optype == "FC_Prune":
op = cur.op
wcsr, iw, jw = maskNallocate(op.input[1])
bias_name = op.input[3]
# TODO(wyiming): create a new Op here
current_blob = model.FC_Sparse(current_blob,
cur.op.output[0] + "_Sparse",
wcsr, iw, jw, bias_name)
sps_op = model.net.Proto().op[-1]
sps_node = NetDefNode(cur.op.output[0] + "_Sparse",
"FC_Sparse",
pre_new, sps_op)
sps_node.visited = True
pre_new = sps_node
if cur.optype == "Relu":
op = cur.op
current_blob = model.Relu(current_blob, current_blob)
rel_op = model.net.Proto().op[-1]
rel_node = NetDefNode(str(current_blob), "Relu",
pre_new, rel_op)
rel_node.visited = True
pre_new = rel_node
cur.visited = True
pre = cur
flag = False
for _, temp in cur.ops.items():
if temp.optype == "Relu" or temp.optype == "FC_Prune":
flag = True
cur = temp
if not flag:
# assume that there is only 1 output that is not a Print op
cur = next(iter(cur.ops.values()))
cur.deleteInput(pre)
print("No FC/Relu children")
print(cur.op.type)
break
# 3. add trans after this chain like 1.
current_blob = model.Transpose(current_blob, pre.op.output[0])
trans_op = model.net.Proto().op[-1]
trans_node = NetDefNode(str(current_blob), "Transpose", pre_new, trans_op)
trans_node.visited = True
cur.insertInput(trans_node)
print(cur.prev)
print(trans_node.ops)
def Prune2Sparse(cur, id2node, name2id, ops, model):
# Assume that FC and Relu takes in only 1 input;
# If not raise warning
if not cur.visited and cur.optype == "FC_Prune":
transFCRelu(cur, id2node, name2id, ops, model)
cur.visited = True
for name, n in cur.ops.items():
Prune2Sparse(n, id2node, name2id, ops, model)
def net2list(net_root):
"""
Use topological order(BFS) to print the op of a net in a list
"""
bfs_queue = []
op_list = []
cur = net_root
for _, n in cur.ops.items():
bfs_queue.append(n)
while bfs_queue:
node = bfs_queue[0]
bfs_queue = bfs_queue[1:]
op_list.append(node.op)
for _, n in node.ops.items():
bfs_queue.append(n)
return op_list
def netbuilder(model):
print("Welcome to model checker")
proto = model.net.Proto()
net_name2id = {}
net_id2node = {}
net_root = NetDefNode("net_root", "root", None)
for op_id, op in enumerate(proto.op):
if op.type == "Print":
continue
op_name = '%s/%s (op#%d)' % (op.name, op.type, op_id) \
if op.name else '%s (op#%d)' % (op.type, op_id)
# print(op_name)
op_node = NetDefNode(op_name, op.type, op=op)
net_id2node[op_id] = op_node
if_has_layer_input = False
for input_name in op.input:
if input_name not in net_name2id:
# assume that names not seen before are non-layers
# TODO: write a non-layer checker and log it
continue
op_node.insertInput(net_id2node[net_name2id[input_name]])
if_has_layer_input = True
if not if_has_layer_input:
op_node.insertInput(net_root)
for output_name in op.output:
net_name2id[output_name] = op_id
return net_root, net_name2id, net_id2node
|
## @package net_construct_bench
# Module caffe2.experiments.python.net_construct_bench
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import time
from caffe2.python import workspace, data_parallel_model
from caffe2.python import cnn
import caffe2.python.models.resnet as resnet
'''
Simple benchmark that creates a data-parallel resnet-50 model
and measures the construction time.
'''
logging.basicConfig()
log = logging.getLogger("net_construct_bench")
log.setLevel(logging.DEBUG)
def AddMomentumParameterUpdate(train_model, LR):
'''
Add the momentum-SGD update.
'''
params = train_model.GetParams()
assert len(params) > 0
ONE = train_model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
NEGONE = train_model.param_init_net.ConstantFill(
[], 'NEGONE', shape=[1], value=-1.0,
)
for param in params:
param_grad = train_model.param_to_grad[param]
param_momentum = train_model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
# Update param_grad and param_momentum in place
train_model.net.MomentumSGD(
[param_grad, param_momentum, LR],
[param_grad, param_momentum],
momentum=0.9,
nesterov=1
)
# Update parameters by applying the moment-adjusted gradient
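# WeightedSum computes: param <- 1.0 * param + (-1.0) * param_grad,
# using the ONE / NEGONE scalar blobs created above.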
train_model.WeightedSum(
[param, ONE, param_grad, NEGONE],
param
)
def Create(args):
gpus = list(range(args.num_gpus))
log.info("Running on gpus: {}".format(gpus))
# Create CNNModelHelper object
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet50",
use_cudnn=True,
cudnn_exhaustive_search=False
)
# Model building functions
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
label="label",
)
model.Accuracy([softmax, "label"], "accuracy")
return [loss]
# SGD
def add_parameter_update_ops(model):
model.AddWeightDecay(1e-4)
ITER = model.Iter("ITER")
stepsz = int(30)
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=0.1,
policy="step",
stepsize=stepsz,
gamma=0.1,
)
AddMomentumParameterUpdate(model, LR)
def add_image_input(model):
pass
start_time = time.time()
# Create parallelized model
data_parallel_model.Parallelize_GPU(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnet50_model_ops,
param_update_builder_fun=add_parameter_update_ops,
devices=gpus,
)
ct = time.time() - start_time
train_model.net._CheckLookupTables()
log.info("Model create for {} gpus took: {} secs".format(len(gpus), ct))
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: Benchmark for net construction"
)
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPUs.")
args = parser.parse_args()
Create(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
import cProfile
cProfile.run('main()', sort="cumulative")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nnpack:nnpack_ops")
np.random.seed(1)
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
def has_avx2():
import subprocess
try:
subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"])
return True
except subprocess.CalledProcessError:
# grep exits with rc 1 on no matches
return False
@unittest.skipIf(not has_avx2(), "NNPACK requires AVX2")
class NNPackOpsTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 2),
kernel=st.integers(3, 5),
size=st.integers(5, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 5),
groups=st.integers(1, 2))
def test_convolution_correctness(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, groups):
assume(input_channels % groups == 0)
assume(output_channels % groups == 0)
assume(output_channels == input_channels // groups)
assume(stride <= kernel)
if stride != 1:
assume(batch_size == 1)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels, input_channels, kernel, kernel).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
order = "NCHW"
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
kts="TUPLE",
engine=engine,
group=groups,
)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("w").feed(w)
self.ws.create_blob("b").feed(b)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5))
def test_max_pool_correctness(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
order = "NCHW"
outputs = {}
# only 2x2 stride and 2x2 pooling are supported in NNPACK for now
stride = 2
kernel = 2
# NNPACK's pooling strategy differs from Caffe2's, so use pad = 0
pad = 0
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"MaxPool",
["X"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5))
def test_relu_correctness(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"Relu",
["X"],
["Y"],
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5),
alpha=st.floats(0, 1))
def test_leaky_relu_correctness(self, size, input_channels, batch_size,
alpha):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"LeakyRelu",
["X"],
["Y"],
alpha=alpha,
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@settings(timeout=3600)
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
@given(stride=st.integers(1, 1),
pad=st.integers(0, 2),
kernel=st.sampled_from([3, 5, 7]),
size=st.integers(30, 90),
input_channels=st.sampled_from([3, 64, 256]),
output_channels=st.sampled_from([32, 96, 256]),
batch_size=st.sampled_from([32, 64, 96, 128]))
def test_timings(self, stride, pad, kernel, size,
input_channels, output_channels, batch_size):
assume(stride <= kernel)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(output_channels, input_channels,
kernel, kernel).astype(np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
order = "NCHW"
times = {}
for engine in ["", "NNPACK"]:
net = core.Net(engine + "_test")
net.Conv(
["X", "W", "b"], "Y",
order=order,
kernel=kernel,
stride=stride,
pad=pad,
kts="TUPLE",
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("W").feed(w)
self.ws.create_blob("b").feed(b)
self.ws.run(net)
times[engine] = benchmark(self.ws, net)
print("Speedup for NNPACK: {:.2f}".format(
times[""] / times["NNPACK"]))
@settings(timeout=3600)
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
@given(size=st.integers(30, 90),
input_channels=st.sampled_from([3, 64, 256]),
batch_size=st.sampled_from([32, 64, 96, 128]))
def test_relu_timings(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
times = {}
for engine in ["", "NNPACK"]:
net = core.Net(engine + "_test")
net.Relu(
["X"],
["Y"],
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(net)
times[engine] = benchmark(self.ws, net)
print("Speedup for NNPACK: {:.2f}".format(
times[""] / times["NNPACK"]))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, dyndep, test_util
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/warpctc:ctc_ops')
workspace.GlobalInit(["python"])
def softmax(w):
maxes = np.amax(w, axis=-1, keepdims=True)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=-1, keepdims=True)
return dist
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
[
[[0.1, 0.6, 0.1, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1]],
]
).reshape(T, N, alphabet_size).astype(np.float32)
labels = np.asarray([1, 2]).astype(np.int32).reshape(T)
label_lengths = np.asarray([2]).astype(np.int32).reshape(N)
input_lengths = np.asarray([T]).astype(np.int32)
net = core.Net("test-net")
output_blobs = ["costs", "workspace"] if is_test \
else ["inputs_grad_to_be_copied", "costs", "workspace"]
net.CTC(["inputs", "labels", "label_lengths", "input_lengths"],
output_blobs,
is_test=is_test,
device_option=device_option)
if not is_test:
net.AddGradientOperators(["costs"])
self.ws.create_blob("inputs").feed(inputs, device_option=device_option)
self.ws.create_blob("labels").feed(labels)
self.ws.create_blob("label_lengths").feed(label_lengths)
self.ws.create_blob("input_lengths").feed(input_lengths)
self.ws.run(net)
probs = softmax(inputs)
expected = probs[0, 0, 1] * probs[1, 0, 2]
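# With T equal to the label length, the only CTC alignment that
# collapses to the labels [1, 2] is the label sequence itself (no
# blanks fit), so the sequence probability is the product of per-step
# label probabilities and the cost is its negative log.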
self.assertEqual(self.ws.blobs["costs"].fetch().shape, (N,))
self.assertEqual(self.ws.blobs["costs"].fetch().dtype, np.float32)
cost = self.ws.blobs["costs"].fetch()[0]
print(cost)
self.assertAlmostEqual(np.exp(-cost), expected)
if not is_test:
# Make sure inputs_grad was added by AddGradientOperators and
# it is equal to the inputs_grad_to_be_copied blob returned by CTCop
assert np.array_equal(
self.ws.blobs["inputs_grad"].fetch(),
self.ws.blobs["inputs_grad_to_be_copied"].fetch()
)
def test_ctc_cost_cpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=False)
def test_ctc_cost_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
cuda_gpu_id=0),
is_test=False)
def test_ctc_forward_only_cpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=True)
def test_ctc_forward_only_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
cuda_gpu_id=0),
is_test=True)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
import caffe2.python.hypothesis_test_util as hu
np.random.seed(1)
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops')
def gpu_device(i):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = i
return device_option
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
@unittest.skipIf(not workspace.has_gpu_support, "NCCL only on GPU")
class NCCLOpsTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000),
in_place=st.booleans())
def test_nccl_allreduce(self, n, m, in_place):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
op = core.CreateOperator("NCCLAllreduce", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allreduce(*args):
assert len(args) == n
output = np.sum(args, axis=0)
return [output for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allreduce, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000),
root=st.integers(min_value=0,
max_value=workspace.NumCudaDevices() - 1))
def test_nccl_broadcast(self, n, m, root):
assume(root < n)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLBroadcast", inputs, inputs, root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def broadcast(*args):
assert len(args) == n
return [args[root] for _ in range(n)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
broadcast, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000),
# NCCL Reduce seems to deadlock for non-zero roots.
root=st.integers(min_value=0, max_value=0),
in_place=st.booleans())
def test_nccl_reduce(self, n, m, root, in_place):
assume(in_place is False or root == 0)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator(
"NCCLReduce", inputs,
inputs[root] if in_place else b"o", root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce(*args):
assert len(args) == n
return [np.sum(args, axis=0)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_allgather(self, n, m):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLAllGather", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allgather(*args):
assert len(args) == n
return [np.stack(args, axis=0) for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allgather, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_reduce_scatter(self, n, m):
xs = [np.random.randn(n, m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLReduceScatter", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce_scatter(*args):
assert len(args) == n
reduced = sum(args)
assert len(reduced.shape) > 1
ref = [reduced[i, :] for i in range(n)]
return ref
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce_scatter, input_device_options)
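# NOTE: the leading underscore on _test_nccl_sync below keeps it from
# being collected by the test runner (presumably disabled on purpose).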
@given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()),
m=st.integers(min_value=100000, max_value=100000),
iters=st.integers(min_value=1, max_value=100),
net_type=st.sampled_from(["dag", "async_dag", "simple"]))
def _test_nccl_sync(self, n, m, iters, net_type):
inputs = [str("x_{}".format(i)) for i in range(n)]
extra_inputs = [str("xe_{}".format(i)) for i in range(n)]
net = core.Net("asdf")
net.Proto().type = net_type
net.Proto().num_workers = n
for i in range(n):
net.ConstantFill([], inputs[i], shape=[m], value=0.0,
device_option=gpu_device(i))
net.ConstantFill([], extra_inputs[i], shape=[m], value=1.0,
device_option=gpu_device(i))
for _ in range(iters):
net.Sum([inputs[i], extra_inputs[i]], [inputs[i]],
device_option=gpu_device(i))
net.NCCLReduce(inputs, [inputs[0]], device_option=gpu_device(0))
self.ws.run(net)
np.testing.assert_array_equal(
self.ws.blobs[inputs[0]].fetch(),
np.full(shape=(m,), fill_value=iters * n, dtype=np.float32))
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
def test_timings(self):
for n in range(2, workspace.NumCudaDevices()):
for in_place in [False, True]:
xs = [np.random.randn(10 ** 7).astype(np.float32)
for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
net = core.Net("test")
net.NCCLAllreduce(inputs, outputs)
net.RunAllOnGPU()
for i in range(n):
self.ws.create_blob(inputs[i]).feed(xs[i], gpu_device(i))
self.ws.run(net)
net_time = benchmark(self.ws, net)
vanilla = core.Net("vanilla")
muji.Allreduce(vanilla, inputs)
vanilla_time = benchmark(self.ws, vanilla)
print("Speedup for NCCL: {:.2f}".format(
vanilla_time / net_time))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
try:
import cPickle as pickle  # Python 2
except ImportError:
import pickle  # Python 3
from collections import OrderedDict
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, scope
import logging
logging.basicConfig()
log = logging.getLogger("AnyExpOnTerm")
log.setLevel(logging.DEBUG)
def initialize_params_from_file(
model, weights_file, num_xpus, opts,
broadcast_computed_param=False, reset_epoch=False):
start_epoch, lr, best_metric = initialize_master_xpu_model_params(
model, weights_file, opts, reset_epoch)
broadcast_parameters(opts, model, num_xpus, broadcast_computed_param)
return start_epoch, lr, best_metric
def initialize_master_xpu_model_params(model, weights_file, opts, reset_epoch):
log.info("Initializing model params from file: {}".format(weights_file))
with open(weights_file, 'rb') as fopen:  # pickle files must be opened in binary mode
blobs = pickle.load(fopen)
if 'blobs' in blobs:
blobs = blobs['blobs']
start_epoch = 0
best_metric = float('-inf')
if 'epoch' in blobs:
log.info('epoch {} is found in model file'.format(blobs['epoch']))
if not reset_epoch:
start_epoch = blobs['epoch']
else:
log.info('Reset epoch')
else:
log.info('no epoch is found in model file')
lr = opts['model_param']['base_learning_rate']
if 'lr' in blobs:
lr = blobs['lr']
if 'best_metric' in blobs and not reset_epoch:
best_metric = blobs['best_metric']
if model is not None:
log.info('initialize model parameters using weights file: {}'.format(
weights_file
))
ws_blobs = workspace.Blobs()
unscoped_blob_names = OrderedDict()
for blob in model.GetAllParams():
unscoped_blob_names[unscope_name(str(blob))] = True
root_xpu_id = opts['distributed']['first_xpu_id']
device = opts['distributed']['device']
caffe2_pb2_DEVICE =\
caffe2_pb2.CUDA if opts['distributed']['device'] == 'gpu'\
else caffe2_pb2.CPU
with core.NameScope('{}_{}'.format(device, root_xpu_id)):
with core.DeviceScope(core.DeviceOption(caffe2_pb2_DEVICE, 0)):
for unscoped_blob_name in unscoped_blob_names.keys():
scoped_blob_name = scoped_name(unscoped_blob_name)
if unscoped_blob_name not in blobs:
log.info('{:s} not found'.format(unscoped_blob_name))
continue
log.info(
'{:s} loaded from weights file into: {:s}'.format(
unscoped_blob_name, scoped_blob_name
)
)
if scoped_blob_name in ws_blobs:
ws_blob = workspace.FetchBlob(scoped_blob_name)
if not ws_blob.shape == blobs[unscoped_blob_name].shape:
log.info(
('Workspace blob {} with shape {} does '
'not match weights file shape {}').format(
unscoped_blob_name, ws_blob.shape,
blobs[unscoped_blob_name].shape)
)
else:
workspace.FeedBlob(
scoped_blob_name,
blobs[unscoped_blob_name].astype(
np.float32, copy=False))
else:
log.info('Skip initializing model parameters from file: {}'.format(
weights_file
))
log.info('Complete initialize_master_xpu_model_params')
return start_epoch, lr, best_metric
def broadcast_parameters(opts, model, num_xpus, broadcast_computed_param=False):
if num_xpus == 1:
log.info("only 1 device. Skip parameter broadcast")
return
all_params = [model.GetParams()]
if broadcast_computed_param:
all_params.append(model.GetComputedParams())
caffe2_pb2_DEVICE =\
caffe2_pb2.CUDA if opts['distributed']['device'] == 'gpu'\
else caffe2_pb2.CPU
for params in all_params:
assert len(params) % num_xpus == 0, \
"Current model dosen't match device number when loading checkpoint"
params_per_xpu = int(len(params) / num_xpus)
for idx in range(params_per_xpu):
blobs = [param for param in params[idx::params_per_xpu]]
data = workspace.FetchBlob(blobs[0])
log.info('Broadcasting {} to'.format(str(blobs[0])))
for i, p in enumerate(blobs[1:]):
log.info(' |-> {}'.format(str(p)))
with core.DeviceScope(core.DeviceOption(caffe2_pb2_DEVICE, i+1)):
workspace.FeedBlob(p, data)
log.info("Complete parameter broadcast")
def save_model_params(is_checkpoint, model, checkpoint_path, epoch, opts, best_metric):
# best_metric=float('-inf')
try:
save_model_params_blob(
model, checkpoint_path, epoch, opts, best_metric
)
except Exception as e:
log.warning('Exception from save_model_params {}'.format(str(e)))
return checkpoint_path
def save_model_params_blob(model, params_file, epoch, opts, best_metric):
# best_metric=float('-inf')
log.info("Saving model params...")
root_xpu_id = opts['distributed']['first_xpu_id']
device = opts['distributed']['device']
save_params = [str(param) for param in
model.GetParams('{}_{}'.format(device, root_xpu_id))]
save_computed_params = [str(param) for param in
model.GetComputedParams('{}_{}'
.format(device, root_xpu_id))]
save_blobs = {}
save_blobs['epoch'] = epoch
save_blobs['best_metric'] = best_metric
save_blobs['lr'] = \
workspace.FetchBlob('{}_{}/lr'.format(device, root_xpu_id))
for param in save_params + save_computed_params:
scoped_blob_name = str(param)
unscoped_blob_name = unscope_name(scoped_blob_name)
if unscoped_blob_name not in save_blobs:
save_blobs[unscoped_blob_name] = workspace.FetchBlob(
scoped_blob_name)
log.debug(
'{:s} -> {:s}'.format(scoped_blob_name, unscoped_blob_name))
log.info('to weights file {}'.format(params_file))
try:
with open(params_file, 'wb') as fwrite:
pickle.dump(dict(blobs=save_blobs), fwrite, pickle.HIGHEST_PROTOCOL)
except IOError as e:
log.error('I/O error({0}): {1}'.format(e.errno, e.strerror))
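# e.g. with the default '/' namescope separator:
#   unscope_name('gpu_0/conv1_w') -> 'conv1_w'
#   scoped_name('conv1_w')        -> '<current scope>/conv1_w'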
def unscope_name(blob_name):
return blob_name[blob_name.rfind(scope._NAMESCOPE_SEPARATOR) + 1:]
def scoped_name(blob_name):
return scope.CurrentNameScope() + blob_name
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import abstractmethod
class Meter(object):
@abstractmethod
def __init__(self, **kwargs):
pass
@abstractmethod
def Reset(self):
pass
@abstractmethod
def Add(self):
pass
@abstractmethod
def Compute(self):
pass
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import caffe2.contrib.playground.meter as Meter
from caffe2.python import workspace
import numpy as np
class ComputeTopKAccuracy(Meter.Meter):
# Python default arguments are evaluated once, when the function is
# defined, not each time the function is called. This means that if
# you use a mutable default argument and mutate it, the object stays
# mutated for all future calls to the function as well.
# def __init__(self, blob_name=['softmax', 'label'], opts=None, topk=1):
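# Minimal illustration of that pitfall (hypothetical helper):
#   def bad(names=[]):      # one list object shared by every call
#       names.append('x')
#       return names
#   bad()  # ['x']
#   bad()  # ['x', 'x'] -- the default was mutated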
def __init__(self, blob_name=None, opts=None, topk=1):
if blob_name is None:
blob_name = ['softmax', 'label']
self.blob_name = blob_name
self.opts = opts
self.topk = topk
self.iter = 0
self.value = 0
def Reset(self):
self.iter = 0
self.value = 0
def Add(self):
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
prefix = '{}_{}/'.format(self.opts['distributed']['device'], idx)
softmax = workspace.FetchBlob(prefix + self.blob_name[0])
labels = workspace.FetchBlob(prefix + self.blob_name[1])
output = np.squeeze(softmax)
target = np.squeeze(labels)
if len(output.shape) == 1:
output = output.reshape((1, output.shape[0]))
else:
assert len(output.shape) == 2, \
'wrong output size (1D or 2D expected)'
assert len(target.shape) == 1, 'wrong target size (1D expected)'
assert output.shape[0] == target.shape[0], \
'target and output do not match'
N = output.shape[0]
pred = np.argsort(-output, axis=1)[:, :self.topk]
correct = pred.astype(target.dtype) == np.repeat(
target.reshape((N, 1)), [self.topk], axis=1)
self.value += np.sum(correct[:, :self.topk])
self.iter += N
def Compute(self):
result = self.value / self.iter
self.Reset()
return result
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import abstractmethod
from caffe2.python import workspace
from caffe2.python import timeout_guard
from caffe2.python import data_parallel_model
from . import checkpoint as checkpoint
from . import ModuleRegister as ModuleRegister
from . import module_map as module_map
# instantiating a logger outside of distributed operators may trigger
# errors; the logger needs to be created in each individual operator instead.
import os
import inspect
import time
import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)
def initOpts(opts):
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=2', '--caffe2_gpu_memory_tracking=0'])
assert (opts['distributed']['num_gpus'] > 0 or
opts['distributed']['num_cpus'] > 0),\
"Need to specify num_gpus or num_cpus to decide which device to use."
trainWithCPU = (opts['distributed']['num_gpus'] == 0)
num_xpus = opts['distributed']['num_cpus'] if \
trainWithCPU else opts['distributed']['num_gpus']
first_xpu = opts['distributed']['first_cpu_id'] if \
trainWithCPU else opts['distributed']['first_gpu_id']
opts['distributed']['device'] = 'cpu' if trainWithCPU else 'gpu'
opts['model_param']['combine_spatial_bn'] =\
trainWithCPU and opts['model_param']['combine_spatial_bn']
opts['distributed']['num_xpus'] = num_xpus
opts['distributed']['first_xpu_id'] = first_xpu
opts['temp_var'] = {}
opts['temp_var']['metrics_output'] = {}
return opts
def initDefaultModuleMap():
registerModuleMap(module_map)
def registerModuleMap(module_map):
ModuleRegister.registerModuleMap(module_map)
def aquireDatasets(opts):
myAquireDataModule = ModuleRegister.getModule(opts['input']['input_name_py'])
return myAquireDataModule.get_input_dataset(opts)
def createTrainerClass(opts):
return ModuleRegister.constructTrainerClass(AnyExpTrainer, opts)
def runShardedTrainLoop(opts, myTrainFun):
start_epoch = 0
pretrained_model = opts['model_param']['pretrained_model']
if pretrained_model != '' and os.path.exists(pretrained_model):
# Only want to get start_epoch.
start_epoch, prev_checkpointed_lr, best_metric = \
checkpoint.initialize_params_from_file(
model=None,
weights_file=pretrained_model,
num_xpus=1,
opts=opts,
broadcast_computed_param=True,
reset_epoch=opts['model_param']['reset_epoch'],
)
log.info('start epoch: {}'.format(start_epoch))
pretrained_model = None if pretrained_model == '' else pretrained_model
ret = None
pretrained_model = ""
shard_results = []
for epoch in range(start_epoch,
opts['epoch_iter']['num_epochs'],
opts['epoch_iter']['num_epochs_per_flow_schedule']):
# checkpointing must be supported, or the multi-schedule run will
# always restart from the initial state
checkpoint_model = None if epoch == start_epoch else ret['model']
pretrained_model = None if epoch > start_epoch else pretrained_model
shard_results = []
# with LexicalContext('epoch{}_gang'.format(epoch),gang_schedule=False):
for shard_id in range(opts['distributed']['num_shards']):
opts['temp_var']['shard_id'] = shard_id
opts['temp_var']['pretrained_model'] = pretrained_model
opts['temp_var']['checkpoint_model'] = checkpoint_model
opts['temp_var']['epoch'] = epoch
opts['temp_var']['start_epoch'] = start_epoch
shard_ret = myTrainFun(opts)
shard_results.append(shard_ret)
ret = None
# always take only shard_0's return value
for shard_ret in shard_results:
if shard_ret is not None:
ret = shard_ret
opts['temp_var']['metrics_output'] = ret['metrics']
break
log.info('ret is: {}'.format(str(ret)))
return ret
def trainFun():
def simpleTrainFun(opts):
trainerClass = createTrainerClass(opts)
trainer = trainerClass(opts)
return trainer.buildModelAndTrain(opts)
return simpleTrainFun
def initialize_params_from_file(*args, **kwargs):
return checkpoint.initialize_params_from_file(*args, **kwargs)
class AnyExpTrainer(object):
def __init__(self, opts):
import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)
self.log = log
self.opts = opts
self.train_dataset = None
self.test_dataset = None
self.train_df = None
self.test_df = None
self.metrics = {}
self.plotsIngredients = []
self.record_epochs = []
self.samples_per_sec = []
self.secs_per_train = []
self.metrics_output = opts['temp_var']['metrics_output']
first_xpu = opts['distributed']['first_xpu_id']
num_xpus = opts['distributed']['num_xpus']
self.xpus = range(first_xpu, first_xpu + num_xpus)
self.total_batch_size = \
self.opts['epoch_iter']['batch_per_device'] * \
self.opts['distributed']['num_xpus'] * \
self.opts['distributed']['num_shards']
self.epoch_iterations = \
self.opts['epoch_iter']['num_train_sample_per_epoch'] // \
self.total_batch_size
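# Example with assumed values: batch_per_device=32, num_xpus=8,
# num_shards=2 gives total_batch_size = 32 * 8 * 2 = 512; with
# 1,281,024 train samples per epoch, epoch_iterations = 2502.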
if len(opts['input']['datasets']) > 0:
self.train_df = opts['input']['datasets'][0]
if len(opts['input']['datasets']) == 2:
self.test_df = opts['input']['datasets'][1]
# at this point, the instance of this class becomes many instances
# running on different machines. Most of their attributes are the
# same, but the shard_ids are different.
self.shard_id = opts['temp_var']['shard_id']
self.start_epoch = opts['temp_var']['start_epoch']
self.epoch = opts['temp_var']['epoch']
self.epochs_to_run = opts['epoch_iter']['num_epochs_per_flow_schedule']
log.info('opts: {}'.format(str(opts)))
@abstractmethod
def get_input_dataset(self, opts):
pass
@abstractmethod
def get_model_input_fun(self):
pass
@abstractmethod
def init_model(self):
pass
def init_metrics(self):
metrics = self.opts['output']['metrics']
for metric in metrics:
meterClass = self.getMeterClass(metric['meter_py'])
# log.info('metric.meter_kargs {}'.format(metric.meter_kargs))
# log.info('type meter_kargs {}'.format(type(metric.meter_kargs)))
meterInstance = meterClass(opts=self.opts, **metric['meter_kargs'])
self.add_metric(metric['name'], meterInstance, metric['is_train'])
def getMeterClass(self, meterName):
return ModuleRegister.getClassFromModule(meterName, meterName)
def add_metric(self, name, calculator, is_train):
metrics = self.metrics
metrics[name] = {}
metrics[name]['calculator'] = calculator
metrics[name]['is_train'] = is_train
metrics[name]['output'] = []
def extendMetricsOutput(self):
metrics_output = self.metrics_output
if not metrics_output:
metrics_output['epochs'] = self.record_epochs
metrics_output['samples_per_sec'] = self.samples_per_sec
metrics_output['secs_per_train'] = self.secs_per_train
for metric, value in self.metrics.items():
metrics_output[metric] = value['output']
else:
metrics_output['epochs'].extend(self.record_epochs)
metrics_output['samples_per_sec'].extend(self.samples_per_sec)
metrics_output['secs_per_train'].extend(self.secs_per_train)
for metric, value in self.metrics.items():
metrics_output[metric].extend(value['output'])
@abstractmethod
def init_plots(self):
pass
def add_plot(self, x, x_title, ys, y_title):
plotsIngredients = self.plotsIngredients
aPlotIngredients = {}
aPlotIngredients['x'] = x
aPlotIngredients['x_title'] = x_title
aPlotIngredients['ys'] = ys
aPlotIngredients['y_title'] = y_title
plotsIngredients.append(aPlotIngredients)
@abstractmethod
def init_logs(self):
pass
def list_of_epochs(self):
iter_end_point = min(self.opts['epoch_iter']['num_epochs'],
self.epoch +
self.opts['epoch_iter']['num_epochs_per_flow_schedule'])
return range(self.epoch, iter_end_point)
def list_of_epoch_iters(self):
return range(0, self.epoch_iterations)
@abstractmethod
def fun_per_epoch_b4RunNet(self, epoch):
pass
@abstractmethod
def fun_per_epoch_aftRunNet(self, epoch):
pass
def checkpoint(self, epoch):
self.model_path = checkpoint.save_model_params(
True, self.train_model, self.gen_checkpoint_path(True, epoch + 1),
epoch + 1, self.opts, float('-inf'))
def gen_checkpoint_path(self, is_checkpoint, epoch):
if (is_checkpoint):
filename = "model_checkpoint_epoch{}.pkl".format(epoch)
else:
filename = "model_final.pkl"
return self.opts['output']['checkpoint_folder'] + filename
# @abstractmethod
# def gen_checkpoint_path(self, is_checkpoint, epoch):
# pass
@abstractmethod
def fun_per_iter_b4RunNet(self, epoch, epoch_iter):
pass
@abstractmethod
def fun_per_iter_aftRunNetB4Test(self, epoch, epoch_iter):
pass
@abstractmethod
def fun_per_iter_aftRunNetAftTest(self, epoch, epoch_iter):
pass
@abstractmethod
def fun_conclude_operator(self, opts):
pass
def createMetricsPlotsModelsOutputs(self):
self.extendMetricsOutput()
self.model_output = self.model_path
@abstractmethod
def assembleAllOutputs(self):
pass
@abstractmethod
def gen_input_builder_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_param_update_builder_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_optimizer_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_rendezvous_ctx(self, model, dataset, is_train):
pass
# @abstractmethod
def planning_output(self):
self.init_metrics()
self.init_plots()
self.init_logs()
def prep_data_parallel_models(self):
self.prep_a_data_parallel_model(self.train_model,
self.train_dataset, True)
self.prep_a_data_parallel_model(self.test_model,
self.test_dataset, False)
def prep_a_data_parallel_model(self, model, dataset, is_train):
if model is None:
return  # nothing to prepare
log.info('in prep_a_data_parallel_model')
param_update = \
self.gen_param_update_builder_fun(model, dataset, is_train) \
if self.gen_param_update_builder_fun is not None else None
log.info('in prep_a_data_parallel_model param_update done ')
optimizer = \
self.gen_optimizer_fun(model, dataset, is_train) \
if self.gen_optimizer_fun is not None else None
log.info('in prep_a_data_parallel_model optimizer done ')
max_ops = self.opts['model_param']['max_concurrent_distributed_ops']
data_parallel_model.Parallelize(
model,
input_builder_fun=self.gen_input_builder_fun(model, dataset, is_train),
forward_pass_builder_fun=self.gen_forward_pass_builder_fun(
model, dataset, is_train),
param_update_builder_fun=param_update,
optimizer_builder_fun=optimizer,
devices=self.xpus,
rendezvous=self.gen_rendezvous_ctx(model, dataset, is_train),
broadcast_computed_params=False,
optimize_gradient_memory=self.opts['model_param']['memonger'],
use_nccl=self.opts['model_param']['cuda_nccl'],
max_concurrent_distributed_ops=max_ops,
cpu_device=(self.opts['distributed']['device'] == 'cpu'),
# "shared model" will only keep model parameters for cpu_0 or gpu_0
# will cause issue when initialize each gpu_0, gpu_1, gpu_2 ...
# shared_model=(self.opts['distributed']['device'] == 'cpu'),
combine_spatial_bn=self.opts['model_param']['combine_spatial_bn'],
)
log.info('in prep_a_data_parallel_model Parallelize done ')
# log.info("Current blobs in workspace: {}".format(workspace.Blobs()))
workspace.RunNetOnce(model.param_init_net)
log.info('in prep_a_data_parallel_model RunNetOnce done ')
workspace.CreateNet(model.net)
log.info('in prep_a_data_parallel_model CreateNet done ')
def loadCheckpoint(self):
opts = self.opts
previous_checkpoint = opts['temp_var']['checkpoint_model']
pretrained_model = opts['temp_var']['pretrained_model']
num_xpus = opts['distributed']['num_xpus']
if (previous_checkpoint is not None):
if os.path.exists(previous_checkpoint):
log.info('Load previous checkpoint:{}'.format(
previous_checkpoint
))
start_epoch, prev_checkpointed_lr, _best_metric = \
checkpoint.initialize_params_from_file(
model=self.train_model,
weights_file=previous_checkpoint,
num_xpus=num_xpus,
opts=opts,
broadcast_computed_param=True,
reset_epoch=False,
)
elif pretrained_model is not None and os.path.exists(pretrained_model):
log.info("Load pretrained model: {}".format(pretrained_model))
start_epoch, prev_checkpointed_lr, best_metric = \
checkpoint.initialize_params_from_file(
model=self.train_model,
weights_file=pretrained_model,
num_xpus=num_xpus,
opts=opts,
broadcast_computed_param=True,
reset_epoch=opts['model_param']['reset_epoch'],
)
data_parallel_model.FinalizeAfterCheckpoint(self.train_model)
def buildModelAndTrain(self, opts):
log.info('in buildModelAndTrain, trainer_input: {}'.format(str(opts)))
log.info("check type self: {}".format(type(self)))
log.info("check self dir: {}".format(dir(self)))
log.info("check self get_input_dataset methods: {}".
format(inspect.getsource(self.get_input_dataset)))
log.info("check self gen_input_builder_fun method: {}".
format(inspect.getsource(self.gen_input_builder_fun)))
log.info("check self gen_forward_pass_builder_fun method: {}".
format(inspect.getsource(self.gen_forward_pass_builder_fun)))
if self.gen_param_update_builder_fun is not None:
log.info("check self gen_param_update_builder_fun method: {}".
format(inspect.getsource(self.gen_param_update_builder_fun)))
else:
log.info("check self gen_optimizer_fun method: {}".
format(inspect.getsource(self.gen_optimizer_fun)))
log.info("check self assembleAllOutputs method: {}".
format(inspect.getsource(self.assembleAllOutputs)))
self.get_model_input_fun()
self.init_model()
self.planning_output()
self.prep_data_parallel_models()
self.loadCheckpoint()
for epoch in self.list_of_epochs():
log.info("start training epoch {}".format(epoch))
self.fun_per_epoch_b4RunNet(epoch)
for epoch_iter in self.list_of_epoch_iters():
self.iter_start_time = time.time()
self.fun_per_iter_b4RunNet(epoch, epoch_iter)
self.run_training_net()
self.fun_per_iter_aftRunNetB4Test(epoch, epoch_iter)
self.iter_end_time = time.time()
if (epoch_iter %
opts['epoch_iter']['num_train_iteration_per_test'] == 0):
secs_per_train = (self.iter_end_time - self.iter_start_time)
self.secs_per_train.append(secs_per_train)
sample_trained = self.total_batch_size
samples_per_sec = sample_trained / secs_per_train
self.samples_per_sec.append(samples_per_sec)
self.fract_epoch = (epoch +
float(epoch_iter) / self.epoch_iterations)
self.record_epochs.append(self.fract_epoch)
for key in self.metrics:
metric = self.metrics[key]
if not metric['is_train']:
continue
metric['calculator'].Add()
metric['output'].append(metric['calculator'].Compute())
self.test_loop_start_time = time.time()
for _test_iter in range(0, opts['epoch_iter']['num_test_iter']):
timeout = 2000.0
with timeout_guard.CompleteInTimeOrDie(timeout):
workspace.RunNet(self.test_model.net.Proto().name)
for key in self.metrics:
metric = self.metrics[key]
if metric['is_train']:
continue
metric['calculator'].Add()
self.test_loop_end_time = time.time()
self.sec_per_test_loop = \
self.test_loop_end_time - self.test_loop_start_time
for metric in self.metrics.values():
if metric['is_train']:
continue
metric['output'].append(metric['calculator'].Compute())
logStr = 'epoch:{}/{} iter:{}/{} secs_per_train:{} '.format(
self.fract_epoch, self.opts['epoch_iter']['num_epochs'],
epoch_iter, self.epoch_iterations, secs_per_train)
logStr += 'samples_per_sec:{} loop {} tests takes {} sec'.format(
samples_per_sec, opts['epoch_iter']['num_test_iter'],
self.sec_per_test_loop)
for metric, value in self.metrics.items():
logStr += ' {}:{} '.format(metric, value['output'][-1])
log.info('Iter Stats: {}'.format(logStr))
self.fun_per_iter_aftRunNetAftTest(epoch, epoch_iter)
self.checkpoint(epoch)
self.fun_per_epoch_aftRunNet(epoch)
self.fun_conclude_operator()
self.createMetricsPlotsModelsOutputs()
return self.assembleAllOutputs()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Input
import caffe2.contrib.playground.resnetdemo.\
gfs_IN1k as gfs_IN1k # noqa
# model
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet as IN1k_resnet # noqa
# FORWARD_PASS
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_forward as caffe2_resnet50_default_forward # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_forward as explicit_resnet_forward # noqa
# PARAMETER_UPDATE
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_param_update as caffe2_resnet50_default_param_update # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_param_update as explicit_resnet_param_update # noqa
# RENDEZVOUS
import caffe2.contrib.playground.resnetdemo.\
rendezvous_filestore as rendezvous_filestore # noqa
# OUTPUT
import caffe2.contrib.playground.\
output_generator as output_generator # noqa
# METERS
# for meters, use the class name as your module name in this map
import caffe2.contrib.playground.\
compute_loss as ComputeLoss # noqa
import caffe2.contrib.playground.\
compute_topk_accuracy as ComputeTopKAccuracy # noqa
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import timeout_guard
def fun_conclude_operator(self):
# Ensure the program exits. This is to "fix" some unknown problems
# that sometimes cause the job to get stuck.
timeout_guard.EuthanizeIfNecessary(600.0)
def assembleAllOutputs(self):
output = {}
output['train_model'] = self.train_model
output['test_model'] = self.test_model
output['model'] = self.model_output
output['metrics'] = self.metrics_output
return output
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import caffe2.contrib.playground.AnyExp as AnyExp
import logging
logging.basicConfig()
log = logging.getLogger("AnyExpOnTerm")
log.setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Any Experiment training.')
parser.add_argument("--parameters-json", type=json.loads,
help='model options in json format', dest="params")
args = parser.parse_args()
opts = args.params['opts']
opts = AnyExp.initOpts(opts)
log.info('opts is: {}'.format(str(opts)))
AnyExp.initDefaultModuleMap()
opts['input']['datasets'] = AnyExp.aquireDatasets(opts)
# defined this way so that AnyExp.trainFun(opts) can be replaced with
# some other customized training function.
ret = AnyExp.runShardedTrainLoop(opts, AnyExp.trainFun())
log.info('ret is: {}'.format(str(ret)))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import logging
logging.basicConfig()
log = logging.getLogger("ModuleRegister")
log.setLevel(logging.DEBUG)
MODULE_MAPS = []
def registerModuleMap(module_map):
MODULE_MAPS.append(module_map)
log.info("ModuleRegister get modules from ModuleMap content: {}".
format(inspect.getsource(module_map)))
def constructTrainerClass(myTrainerClass, opts):
log.info("ModuleRegister, myTrainerClass name is {}".
format(myTrainerClass.__name__))
log.info("ModuleRegister, myTrainerClass type is {}".
format(type(myTrainerClass)))
log.info("ModuleRegister, myTrainerClass dir is {}".
format(dir(myTrainerClass)))
myInitializeModelModule = getModule(opts['model']['model_name_py'])
log.info("ModuleRegister, myInitializeModelModule dir is {}".
format(dir(myInitializeModelModule)))
myTrainerClass.init_model = myInitializeModelModule.init_model
myTrainerClass.run_training_net = myInitializeModelModule.run_training_net
myTrainerClass.fun_per_iter_b4RunNet = \
myInitializeModelModule.fun_per_iter_b4RunNet
myTrainerClass.fun_per_epoch_b4RunNet = \
myInitializeModelModule.fun_per_epoch_b4RunNet
myInputModule = getModule(opts['input']['input_name_py'])
log.info("ModuleRegister, myInputModule {} dir is {}".
format(opts['input']['input_name_py'], myInputModule.__name__))
# Override input methods of the myTrainerClass class
myTrainerClass.get_input_dataset = myInputModule.get_input_dataset
myTrainerClass.get_model_input_fun = myInputModule.get_model_input_fun
myTrainerClass.gen_input_builder_fun = myInputModule.gen_input_builder_fun
# myForwardPassModule = GetForwardPassModule(opts)
myForwardPassModule = getModule(opts['model']['forward_pass_py'])
myTrainerClass.gen_forward_pass_builder_fun = \
myForwardPassModule.gen_forward_pass_builder_fun
myParamUpdateModule = getModule(opts['model']['parameter_update_py'])
myTrainerClass.gen_param_update_builder_fun =\
myParamUpdateModule.gen_param_update_builder_fun \
if myParamUpdateModule is not None else None
myOptimizerModule = getModule(opts['model']['optimizer_py'])
myTrainerClass.gen_optimizer_fun = \
myOptimizerModule.gen_optimizer_fun \
if myOptimizerModule is not None else None
myRendezvousModule = getModule(opts['model']['rendezvous_py'])
myTrainerClass.gen_rendezvous_ctx = \
myRendezvousModule.gen_rendezvous_ctx \
if myRendezvousModule is not None else None
# override output module
myOutputModule = getModule(opts['output']['gen_output_py'])
log.info("ModuleRegister, myOutputModule is {}".
format(myOutputModule.__name__))
myTrainerClass.fun_conclude_operator = myOutputModule.fun_conclude_operator
myTrainerClass.assembleAllOutputs = myOutputModule.assembleAllOutputs
return myTrainerClass
def getModule(moduleName):
log.info("MODULE_MAPS content {}".format(str(MODULE_MAPS)))
myModule = None
for ModuleMap in MODULE_MAPS:
log.info("iterate through MODULE_MAPS content {}".
format(str(ModuleMap)))
for name, obj in inspect.getmembers(ModuleMap):
log.info("iterate through MODULE_MAPS a name {}".format(str(name)))
if name == moduleName:
log.info("AnyExp get module {} with source:{}".
format(moduleName, inspect.getsource(obj)))
myModule = obj
return myModule
return None
def getClassFromModule(moduleName, className):
myClass = None
for ModuleMap in MODULE_MAPS:
for name, obj in inspect.getmembers(ModuleMap):
if name == moduleName:
log.info("ModuleRegistry from module {} get class {} of source:{}".
format(moduleName, className, inspect.getsource(obj)))
myClass = getattr(obj, className)
return myClass
return None
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import caffe2.contrib.playground.meter as Meter
from caffe2.python import workspace
class ComputeLoss(Meter.Meter):
def __init__(self, opts=None, blob_name=''):
self.blob_name = blob_name
self.opts = opts
self.iter = 0
self.value = 0
def Reset(self):
self.iter = 0
self.value = 0
def Add(self):
"""Average values of a blob on each gpu"""
value = 0
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
value += workspace.FetchBlob('{}_{}/{}'.
format(self.opts['distributed']['device'], idx, self.blob_name))
self.value += value
self.iter += 1
def Compute(self):
result = self.opts['distributed']['num_shards'] * self.value / self.iter
self.Reset()
return result
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
def add_parameter_update_ops(model):
model.AddWeightDecay(1e-4)
ITER = model.Iter("ITER")
stepsz = int(30 *
self.opts['epoch_iter']['num_train_sample_per_epoch'] /
self.total_batch_size)
LR = model.net.LearningRate(
[ITER],
"lr",
base_lr=self.opts['model_param']['base_learning_rate'],
policy="step",
stepsize=stepsz,
gamma=0.1,
)
params = model.GetParams()
assert len(params) > 0
for param in params:
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
# Update param_grad and param_momentum in place
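# A sketch of the nesterov update this op applies (illustrative only;
# see Caffe2's MomentumSGDUpdate operator for the authoritative semantics):
#   m_new  = momentum * m + lr * grad
#   param -= (1 + momentum) * m_new - momentum * m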
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
momentum=0.9,
nesterov=1
)
return add_parameter_update_ops
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)
# For more depths, add the block config here
BLOCK_CONFIG = {
18: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
200: (3, 32, 36, 3),
264: (3, 64, 36, 3),
284: (3, 32, 64, 3),
}
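# Sanity check of the depth arithmetic, e.g. for ResNet-50:
# (3 + 4 + 6 + 3) = 16 bottleneck blocks * 3 convs each = 48 layers,
# plus conv1 and the final FC = 50 weighted layers.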
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
split = 'train' if is_train else 'test'
opts = self.opts
def model_creator(model, loss_scale):
model, softmax, loss = resnet_imagenet_create_model(
model=model,
data='data',
labels='label',
split=split,
opts=opts,
dataset=dataset,
)
return [loss]
return model_creator
def resnet_imagenet_create_model(model, data, labels, split, opts, dataset):
model_helper = ResNetModelHelper(model, split, opts)
opts_depth = opts['model_param']['num_layer']
log.info(' | ResNet-{} Imagenet'.format(opts_depth))
assert opts_depth in BLOCK_CONFIG.keys(), \
'Block config is not defined for specified model depth. Please check.'
(n1, n2, n3, n4) = BLOCK_CONFIG[opts_depth]
num_features = 2048
residual_block = model_helper.bottleneck_block
if opts_depth in [18, 34]:
num_features = 512
residual_block = model_helper.basic_block
num_classes = 1000
conv_blob = model.Conv(
data, 'conv1', 3, 64, 7, stride=2, pad=3, weight_init=('MSRAFill', {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=0
)
test_mode = False
if split in ['test', 'val']:
test_mode = True
bn_blob = model.SpatialBN(
conv_blob, 'res_conv1_bn', 64,
# does not appear to affect test_loss performance
# epsilon=1e-3,
epsilon=opts['model_param']['bn_epsilon'],
# momentum=0.1,
momentum=opts['model_param']['bn_momentum'],
is_test=test_mode,
)
relu_blob = model.Relu(bn_blob, bn_blob)
max_pool = model.MaxPool(relu_blob, 'pool1', kernel=3, stride=2, pad=1)
# TODO: This can be further optimized by passing dim_in, dim_out = features,
# dim_out = features * 4
if opts_depth in [50, 101, 152, 200, 264, 284]:
blob_in, dim_in = model_helper.residual_layer(
residual_block, max_pool, 64, 256, stride=1, num_blocks=n1,
prefix='res2', dim_inner=64
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 512, stride=2, num_blocks=n2,
prefix='res3', dim_inner=128
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 1024, stride=2, num_blocks=n3,
prefix='res4', dim_inner=256
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 2048, stride=2, num_blocks=n4,
prefix='res5', dim_inner=512
)
elif opts_depth in [18, 34]:
blob_in, dim_in = model_helper.residual_layer(
residual_block, max_pool, 64, 64, stride=1, num_blocks=n1,
prefix='res2',
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 128, stride=2, num_blocks=n2,
prefix='res3',
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 256, stride=2, num_blocks=n3,
prefix='res4',
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 512, stride=2, num_blocks=n4,
prefix='res5',
)
pool_blob = model.AveragePool(blob_in, 'pool5', kernel=7, stride=1)
loss_scale = 1. / opts['distributed']['num_xpus'] / \
opts['distributed']['num_shards']
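# Gradients are summed across devices and shards, so scaling each
# replica's loss by 1 / (num_xpus * num_shards) keeps the aggregated
# loss an average rather than a sum.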
loss = None
fc_blob = model.FC(
pool_blob, 'pred', num_features, num_classes,
# does not appear to affect test_loss performance
# weight_init=('GaussianFill', {'std': opts.fc_init_std}),
# bias_init=('ConstantFill', {'value': 0.})
weight_init=None,
bias_init=None)
softmax, loss = model.SoftmaxWithLoss(
[fc_blob, labels],
['softmax', 'loss'],
scale=loss_scale)
model.Accuracy(['softmax', labels], 'accuracy')
return model, softmax, loss
class ResNetModelHelper():
def __init__(self, model, split, opts):
self.model = model
self.split = split
self.opts = opts
# shortcut type B
def add_shortcut(self, blob_in, dim_in, dim_out, stride, prefix):
if dim_in == dim_out:
return blob_in
conv_blob = self.model.Conv(
blob_in, prefix, dim_in, dim_out, kernel=1,
stride=stride,
weight_init=("MSRAFill", {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=1
)
test_mode = False
if self.split in ['test', 'val']:
test_mode = True
bn_blob = self.model.SpatialBN(
conv_blob, prefix + "_bn", dim_out,
# epsilon=1e-3,
# momentum=0.1,
epsilon=self.opts['model_param']['bn_epsilon'],
momentum=self.opts['model_param']['bn_momentum'],
is_test=test_mode,
)
return bn_blob
def conv_bn(
self, blob_in, dim_in, dim_out, kernel, stride, prefix, group=1, pad=1,
):
conv_blob = self.model.Conv(
blob_in, prefix, dim_in, dim_out, kernel, stride=stride,
pad=pad, group=group,
weight_init=("MSRAFill", {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=1
)
test_mode = False
if self.split in ['test', 'val']:
test_mode = True
bn_blob = self.model.SpatialBN(
conv_blob, prefix + "_bn", dim_out,
epsilon=self.opts['model_param']['bn_epsilon'],
momentum=self.opts['model_param']['bn_momentum'],
is_test=test_mode,
)
return bn_blob
def conv_bn_relu(
self, blob_in, dim_in, dim_out, kernel, stride, prefix, pad=1, group=1,
):
bn_blob = self.conv_bn(
blob_in, dim_in, dim_out, kernel, stride, prefix, group=group,
pad=pad
)
return self.model.Relu(bn_blob, bn_blob)
    # 3(a) this block uses a multi-way group conv implementation that splits blobs
def multiway_bottleneck_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
)
conv_blob = self.model.GroupConv_Deprecated(
blob_out, prefix + "_branch2b", dim_inner, dim_inner, kernel=3,
stride=stride, pad=1, group=group, weight_init=("MSRAFill", {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=1
)
test_mode = False
if self.split in ['test', 'val']:
test_mode = True
        bn_blob = self.model.SpatialBN(
            # the BN channel count must match the group conv output
            # (dim_inner, not dim_out)
            conv_blob, prefix + "_branch2b_bn", dim_inner,
epsilon=self.opts['model_param']['bn_epsilon'],
momentum=self.opts['model_param']['bn_momentum'], is_test=test_mode,
)
relu_blob = self.model.Relu(bn_blob, bn_blob)
bn_blob = self.conv_bn(
relu_blob, dim_inner, dim_out, 1, 1, prefix + "_branch2c", pad=0
)
if self.opts['model_param']['custom_bn_init']:
self.model.param_init_net.ConstantFill(
[bn_blob + '_s'], bn_blob + '_s',
value=self.opts['model_param']['bn_init_gamma'])
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
    # 3(c) this block uses the cuDNN group conv op
def group_bottleneck_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
)
blob_out = self.conv_bn_relu(
blob_out, dim_inner, dim_inner, 3, stride, prefix + "_branch2b",
group=group
)
bn_blob = self.conv_bn(
blob_out, dim_inner, dim_out, 1, 1, prefix + "_branch2c", pad=0
)
if self.opts['model_param']['custom_bn_init']:
self.model.param_init_net.ConstantFill(
[bn_blob + '_s'], bn_blob + '_s',
value=self.opts['model_param']['bn_init_gamma'])
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
    # bottleneck residual layer for the 50-, 101-, and 152-layer networks
def bottleneck_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group=None
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
)
blob_out = self.conv_bn_relu(
blob_out, dim_inner, dim_inner, 3, stride, prefix + "_branch2b",
)
bn_blob = self.conv_bn(
blob_out, dim_inner, dim_out, 1, 1, prefix + "_branch2c", pad=0
)
if self.opts['model_param']['custom_bn_init']:
self.model.param_init_net.ConstantFill(
[bn_blob + '_s'], bn_blob + '_s',
value=self.opts['model_param']['bn_init_gamma'])
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
    # basic layer for the 18- and 34-layer networks and the CIFAR networks
def basic_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner=None,
group=None,
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_out, 3, stride, prefix + "_branch2a"
)
bn_blob = self.conv_bn(
blob_out, dim_out, dim_out, 3, 1, prefix + "_branch2b", pad=1
)
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
def residual_layer(
self, block_fn, blob_in, dim_in, dim_out, stride, num_blocks, prefix,
dim_inner=None, group=None
):
        # prefix is something like res2, res3, etc.
        # each res layer stacks num_blocks blocks
for idx in range(num_blocks):
block_prefix = "{}_{}".format(prefix, idx)
block_stride = 2 if (idx == 0 and stride == 2) else 1
blob_in = block_fn(
blob_in, dim_in, dim_out, block_stride, block_prefix, dim_inner,
group
)
dim_in = dim_out
return blob_in, dim_in
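def _example_resnet50_stages(helper, max_pool):
    # Hedged usage sketch (not part of the original source): ResNet-50
    # stacks bottleneck blocks per stage with block counts 3, 4, 6, 3.
    # `helper` is assumed to be a ResNetModelHelper instance and `max_pool`
    # the blob produced by the conv1/pool1 stem above.
    blob, dim = helper.residual_layer(
        helper.bottleneck_block, max_pool, 64, 256, stride=1,
        num_blocks=3, prefix='res2', dim_inner=64)
    blob, dim = helper.residual_layer(
        helper.bottleneck_block, blob, dim, 512, stride=2,
        num_blocks=4, prefix='res3', dim_inner=128)
    blob, dim = helper.residual_layer(
        helper.bottleneck_block, blob, dim, 1024, stride=2,
        num_blocks=6, prefix='res4', dim_inner=256)
    blob, dim = helper.residual_layer(
        helper.bottleneck_block, blob, dim, 2048, stride=2,
        num_blocks=3, prefix='res5', dim_inner=512)
    return blob, dim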
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import workspace, cnn, core
from caffe2.python import timeout_guard
from caffe2.proto import caffe2_pb2
def init_model(self):
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet",
use_cudnn=True,
cudnn_exhaustive_search=False
)
self.train_model = train_model
test_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet_test",
use_cudnn=False,
cudnn_exhaustive_search=False,
init_params=False,
)
self.test_model = test_model
self.log.info("Model creation completed")
def fun_per_epoch_b4RunNet(self, epoch):
pass
def fun_per_iter_b4RunNet(self, epoch, epoch_iter):
learning_rate = 0.05
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
caffe2_pb2_device = caffe2_pb2.CUDA if \
self.opts['distributed']['device'] == 'gpu' else \
caffe2_pb2.CPU
with core.DeviceScope(core.DeviceOption(caffe2_pb2_device, idx)):
workspace.FeedBlob(
'{}_{}/lr'.format(self.opts['distributed']['device'], idx),
np.array(learning_rate, dtype=np.float32)
)
def run_training_net(self):
timeout = 2000.0
with timeout_guard.CompleteInTimeOrDie(timeout):
workspace.RunNet(self.train_model.net.Proto().name)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
label="label",
)
model.Accuracy([softmax, "label"], "accuracy")
my_loss_scale = 1. / self.opts['distributed']['num_xpus'] / \
self.opts['distributed']['num_shards']
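        # e.g. 8 xpus per shard and 2 shards -> scale = 1/16; summing the
        # scaled per-device losses then averages over all 16 workers.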
loss = model.Scale(loss, scale=my_loss_scale)
return [loss]
return create_resnet50_model_ops
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Example 1: using GFS as the input source.
def gen_input_builder_fun(self, model, dataset, is_train):
if is_train:
input_path = self.opts['input']['train_input_path']
else:
input_path = self.opts['input']['test_input_path']
reader = model.CreateDB("reader",
db=input_path,
db_type='lmdb',
shard_id=self.shard_id,
num_shards=self.opts['distributed']['num_shards'],)
def AddImageInput(model, reader, batch_size, img_size):
'''
Image input operator that loads data from reader and
applies certain transformations to the images.
'''
data, label = model.ImageInput(
reader,
["data", "label"],
batch_size=batch_size,
use_caffe_datum=True,
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=True
)
data = model.StopGradient(data, data)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=self.opts['epoch_iter']['batch_per_device'],
img_size=self.opts['input']['imsize'],
)
return add_image_input
def get_input_dataset(opts):
return []
def get_model_input_fun(self):
pass
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python import dyndep
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
# A rendezvous should NOT be unique per participant: the rendezvous
# operators on different shards must share the same run_id. Say we have
# two shards and both create a rendezvous with run_id "aaa_bbb_epoch_09";
# that rendezvous waits for two shards to join because max_shards is set
# to 2. If each shard instead created a rendezvous with a different run_id,
# each would wait on a different rendezvous, they would never find each
# other, and both would eventually time out.
def gen_rendezvous_ctx(self, model, dataset, is_train):
if self.opts['distributed']['num_shards'] < 2:
return None
    # there are issues when trying to set this up with more shards
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], ["store_handler"],
path="/tmp",
prefix="epoch.{}".format(self.epoch),
)
)
rendezvous = dict(
kv_handler="store_handler",
shard_id=self.shard_id,
num_shards=self.opts['distributed']['num_shards'],
engine="GLOO",
# transport=args.distributed_transport,
transport="tcp",
# interface=interfaces[0],
interface=[],
exit_nets=None) if is_train else None
return rendezvous
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
# from sherlok
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, idx)):
workspace.CreateBlob('{}_{}/lr'.
format(self.opts['distributed']['device'], idx))
def add_parameter_update_ops(model):
model.Iter("ITER")
weight_decay = model.param_init_net.ConstantFill(
[], 'weight_decay', shape=[1],
value=self.opts['model_param']['weight_decay']
)
weight_decay_bn = model.param_init_net.ConstantFill(
[], 'weight_decay_bn', shape=[1],
value=self.opts['model_param']['weight_decay_bn']
)
one = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0
)
'''
Add the momentum-SGD update.
'''
params = model.GetParams()
assert(len(params) > 0)
for param in params:
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
if '_bn' in str(param):
model.WeightedSum(
[param_grad, one, param, weight_decay_bn], param_grad
)
else:
model.WeightedSum(
[param_grad, one, param, weight_decay], param_grad
)
# Update param_grad and param_momentum in place
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, 'lr', param],
[param_grad, param_momentum, param],
momentum=0.9,
nesterov=1
)
return add_parameter_update_ops
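def _reference_momentum_sgd_step(param, grad, momentum_buf, lr,
                                 weight_decay, momentum=0.9):
    # Hedged numpy-style sketch (not in the original source) of what the ops
    # built above compute per parameter: WeightedSum folds L2 weight decay
    # into the gradient, then MomentumSGDUpdate applies Nesterov momentum.
    # The formulas follow Caffe2's MomentumSGD semantics as best understood
    # here; works elementwise on numpy arrays or plain floats.
    grad = grad + weight_decay * param  # WeightedSum([grad, ONE, param, wd])
    momentum_buf = momentum * momentum_buf + lr * grad
    step = lr * grad + momentum * momentum_buf  # nesterov=1 look-ahead step
    return param - step, grad, momentum_buf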
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
from multiprocessing import Process, Queue
import numpy as np
import os
import pickle
import tempfile
import shutil
from caffe2.python import core, workspace, dyndep
import caffe2.python.hypothesis_test_util as hu
from gloo.python import IoError
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:redis_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu")
op_engine = 'GLOO'
class TemporaryDirectory:
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
return self.tmpdir
def __exit__(self, type, value, traceback):
shutil.rmtree(self.tmpdir)
class TestCase(hu.HypothesisTestCase):
test_counter = 0
sync_counter = 0
def run_test_locally(self, fn, device_option=None, **kwargs):
# Queue for assertion errors on subprocesses
queue = Queue()
# Capture any exception thrown by the subprocess
def run_fn(*args, **kwargs):
try:
with core.DeviceScope(device_option):
fn(*args, **kwargs)
workspace.ResetWorkspace()
queue.put(True)
except Exception as ex:
queue.put(ex)
# Start N processes in the background
procs = []
for i in range(kwargs['comm_size']):
kwargs['comm_rank'] = i
proc = Process(
target=run_fn,
kwargs=kwargs)
proc.start()
procs.append(proc)
# Test complete, join background processes
while len(procs) > 0:
proc = procs.pop(0)
while proc.is_alive():
proc.join(10)
# Raise exception if we find any. Otherwise each worker
# should put a True into the queue
# Note that the following is executed ALSO after
# the last process was joined, so if ANY exception
# was raised, it will be re-raised here.
self.assertFalse(queue.empty(), "Job failed without a result")
o = queue.get()
if isinstance(o, Exception):
raise o
else:
self.assertTrue(o)
def run_test_distributed(self, fn, device_option=None, **kwargs):
comm_rank = os.getenv('COMM_RANK')
self.assertIsNotNone(comm_rank)
comm_size = os.getenv('COMM_SIZE')
self.assertIsNotNone(comm_size)
kwargs['comm_rank'] = int(comm_rank)
kwargs['comm_size'] = int(comm_size)
with core.DeviceScope(device_option):
fn(**kwargs)
workspace.ResetWorkspace()
def create_common_world(self, comm_rank, comm_size, tmpdir=None, existing_cw=None):
store_handler = "store_handler"
# If REDIS_HOST is set, use RedisStoreHandler for rendezvous.
if existing_cw is None:
redis_host = os.getenv("REDIS_HOST")
redis_port = int(os.getenv("REDIS_PORT", 6379))
if redis_host is not None:
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate",
[],
[store_handler],
prefix=str(TestCase.test_counter) + "/",
host=redis_host,
port=redis_port))
else:
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate",
[],
[store_handler],
path=tmpdir))
common_world = "common_world"
else:
common_world = str(existing_cw) + ".forked"
inputs = [store_handler]
if existing_cw is not None:
inputs.append(existing_cw)
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateCommonWorld",
inputs,
[common_world],
size=comm_size,
rank=comm_rank,
sync=True,
engine=op_engine))
return (store_handler, common_world)
def synchronize(self, store_handler, value, comm_rank=None):
TestCase.sync_counter += 1
blob = "sync_{}".format(TestCase.sync_counter)
if comm_rank == 0:
workspace.FeedBlob(blob, pickle.dumps(value))
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreSet",
[store_handler, blob],
[]))
else:
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreGet",
[store_handler],
[blob]))
return pickle.loads(workspace.FetchBlob(blob))
def _test_broadcast(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False,
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
for i in range(comm_size):
blobs = []
for j in range(num_blobs):
blob = "blob_{}".format(j)
offset = (comm_rank * num_blobs) + j
value = np.full(blob_size, offset,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("broadcast")
net.Broadcast(
[common_world] + blobs,
blobs,
root=i,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
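            # After broadcasting from root i, every rank's blob j should
            # hold the value root i originally fed it: i * num_blobs + j.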
for j in range(num_blobs):
np.testing.assert_array_equal(
workspace.FetchBlob(blobs[j]),
                    (i * num_blobs) + j)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
           blob_size=st.integers(min_value=1000, max_value=1000000),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
def test_broadcast(self, comm_size, blob_size, num_blobs, device_option,
use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_broadcast,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_broadcast,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
def _test_allreduce(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allreduce")
net.Allreduce(
[common_world] + blobs,
blobs,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
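        # Each (rank, blob) pair was filled with a distinct value in
        # 0..N-1 for N = num_blobs * comm_size, and this Allreduce sums
        # across both blobs and ranks, so every element becomes N*(N-1)/2.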
for i in range(num_blobs):
np.testing.assert_array_equal(
workspace.FetchBlob(blobs[i]),
(num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
def _test_allreduce_multicw(self,
comm_rank=None,
comm_size=None,
tmpdir=None
):
_store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
_, common_world2 = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir,
existing_cw=common_world)
        blob_size = 10000
num_blobs = 4
for cw in [common_world, common_world2]:
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i, np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allreduce_multicw")
net.Allreduce(
[cw] + blobs,
blobs,
engine=op_engine)
workspace.RunNetOnce(net)
for i in range(num_blobs):
np.testing.assert_array_equal(
workspace.FetchBlob(blobs[i]),
(num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2)
@given(comm_size=st.integers(min_value=2, max_value=8),
           blob_size=st.integers(min_value=1000, max_value=1000000),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
def test_allreduce(self, comm_size, blob_size, num_blobs, device_option,
use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_allreduce,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_allreduce,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
def _test_reduce_scatter(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
        # Specify the distribution among ranks, i.e. the number of elements
        # scattered/distributed to each process.
recv_counts = np.zeros(comm_size, dtype=np.int32)
remaining = blob_size
        chunk_size = (blob_size + comm_size - 1) // comm_size  # integer ceil-div
for i in range(comm_size):
recv_counts[i] = min(chunk_size, remaining)
remaining = remaining - chunk_size if remaining > chunk_size else 0
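        # e.g. blob_size=10, comm_size=4 gives chunk_size=3 and
        # recv_counts=[3, 3, 3, 1].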
recv_counts_blob = "recvCounts"
workspace.FeedBlob(recv_counts_blob, recv_counts)
blobs.append(recv_counts_blob)
net = core.Net("reduce_scatter")
net.ReduceScatter(
[common_world] + blobs,
blobs,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
for i in range(num_blobs):
np.testing.assert_array_equal(
np.resize(workspace.FetchBlob(blobs[i]), recv_counts[comm_rank]),
(num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
           blob_size=st.integers(min_value=1000, max_value=1000000),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
def test_reduce_scatter(self, comm_size, blob_size, num_blobs,
device_option, use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_reduce_scatter,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_reduce_scatter,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
def _test_allgather(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allgather")
net.Allgather(
[common_world] + blobs,
["Gathered"],
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
# create expected output
expected_output = np.array([])
for i in range(comm_size):
for j in range(num_blobs):
value = np.full(blob_size, (i * num_blobs) + j,
np.float16 if use_float16 else np.float32)
expected_output = np.concatenate((expected_output, value))
np.testing.assert_array_equal(
workspace.FetchBlob("Gathered"), expected_output)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
           blob_size=st.integers(min_value=1000, max_value=1000000),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
def test_allgather(self, comm_size, blob_size, num_blobs, device_option,
use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_allgather,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_allgather,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
@given(device_option=st.sampled_from([hu.cpu_do]))
def test_forked_cw(self, device_option):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_allreduce_multicw,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_allreduce_multicw,
comm_size=8,
device_option=device_option,
tmpdir=tmpdir)
def _test_barrier(
self,
comm_rank=None,
comm_size=None,
tmpdir=None,
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank, comm_size=comm_size, tmpdir=tmpdir
)
net = core.Net("barrier")
net.Barrier(
[common_world],
[],
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
device_option=st.sampled_from([hu.cpu_do]))
def test_barrier(self, comm_size, device_option):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_barrier,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_barrier,
comm_size=comm_size,
device_option=device_option,
tmpdir=tmpdir)
def _test_close_connection(
self,
comm_rank=None,
comm_size=None,
tmpdir=None,
):
'''
One node calls close connection, others wait it on barrier.
Test will check that all will exit eventually.
'''
# Caffe's for closers only:
# https://www.youtube.com/watch?v=QMFwFgG9NE8
        closer = comm_rank == comm_size // 2
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank, comm_size=comm_size, tmpdir=tmpdir
)
net = core.Net("barrier_or_close")
if not closer:
net.Barrier(
[common_world],
[],
engine=op_engine)
else:
net.DestroyCommonWorld(
[common_world], [common_world], engine=op_engine)
# Sleep a bit to ensure others start the barrier
import time
time.sleep(0.1)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
device_option=st.sampled_from([hu.cpu_do]))
def test_close_connection(self, comm_size, device_option):
import time
start_time = time.time()
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_close_connection,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_close_connection,
comm_size=comm_size,
device_option=device_option,
tmpdir=tmpdir)
# Check that test finishes quickly because connections get closed
self.assertLess(time.time() - start_time, 2.0)
def _test_io_error(
self,
comm_rank=None,
comm_size=None,
tmpdir=None,
):
'''
Only one node will participate in allreduce, resulting in an IoError
'''
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
if comm_rank == 0:
blob_size = 1000
num_blobs = 1
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(
blob_size, (comm_rank * num_blobs) + i, np.float32
)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allreduce")
net.Allreduce(
[common_world] + blobs,
blobs,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
device_option=st.sampled_from([hu.cpu_do]))
def test_io_error(self, comm_size, device_option):
TestCase.test_counter += 1
with self.assertRaises(IoError):
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_io_error,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_io_error,
comm_size=comm_size,
device_option=device_option,
tmpdir=tmpdir)
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
import os
import unittest
try:
from libfb.py import parutil
except ImportError as e:
# If libfb not found, skip all tests in this file
raise unittest.SkipTest(str(e))
core.GlobalInit(["python", "--caffe2_log_level=0"])
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:torch_ops')
RUNTIME = parutil.get_runtime_path()
if 'LUA_PATH' not in os.environ:
os.environ['LUA_PATH'] = ";".join([
os.path.join(RUNTIME, '_lua', '?.lua'),
os.path.join(RUNTIME, '_lua', '?', 'init.lua'),
])
os.environ['LUA_CPATH'] = os.path.join(RUNTIME, '_lua', '?.so')
class TorchOpTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=1, max_value=10),
i=st.integers(min_value=1, max_value=10),
h=st.integers(min_value=2, max_value=10))
def test_feed(self, n, i, h):
op = core.CreateOperator(
"Torch", ["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
self.ws.create_blob("x").feed(x)
self.ws.create_blob("W").feed(W)
self.ws.create_blob("b").feed(b)
self.ws.run(op)
y = self.ws.blobs["y"].fetch()
print("y", y)
y = y.reshape((n, h))
np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
@given(n=st.integers(min_value=1, max_value=10),
i=st.integers(min_value=1, max_value=10),
h=st.integers(min_value=2, max_value=10),
**hu.gcs)
def test_gradient(self, n, i, h, gc, dc):
op = core.CreateOperator(
"Torch", ["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
inputs = [x, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for i, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(n=st.integers(min_value=1, max_value=10),
i=st.integers(min_value=1, max_value=10),
h=st.integers(min_value=2, max_value=10),
iters=st.integers(min_value=1, max_value=100))
def test_iterated(self, n, i, h, iters):
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
self.ws.create_blob("x").feed(x)
self.ws.create_blob("W").feed(W)
self.ws.create_blob("b").feed(b)
net = core.Net("op")
net.Torch(
["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
print(net.Proto())
net_ = self.ws.create_net(net)
for i in range(iters):
if i % 1000 == 0:
print(i)
net_.run()
y = self.ws.blobs["y"].fetch()
y = y.reshape((n, h))
np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
def test_leakage_torch(self):
n = 1
i = 100
h = 1000
iters = 2000
x = np.random.randn(n, i).astype(np.float32)
W = np.random.randn(h, i).astype(np.float32)
b = np.random.randn(h).astype(np.float32)
self.ws.create_blob("x").feed(x)
self.ws.create_blob("W").feed(W)
self.ws.create_blob("b").feed(b)
net = core.Net("op")
net.Torch(
["x", "W", "b"], ["y"],
init=b"nn.Linear({i}, {h})".format(h=h, i=i),
num_inputs=1,
num_params=2,
num_outputs=1
)
net_ = self.ws.create_net(net)
for i in range(iters):
if i % 1000 == 0:
print(i)
net_.run()
y = self.ws.blobs["y"].fetch()
y = y.reshape((n, h))
np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:th_ops')
try:
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:th_ops_gpu')
HAS_GPU = True
except Exception as e:
print("Exception loading Torch GPU library: ", e)
# GPU import can fail, as Torch is not using cuda-lazy
HAS_GPU = False
pass
class THOpsTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
alpha=st.floats(min_value=0.1, max_value=2.0),
in_place=st.booleans(),
**(hu.gcs if HAS_GPU else hu.gcs_cpu_only))
def test_elu(self, X, alpha, in_place, gc, dc):
op = core.CreateOperator(
"ELU",
["X"],
["X" if in_place else "Y"],
engine="THNN",
alpha=alpha)
self.assertDeviceChecks(dc, op, [X], [0])
def elu(X):
Y = np.copy(X)
Y[Y <= 0] = (np.exp(Y[Y <= 0]) - 1) * alpha
return (Y,)
self.assertReferenceChecks(gc, op, [X], elu)
# Avoid the nonlinearity at 0 for gradient checker.
X[X == 0] += 0.2
X[np.abs(X) < 0.2] += np.sign(X[np.abs(X) < 0.2])
assert len(X[np.abs(X) < 0.2]) == 0
self.assertGradientChecks(gc, op, [X], 0, [0])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def feed_inputs(inputs):
for name, value in inputs.items():
workspace.FeedBlob(name, value)
def assert_proto_equals(proto, expected):
proto_lines = proto.strip().split('\n')
expected_lines = expected.strip().split('\n')
assert len(proto_lines) == len(expected_lines), \
'{} != {}'.format(proto, expected)
for left, right in zip(proto_lines, expected_lines):
assert left.strip() == right.strip(), \
'{} != {}'.format(proto, expected)
class TestCaffe2Script(hu.HypothesisTestCase):
test_program = """
def foo(a,b,X,W) -> (c):
t = a + b*b
c = FC(X,W,t)
def testIf(c0,c1,t,f) -> (r):
if c0 < c1:
r = t
else:
r = f
r = Add(r,3f,broadcast=1)
def testWhile(r) -> (r):
m = 0
while m < 4:
# Plus operator automatically broadcasts, and we cannot
# do in-place B and C arguments when we broadcast, so use
# an explicit Add op.
r = Add(r, r)
m = m + 1
"""
@given(firstdim=st.integers(min_value=1, max_value=4096),
seconddim=st.integers(min_value=1, max_value=4096),
seed=st.integers(min_value=0, max_value=65536),
**hu.gcs)
def test_foo(self, firstdim, seconddim, seed, gc, dc):
np.random.seed(int(seed))
inputs = {}
a = inputs['a'] = np.random.rand(seconddim).astype(np.float32)
b = inputs['b'] = np.random.rand(seconddim).astype(np.float32)
X = inputs['X'] = np.random.rand(firstdim, firstdim).astype(np.float32)
W = inputs['W'] = np.random.rand(seconddim, firstdim).astype(np.float32)
feed_inputs(inputs)
CU = core.C.CompilationUnit()
CU.define(self.test_program)
CU.create_net('foo').run()
ref_t = a + b * b
ref_c = np.matmul(X, W.transpose()) + ref_t
actual_c = workspace.FetchBlob('c')
np.testing.assert_allclose(actual_c, ref_c, rtol=1e-05)
def test_trinary(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo(c) -> (d):
d = 1 + (2 if c else 4)
""")
workspace.FeedBlob('c', np.ones((1), dtype=bool))
net = CU.create_net('foo')
net.run()
assert(3 == workspace.FetchBlob('d'))
workspace.FeedBlob('c', np.zeros((1), dtype=bool))
net.run()
assert(5 == workspace.FetchBlob('d'))
def test_bool_literal(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> (a,b):
a = True
b = False
""")
net = CU.create_net('foo')
net.run()
assert(workspace.FetchBlob('a'))
assert(not workspace.FetchBlob('b'))
def test_bool_operators(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> (a, b, c, d, e):
a = True and False
b = True or False
c = not b
d = not False or True
e = not (1 if a else 0) == (1 if b else 0)
""")
net = CU.create_net('foo')
net.run()
assert(not workspace.FetchBlob('a'))
assert(workspace.FetchBlob('b'))
assert(not workspace.FetchBlob('c'))
assert(workspace.FetchBlob('d'))
assert(workspace.FetchBlob('e'))
    def expect_fail(self, fn, msg):
        try:
            fn()
        except RuntimeError as r:
            if msg not in str(r):
                raise RuntimeError(
                    "Failed wrong: expected string '{}' ".format(msg) +
                    "in error message but found\n{}".format(str(r)))
        else:
            raise AssertionError(
                "expected a RuntimeError containing '{}' "
                "but no error was raised".format(msg))
def test_fails(self):
def fail_inputs():
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> ():
Print(1,4)
""")
self.expect_fail(fail_inputs, "expects 1 inputs but found 2")
def fail_undef():
CU = core.C.CompilationUnit()
CU.define("""
def foo(a) -> (b):
a = what()
""")
self.expect_fail(fail_undef, "attempting to call unknown operation")
def fail_schema():
CU = core.C.CompilationUnit()
CU.define("""
def foo(a) -> (b):
a = FC(a,a,a)
""")
self.expect_fail(fail_schema, "failed schema checking")
def test_print(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> ():
a = 1
Print(a)
Print(a+1)
_ = 4
            Print(_) # verify that what is printed isn't _ but some temporary
Print(1)
Print(1.f)
Print(3.0)
""")
net = CU.create_net('foo')
net.run()
def test_method(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> (a):
a = (3+1).Add(4).Add(1)
""")
net = CU.create_net('foo')
net.run()
assert(9 == workspace.FetchBlob('a'))
def test_plus_eq(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> (a):
a = 4
a += 1
""")
net = CU.create_net('foo')
net.run()
assert(5 == workspace.FetchBlob('a'))
def test_cast(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> (a):
a = int(4.5f)
""")
net = CU.create_net('foo')
net.run()
assert(4 == workspace.FetchBlob('a'))
def test_global(self):
CU = core.C.CompilationUnit()
CU.define("""
def foo() -> (a):
global m
m.a = 4
m.b = 5
a = m.a + m.b
""")
net = CU.create_net('foo')
net.run()
assert(9 == workspace.FetchBlob('a'))
def test_module_as_arg_ret(self):
CU = core.C.CompilationUnit()
CU.define("""
def bar(a,c) -> (b):
b = Module()
temp = a.second
b.first = temp
b.second = a.first + c
def foo() -> (a,b):
x = Module()
x.first = 1
x.second = 2
x.y = bar(x,4)
a = x.y.first
b = x.y.second
""")
net = CU.create_net('foo')
net.run()
assert(2 == workspace.FetchBlob('a'))
assert(5 == workspace.FetchBlob('b'))
def test_call_extern(self):
CU = core.C.CompilationUnit()
net = caffe2_pb2.NetDef()
net.op.extend([
core.CreateOperator(
'Mul',
['i', 'i'],
['o'],
)
])
net.external_input.append('i')
net.external_output.append('o')
CU.extern("myActualExtern", net)
CU.define("""
def myExtern(x) -> (y):
t = x
if t > 1:
y = t * t
else:
y = 5
def foo() -> (b):
a = 4
a += 1
b = 2 + myExtern(a) + myExtern(a, rename=False) + myActualExtern(a)
""")
net = CU.create_net('foo')
net.run()
assert(77 == workspace.FetchBlob('b'))
@given(seed=st.integers(min_value=0, max_value=65536), **hu.gcs)
def test_if(self, seed, gc, dc):
np.random.seed(int(seed))
inputs = {}
c0 = inputs['c0'] = np.random.rand(1).astype(np.float32)
c1 = inputs['c1'] = np.random.rand(1).astype(np.float32)
t = inputs['t'] = np.random.rand(3, 3).astype(np.float32)
f = inputs['f'] = np.random.rand(3, 3).astype(np.float32)
feed_inputs(inputs)
CU = core.C.CompilationUnit()
CU.define(self.test_program)
CU.create_net('testIf').run()
if c0 < c1:
ref_r = t + 3
else:
ref_r = f + 3
actual_r = workspace.FetchBlob('r')
np.testing.assert_allclose(actual_r, ref_r)
@given(seed=st.integers(min_value=0, max_value=65536), **hu.gcs)
def test_while(self, seed, gc, dc):
np.random.seed(int(seed))
inputs = {}
r = inputs['r'] = np.ones([3, 3]).astype(np.float32)
feed_inputs(inputs)
CU = core.C.CompilationUnit()
CU.define(self.test_program)
CU.create_net('testWhile').run()
m = 0
while m < 4:
r = r + r
m = m + 1
actual_r = workspace.FetchBlob('r')
np.testing.assert_allclose(actual_r, r)
@given(seed=st.integers(min_value=0, max_value=65536), **hu.gcs)
def test_gather(self, seed, gc, dc):
CU = core.C.CompilationUnit()
CU.define("""
def easy(tensor, indices) -> (output):
output = tensor[indices]
def hard(tensor, i, j, k) -> (output):
output = tensor[i][j][k]
""")
# First check that the generated proto is as expected. This tests that
# we desugar the gather syntax correctly and emit the right code.
proto = CU.get_proto('easy')
assert_proto_equals(proto, """
name: "easy"
op {
input: "tensor"
input: "indices"
output: "output"
type: "Gather"
}""")
proto = CU.get_proto('hard')
assert_proto_equals(proto, """
name: "hard"
op {
input: "tensor"
input: "i"
output: "$t1"
type: "Gather"
}
op {
input: "$t1"
input: "j"
output: "$t0"
type: "Gather"
}
op {
input: "$t0"
input: "k"
output: "output"
type: "Gather"
}""")
# Now just test that the effect of the generated code is as expected.
np.random.seed(int(seed))
tensor = np.random.rand(5, 4, 3).astype(np.float32)
indices = np.random.randint(len(tensor), size=(5, 5))
feed_inputs(dict(tensor=tensor, indices=indices))
net = CU.create_net('easy')
net.run()
output = workspace.FetchBlob('output')
expected_output = [tensor[sample] for sample in indices]
np.testing.assert_allclose(output, expected_output)
@given(seed=st.integers(min_value=0, max_value=65536), **hu.gcs)
def test_slice(self, seed, gc, dc):
CU = core.C.CompilationUnit()
CU.define("""
def slice_from_tensor(tensor, start, end) -> (output):
output = tensor[start:end]
def slice_from_vector(vector, start, end) -> (a, b, c, d):
a = vector[start:end]
b = vector[start:]
c = vector[:end]
d = vector[:]
""")
# slice_from_tensor
proto = CU.get_proto('slice_from_tensor')
assert_proto_equals(proto, """
name: "slice_from_tensor"
op {
input: "tensor"
input: "start"
input: "end"
output: "output"
type: "Slice"
}""")
np.random.seed(int(seed))
tensor = np.random.rand(5, 4, 3).astype(np.float32)
start = np.array([0, 1, 0], dtype=np.int32)
end = np.array([-1, 2, -1], dtype=np.int32)
feed_inputs(dict(tensor=tensor, start=start, end=end))
net = CU.create_net('slice_from_tensor')
net.run()
output = workspace.FetchBlob('output')
np.testing.assert_allclose(output, tensor[:, 1:2])
# slice_from_vector
proto = CU.get_proto('slice_from_vector')
assert_proto_equals(proto, """
name: "slice_from_vector"
op {
input: "vector"
input: "start"
input: "end"
output: "a"
type: "Slice"
}
op {
output: "$t0"
type: "ConstantFill"
arg {
name: "dtype"
i: 2
}
arg {
name: "value"
i: -1
}
arg {
name: "shape"
ints: 1
}
}
op {
input: "vector"
input: "start"
input: "$t0"
output: "b"
type: "Slice"
}
op {
output: "$t1"
type: "ConstantFill"
arg {
name: "dtype"
i: 2
}
arg {
name: "value"
i: 0
}
arg {
name: "shape"
ints: 1
}
}
op {
input: "vector"
input: "$t1"
input: "end"
output: "c"
type: "Slice"
}
op {
output: "$t2"
type: "ConstantFill"
arg {
name: "dtype"
i: 2
}
arg {
name: "value"
i: 0
}
arg {
name: "shape"
ints: 1
}
}
op {
output: "$t3"
type: "ConstantFill"
arg {
name: "dtype"
i: 2
}
arg {
name: "value"
i: -1
}
arg {
name: "shape"
ints: 1
}
}
op {
input: "vector"
input: "$t2"
input: "$t3"
output: "d"
type: "Slice"
}""")
vector = np.random.rand(10).astype(np.float32)
start = np.array([2], dtype=np.int32)
end = np.array([6], dtype=np.int32)
feed_inputs(dict(vector=vector, start=start, end=end))
net = CU.create_net('slice_from_vector')
net.run()
output = workspace.FetchBlob('a')
np.testing.assert_allclose(output, vector[2:6])
output = workspace.FetchBlob('b')
np.testing.assert_allclose(output, vector[2:])
output = workspace.FetchBlob('c')
np.testing.assert_allclose(output, vector[:6])
output = workspace.FetchBlob('d')
np.testing.assert_allclose(output, vector)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import glob
import json
import numpy as np
example_files = glob.glob('example_*.c2s')
for ex in example_files:
print('Running example file', ex)
with open(ex, 'r') as f:
inits = json.loads(f.readline())
net_name = f.readline().strip()
outputs = json.loads(f.readline())
CU = core.C.CompilationUnit()
CU.define(f.read())
# Initialize workspace with required inputs
for name, shape, dt in inits:
workspace.FeedBlob(name, np.random.rand(*shape).astype(np.dtype(dt)))
net = CU.create_net(net_name)
net.run()
print('Success! Interesting outputs:')
for output in outputs:
print(output, workspace.FetchBlob(output))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, dyndep, workspace
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/prof:cuda_profile_ops")
class CudaProfileOpsTest(unittest.TestCase):
@unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU")
def test_run(self):
net = core.Net("net")
net.CudaProfileInitialize([], [], output="/tmp/cuda_profile_test")
net.CudaProfileStart([], [])
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
net.ConstantFill([], ["out"], shape=[1, 3, 244, 244])
net.CudaProfileStop([], [])
workspace.CreateNet(net)
workspace.RunNet(net)
|
## @package htrace_to_chrome
# Module caffe2.contrib.prof.htrace_to_chrome
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import re
import sys
display_levels = ["network", "worker", "operator", "kernel"]
def stop_display(limit, curr):
return display_levels.index(limit) <= display_levels.index(curr)
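# e.g. stop_display("worker", "network") is False (descend past network
# spans), while stop_display("network", "network") is True (emit nothing
# deeper than the network level).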
def build_trace_dict(f, start_time, end_time):
"""Creates a python dictionary that has trace ids as keys and the
corresponding trace objects as values.
Input: python file object that points to a file with traces, written by
htrace-c's local file span receiver.
The exact format shouldn't concern you if you're using htrace-c correctly.
https://github.com/apache/incubator-htrace/blob/master/htrace-c.
Returns: a tuple (trace_dic, root_list), where trace_dic is a dictionary
containing all traces parsed from the input file object, and root_list is a
list of traces from trace_dic which have no parents.
Each value in trace_dic is in the form of another dictionary with the
    following keys:
"begin" : timestamp of trace start time, microseconds
"end" : timestamp of trace end time, microseconds
"desc" : description of trace
"parent" : trace id of parent trace
"children": dictionary of child traces, in the same format as trace_dic
"""
trace_dic = {}
root_list = []
for line in f:
h = json.loads(line)
if h["e"] < start_time or h["b"] > end_time:
continue
entry = {"begin": h["b"], "end": h["e"], "desc": h["d"]}
if "p" not in h or len(h["p"]) == 0:
root_list.append(entry)
else:
entry["parent"] = h["p"][0]
trace_dic[h["a"]] = entry
for k, v in trace_dic.items():
if "parent" not in v:
continue
parent = trace_dic[v["parent"]]
if "children" not in parent:
parent["children"] = {}
parent["children"][k] = v
return trace_dic, root_list
def generate_chrome_trace(root_list, display):
"""Takes trace objects created by build_trace_dict() and generates a list of
python dictionaries that can be written to a file in json format, which in
turn can be given to Chrome tracing (chrome://tracing).
Input: refer to root_list in build_trace_dict()'s return value.
Output: list of dictionaries that can be directly written to a json file by
json.dumps().
The dictionary format follows the JSON array format of Chrome tracing.
Complete events ("ph": "X") are used to express most traces; such events
will appear as horizontal blocks with lengths equal to the trace duration.
    Instant events ("ph": "i") are used for traces with many occurrences that
    would otherwise make the graph unreadable; such events are shown as thin lines.
"""
ct = []
for root_idx, root in enumerate(root_list):
# network-level spans
ct.append({
"name": root["desc"],
"ph": "X",
"ts": root["begin"],
"dur": root["end"] - root["begin"],
"pid": root_idx,
"tid": root_idx,
"args": {
"Start timestamp": root["begin"],
"End timestamp": root["end"]
}
})
for _, v in root["children"].items():
# run-scopes and worker-scopes
c = {
"name": v["desc"],
"ph": "X",
"ts": v["begin"],
"dur": v["end"] - v["begin"],
"pid": root_idx,
"args": {
"Start timestamp": v["begin"],
"End timestamp": v["end"]
}
}
if "run-scope" in v["desc"]:
c["tid"] = root_idx
ct.append(c)
else:
if stop_display(display, "network"):
continue
            m = re.search(r"(?<=worker-scope-)\d+", v["desc"])
wid = m.group(0)
c["tid"] = wid
ct.append(c)
if stop_display(display, "worker") or "children" not in v:
continue
for k_op, v_op in v["children"].items():
# operator scopes
ct.append({
"name": v_op["desc"],
"ph": "X",
"ts": v_op["begin"],
"dur": v_op["end"] - v_op["begin"],
"pid": root_idx,
"tid": wid,
"args": {
"Start timestamp": v_op["begin"],
"End timestamp": v_op["end"]
}
})
if stop_display(display, "operator") or "children" not in v_op:
continue
for idx, (k_gpu_op, v_gpu_op) in \
enumerate(sorted(v_op["children"].items(),
key=lambda e: e[1]["begin"])):
# kernel scopes
if idx == 0:
ct.append({
"name": v_op["desc"] + "-GPU",
"ph": "X",
"ts": v_gpu_op["begin"],
"dur": v_gpu_op["end"] - v_gpu_op["begin"],
"pid": root_idx,
"tid": wid,
"args": {
"desc": "NEW OPERATOR",
"Start timestamp": v_gpu_op["begin"],
"End timestamp": v_gpu_op["end"]
}
})
ct.append({
"name": v_op["desc"] + "-GPU",
"ph": "i",
"ts": v_gpu_op["begin"],
"pid": root_idx,
"tid": wid,
"args": {
"desc": v_gpu_op["desc"]
}
})
return ct
def get_argument_parser():
parser = argparse.ArgumentParser(
description="Format conversion from HTrace to Chrome tracing.")
parser.add_argument("htrace_log", type=str, help="input htrace span log file")
parser.add_argument("--display",
type=str, choices=display_levels, default="operator",
help="deepest level of spans to display (default: operator)")
parser.add_argument("--start_time", type=int, default=-1,
help="do not display spans occuring before this timestamp")
parser.add_argument("--end_time", type=int, default=sys.maxsize,
help="do not display spans occuring after this timestamp")
return parser
def main():
args = get_argument_parser().parse_args()
with open(args.htrace_log, "r") as f:
trace_dic, root_list = build_trace_dict(f, args.start_time, args.end_time)
ct = generate_chrome_trace(root_list, args.display)
print("Writing chrome json file to %s.json" % args.htrace_log)
print("Now import %s.json in chrome://tracing" % args.htrace_log)
with open(args.htrace_log + ".json", "w") as f:
f.write(json.dumps(ct))
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
import copy
import logging
import os
import six
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import tensorflow as tf
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import graph_pb2
def _make_unique_name(seen, name, min_version=0):
assert name is not None
i = min_version
x = '%s_%d' % (name, i) if i else name
while x in seen:
i += 1
x = '%s_%d' % (name, i)
seen.add(x)
return x
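# Example: _make_unique_name({'foo'}, 'foo') returns 'foo_1' and records it
# in the seen-set; with min_version=1 the first candidate tried is 'foo_1'.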
def _convert_to_ssa(shapes, track_blob_names, ops):
"""
Convert an operator graph to SSA (i.e. out-of-place).
I.e. blobs will be renamed so that each blob is produced only once.
"""
ir = core.IR(ops)
seen = set()
versioned = {}
shapes2 = {}
track_blob_names2 = {}
def ssa_name(name, versions):
assert name in versions
version = versions[name]
if (name, version) in versioned:
return versioned[(name, version)]
# Always setting name2 = `{name}_{version}` would work, but we also try
# to avoid a trailing `_0`, so we have to be careful not to introduce
# name collisions, such as (foo_1, 0) = foo_1 = (foo, 1).
# Note: operator names (if any) will be handled later.
name2 = _make_unique_name(seen, name, min_version=version)
versioned[(name, version)] = name2
# Transfer shape.
if name in shapes:
shapes2[name2] = shapes[name]
if track_blob_names and name in track_blob_names:
track_blob_names2[name2] = track_blob_names[name]
return name2
for (op, ssa) in zip(ops, ir.ssa):
assert op is ssa.op
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs)
op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs)
shapes.clear()
shapes.update(shapes2)
if track_blob_names:
track_blob_names.clear()
track_blob_names.update(track_blob_names2)
def _get_blob_names(ops):
names = set()
for op in ops:
names.update(op.input)
names.update(op.output)
return {name: name for name in names}
def _remap_keys(m, f):
m2 = {f(key): value for key, value in six.iteritems(m)}
m.clear()
m.update(m2)
def _rename_all(shapes, track_blob_names, ops, f):
seen = set()
renamed = {}
def g(name):
""" Collision-free version of f.
"""
if name is None:
return None
if name in renamed:
return renamed[name]
name2 = _make_unique_name(seen, f(name))
renamed[name] = name2
return name2
for op in ops:
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
op.input.extend(g(name) for name in inputs)
op.output.extend(g(name) for name in outputs)
_remap_keys(shapes, g)
if track_blob_names:
_remap_keys(track_blob_names, g)
# Rename all operator names (if any) independently so that the
# unique-fication happens only once in _fill_missing_operator_names().
seen.clear()
renamed.clear()
for op in ops:
op.name = g(op.name)
def _add_gradient_scope(shapes, track_blob_names, ops):
"""
For all operators or blobs with name containing "_grad", add a
"GRADIENTS/" scope.
Note: breaks graph execution since the blob -> gradient mapping is
hardcoded.
"""
def f(name):
if '_grad' in name:
return 'GRADIENTS/{}'.format(name)
else:
return name
_rename_all(shapes, track_blob_names, ops, f)
def _replace_colons(shapes, track_blob_names, ops, repl):
"""
    `:i` has a special meaning in TensorFlow.
"""
def f(name):
return name.replace(':', repl)
_rename_all(shapes, track_blob_names, ops, f)
def _fill_missing_operator_names(ops):
''' Give missing operators a name.
We expect C2 operators to be generally unnamed. This gives them a scope
(inferred from their outputs) and a name after their type. Duplicates will
be postfixed by an index.
'''
seen = set()
for op in ops:
# Make sure operator names don't collide with blobs.
seen.update(op.input)
seen.update(op.output)
for op in ops:
if op.name:
name = op.name
elif op.output or op.input:
l = [os.path.dirname(name) for name in op.output or op.input]
scope = os.path.commonprefix(l)
name = os.path.join(scope, op.type)
else:
name = op.type
assert(name)
op.name = _make_unique_name(seen, name)
def _tf_device(device_option):
if not device_option.HasField("device_type"):
return ""
if device_option.device_type == caffe2_pb2.CPU:
return "/cpu:*"
if device_option.device_type == caffe2_pb2.CUDA:
return "/gpu:{}".format(device_option.cuda_gpu_id)
raise Exception("Unhandled device", device_option)
def _add_tf_shape(m, ints):
sh = tensor_shape_pb2.TensorShapeProto()
for i in ints:
dim = tensor_shape_pb2.TensorShapeProto.Dim()
dim.size = i
sh.dim.extend([dim])
m['_output_shapes'].list.shape.extend([sh])
def _set_tf_attr(m, arg):
k = arg.name
if k == 'shape' and arg.ints:
_add_tf_shape(m, arg.ints)
return
if arg.HasField("f"):
m[k].f = arg.f
return
if arg.HasField("i"):
m[k].i = arg.i
return
if arg.HasField("s"):
m[k].s = (
arg.s if isinstance(arg.s, bytes) else str(arg.s).encode('utf-8')
)
return
if arg.floats:
m[k].list.f.extend(arg.floats)
return
if arg.ints:
m[k].list.i.extend(arg.ints)
return
if arg.strings:
m[k].list.s.extend(
s if isinstance(s, bytes) else str(s).encode('utf-8')
for s in arg.strings
)
return
# The value is an empty list.
m[k].list.s.extend([])
def _operator_to_node(shapes, op):
assert op.name, op
    # Check for existence of __version__ for backwards compatibility
n = tf.NodeDef() if hasattr(tf, '__version__') else graph_pb2.NodeDef()
n.name = op.name
n.input.extend(op.input)
n.op = op.type
n.device = _tf_device(op.device_option)
if shapes:
# Add shapes in order.
for output in op.output:
if output not in shapes:
break
_add_tf_shape(n.attr, shapes[output])
for arg in op.arg:
_set_tf_attr(n.attr, arg)
return n
def _blob_to_node(producing_ops, shapes, name):
assert name
    # Check for existence of __version__ for backwards compatibility
n = tf.NodeDef() if hasattr(tf, '__version__') else graph_pb2.NodeDef()
n.name = name
inputs = producing_ops.get(name, [])
if inputs:
n.op = 'Blob'
else:
n.op = 'Placeholder'
n.input.extend('%s:%d' % (op.name, i) for op, i in inputs)
if inputs:
device = inputs[0][0].device_option
        if all(producer.device_option == device for producer, _ in inputs):
n.device = _tf_device(device)
if shapes and name in shapes:
_add_tf_shape(n.attr, shapes[name])
return n
def _operators_to_graph_def(
shapes,
ops,
replace_colons='$',
with_ssa=True,
with_gradient_scope=True,
track_blob_names=None, # pass an empty array to track blob names
):
if track_blob_names is not None:
track_blob_names.clear()
track_blob_names.update(_get_blob_names(ops))
if replace_colons:
_replace_colons(shapes, track_blob_names, ops, replace_colons)
if with_ssa:
_convert_to_ssa(shapes, track_blob_names, ops)
if with_gradient_scope:
_add_gradient_scope(shapes, track_blob_names, ops)
_fill_missing_operator_names(ops)
    # Check for existence of __version__ for backwards compatibility
g = tf.GraphDef() if hasattr(tf, '__version__') else graph_pb2.GraphDef()
producing_ops = {}
blobs = set()
for op in ops:
g.node.extend([_operator_to_node(shapes, op)])
for input_blob in op.input:
blobs.add(input_blob)
for i, output_blob in enumerate(op.output):
blobs.add(output_blob)
producing_ops.setdefault(output_blob, []).append((op, i))
for blob in blobs:
g.node.extend([_blob_to_node(producing_ops, shapes, blob)])
return g
def _propagate_device_option(net):
if not net.HasField("device_option"):
return
for op in net.op:
if not op.HasField("device_option"):
op.device_option.CopyFrom(net.device_option)
def _try_get_shapes(nets):
try:
# Note: this will inspect the workspace for better or worse.
shapes, _ = workspace.InferShapesAndTypes(nets)
return shapes
except Exception as e:
logging.warning('Failed to compute shapes: %s', e)
return {}
def nets_to_graph_def(nets, shapes=None, **kwargs):
if shapes is None:
shapes = _try_get_shapes(nets)
nets = [copy.deepcopy(net.Proto()) for net in nets]
shapes = copy.deepcopy(shapes)
for net in nets:
_propagate_device_option(net)
return _operators_to_graph_def(
shapes,
[op for net in nets for op in net.op],
**kwargs
)
def cnn_to_graph_def(cnn, **kwargs):
return nets_to_graph_def([cnn.param_init_net, cnn.net], **kwargs)
def ops_to_graph_def(ops, shapes=None, **kwargs):
ops = copy.deepcopy(ops)
shapes = copy.deepcopy(shapes or {})
return _operators_to_graph_def(shapes, ops, **kwargs)
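# A minimal usage sketch (not part of the original module; the net and
# blob names are illustrative): build a trivial caffe2 net and convert
# it to a TF GraphDef that TensorBoard's graph tab can display.
if __name__ == '__main__':
from caffe2.python import core as _core
_example_net = _core.Net('example')
_example_net.Relu(['X'], ['Y'])
_graph = nets_to_graph_def([_example_net])
# Expect one Relu op node plus one node per blob ('X' and 'Y').
print(len(_graph.node))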
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click.testing
import numpy as np
import os
import tempfile
import unittest
from caffe2.python import brew, core, model_helper
import caffe2.contrib.tensorboard.tensorboard as tb
import caffe2.contrib.tensorboard.tensorboard_exporter as tb_exporter
import tensorflow as tf
class TensorboardTest(unittest.TestCase):
def test_events(self):
runner = click.testing.CliRunner()
c2_dir = tempfile.mkdtemp()
np.random.seed(1701)
n_iters = 2
blobs = ["w", "b"]
data = np.random.randn(len(blobs), n_iters, 10)
for i, blob in enumerate(blobs):
with open(os.path.join(c2_dir, blob), "w") as f:
for row in data[i]:
stats = [row.min(), row.max(), row.mean(), row.std()]
f.write(" ".join(str(s) for s in stats) + "\n")
# Test error handling path
with open(os.path.join(c2_dir, "not-a-summary"), "w") as f:
f.write("not-a-summary")
tf_dir = tempfile.mkdtemp()
result = runner.invoke(
tb.cli,
["tensorboard-events", "--c2-dir", c2_dir, "--tf-dir", tf_dir])
self.assertEqual(result.exit_code, 0)
entries = list(os.walk(tf_dir))
self.assertEqual(len(entries), 1)
((d, _, (fname,)),) = entries
self.assertEqual(tf_dir, d)
events = list(tf.train.summary_iterator(os.path.join(tf_dir, fname)))
self.assertEqual(len(events), n_iters + 1)
events = events[1:]
self.maxDiff = None
self.assertEqual(len(events), 2)
def test_tensorboard_graphs(self):
model = model_helper.ModelHelper(name="overfeat")
data, label = brew.image_input(
model, ["db"], ["data", "label"], is_test=0
)
with core.NameScope("conv1"):
conv1 = brew.conv(model, data, "conv1", 3, 96, 11, stride=4)
relu1 = brew.relu(model, conv1, conv1)
pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
with core.NameScope("classifier"):
fc = brew.fc(model, pool1, "fc", 4096, 1000)
pred = brew.softmax(model, fc, "pred")
xent = model.LabelCrossEntropy([pred, label], "xent")
loss = model.AveragedLoss(xent, "loss")
model.AddGradientOperators([loss], skip=1)
c2_dir = tempfile.mkdtemp()
tf_dir = tempfile.mkdtemp()
with open(os.path.join(c2_dir, "init"), "w") as f:
f.write(str(model.param_init_net.Proto()))
with open(os.path.join(c2_dir, "net"), "w") as f:
f.write(str(model.net.Proto()))
runner = click.testing.CliRunner()
result = runner.invoke(
tb.cli,
["tensorboard-graphs",
"--c2-netdef", os.path.join(c2_dir, "init"),
"--c2-netdef", os.path.join(c2_dir, "net"),
"--tf-dir", tf_dir])
self.assertEqual(result.exit_code, 0)
entries = list(os.walk(tf_dir))
self.assertEqual(len(entries), 1)
((d, _, (fname,)),) = entries
self.assertEqual(tf_dir, d)
events = list(tf.train.summary_iterator(os.path.join(tf_dir, fname)))
self.assertEqual(len(events), 3)
events = events[1:]
nets = [model.param_init_net, model.net]
for i, (event, net) in enumerate(zip(events, nets), start=1):
self.assertEqual(event.step, i)
self.assertEqual(event.wall_time, i)
self.assertEqual(
event.graph_def,
tb_exporter.nets_to_graph_def([net]).SerializeToString())
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
import collections
import logging
import numpy as np
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.contrib.tensorboard.tensorboard_exporter as tb_exporter
import tensorflow as tf
class Config(object):
HEIGHT = 600
ASPECT_RATIO = 1.6
CODE_TEMPLATE = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import"
href="https://tensorboard.appspot.com/tf-graph-basic.build.html"
onload=load()
>
<div style="height:{height}px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
"""
IFRAME_TEMPLATE = """
<iframe
seamless
style="width:{width}px;height:{height}px;border:0"
srcdoc="{code}">
</iframe>
"""
def _show_graph(graph_def):
import IPython.display
code = CODE_TEMPLATE.format(
data=repr(str(graph_def)),
id='graph' + str(np.random.rand()),
height=Config.HEIGHT)
iframe = IFRAME_TEMPLATE.format(
code=code.replace('"', '&quot;'), # escape quotes for the srcdoc attribute
width=Config.HEIGHT * Config.ASPECT_RATIO,
height=Config.HEIGHT + 20)
IPython.display.display(IPython.display.HTML(iframe))
def visualize_cnn(cnn, **kwargs):
g = tb_exporter.cnn_to_graph_def(cnn, **kwargs)
_show_graph(g)
def visualize_net(nets, **kwargs):
g = tb_exporter.nets_to_graph_def(nets, **kwargs)
_show_graph(g)
def visualize_ops(ops, **kwargs):
g = tb_exporter.ops_to_graph_def(ops, **kwargs)
_show_graph(g)
@click.group()
def cli():
pass
def write_events(tf_dir, events):
# tf.summary.FileWriter exists in current TensorFlow releases;
# tf.train.SummaryWriter was its name in older versions
if hasattr(tf.summary, 'FileWriter'):
writer = tf.summary.FileWriter(logdir=tf_dir, max_queue=len(events))
else:
writer = tf.train.SummaryWriter(logdir=tf_dir, max_queue=len(events))
for event in events:
writer.add_event(event)
writer.flush()
writer.close()
def graph_def_to_event(step, graph_def):
return tf.Event(
wall_time=step, step=step, graph_def=graph_def.SerializeToString())
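# A minimal sketch (not in the original file) of how the two helpers
# above compose: given GraphDefs g1, g2 (e.g. produced by
# tb_exporter.nets_to_graph_def), the call
#
# write_events(tf_dir, [graph_def_to_event(i, g)
# for i, g in enumerate([g1, g2], start=1)])
#
# writes a single event file under tf_dir that TensorBoard can load.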
@cli.command("tensorboard-graphs")
@click.option("--c2-netdef", type=click.Path(exists=True, dir_okay=False),
multiple=True)
@click.option("--tf-dir", type=click.Path(exists=True))
def tensorboard_graphs(c2_netdef, tf_dir):
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def parse_net_def(path):
import google.protobuf.text_format
net_def = caffe2_pb2.NetDef()
with open(path) as f:
google.protobuf.text_format.Merge(f.read(), net_def)
return core.Net(net_def)
graph_defs = [tb_exporter.nets_to_graph_def([parse_net_def(path)])
for path in c2_netdef]
events = [graph_def_to_event(i, graph_def)
for (i, graph_def) in enumerate(graph_defs, start=1)]
write_events(tf_dir, events)
log.info("Wrote %s graphs to logdir %s", len(events), tf_dir)
@cli.command("tensorboard-events")
@click.option("--c2-dir", type=click.Path(exists=True, file_okay=False),
help="Root directory of the Caffe2 run")
@click.option("--tf-dir", type=click.Path(writable=True),
help="Output path to the logdir used by TensorBoard")
def tensorboard_events(c2_dir, tf_dir):
np.random.seed(1701)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
S = collections.namedtuple('S', ['min', 'max', 'mean', 'std'])
def parse_summary(filename):
try:
with open(filename) as f:
rows = [(float(el) for el in line.split()) for line in f]
return [S(*r) for r in rows]
except Exception as e:
log.exception(e)
return None
def get_named_summaries(root):
summaries = [
(fname, parse_summary(os.path.join(dirname, fname)))
for dirname, _, fnames in os.walk(root)
for fname in fnames
]
return [(n, s) for (n, s) in summaries if s]
def inferred_histo(summary, samples=1000):
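# Only (min, max, mean, std) survive in the parsed summary files, so we
# reconstruct a plausible histogram: draw `samples` points from
# N(mean, std), clip them to [min, max], and bin the result. Seeding
# from the stats keeps the histogram deterministic per summary.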
np.random.seed(hash(
summary.std + summary.mean + summary.min + summary.max))
samples = np.random.randn(samples) * summary.std + summary.mean
samples = np.clip(samples, a_min=summary.min, a_max=summary.max)
(hist, edges) = np.histogram(samples)
upper_edges = edges[1:]
r = tf.HistogramProto(
min=summary.min,
max=summary.max,
num=len(samples),
sum=samples.sum(),
sum_squares=(samples * samples).sum())
r.bucket_limit.extend(upper_edges)
r.bucket.extend(hist)
return r
def named_summaries_to_events(named_summaries):
names = [n for (n, _) in named_summaries]
summaries = [s for (_, s) in named_summaries]
summaries = list(zip(*summaries))
def event(step, values):
s = tf.Summary()
scalar = [
tf.Summary.Value(
tag="{}/{}".format(name, field),
simple_value=v)
for name, value in zip(names, values)
for field, v in value._asdict().items()]
hist = [
tf.Summary.Value(
tag="{}/inferred_normal_hist".format(name),
histo=inferred_histo(value))
for name, value in zip(names, values)
]
s.value.extend(scalar + hist)
return tf.Event(wall_time=int(step), step=step, summary=s)
return [event(step, values)
for step, values in enumerate(summaries, start=1)]
named_summaries = get_named_summaries(c2_dir)
events = named_summaries_to_events(named_summaries)
write_events(tf_dir, events)
log.info("Wrote %s events to logdir %s", len(events), tf_dir)
if __name__ == "__main__":
cli()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.proto import caffe2_pb2
import caffe2.python.cnn as cnn
import caffe2.python.core as core
import caffe2.contrib.tensorboard.tensorboard_exporter as tb
EXPECTED = """
node {
name: "conv1/XavierFill"
op: "XavierFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 96
}
dim {
size: 3
}
dim {
size: 11
}
dim {
size: 11
}
}
}
}
}
}
node {
name: "conv1/ConstantFill"
op: "ConstantFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 96
}
}
}
}
}
}
node {
name: "classifier/XavierFill"
op: "XavierFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 1000
}
dim {
size: 4096
}
}
}
}
}
}
node {
name: "classifier/ConstantFill"
op: "ConstantFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 1000
}
}
}
}
}
}
node {
name: "ImageInput"
op: "ImageInput"
input: "db"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "is_test"
value {
i: 0
}
}
attr {
key: "use_cudnn"
value {
i: 1
}
}
}
node {
name: "NHWC2NCHW"
op: "NHWC2NCHW"
input: "data_nhwc"
device: "/gpu:0"
}
node {
name: "conv1/Conv"
op: "Conv"
input: "data"
input: "conv1/conv1_w"
input: "conv1/conv1_b"
device: "/gpu:0"
attr {
key: "exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 11
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 4
}
}
}
node {
name: "conv1/Relu"
op: "Relu"
input: "conv1/conv1"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "conv1/MaxPool"
op: "MaxPool"
input: "conv1/conv1_1"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 2
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 2
}
}
}
node {
name: "classifier/FC"
op: "FC"
input: "conv1/pool1"
input: "classifier/fc_w"
input: "classifier/fc_b"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "use_cudnn"
value {
i: 1
}
}
}
node {
name: "classifier/Softmax"
op: "Softmax"
input: "classifier/fc"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "classifier/LabelCrossEntropy"
op: "LabelCrossEntropy"
input: "classifier/pred"
input: "label"
device: "/gpu:0"
}
node {
name: "classifier/AveragedLoss"
op: "AveragedLoss"
input: "classifier/xent"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/ConstantFill"
op: "ConstantFill"
input: "classifier/loss"
device: "/gpu:0"
attr {
key: "value"
value {
f: 1.0
}
}
}
node {
name: "GRADIENTS/classifier/AveragedLossGradient"
op: "AveragedLossGradient"
input: "classifier/xent"
input: "GRADIENTS/classifier/loss_autogen_grad"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/LabelCrossEntropyGradient"
op: "LabelCrossEntropyGradient"
input: "classifier/pred"
input: "label"
input: "GRADIENTS/classifier/xent_grad"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/SoftmaxGradient"
op: "SoftmaxGradient"
input: "classifier/pred"
input: "GRADIENTS/classifier/pred_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "GRADIENTS/c/FCGradient"
op: "FCGradient"
input: "conv1/pool1"
input: "classifier/fc_w"
input: "GRADIENTS/classifier/fc_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "use_cudnn"
value {
i: 1
}
}
}
node {
name: "GRADIENTS/conv1/MaxPoolGradient"
op: "MaxPoolGradient"
input: "conv1/conv1_1"
input: "conv1/pool1"
input: "GRADIENTS/conv1/pool1_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 2
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 2
}
}
}
node {
name: "GRADIENTS/conv1/ReluGradient"
op: "ReluGradient"
input: "conv1/conv1_1"
input: "GRADIENTS/conv1/conv1_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "GRADIENTS/ConvGradient"
op: "ConvGradient"
input: "data"
input: "conv1/conv1_w"
input: "GRADIENTS/conv1/conv1_grad_1"
device: "/gpu:0"
attr {
key: "exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 11
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 4
}
}
}
node {
name: "GRADIENTS/NCHW2NHWC"
op: "NCHW2NHWC"
input: "GRADIENTS/data_grad"
device: "/gpu:0"
}
node {
name: "conv1/conv1_w"
op: "Blob"
input: "conv1/XavierFill:0"
device: "/gpu:0"
}
node {
name: "classifier/fc"
op: "Blob"
input: "classifier/FC:0"
device: "/gpu:0"
}
node {
name: "data_nhwc"
op: "Blob"
input: "ImageInput:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_b_grad"
op: "Blob"
input: "GRADIENTS/ConvGradient:1"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/pred_grad"
op: "Blob"
input: "GRADIENTS/classifier/LabelCrossEntropyGradient:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/fc_grad"
op: "Blob"
input: "GRADIENTS/classifier/SoftmaxGradient:0"
device: "/gpu:0"
}
node {
name: "conv1/conv1_b"
op: "Blob"
input: "conv1/ConstantFill:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/fc_b_grad"
op: "Blob"
input: "GRADIENTS/c/FCGradient:1"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/fc_w_grad"
op: "Blob"
input: "GRADIENTS/c/FCGradient:0"
device: "/gpu:0"
}
node {
name: "label"
op: "Blob"
input: "ImageInput:1"
device: "/gpu:0"
}
node {
name: "GRADIENTS/data_grad"
op: "Blob"
input: "GRADIENTS/ConvGradient:2"
device: "/gpu:0"
}
node {
name: "classifier/loss"
op: "Blob"
input: "classifier/AveragedLoss:0"
device: "/gpu:0"
}
node {
name: "conv1/conv1"
op: "Blob"
input: "conv1/Conv:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_grad"
op: "Blob"
input: "GRADIENTS/conv1/MaxPoolGradient:0"
device: "/gpu:0"
}
node {
name: "classifier/xent"
op: "Blob"
input: "classifier/LabelCrossEntropy:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/loss_autogen_grad"
op: "Blob"
input: "GRADIENTS/classifier/ConstantFill:0"
device: "/gpu:0"
}
node {
name: "classifier/fc_w"
op: "Blob"
input: "classifier/XavierFill:0"
device: "/gpu:0"
}
node {
name: "conv1/conv1_1"
op: "Blob"
input: "conv1/Relu:0"
device: "/gpu:0"
}
node {
name: "db"
op: "Placeholder"
}
node {
name: "classifier/pred"
op: "Blob"
input: "classifier/Softmax:0"
device: "/gpu:0"
}
node {
name: "classifier/fc_b"
op: "Blob"
input: "classifier/ConstantFill:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/xent_grad"
op: "Blob"
input: "GRADIENTS/classifier/AveragedLossGradient:0"
device: "/gpu:0"
}
node {
name: "data"
op: "Blob"
input: "NHWC2NCHW:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_w_grad"
op: "Blob"
input: "GRADIENTS/ConvGradient:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_grad_1"
op: "Blob"
input: "GRADIENTS/conv1/ReluGradient:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/data_nhwc_grad"
op: "Blob"
input: "GRADIENTS/NCHW2NHWC:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/pool1_grad"
op: "Blob"
input: "GRADIENTS/c/FCGradient:2"
device: "/gpu:0"
}
node {
name: "conv1/pool1"
op: "Blob"
input: "conv1/MaxPool:0"
device: "/gpu:0"
}
"""
class TensorboardExporterTest(unittest.TestCase):
def test_that_operators_gets_non_colliding_names(self):
op = caffe2_pb2.OperatorDef()
op.type = 'foo'
op.input.extend(['foo'])
tb._fill_missing_operator_names([op])
self.assertEqual(op.input[0], 'foo')
self.assertEqual(op.name, 'foo_1')
def test_that_replacing_colons_gives_non_colliding_names(self):
# .. and update shapes
op = caffe2_pb2.OperatorDef()
op.name = 'foo:0'
op.input.extend(['foo:0', 'foo$0'])
shapes = {'foo:0': [1]}
track_blob_names = tb._get_blob_names([op])
tb._replace_colons(shapes, track_blob_names, [op], '$')
self.assertEqual(op.input[0], 'foo$0')
self.assertEqual(op.input[1], 'foo$0_1')
# Collision but blobs and op names are handled later by
# _fill_missing_operator_names.
self.assertEqual(op.name, 'foo$0')
self.assertEqual(len(shapes), 1)
self.assertEqual(shapes['foo$0'], [1])
self.assertEqual(len(track_blob_names), 2)
self.assertEqual(track_blob_names['foo$0'], 'foo:0')
self.assertEqual(track_blob_names['foo$0_1'], 'foo$0')
def test_that_adding_gradient_scope_does_no_fancy_renaming(self):
# because it cannot create collisions
op = caffe2_pb2.OperatorDef()
op.name = 'foo_grad'
op.input.extend(['foo_grad', 'foo_grad_1'])
shapes = {'foo_grad': [1]}
track_blob_names = tb._get_blob_names([op])
tb._add_gradient_scope(shapes, track_blob_names, [op])
self.assertEqual(op.input[0], 'GRADIENTS/foo_grad')
self.assertEqual(op.input[1], 'GRADIENTS/foo_grad_1')
self.assertEqual(op.name, 'GRADIENTS/foo_grad')
self.assertEqual(len(shapes), 1)
self.assertEqual(shapes['GRADIENTS/foo_grad'], [1])
self.assertEqual(len(track_blob_names), 2)
self.assertEqual(
track_blob_names['GRADIENTS/foo_grad'], 'foo_grad')
self.assertEqual(
track_blob_names['GRADIENTS/foo_grad_1'], 'foo_grad_1')
def test_that_auto_ssa_gives_non_colliding_names(self):
op1 = caffe2_pb2.OperatorDef()
op1.output.extend(['foo'])
op2 = caffe2_pb2.OperatorDef()
op2.input.extend(['foo'])
op2.output.extend(['foo'])
op2.output.extend(['foo_1'])
shapes = {'foo': [1], 'foo_1': [2]}
track_blob_names = tb._get_blob_names([op1, op2])
tb._convert_to_ssa(shapes, track_blob_names, [op1, op2])
self.assertEqual(op1.output[0], 'foo')
self.assertEqual(op2.input[0], 'foo')
self.assertEqual(op2.output[0], 'foo_1')
# Unfortunate name, but we do not parse the original `_` suffix for now.
self.assertEqual(op2.output[1], 'foo_1_1')
self.assertEqual(len(shapes), 3)
self.assertEqual(shapes['foo'], [1])
self.assertEqual(shapes['foo_1'], [1])
self.assertEqual(shapes['foo_1_1'], [2])
self.assertEqual(len(track_blob_names), 3)
self.assertEqual(track_blob_names['foo'], 'foo')
self.assertEqual(track_blob_names['foo_1'], 'foo')
self.assertEqual(track_blob_names['foo_1_1'], 'foo_1')
def test_simple_cnnmodel(self):
model = cnn.CNNModelHelper("NCHW", name="overfeat")
data, label = model.ImageInput(["db"], ["data", "label"], is_test=0)
with core.NameScope("conv1"):
conv1 = model.Conv(data, "conv1", 3, 96, 11, stride=4)
relu1 = model.Relu(conv1, conv1)
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
with core.NameScope("classifier"):
fc = model.FC(pool1, "fc", 4096, 1000)
pred = model.Softmax(fc, "pred")
xent = model.LabelCrossEntropy([pred, label], "xent")
loss = model.AveragedLoss(xent, "loss")
model.net.RunAllOnGPU()
model.param_init_net.RunAllOnGPU()
model.AddGradientOperators([loss], skip=1)
track_blob_names = {}
graph = tb.cnn_to_graph_def(
model,
track_blob_names=track_blob_names,
shapes={},
)
self.assertEqual(
track_blob_names['GRADIENTS/conv1/conv1_b_grad'],
'conv1/conv1_b_grad',
)
self.maxDiff = None
# We can't guarantee the order in which they appear, so we sort
# both before we compare them
sep = "node {"
expected = "\n".join(sorted(
sep + "\n " + part.strip()
for part in EXPECTED.strip().split(sep)
if part.strip()
))
actual = "\n".join(sorted(
sep + "\n " + part.strip()
for part in str(graph).strip().split(sep)
if part.strip()
))
self.assertMultiLineEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import sys
import yaml
import argparse
import os
from copy import deepcopy
parser = argparse.ArgumentParser()
parser.add_argument("--template_dir", default=".", help="where template.h is")
parser.add_argument("--yaml_dir", default="aten/src/ATen/ATen",
help="where ATen yaml files are")
parser.add_argument("--output_prefix", default="", help="")
parser.add_argument(
"--install_dir", default=".", help="where to put generated file")
parser.add_argument("--third_party_root", default="", help="caffe2 third_party")
args, _ = parser.parse_known_args()
if args.third_party_root:
sys.path.append(os.path.join(args.third_party_root, "aten/src/ATen"))
from code_template import CodeTemplate as CT
else:
from src.ATen.code_template import CodeTemplate as CT
OP_TEMPLATE = CT.from_file(
os.path.join(args.template_dir, 'aten_op_template.h'))
try:
# use faster C loader if available
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def write(filename, s):
with open(filename, "w") as f:
f.write(s)
def read(filename):
with open(filename, "r") as f:
return f.read()
def value_has_tensors(v):
# Sparse shouldn't appear in the public API; this seems to be a temporary bug
return "Tensor" in v['dynamic_type'] and "Sparse" not in v['dynamic_type']
def value_is_tensor_type(v):
return value_has_tensors(v) and v['dynamic_type'] != 'TensorList'
# for each aten type, how do we handle a return value of that type?
RETURN_MAP = {
'Tensor': 'assignTo(Output(${offset}),${output});',
'Scalar': 'assignTo(Output(${offset}),*inferred_type, ${output});',
'bool': 'assignToValue<int64_t>(Output(${offset}),${output});',
'int64_t': 'assignToValue<int64_t>(Output(${offset}),${output});',
'std::vector<Tensor>': 'assignListStartingAt(${offset}, ${output});',
}
# for each non-Tensor aten argument, how do we read it from caffe2's
# attribute list. Most of these call runtime functions defined in the
# template class.
ARGUMENT_MAP = {
'Scalar': 'at::Scalar ${arg} = readScalarAttribute("${arg}");',
'bool': 'bool ${arg} = readAttribute<int64_t>("${arg}");',
'int': 'int ${arg} = readAttribute<int64_t>("${arg}");',
'double': 'double ${arg} = readAttribute<float>("${arg}");',
'int64_t': 'int64_t ${arg} = readAttribute<int64_t>("${arg}");',
'IntList': 'auto ${arg} = readIntList("${arg}");',
'std::array<bool, 2>': 'auto ${arg} = readBoolMask<2>("${arg}");',
'std::array<bool, 3>': 'auto ${arg} = readBoolMask<3>("${arg}");',
}
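# An illustrative expansion (not in the original script; 'keepdim' is a
# made-up attribute name): substituting into one of the templates above,
#
# CT(ARGUMENT_MAP['bool']).substitute({}, arg='keepdim')
#
# yields the C++ statement:
#
# bool keepdim = readAttribute<int64_t>("keepdim");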
def expand(o):
num_defaults = sum(1 if 'default' in arg else 0 for arg in o['arguments'])
results = [o]
for i in range(0, num_defaults):
# the trailing num_defaults arguments must all carry defaults
assert('default' in o['arguments'][-(i + 1)])
v = deepcopy(o)
v['arguments'] = v['arguments'][:-(i + 1)]
results.append(v)
return results
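# For example (illustrative): an op whose trailing two arguments carry
# defaults, f(a, b=1, c=2), expands into the three variants
# f(a, b, c), f(a, b) and f(a), so callers may omit trailing defaults.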
# filter the list of declarations removing things we cannot support
def supports(o):
# skip all in-place operators for now since aten cannot Resize
# caffe2 memory inside an operator
if o['inplace']:
return False
# _out variants also work in-place on arguments taken as destinations
# we also cannot handle these because aten cannot resize caffe2 Tensors
if "_out" in o['name']:
return False
# skip return types we cannot handle
for ret in o['returns']:
if not value_has_tensors(ret) and ret['type'] not in RETURN_MAP:
print("Skipping {} Because of Ret: {} ({})".format(
o['name'], ret['type'], ret['dynamic_type']))
return False
# skip arguments we cannot handle
for arg in o['arguments']:
if not value_has_tensors(arg) and arg['type'] not in ARGUMENT_MAP:
print("Skipping {} Because of Arg: {} ({}) ".format(
o['name'], arg['type'], arg['dynamic_type']))
return False
return True
# template for each potential operator.
# each operator has an integer 'key' associated with it, and
# a lambda that defines the operator
# non-tensor attributes are created in ${initialization}
# and then saved as arguments to the lambda
# Inputs/Outputs are read inside the lambda
OPTION_TEMPLATE = CT("""\
case ${key}: { // ${name}
${initialization}
run_op = [=] {
${statements}
auto the_result = ${invocation};
${assignments}
return true;
};
} break;
""")
def get_output(o, i):
if len(o['returns']) == 1:
return 'the_result'
else:
return 'std::get<{}>(the_result)'.format(i)
def attribute_names(o):
return sorted([a['name'] for a in o['arguments'] if not value_has_tensors(a)])
def required_attribute_names(o):
return sorted([a['name'] for a in o['arguments'] if not value_has_tensors(a) and 'default' not in a])
def self_as_first_argument(arguments):
return ([a for a in arguments if a['name'] == 'self'] +
[a for a in arguments if a['name'] != 'self'])
def get_num_inputs(o):
args = 0
for a in o['arguments']:
if a['type'] == 'TensorList':
return '*'
elif value_has_tensors(a):
args += 1
return str(args)
if __name__ == '__main__':
decls = yaml.load(read(os.path.join(args.yaml_dir, 'Declarations.yaml')), Loader=Loader)
filtered = [expanded for o in decls for expanded in expand(o) if supports(expanded)]
top_env = {
'mappings': [],
'implementations': [],
}
seen = set()
key = 0
for o in filtered:
# [DESCRIPTORS]
# each option is associated with a descriptor string that is used
# to figure out which version of an op is being used:
# The format is:
# opname-attribute_1-attribute_2-num_inputs
# Example:
# lerp-weight-2
# the operator lerp has the attribute weight and takes 2 inputs
attr_names = attribute_names(o)
num_inputs = get_num_inputs(o)
descriptor = '-'.join([o['name']] + attr_names + [num_inputs])
if descriptor in seen:
continue
seen.add(descriptor)
# map from descriptor string to the integer key in the switch statements
# that initializes the operators
top_env['mappings'].append('{{ "{}", {} }},'.format(descriptor, key))
env = {
'name': o['name'],
'statements': [],
'arguments': [],
'assignments': [],
'initialization': [],
'key': str(key),
}
defined_inferred_type = False
if 'Tensor' in o['method_of']:
# Make sure 'self' is the first argument; currently Declarations.yaml
# does not always do this and instead keeps the argument list in the
# same order as the Type method.
o['arguments'] = self_as_first_argument(o['arguments'])
elif 'namespace' not in o['method_of']:
# methods on type like 'ones' or 'zeros' always take a
# string attribute that is translated into the at::Type object
# e.g. "Float" is at::kFloat
assert('Type' in o['method_of'])
defined_inferred_type = True
env['initialization'].append(
'auto inferred_type = readTypeAttribute("type");')
i = 0
for arg in o['arguments']:
env['arguments'].append(arg['name'])
if arg['type'] == 'TensorList':
env['statements'].append(
'auto {} = loadInputsAtOffset({});'.format(arg['name'], i))
elif value_is_tensor_type(arg):
assert(i != '*') # tensor list is not last argument
# load tensor inputs from Caffe2
env['statements'].append(
"auto {} = loadInput({});".format(arg['name'], i))
i += 1
if arg['dynamic_type'] == 'Tensor' and not defined_inferred_type:
# first tensor input is used to define the output type.
defined_inferred_type = True
env['statements'].append(
'auto inferred_type = &({}.type());'.format(
arg['name']))
else:
init = CT(ARGUMENT_MAP[arg['type']]).substitute(env, arg=arg['name'])
env['initialization'].append(init)
for i, r in enumerate(o['returns']):
t = RETURN_MAP[r['type'] if not value_is_tensor_type(r) else 'Tensor']
assignment = CT(t).substitute(env, offset=i, output=get_output(o, i))
env['assignments'].append(assignment)
if 'Tensor' in o['method_of']:
env['invocation'] = "self.{}({})".format(
o['name'], ', '.join(env['arguments'][1:]))
elif 'namespace' in o['method_of']:
env['invocation'] = CT("at::${name}(${arguments})").substitute(env)
else:
assert('Type' in o['method_of'])
env['invocation'] = CT(
'inferred_type->${name}(${arguments})').substitute(env)
top_env['implementations'].append(OPTION_TEMPLATE.substitute(env))
key += 1
write(os.path.join(args.install_dir, args.output_prefix + "aten_op.h"), OP_TEMPLATE.substitute(top_env))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, dyndep
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/aten:aten_op')
class TestATen(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=2), **hu.gcs)
def test_add(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["X", "Y"],
["Z"],
operator="add")
def ref(X, Y):
return [X + Y]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=1), **hu.gcs)
def test_pow(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["S"],
["Z"],
operator="pow", exponent=2.0)
def ref(X):
return [np.square(X)]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(x=st.integers(min_value=2, max_value=8), **hu.gcs)
def test_sort(self, x, gc, dc):
inputs = [np.random.permutation(x)]
op = core.CreateOperator(
"ATen",
["S"],
["Z", "I"],
operator="sort")
def ref(X):
return [np.sort(X), np.argsort(X)]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=1), **hu.gcs)
def test_sum(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["S"],
["Z"],
operator="sum")
def ref(X):
return [np.sum(X)]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(**hu.gcs)
def test_ones(self, gc, dc):
op = core.CreateOperator(
"ATen",
[],
["Z"],
operator="ones", type="float", size={2, 4})
def ref():
return [np.ones([2, 4])]
self.assertReferenceChecks(gc, op, [], ref)
if __name__ == "__main__":
import unittest
unittest.main()
|
import numpy as np
from torch import nn
from torch.autograd import Variable, Function
import torch.onnx
import onnx
import caffe2.python.onnx.backend
class MyFunction(Function):
@staticmethod
def forward(ctx, x, y):
return x*x + y
@staticmethod
def symbolic(graph, x, y):
x2 = graph.at("mul", x, x)
r = graph.at("add", x2, y)
# x, y, x2, and r are 'Node' objects
# print(r) or print(graph) will print out a textual representation for debugging.
# this representation will be converted to ONNX protobufs on export.
return r
class MyModule(nn.Module):
def forward(self, x, y):
# you can combine your ATen ops with standard ONNX ones
x = nn.ReLU()(x)
return MyFunction.apply(x, y)
torch.onnx.export(MyModule(),
(Variable(torch.ones(3,4)), Variable(torch.ones(3,4))),
"output.onnx",
verbose=True)
# prints the graph for debugging:
# graph(%1 : Float(3, 4)
# %2 : Float(3, 4)) {
# %3 : Float(3, 4) = Relu(%1), uses = [%4.i0, %4.i1];
# %4 : UNKNOWN_TYPE = ATen[operator=mul](%3, %3), uses = [%5.i0];
# %5 : Float(3, 4) = ATen[operator=add](%4, %2), uses = [%0.i0];
# return (%5);
# }
graph = onnx.load("output.onnx")
a = np.random.randn(3, 4).astype(np.float32)
b = np.random.randn(3, 4).astype(np.float32)
prepared_backend = caffe2.python.onnx.backend.prepare(graph)
W = {graph.graph.input[0].name: a, graph.graph.input[1].name: b}
c2_out = prepared_backend.run(W)[0]
x = np.maximum(a, 0)
r = x*x + b
np.testing.assert_array_almost_equal(r, c2_out)
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
import numpy.random as nr
import random as r
from python_util.util import *
from python_util.data import *
from python_util.options import *
from python_util.gpumodel import *
import sys
import math as m
import layer as lay
from convdata import ImageDataProvider, CIFARDataProvider, DummyConvNetLogRegDataProvider
from os import linesep as NL
import copy as cp
import os
class Driver(object):
def __init__(self, convnet):
self.convnet = convnet
def on_start_batch(self, batch_data, train):
pass
def on_finish_batch(self):
pass
class GradCheckDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.checkGradients(data)
class TrainingDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.startBatch(data, self.convnet.get_progress(), not train)
class MultiviewTestDriver(TrainingDriver):
def on_start_batch(self, batch_data, train):
self.write_output = False
if train:
TrainingDriver.on_start_batch(self, batch_data, train)
else:
data = batch_data[2]
num_views = self.convnet.test_data_provider.num_views
if self.convnet.test_out != "" and self.convnet.logreg_name != "":
self.write_output = True
self.test_file_name = os.path.join(self.convnet.test_out, 'test_preds_%d' % batch_data[1])
self.probs = n.zeros((data[0].shape[1]/num_views, self.convnet.test_data_provider.get_num_classes()), dtype=n.single)
self.convnet.libmodel.startMultiviewTest(data, num_views, self.probs, self.convnet.logreg_name)
else:
self.convnet.libmodel.startMultiviewTest(data, num_views)
def on_finish_batch(self):
if self.write_output:
if not os.path.exists(self.convnet.test_out):
os.makedirs(self.convnet.test_out)
pickle(self.test_file_name, {'data': self.probs,
'note': 'generated from %s' % self.convnet.save_file})
class FeatureWriterDriver(Driver):
def __init__(self, convnet):
Driver.__init__(self, convnet)
self.last_batch = convnet.test_batch_range[-1]
def on_start_batch(self, batch_data, train):
if train:
raise ModelStateException("FeatureWriter must be used in conjunction with --test-only=1. It writes test data features.")
self.batchnum, self.data = batch_data[1], batch_data[2]
if not os.path.exists(self.convnet.feature_path):
os.makedirs(self.convnet.feature_path)
self.num_ftrs = self.convnet.layers[self.convnet.write_features]['outputs']
self.ftrs = n.zeros((self.data[0].shape[1], self.num_ftrs), dtype=n.single)
self.convnet.libmodel.startFeatureWriter(self.data, [self.ftrs], [self.convnet.write_features])
def on_finish_batch(self):
path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
pickle(path_out, {'data': self.ftrs, 'labels': self.data[1]})
print "Wrote feature file %s" % path_out
if self.batchnum == self.last_batch:
pickle(os.path.join(self.convnet.feature_path, 'batches.meta'), {'source_model':self.convnet.load_file,
'num_vis':self.num_ftrs,
'batch_size': self.convnet.test_data_provider.batch_meta['batch_size']})
class ConvNet(IGPUModel):
def __init__(self, op, load_dic, dp_params={}):
filename_options = []
for v in ('color_noise', 'multiview_test', 'inner_size', 'scalar_mean', 'minibatch_size'):
dp_params[v] = op.get_value(v)
IGPUModel.__init__(self, "ConvNet", op, load_dic, filename_options, dp_params=dp_params)
def import_model(self):
lib_name = "cudaconvnet._ConvNet"
print "========================="
print "Importing %s C++ module" % lib_name
self.libmodel = __import__(lib_name,fromlist=['_ConvNet'])
def init_model_lib(self):
self.libmodel.initModel(self.layers,
self.device_ids,
self.minibatch_size,
self.conserve_mem)
def init_model_state(self):
ms = self.model_state
layers = ms['layers'] if self.loaded_from_checkpoint else {}
ms['layers'] = lay.LayerParser.parse_layers(os.path.join(self.layer_path, self.layer_def),
os.path.join(self.layer_path, self.layer_params), self, layers=layers)
self.do_decouple_conv()
self.do_unshare_weights()
self.op.set_value('conv_to_local', [], parse=False)
self.op.set_value('unshare_weights', [], parse=False)
self.set_driver()
def do_decouple_conv(self):
# Convert convolutional layers to local
if len(self.op.get_value('conv_to_local')) > 0:
for lname in self.op.get_value('conv_to_local'):
if self.model_state['layers'][lname]['type'] == 'conv':
lay.LocalLayerParser.conv_to_local(self.model_state['layers'], lname)
def do_unshare_weights(self):
# Decouple weight matrices
if len(self.op.get_value('unshare_weights')) > 0:
for name_str in self.op.get_value('unshare_weights'):
if name_str:
name = lay.WeightLayerParser.get_layer_name(name_str)
if name is not None:
name, idx = name[0], name[1]
if name not in self.model_state['layers']:
raise ModelStateException("Layer '%s' does not exist; unable to unshare" % name)
layer = self.model_state['layers'][name]
lay.WeightLayerParser.unshare_weights(layer, self.model_state['layers'], matrix_idx=idx)
else:
raise ModelStateException("Invalid layer name '%s'; unable to unshare." % name_str)
def set_driver(self):
if self.op.get_value('check_grads'):
self.driver = GradCheckDriver(self)
elif self.op.get_value('multiview_test'):
self.driver = MultiviewTestDriver(self)
elif self.op.get_value('write_features'):
self.driver = FeatureWriterDriver(self)
else:
self.driver = TrainingDriver(self)
def fill_excused_options(self):
if self.op.get_value('check_grads'):
self.op.set_value('save_path', '')
self.op.set_value('train_batch_range', '0')
self.op.set_value('test_batch_range', '0')
self.op.set_value('data_path', '')
# Make sure the data provider returned data in proper format
def parse_batch_data(self, batch_data, train=True):
if any(d.dtype != n.single for d in batch_data[2]):
raise DataProviderException("All matrices returned by data provider must consist of single-precision floats.")
return batch_data
def start_batch(self, batch_data, train=True):
self.driver.on_start_batch(batch_data, train)
def finish_batch(self):
ret = IGPUModel.finish_batch(self)
self.driver.on_finish_batch()
return ret
def print_iteration(self):
print "%d.%d (%.2f%%)..." % (self.epoch, self.batchnum, 100 * self.get_progress()),
def print_train_time(self, compute_time_py):
print "(%.3f sec)" % (compute_time_py)
def print_costs(self, cost_outputs):
costs, num_cases = cost_outputs[0], cost_outputs[1]
children = set()
for errname in costs:
if sum(errname in self.layers[z]['children'] for z in costs) == 0:
# print self.layers[errname]['children']
for child in set(self.layers[errname]['children']) & set(costs.keys()):
costs[errname] = [v + u for v, u in zip(costs[errname], costs[child])]
children.add(child)
filtered_costs = eval(self.layers[errname]['outputFilter'])(costs[errname], num_cases)
print "%s: " % errname,
if 'outputFilterFormatter' not in self.layers[errname]:
print ", ".join("%.6f" % v for v in filtered_costs),
else:
print eval(self.layers[errname]['outputFilterFormatter'])(self,filtered_costs),
if m.isnan(filtered_costs[0]) or m.isinf(filtered_costs[0]):
print "<- error nan or inf!"
sys.exit(1)
for c in children:
del costs[c]
def print_train_results(self):
self.print_costs(self.train_outputs[-1])
def print_test_status(self):
pass
def print_test_results(self):
print NL + "======================Test output======================"
self.print_costs(self.test_outputs[-1])
if not self.test_only:
print NL + "----------------------Averages-------------------------"
self.print_costs(self.aggregate_test_outputs(self.test_outputs[-len(self.test_batch_range):]))
print NL + "-------------------------------------------------------",
for name,val in sorted(self.layers.items(), key=lambda x: x[1]['id']): # This is kind of hacky but will do for now.
l = self.layers[name]
if 'weights' in l:
wscales = [(l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi))) for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc']))]
print ""
print NL.join("Layer '%s' weights[%d]: %e [%e] [%e]" % (s[0], s[1], s[2], s[3], s[3]/s[2] if s[2] > 0 else 0) for s in wscales),
print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc']))),
print ""
def conditional_save(self):
self.save_state()
def aggregate_test_outputs(self, test_outputs):
test_outputs = cp.deepcopy(test_outputs)
num_cases = sum(t[1] for t in test_outputs)
for i in xrange(1, len(test_outputs)):
for k,v in test_outputs[i][0].items():
for j in xrange(len(v)):
test_outputs[0][0][k][j] += test_outputs[i][0][k][j]
return (test_outputs[0][0], num_cases)
@classmethod
def get_options_parser(cls):
op = IGPUModel.get_options_parser()
op.add_option("mini", "minibatch_size", IntegerOptionParser, "Minibatch size", default=128)
op.add_option("layer-def", "layer_def", StringOptionParser, "Layer definition file", set_once=False)
op.add_option("layer-params", "layer_params", StringOptionParser, "Layer parameter file")
op.add_option("layer-path", "layer_path", StringOptionParser, "Layer file path prefix", default="")
op.add_option("check-grads", "check_grads", BooleanOptionParser, "Check gradients and quit?", default=0, excuses=['data_path','save_path', 'save_file_override', 'train_batch_range','test_batch_range'])
op.add_option("multiview-test", "multiview_test", BooleanOptionParser, "Cropped DP: test on multiple patches?", default=0)
op.add_option("inner-size", "inner_size", IntegerOptionParser, "Cropped DP: crop size (0 = don't crop)", default=0, set_once=True)
op.add_option("conv-to-local", "conv_to_local", ListOptionParser(StringOptionParser), "Convert given conv layers to unshared local", default=[])
op.add_option("unshare-weights", "unshare_weights", ListOptionParser(StringOptionParser), "Unshare weight matrices in given layers", default=[])
op.add_option("conserve-mem", "conserve_mem", BooleanOptionParser, "Conserve GPU memory (slower)?", default=0)
op.add_option("color-noise", "color_noise", FloatOptionParser, "Add PCA noise to color channels with given scale", default=0.0)
op.add_option("test-out", "test_out", StringOptionParser, "Output test case predictions to given path", default="", requires=['logreg_name', 'multiview_test'])
op.add_option("logreg-name", "logreg_name", StringOptionParser, "Logreg cost layer name (for --test-out)", default="")
op.add_option("scalar-mean", "scalar_mean", FloatOptionParser, "Subtract this scalar from image (-1 = don't)", default=-1)
op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
op.delete_option('max_test_err')
op.options["testing_freq"].default = 57
op.options["num_epochs"].default = 50000
op.options['dp_type'].default = None
DataProvider.register_data_provider('dummy-lr-n', 'Dummy ConvNet logistic regression', DummyConvNetLogRegDataProvider)
DataProvider.register_data_provider('image', 'JPEG-encoded image data provider', ImageDataProvider)
DataProvider.register_data_provider('cifar', 'CIFAR-10 data provider', CIFARDataProvider)
return op
if __name__ == "__main__":
# nr.seed(6)
op = ConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ConvNet(op, load_dic)
model.start()
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.gpumodel import *
import numpy as n
import numpy.random as nr
def get_src(filename):
src = IGPUModel.load_checkpoint(filename)
return src['model_state']['layers']
# Initialize weight matrix by copying weight matrix of given layer
def makew(name, idx, shape, params):
src = get_src(params[0])
return src[name]['weights'][idx]
# Initialize bias vector by copying bias vector of given layer
def makeb(name, shape, params):
src = get_src(params[0])
return src[name]['biases']
def concat(shape, src, src_layers, src_func):
mat = n.empty(shape, dtype=n.single, order='F')
start = 0
for s in src_layers:
m = src_func(src[s])
mat[:,start:start+m.shape[1]] = m
start += m.shape[1]
return mat
# Initialize weight matrix by concatenating weight matrices of given layers
def makewcat(name, idx, shape, params):
src, src_layers = get_src(params[0]), params[1:]
return concat(shape, src, src_layers, lambda x: x['weights'][idx])
# Initialize bias vector by concatenating bias vectors of given layers
def makebcat(name, shape, params):
src, src_layers = get_src(params[0]), params[1:]
return concat(shape, src, src_layers, lambda x: x['biases'])
# Initialize bias vector from tuple input
def makeb_vec(name, shape, params):
return n.array([n.single(x) for x in params], dtype=n.single).reshape((1, len(params)))
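# A minimal usage sketch (illustrative; the layer name and checkpoint
# path are made up): cuda-convnet2 layer definition files can point
# their initWFunc/initBFunc fields at these helpers, e.g.
#
# [fc10]
# initWFunc=initw.makew(/path/to/src_checkpoint)
# initBFunc=initw.makeb(/path/to/src_checkpoint)
#
# which copies fc10's weights and biases from the named checkpoint.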
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
from StringIO import StringIO # needed by plot_predictions when saving PNGs
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start, filter_end):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
# NOTE: gfile is Google's internal file API; this branch survives from
# the original release and needs a local stand-in (e.g. os.path.exists
# and os.makedirs) to run outside Google.
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
                    for line in axes.get_yticklines():
                        line.set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from collections import OrderedDict
from os import linesep as NL
from python_util.options import OptionsParser
import re
class LayerParsingError(Exception):
pass
# A neuron that doesn't take parameters
class NeuronParser:
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
self.type = type
self.func_str = func_str
self.uses_acts = uses_acts
self.uses_inputs = uses_inputs
def parse(self, type):
if type == self.type:
return {'type': self.type,
'params': {},
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
return None
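# Illustrative example (not part of the original source): a parameter-free
# neuron parser recognizes exactly its own type string and returns a parse
# dictionary, or None for anything else.
#   NeuronParser('ident', 'f(x) = x').parse('ident')
#   -> {'type': 'ident', 'params': {}, 'usesActs': True, 'usesInputs': True}
#   NeuronParser('ident', 'f(x) = x').parse('relu') -> None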
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
m = self.neuron_regex.match(type)
self.base_type = m.group(1)
self.param_names = m.group(2).split(',')
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
if m:
try:
param_vals = [float(v.strip()) for v in m.group(1).split(',')]
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals)),
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
            except (TypeError, ValueError):
pass
return None
class AbsTanhNeuronParser(ParamNeuronParser):
def __init__(self):
ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
def parse(self, type):
dic = ParamNeuronParser.parse(self, type)
# Make b positive, since abs(tanh(bx)) = abs(tanh(-bx)) and the C++ code
# assumes b is positive.
if dic:
dic['params']['b'] = abs(dic['params']['b'])
return dic
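# Illustrative example (not part of the original source): a parameterized
# neuron spec such as 'tanh[a,b]' parses bracketed float arguments by position.
#   ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)').parse('tanh[1.75,0.5]')
#   -> {'type': 'tanh', 'params': {'a': 1.75, 'b': 0.5}, ...}
# AbsTanhNeuronParser additionally forces b positive:
#   AbsTanhNeuronParser().parse('abstanh[1,-2]')['params']['b'] == 2.0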
class ParamParser:
lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
param_converters = {'i': int,
'f': float}
def __init__(self, type):
m = self.lrs_regex.match(type)
self.base_type = m.group(1)
param_names_with_type = m.group(2).split(';') if m.group(2) is not None else []
self.param_names = [p[1:] for p in param_names_with_type]
self.param_types = [self.param_converters[p[0]] for p in param_names_with_type]
self.param_regex_inner = ";".join([('\s*%s\s*=\s*[^;,\s=]+\s*' % p) for p in self.param_names])
self.regex_str = ('^%s\s*(?:\[(%s)\])?\s*$') % (self.base_type, self.param_regex_inner)
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(self.regex_str, type, flags=re.IGNORECASE)
if m:
try:
param_vals = [ptype(v.split('=')[1].strip()) for ptype,v in zip(self.param_types, m.group(1).split(';'))] if m.group(1) is not None else []
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals))}
            except (TypeError, ValueError):
pass
return None
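# Illustrative example (not part of the original source): schedules are
# declared as 'base_type[name=value;...]', where each declared parameter name
# carries a one-letter type prefix ('f' float, 'i' int).
#   ParamParser('const[fbase]').parse('const[base=0.001]')
#   -> {'type': 'const', 'params': {'base': 0.001}}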
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
try:
return f(self, section, option)
except cfg.NoOptionError, e:
if default is not None:
return default
raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
except ValueError, e:
if typestr is None:
raise e
raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))
def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
v = self.safe_get(section, option, default=default)
if type(v) == list:
return v
try:
return [f(x.strip()) for x in v.split(',')]
except:
raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))
def safe_get_int(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)
def safe_get_float(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)
def safe_get_bool(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)
def safe_get_float_list(self, section, option, default=None):
return self.safe_get_list(section, option, float, typestr='floats', default=default)
def safe_get_int_list(self, section, option, default=None):
return self.safe_get_list(section, option, int, typestr='ints', default=default)
def safe_get_bool_list(self, section, option, default=None):
return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
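# Illustrative example (not part of the original source): given a config
# section "conv1" containing the line "wc=0.001,0.002",
#   mcp.safe_get_float_list('conv1', 'wc') == [0.001, 0.002]
# and requesting a missing option with no default raises LayerParsingError.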
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
def __init__(self, dic):
self.dic = dic
def safe_get(self, section, option, default=None):
if option in self.dic:
return self.dic[option]
return default
def safe_get_int(self, section, option, default=None):
return int(self.safe_get(section, option, default))
def safe_get_int_list(self, section, option, default=None):
return list(self.safe_get(section, option, default))
class LayerParser:
def __init__(self):
self.dic = {}
self.set_defaults()
# Post-processing step -- this is called after all layers have been initialized
def optimize(self, layers):
self.dic['actsTarget'] = -1
self.dic['actsGradTarget'] = -1
if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
# print set(len(l['gpu']) for l in layers.values())
raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
dic, name = self.dic, self.dic['name']
# print vals
if len(vals) != num_params and len(vals) != 1:
raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
parsed = []
# print vals
for v in vals:
for p in parsers:
parsedv = p.parse(v)
if parsedv:
parsed += [parsedv]
break
if len(parsed) == 1 and num_params > 1:
parsed = parsed * num_params
if len(parsed) == num_params:
return parsed
# print parsed, vals
raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
# Add parameters from layer parameter file
def add_params(self, mcp):
pass
# self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
def init(self, dic):
self.dic = dic
return self
def set_defaults(self):
self.dic['outputs'] = 0
self.dic['parser'] = self
self.dic['requiresParams'] = False
# Does this layer use its own activity matrix
# for some purpose other than computing its output?
# Usually, this will only be true for layers that require their
# own activity matrix for gradient computations. For example, layers
# with logistic units must compute the gradient y * (1 - y), where y is
# the activity matrix.
#
# Layers that do not not use their own activity matrix should advertise
# this, since this will enable memory-saving matrix re-use optimizations.
#
# The default value of this property is True, for safety purposes.
# If a layer advertises that it does not use its own activity matrix when
# in fact it does, bad things will happen.
self.dic['usesActs'] = True
# Does this layer use the activity matrices of its input layers
# for some purpose other than computing its output?
#
# Again true by default for safety
self.dic['usesInputs'] = True
# Force this layer to use its own activity gradient matrix,
# instead of borrowing one from one of its inputs.
#
# This should be true for layers where the mapping from output
# gradient to input gradient is non-elementwise.
self.dic['forceOwnActs'] = True
# Does this layer need the gradient at all?
# Should only be true for layers with parameters (weights).
self.dic['gradConsumer'] = False
# The gpu indices on which this layer runs
self.dic['gpu'] = [-1]
def parse(self, name, mcp, prev_layers, model=None):
self.prev_layers = prev_layers
self.dic['name'] = name
self.dic['type'] = mcp.safe_get(name, 'type')
self.dic['id'] = len(prev_layers)
return self.dic
def verify_float_range(self, v, param_name, _min, _max):
self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
if type(v) == list:
for i,vv in enumerate(v):
self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
else:
self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
if _min is not None and _max is not None and (v < _min or v > _max):
raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
elif _min is not None and v < _min:
raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
elif _max is not None and v > _max:
raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
if value % div != 0:
raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
def verify_str_in(self, value, param_name, lst, input_idx=-1):
lname = self.dic['name'] if input_idx == -1 else ('%s[%d]' % (self.dic['name'], input_idx))
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, ", ".join("'%s'" % s for s in lst)))
def verify_int_in(self, value, param_name, lst):
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_all_ints_in(self, values, param_name, lst):
if len([v for v in values if v not in lst]) > 0:
raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_input_dims(self, dims):
for i,d in enumerate(dims):
if d is not None and self.dic['numInputs'][i] != d: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], i, d))
# This looks for neuron=x arguments in various layers, and creates
# separate layer definitions for them.
@staticmethod
def detach_neuron_layers(layers):
for name,l in layers.items():
if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
NeuronLayerParser().detach_neuron_layer(name, layers)
@staticmethod
def parse_layers(layer_cfg_path, param_cfg_path, model, layers={}):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
if len(layers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(layer_cfg_path))
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
LayerParser.detach_neuron_layers(layers)
for l in layers.values():
l['parser'].optimize(layers)
del l['parser']
for name,l in layers.items():
if not l['type'].startswith('cost.'):
found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(param_cfg_path))
# mcp.convnet = model
for name,l in layers.items():
if not mcp.has_section(name) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
except LayerParsingError, e:
print e
sys.exit(1)
return layers
@staticmethod
def register_layer_parser(ltype, cls):
if ltype in layer_parsers:
raise LayerParsingError("Layer type '%s' already registered" % ltype)
layer_parsers[ltype] = cls
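# Illustrative sketch (not from the original source): parse_layers() consumes a
# layer-definition file and a layer-parameter file in the general shape below.
# Section names are layer names; the option names match the parsers in this
# file, but the concrete values are invented for illustration.
#
# layers.cfg:
#   [data]
#   type=data
#   dataIdx=0
#
#   [conv1]
#   type=conv
#   inputs=data
#   channels=3
#   filters=32
#   filterSize=5
#   initW=0.0001
#   sumWidth=4
#   neuron=relu
#
# layer-params.cfg:
#   [conv1]
#   epsW=0.001
#   epsB=0.002
#   momW=0.9
#   momB=0.9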
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
def __init__(self, num_inputs=-1):
LayerParser.__init__(self)
self.num_inputs = num_inputs
def verify_num_params(self, params, auto_expand=True):
for param in params:
if len(self.dic[param]) != len(self.dic['inputs']):
if auto_expand and len(self.dic[param]) == 1:
self.dic[param] *= len(self.dic['inputs'])
else:
raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
# layers: dictionary: name -> layer
def optimize(self, layers):
LayerParser.optimize(self, layers)
dic = self.dic
# Check if I have an input that no one else uses.
#print "Layer %s optimizing" % dic['name']
if not dic['forceOwnActs']:
for i, inp in enumerate(dic['inputLayers']):
if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
# I can share my activity matrix with this layer
# if it does not use its activity matrix, and I
# do not need to remember my inputs.
# TODO: a dropout layer should always be able to overwrite
# its input. Make it so.
# print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
if not inp['usesActs'] and not dic['usesInputs']:
dic['actsTarget'] = i
print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
# print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
# I can share my gradient matrix with this layer if we're on the same GPU.
# This is different from the logic for actsTarget because this guy doesn't
# have an actsGrad matrix on my GPU if our GPUs are different, so there's
# nothing to share.
if dic['gpu'] == inp['gpu']:
dic['actsGradTarget'] = i
# print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
for inp in dic['inputs']:
if inp not in prev_layers:
raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
dic['numReplicas'] = len(dic['gpu'])
if len(set(dic['gpu'])) != len(dic['gpu']):
raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
for inp in dic['inputs']:
# Data layers do not explicitly define how many replicas they have.
# The number of replicas for a data layer is given by the number of replicas
# in the next layer(s). So we set that here.
inpl = prev_layers[inp]
if inpl['type'] == 'data':
inpl['numReplicas'] = dic['numReplicas']
if inpl['numReplicas'] % dic['numReplicas'] != 0:
raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
# Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
for inp in dic['inputLayers']:
if inp['outputs'] == 0:
raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
# Layers can declare a neuron activation function to apply to their output, as a shortcut
# to avoid declaring a separate neuron layer above themselves.
dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
if model:
self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
return dic
def verify_img_size(self):
dic = self.dic
if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
@staticmethod
def grad_consumers_below(dic):
if dic['gradConsumer']:
return True
if 'inputLayers' in dic:
return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
def verify_no_grads(self):
if LayerWithInputParser.grad_consumers_below(self.dic):
raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
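        # Worked example (illustrative): imgSize=32, stride=4 gives
        # outputsX = (32 + 4 - 1) / 4 = 8 and start = (32 - 4*7) / 2 = 2
        # (integer division), i.e. an 8x8 grid of sample points centered
        # on the input image.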
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
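        # Illustrative example: filterSize=5, stdev=1 yields the symmetric 1-D
        # kernel [e^-2, e^-0.5, 1, e^-0.5, e^-2], normalized to sum to 1
        # (the 1xN shape suggests separable application on the C++ side).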
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
min_size = int(floor(dic['imgSize'] / dic['maxScale']))
max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
if dic['tgtSize'] < min_size:
raise LayerParsingError("Layer '%s': target size must be greater than minimum image size after rescaling (%d)" % (name, min_size))
if dic['tgtSize'] > max_size:
raise LayerParsingError("Layer '%s': target size must be smaller than maximum image size after rescaling (%d)" % (name, max_size))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class CropLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
dic['startX'] = mcp.safe_get_int(name, 'startX')
dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
self.verify_img_size()
self.verify_no_grads()
if dic['startX'] + dic['sizeX'] > dic['imgSize']:
raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / 3
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['channels'] = 3
dic['outputs'] = dic['numInputs'][0]
self.verify_img_size()
self.verify_no_grads()
return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
@staticmethod
def get_unused_layer_name(layers, wish):
if wish not in layers:
return wish
for i in xrange(1, 100):
name = '%s.%d' % (wish, i)
if name not in layers:
return name
raise LayerParsingError("This is insane.")
def parse_neuron(self, neuron_str):
for n in neuron_parsers:
p = n.parse(neuron_str)
if p: # Successfully parsed neuron, return it
self.dic['neuron'] = p
self.dic['usesActs'] = self.dic['neuron']['usesActs']
self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
return
# Could not parse neuron
# Print available neuron types
colnames = ['Neuron type', 'Function']
m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
def detach_neuron_layer(self, src_name, layers):
dic = self.dic
# self.set_defaults()
dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
dic['type'] = 'neuron'
dic['inputs'] = src_name
dic['neuron'] = layers[src_name]['neuron']
dic['gpu'] = layers[src_name]['gpu']
# Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
dic['src_layer'] = src_name
# Link upper layers to this new one
for l in layers.values():
if 'inputs' in l:
l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
layers[dic['name']] = dic
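    # Illustrative example: a layer 'fc1' declaring neuron=relu is split into a
    # new layer 'fc1_neuron' of type 'neuron' whose input is 'fc1', and every
    # consumer of 'fc1' is re-pointed at 'fc1_neuron'.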
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
self.parse_neuron(dic['neuron'])
dic['forceOwnActs'] = False
print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['stride'] = mcp.safe_get_int(name, 'stride', default=1)
self.verify_divisible(dic['numInputs'][0], dic['stride'], 'input dimensionality', 'stride')
dic['outputs'] = dic['numInputs'][0] / dic['stride']
print "Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class DropoutLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['outputs'] = dic['numInputs'][0]
print "Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs'])
return dic
class Dropout2LayerParser(DropoutLayerParser):
def __init__(self):
DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
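    # Illustrative example (not part of the original source):
    #   WeightLayerParser.get_layer_name('conv1[2]') -> ('conv1', '2')
    #   WeightLayerParser.get_layer_name('conv1')    -> ('conv1', None)
    #   WeightLayerParser.get_layer_name('bad name') -> None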
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
self.verify_num_params(['momW', 'wc', 'wball'])
# dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
dic['wballNormed'] = dic['wball']
# Convert from old-style 0.001,0.02 hyperparam specification to new-stye
# const[base=0.001],const[base=0.02] and so forth
def convert_scalars_to_schedules(scalars):
parts = scalars.split(',')
for i,p in enumerate(parts):
p = p.strip()
if re.match('(?:\d*\.)?\d+$', p):
parts[i] = 'const[base=%s]' % p
return parts
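        # Illustrative example: convert_scalars_to_schedules('0.001,0.02')
        # -> ['const[base=0.001]', 'const[base=0.02]']; entries that are
        # already schedules, e.g. a hypothetical 'linear[base=0.01;tgtFactor=100]',
        # pass through unchanged.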
dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
# TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
# the assert has to go in some post-processing step..
dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
def unshare(layer, layers, indices):
for i in indices:
                if layer['weightSourceLayers'][i] != '':
                    src_layer_name = layer['weightSourceLayers'][i]
                    src_matrix_idx = layer['weightSourceMatrixIndices'][i]
                    layer['weightSourceLayers'][i] = ""
                    layer['weightSourceMatrixIndices'][i] = -1
                    layer['weights'][i] = layer['weights'][i].copy()
                    layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
                    print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, src_layer_name, src_matrix_idx)
else:
print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayers' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayers'][i] != '': # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['usesActs'] = False
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
self.verify_num_params(['initW'])
self.verify_num_params(['weightSource'], auto_expand=False)
dic['weightSourceLayers'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_matrix_idx = -1
src_layer_name = ''
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if src_layer_name not in prev_layers and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
# src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
if src_layer['gpu'] != dic['gpu']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayers'] += [src_layer_name]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
class FCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
print "Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SplitFCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['parts'] = mcp.safe_get_int(name, 'parts')
dic['outputs'] = mcp.safe_get_int(name, 'outputs') * dic['parts']
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['parts'], 'parts', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']/dic['parts']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
for i in xrange(len(dic['numInputs'])):
self.verify_divisible(dic['numInputs'][i], dic['parts'], 'numInputs', 'parts', input_idx=i)
print "Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], dic['parts'])
return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
                    # Replicate the single shared filter bank once per module, then
                    # flatten back to (modules * filterChannels * filterPixels) x filters.
                    w = layer['weights'][inp].reshape(1, layer['weights'][inp].size)
                    w = n.tile(w, (layer['modules'], 1))
                    layer['weights'][inp] = n.require(
                        w.reshape(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters']),
                        requirements='C')
                    layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
            for l in layers.values():
                if 'weightSourceLayers' in l and lname in l['weightSourceLayers']:
                    LocalLayerParser.conv_to_local(layers, l['name'])
return layer
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
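        # Worked example (illustrative): imgSize=32, filterSize=5, padding=2,
        # stride=1 gives modulesX = 1 + ceil((2*2 + 32 - 5) / 1) = 32, i.e.
        # "same"-size output.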
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def add_params(self, mcp):
LocalLayerParser.add_params(self, mcp)
self.dic['wcNormMax'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMax', default=[0.0] * len(self.dic['inputs']))
self.dic['wcNormMin'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMin', default=[0.0] * len(self.dic['inputs']))
self.verify_num_params(['wcNormMax', 'wcNormMin'])
for min,max in zip(self.dic['wcNormMin'], self.dic['wcNormMax']):
if min > max:
raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (self.dic['name']))
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=dic['dataIdx']))
dic['outputs'] = dic['end'] - dic['start']
# dic['usesActs'] = False
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['inputLayers'][0]['outputs']
print "Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class ConcatentionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PassThroughLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
# Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
# For example, it does not verify that every layer only has one pass-through parent. Obviously having
# two such parents is incoherent.
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
# if len(dic['inputLayers']) == 1:
# raise LayerParsingError("Layer %s: pass-through layer must have more than one input." % dic['name'])
if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
for inp in dic['inputLayers']:
conflicting_layers = [l for l in prev_layers.values() if l['type'] == 'pass' and inp['name'] in l['inputs'] and len(set(dic['gpu']).intersection(set(l['gpu']))) > 0]
if len(conflicting_layers) > 0:
raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], conflicting_layers[0]['name'], inp['name']))
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
# dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
if dic['pool'] == 'avg':
dic['sum'] = mcp.safe_get_bool(name, 'sum', default=False)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
if LayerWithInputParser.grad_consumers_below(dic):
self.verify_divisible(dic['channels'], 16, 'channels')
self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
self.verify_img_size()
if dic['outputsX'] <= 0:
            dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1
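            # e.g. imgSize=32, start=0, sizeX=4, stride=2:
            # ceil((32 - 0 - 4) / 2.0) + 1 = 14 + 1 = 15 outputs per dimension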
dic['outputs'] = dic['outputsX']**2 * dic['channels']
print "Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
        dic['usesActs'] = dic['pool'] != 'avg'
        dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
RESPONSE_NORM = 'response'
CONTRAST_NORM = 'contrast'
CROSSMAP_RESPONSE_NORM = 'cross-map response'
def __init__(self, norm_type):
LayerWithInputParser.__init__(self, num_inputs=1)
self.norm_type = norm_type
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['scale'] = mcp.safe_get_float(name, 'scale')
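        # Normalize the user-supplied scale by the neighborhood size: cross-map
        # normalization sums over 'size' channels, while the spatial variants
        # sum over a size x size window of pixels.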
dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
dic['pow'] = mcp.safe_get_float(name, 'pow')
dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
# Contrast normalization layer does not use its inputs
dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
self.verify_num_range(dic['channels'], 'channels', 1, None)
if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
if dic['channels'] % 16 != 0:
raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
else:
self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)
self.verify_img_size()
dic['outputs'] = dic['imgPixels'] * dic['channels']
print "Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class CostParser(LayerWithInputParser):
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
# Stored as string because python can't pickle lambda functions
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
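        # e.g. evaluating this filter string and applying it:
        #   f = eval(dic['outputFilter'])
        #   f([1.0, 5.0], 10)  ->  [0.1, 0.5]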
dic['children'] = mcp.safe_get_list(name, 'children', default=[])
# Aggregated costs only produce outputs which are additive.
for c in dic['children']:
if c not in prev_layers:
raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, c))
if prev_layers[c]['type'] != dic['type']:
raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, c))
prev_layers[c]['aggregated'] = 1
dic['aggregated'] = dic['children'] != []
del dic['neuron']
return dic
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeff'] = mcp.safe_get_float(name, 'coeff')
dic['gradConsumer'] = dic['coeff'] > 0
class CrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class BinomialCrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
if dic['type'] == 'cost.bce':
print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
dic['computeSoftmaxErrorRate'] = True
return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
def __init__(self):
BinomialCrossEntCostParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
dic['computeSoftmaxErrorRate'] = False
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
# All the layer parsers
layer_parsers = {'data' : lambda : DataLayerParser(),
'fc': lambda : FCLayerParser(),
'sfc': lambda : SplitFCLayerParser(),
'conv': lambda : ConvLayerParser(),
'local': lambda : LocalUnsharedLayerParser(),
'softmax': lambda : SoftmaxLayerParser(),
'eltsum': lambda : EltwiseSumLayerParser(),
'eltmax': lambda : EltwiseMaxLayerParser(),
'sum': lambda : SumLayerParser(),
'neuron': lambda : NeuronLayerParser(),
'pool': lambda : PoolLayerParser(),
'cmpool': lambda : CrossMapPoolLayerParser(),
'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
'nailbed': lambda : NailbedLayerParser(),
'blur': lambda : GaussianBlurLayerParser(),
'href': lambda : HorizontalReflectionLayerParser(),
'resize': lambda : ResizeLayerParser(),
'rgb2yuv': lambda : RGBToYUVLayerParser(),
'rgb2lab': lambda : RGBToLABLayerParser(),
'rscale': lambda : RandomScaleLayerParser(),
'crop': lambda : CropLayerParser(),
                 'concat': lambda : ConcatenationLayerParser(),
'pass': lambda : PassThroughLayerParser(),
'dropout': lambda : DropoutLayerParser(),
'dropout2': lambda : Dropout2LayerParser(),
'cost.logreg': lambda : LogregCostParser(),
'cost.crossent': lambda : CrossEntCostParser(),
'cost.bce': lambda : BinomialCrossEntCostParser(),
'cost.dce': lambda : DetectionCrossEntCostParser(),
'cost.sum2': lambda : SumOfSquaresCostParser()}
# All the neuron parsers
# This isn't a name --> parser mapping as the layer parsers above because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc.
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True)],
key=lambda x:x.type)
# Learning rate schedules
lrs_parsers = sorted([ParamParser('const[fbase]'),
ParamParser('linear[fbase;ftgtFactor]'),
ParamParser('exp[fbase;ftgtFactor]'),
ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
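# The bracketed names appear to encode parameter types by prefix (e.g. 'f' for
# float in fbase/ftgtFactor, 'i' for int in inumSteps); this is an inference
# from the naming convention, not something stated in this file.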
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
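    # load_jpeg_batch example shapes: with nc_total=128 cases and no multiview
    # (data_mult=1), 'data' is (128, inner_pixels*3), 'labvec' is (128, 1),
    # and 'labmat' is (128, num_classes).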
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
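        # PCA color noise in the style of Krizhevsky et al.'s AlexNet
        # augmentation: per-case Gaussian coefficients are scaled by the color
        # eigenvalue stdevs and projected back through the eigenvectors.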
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
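        # Crop the stored data mean to the central inner_size x inner_size
        # region so it can be subtracted from cropped input images.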
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
        mean = self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
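                # Nine deterministic test views: crop origins form a 3x3 grid
                # spaced border_size apart (corners, edge midpoints, center).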
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from getopt import getopt
import os
import re
#import types
TERM_BOLD_START = "\033[1m"
TERM_BOLD_END = "\033[0m"
class Option:
def __init__(self, letter, name, desc, parser, set_once, default, excuses, requires, save):
        assert name is not None
self.letter = letter
self.name = name
self.desc = desc
self.parser = parser
self.set_once = set_once
self.default = default
self.excuses = excuses
self.requires = requires
self.save = save
self.value = None
self.value_given = False
self.prefixed_letter = min(2, len(letter)) * '-' + letter
def set_value(self, value, parse=True):
try:
self.value = self.parser.parse(value) if parse else value
self.value_given = True
# print self.name, self.value
except OptionException, e:
raise OptionException("Unable to parse option %s (%s): %s" % (self.prefixed_letter, self.desc, e))
def set_default(self):
if not self.default is None:
self.value = self.default
def eval_expr_default(self, env):
try:
if isinstance(self.default, OptionExpression) and not self.value_given:
self.value = self.default.evaluate(env)
if not self.parser.is_type(self.value):
raise OptionException("expression result %s is not of right type (%s)" % (self.value, self.parser.get_type_str()))
except Exception, e:
raise OptionException("Unable to set default value for option %s (%s): %s" % (self.prefixed_letter, self.desc, e))
def get_str_value(self, get_default_str=False):
val = self.value
if get_default_str: val = self.default
if val is None: return ""
if isinstance(val, OptionExpression):
return val.expr
return self.parser.to_string(val)
class OptionsParser:
"""An option parsing class. All options without default values are mandatory, unless a excuses
option (usually a load file) is given.
Does not support options without arguments."""
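    # A minimal usage sketch (hypothetical option names, kept commented out so
    # the module stays import-safe):
    #   op = OptionsParser()
    #   op.add_option("f", "load_file", StringOptionParser, "Load file", default="", excuses=OptionsParser.EXCUSE_ALL)
    #   op.add_option("e", "eps_w", FloatOptionParser, "Weight learning rate")
    #   options = op.parse()  # parses sys.argv; eps_w is mandatory unless -f is given
    #   print op.get_value("eps_w")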
SORT_LETTER = 1
SORT_DESC = 2
SORT_EXPR_LAST = 3
EXCUSE_ALL = "all"
def __init__(self):
self.options = {}
def add_option(self, letter, name, parser, desc, set_once=False, default=None, excuses=[], requires=[], save=True):
"""
The letter parameter is the actual parameter that the user will have to supply on the command line.
The name parameter is some name to be given to this option and must be a valid python variable name.
An explanation of the "default" parameter:
The default value, if specified, should have the same type as the option.
You can also specify an expression as the default value. In this case, the default value of the parameter
will be the output of the expression. The expression may assume all other option names
as local variables. For example, you can define the hidden bias
learning rate to be 10 times the weight learning rate by setting this default:
default=OptionExpression("eps_w * 10") (assuming an option named eps_w exists).
However, it is up to you to make sure you do not make any circular expression definitions.
Note that the order in which the options are parsed is arbitrary.
In particular, expression default values that depend on other expression default values
will often raise errors (depending on the order in which they happen to be parsed).
Therefore it is best not to make the default value of one variable depend on the value
of another if the other variable's default value is itself an expression.
An explanation of the "excuses" parameter:
All options are mandatory, but certain options can exclude other options from being mandatory.
For example, if the excuses parameter for option "load_file" is ["num_hid", "num_vis"],
then the options num_hid and num_vis are not mandatory as long as load_file is specified.
Use the special flag EXCUSE_ALL to allow an option to make all other options optional.
"""
assert name not in self.options
self.options[name] = Option(letter, name, desc, parser, set_once, default, excuses, requires, save)
def set_value(self, name, value, parse=True):
self.options[name].set_value(value, parse=parse)
def get_value(self, name):
return self.options[name].value
def delete_option(self, name):
if name in self.options:
del self.options[name]
def parse(self, eval_expr_defaults=False):
"""Parses the options in sys.argv based on the options added to this parser. The
default behavior is to leave any expression default options as OptionExpression objects.
Set eval_expr_defaults=True to circumvent this."""
short_opt_str = ''.join(["%s:" % self.options[name].letter for name in self.options if len(self.options[name].letter) == 1])
long_opts = ["%s=" % self.options[name].letter for name in self.options if len(self.options[name].letter) > 1]
(go, ga) = getopt(sys.argv[1:], short_opt_str, longopts=long_opts)
dic = dict(go)
for o in self.get_options_list(sort_order=self.SORT_EXPR_LAST):
if o.prefixed_letter in dic:
o.set_value(dic[o.prefixed_letter])
else:
# check if excused or has default
excused = max([o2.prefixed_letter in dic for o2 in self.options.values() if o2.excuses == self.EXCUSE_ALL or o.name in o2.excuses])
if not excused and o.default is None:
raise OptionMissingException("Option %s (%s) not supplied" % (o.prefixed_letter, o.desc))
o.set_default()
# check requirements
if o.prefixed_letter in dic:
for o2 in self.get_options_list(sort_order=self.SORT_LETTER):
if o2.name in o.requires and o2.prefixed_letter not in dic:
raise OptionMissingException("Option %s (%s) requires option %s (%s)" % (o.prefixed_letter, o.desc,
o2.prefixed_letter, o2.desc))
if eval_expr_defaults:
self.eval_expr_defaults()
return self.options
def merge_from(self, op2):
"""Merges the options in op2 into this instance, but does not overwrite
this instances's SET options with op2's default values."""
for name, o in self.options.iteritems():
if name in op2.options and ((op2.options[name].value_given and op2.options[name].value != self.options[name].value) or not op2.options[name].save):
if op2.options[name].set_once:
raise OptionException("Option %s (%s) cannot be changed" % (op2.options[name].prefixed_letter, op2.options[name].desc))
self.options[name] = op2.options[name]
for name in op2.options:
if name not in self.options:
self.options[name] = op2.options[name]
def eval_expr_defaults(self):
env = dict([(name, o.value) for name, o in self.options.iteritems()])
for o in self.options.values():
o.eval_expr_default(env)
def all_values_given(self):
return max([o.value_given for o in self.options.values() if o.default is not None])
def get_options_list(self, sort_order=SORT_LETTER):
""" Returns the list of Option objects in this OptionParser,
sorted as specified"""
cmp = lambda x, y: (x.desc < y.desc and -1 or 1)
if sort_order == self.SORT_LETTER:
cmp = lambda x, y: (x.letter < y.letter and -1 or 1)
elif sort_order == self.SORT_EXPR_LAST:
cmp = lambda x, y: (type(x.default) == OptionExpression and 1 or -1)
return sorted(self.options.values(), cmp=cmp)
def print_usage(self, print_constraints=False):
print "%s usage:" % os.path.basename(sys.argv[0])
opslist = self.get_options_list()
usage_strings = []
num_def = 0
for o in opslist:
excs = ' '
if o.default is None:
excs = ', '.join(sorted([o2.prefixed_letter for o2 in self.options.values() if o2.excuses == self.EXCUSE_ALL or o.name in o2.excuses]))
reqs = ', '.join(sorted([o2.prefixed_letter for o2 in self.options.values() if o2.name in o.requires]))
usg = (OptionsParser._bold(o.prefixed_letter) + " <%s>" % o.parser.get_type_str(), o.desc, ("[%s]" % o.get_str_value(get_default_str=True)) if not o.default is None else None, excs, reqs)
if o.default is None:
usage_strings += [usg]
else:
usage_strings.insert(num_def, usg)
num_def += 1
col_widths = [self._longest_value(usage_strings, key=lambda x:x[i]) for i in range(len(usage_strings[0]) - 1)]
col_names = [" Option", "Description", "Default"]
if print_constraints:
col_names += ["Excused by", "Requires"]
for i, s in enumerate(col_names):
print self._bold(s.ljust(col_widths[i])),
print ""
for l, d, de, ex, req in usage_strings:
if de is None:
de = ' '
print (" %s -" % l.ljust(col_widths[0])), d.ljust(col_widths[1]), de.ljust(col_widths[2]),
else:
print (" [%s] -" % l.ljust(col_widths[0])), d.ljust(col_widths[1]), de.ljust(col_widths[2]),
if print_constraints:
print ex.ljust(col_widths[3]), req
else:
print ""
def print_values(self):
longest_desc = self._longest_value(self.options.values(), key=lambda x:x.desc)
longest_def_value = self._longest_value([v for v in self.options.values() if not v.value_given and not v.default is None],
key=lambda x:x.get_str_value())
for o in self.get_options_list(sort_order=self.SORT_DESC):
print "%s: %s %s" % (o.desc.ljust(longest_desc), o.get_str_value().ljust(longest_def_value), (not o.value_given and not o.default is None) and "[DEFAULT]" or "")
@staticmethod
def _longest_value(values, key=lambda x:x):
mylen = lambda x: 0 if x is None else len(x)
return mylen(key(max(values, key=lambda x:mylen(key(x)))))
@staticmethod
def _bold(str):
return TERM_BOLD_START + str + TERM_BOLD_END
class OptionException(Exception):
pass
class OptionMissingException(OptionException):
pass
class OptionParser:
@staticmethod
def parse(value):
return str(value)
@staticmethod
def to_string(value):
return str(value)
@staticmethod
def get_type_str():
pass
class IntegerOptionParser(OptionParser):
@staticmethod
def parse(value):
try:
return int(value)
except:
raise OptionException("argument is not an integer")
@staticmethod
def get_type_str():
return "int"
@staticmethod
def is_type(value):
return type(value) == int
class BooleanOptionParser(OptionParser):
@staticmethod
def parse(value):
try:
v = int(value)
if not v in (0,1):
raise OptionException
return v
except:
raise OptionException("argument is not a boolean")
@staticmethod
def get_type_str():
return "0/1"
@staticmethod
def is_type(value):
return type(value) == int and value in (0, 1)
class StringOptionParser(OptionParser):
@staticmethod
def get_type_str():
return "string"
@staticmethod
def is_type(value):
return type(value) == str
class FloatOptionParser(OptionParser):
@staticmethod
def parse(value):
try:
return float(value)
except:
raise OptionException("argument is not a float")
@staticmethod
def to_string(value):
return "%.6g" % value
@staticmethod
def get_type_str():
return "float"
@staticmethod
def is_type(value):
return type(value) == float
class RangeOptionParser(OptionParser):
@staticmethod
def parse(value):
m = re.match("^(\d+)\-(\d+)$", value)
try:
if m: return range(int(m.group(1)), int(m.group(2)) + 1)
return [int(value)]
except:
raise OptionException("argument is neither an integer nor a range")
@staticmethod
def to_string(value):
return "%d-%d" % (value[0], value[-1])
@staticmethod
def get_type_str():
return "int[-int]"
@staticmethod
def is_type(value):
return type(value) == list
class ListOptionParser(OptionParser):
"""
A parser that parses a delimited list of items. If the "parsers"
argument is a list of parsers, then the list of items must have the form and length
specified by that list.
Example:
ListOptionParser([FloatOptionParser, IntegerOptionParser])
would parse "0.5,3" but not "0.5,3,0.6" or "0.5" or "3,0.5".
If the "parsers" argument is another parser, then the list of items may be of
arbitrary length, but each item must be parseable by the given parser.
Example:
ListOptionParser(FloatOptionParser)
would parse "0.5" and "0.5,0.3" and "0.5,0.3,0.6", etc.
"""
def __init__(self, parsers, sepchar=','):
self.parsers = parsers
self.sepchar = sepchar
def parse(self, value):
values = value.split(self.sepchar)
if type(self.parsers) == list and len(values) != len(self.parsers):
raise OptionException("requires %d arguments, given %d" % (len(self.parsers), len(values)))
try:
if type(self.parsers) == list:
return [p.parse(v) for p, v in zip(self.parsers, values)]
return [self.parsers.parse(v) for v in values]
except:
raise OptionException("argument is not of the form %s" % self.get_type_str())
def to_string(self, value):
if type(self.parsers) == list:
return self.sepchar.join([p.to_string(v) for p, v in zip(self.parsers, value)])
return self.sepchar.join([self.parsers.to_string(v) for v in value])
def get_type_str(self):
if type(self.parsers) == list:
return self.sepchar.join([p.get_type_str() for p in self.parsers])
return "%s%s..." % (self.parsers.get_type_str(), self.sepchar)
@staticmethod
def is_type(value):
return type(value) == list
class OptionExpression:
"""
This allows you to specify option values in terms of other option values.
Example:
op.add_option("eps-w", "eps_w", ListOptionParser(FloatOptionParser), "Weight learning rates for each layer")
op.add_option("eps-b", "eps_b", ListOptionParser(FloatOptionParser), "Bias learning rates for each layer", default=OptionExpression("[o * 10 for o in eps_w]"))
This says: the default bias learning rate for each layer is 10
times the weight learning rate for that layer.
"""
def __init__(self, expr):
self.expr = expr
def evaluate(self, options):
locals().update(options)
try:
return eval(self.expr)
except Exception, e:
raise OptionException("expression '%s': unable to parse: %s" % (self.expr, e))
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import cPickle
import os
from cStringIO import StringIO
class UnpickleError(Exception):
pass
GPU_LOCK_NO_SCRIPT = -2
GPU_LOCK_NO_LOCK = -1
def pickle(filename, data):
fo = filename
if type(filename) == str:
fo = open(filename, "w")
cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
def unpickle(filename):
if not os.path.exists(filename):
raise UnpickleError("Path '%s' does not exist." % filename)
    fo = open(filename, 'rb')
z = StringIO()
file_size = os.fstat(fo.fileno()).st_size
# Read 1GB at a time to avoid overflow
while fo.tell() < file_size:
z.write(fo.read(1 << 30))
fo.close()
    data = cPickle.loads(z.getvalue())
    z.close()
    return data
def is_intel_machine():
VENDOR_ID_REGEX = re.compile('^vendor_id\s+: (\S+)')
f = open('/proc/cpuinfo')
for line in f:
m = VENDOR_ID_REGEX.match(line)
if m:
f.close()
return m.group(1) == 'GenuineIntel'
f.close()
return False
# Returns the CPUs associated with a given GPU
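# e.g. a local_cpulist of "0-5,12-17" expands to
# [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 17]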
def get_cpus_for_gpu(gpu):
#proc = subprocess.Popen(['nvidia-smi', '-q', '-i', str(gpu)], stdout=subprocess.PIPE)
#lines = proc.communicate()[0]
#lines = subprocess.check_output(['nvidia-smi', '-q', '-i', str(gpu)]).split(os.linesep)
with open('/proc/driver/nvidia/gpus/%d/information' % gpu) as f:
for line in f:
if line.startswith('Bus Location'):
bus_id = line.split(':', 1)[1].strip()
bus_id = bus_id[:7] + ':' + bus_id[8:]
ff = open('/sys/module/nvidia/drivers/pci:nvidia/%s/local_cpulist' % bus_id)
cpus_str = ff.readline()
ff.close()
cpus = [cpu for s in cpus_str.split(',') for cpu in range(int(s.split('-')[0]),int(s.split('-')[1])+1)]
return cpus
return [-1]
def get_cpu():
if is_intel_machine():
return 'intel'
return 'amd'
def is_windows_machine():
return os.name == 'nt'
def tryint(s):
try:
return int(s)
except:
return s
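# Natural-sort key: splits a string into text and integer chunks, so that e.g.
# sorted(['data_batch_10', 'data_batch_2'], key=alphanum_key) yields
# ['data_batch_2', 'data_batch_10'] instead of lexicographic order.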
def alphanum_key(s):
return [tryint(c) for c in re.split('([0-9]+)', s)]
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
from numpy.random import randn, rand, random_integers
import os
from threading import Thread
from util import *
BATCH_META_FILE = "batches.meta"
class DataLoaderThread(Thread):
def __init__(self, path, tgt):
Thread.__init__(self)
self.path = path
self.tgt = tgt
def run(self):
self.tgt += [unpickle(self.path)]
class DataProvider:
BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$')
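    # Matches batch file names like 'data_batch_7' as well as sub-batch names
    # like 'data_batch_7.2'; group 1 is the batch number.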
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
if batch_range == None:
batch_range = DataProvider.get_batch_nums(data_dir)
if init_batchnum is None or init_batchnum not in batch_range:
init_batchnum = batch_range[0]
self.data_dir = data_dir
self.batch_range = batch_range
self.curr_epoch = init_epoch
self.curr_batchnum = init_batchnum
self.dp_params = dp_params
self.batch_meta = self.get_batch_meta(data_dir)
self.data_dic = None
self.test = test
self.batch_idx = batch_range.index(init_batchnum)
def get_next_batch(self):
if self.data_dic is None or len(self.batch_range) > 1:
self.data_dic = self.get_batch(self.curr_batchnum)
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
return epoch, batchnum, self.data_dic
def get_batch(self, batch_num):
fname = self.get_data_file_name(batch_num)
if os.path.isdir(fname): # batch in sub-batches
sub_batches = sorted(os.listdir(fname), key=alphanum_key)
#print sub_batches
num_sub_batches = len(sub_batches)
tgts = [[] for i in xrange(num_sub_batches)]
threads = [DataLoaderThread(os.path.join(fname, s), tgt) for (s, tgt) in zip(sub_batches, tgts)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return [t[0] for t in tgts]
return unpickle(self.get_data_file_name(batch_num))
def get_data_dims(self,idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
def advance_batch(self):
self.batch_idx = self.get_next_batch_idx()
self.curr_batchnum = self.batch_range[self.batch_idx]
if self.batch_idx == 0: # we wrapped
self.curr_epoch += 1
def get_next_batch_idx(self):
return (self.batch_idx + 1) % len(self.batch_range)
def get_next_batch_num(self):
return self.batch_range[self.get_next_batch_idx()]
# get filename of current batch
def get_data_file_name(self, batchnum=None):
if batchnum is None:
batchnum = self.curr_batchnum
return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)
@classmethod
def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
        # Note: DataProvider can't be referenced in a mutable class attribute at
        # definition time, so the type registry lives at module scope below.
#cls.dp_classes['default'] = DataProvider
type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
if type.startswith("dummy-"):
name = "-".join(type.split('-')[:-1]) + "-n"
if name not in dp_types:
raise DataProviderException("No such data provider: %s" % type)
_class = dp_classes[name]
dims = int(type.split('-')[-1])
return _class(dims)
elif type in dp_types:
_class = dp_classes[type]
return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
raise DataProviderException("No such data provider: %s" % type)
@classmethod
def register_data_provider(cls, name, desc, _class):
if name in dp_types:
raise DataProviderException("Data provider %s already registered" % name)
dp_types[name] = desc
dp_classes[name] = _class
@staticmethod
def get_batch_meta(data_dir):
return unpickle(os.path.join(data_dir, BATCH_META_FILE))
@staticmethod
def get_batch_filenames(srcdir):
return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)
@staticmethod
def get_batch_nums(srcdir):
names = DataProvider.get_batch_filenames(srcdir)
return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))
@staticmethod
def get_num_batches(srcdir):
return len(DataProvider.get_batch_nums(srcdir))
class DummyDataProvider(DataProvider):
def __init__(self, data_dim):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True}
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx = 0
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
data = rand(512, self.get_data_dims()).astype(n.single)
return self.curr_epoch, self.curr_batchnum, {'data':data}
class LabeledDataProvider(DataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
def get_num_classes(self):
return len(self.batch_meta['label_names'])
class LabeledDummyDataProvider(DummyDataProvider):
def __init__(self, data_dim, num_classes=10, num_cases=7):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim,
'label_names': [str(x) for x in range(num_classes)],
'data_in_rows':True}
self.num_cases = num_cases
self.num_classes = num_classes
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx=0
self.data = None
def get_num_classes(self):
return self.num_classes
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
if self.data is None:
data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
self.data, self.labels = data, labels
else:
data, labels = self.data, self.labels
# print data.shape, labels.shape
return self.curr_epoch, self.curr_batchnum, [data.T, labels.T ]
dp_types = {"dummy-n": "Dummy data provider for n-dimensional data",
"dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"dummy-n": DummyDataProvider,
"dummy-labeled-n": LabeledDummyDataProvider}
class DataProviderException(Exception):
pass
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
import os
from time import time, asctime, localtime, strftime
from util import *
from data import *
from options import *
from math import ceil, floor, sqrt
from data import DataProvider, dp_types
import sys
import shutil
import platform
from os import linesep as NL
from threading import Thread
import tempfile as tf
class ModelStateException(Exception):
pass
class CheckpointWriter(Thread):
def __init__(self, path, dic):
Thread.__init__(self)
self.path = path
self.dic = dic
def run(self):
save_dir = os.path.dirname(self.path)
save_file = os.path.basename(self.path)
# Write checkpoint to temporary filename
tmpfile = tf.NamedTemporaryFile(dir=os.path.dirname(save_dir), delete=False)
        pickle(tmpfile, self.dic) # Also closes tmpfile
# Move it to final filename
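        # (rename is atomic when the temporary file and the checkpoint directory
        # are on the same filesystem, so a reader never sees a half-written file)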
os.rename(tmpfile.name, self.path)
# Delete old checkpoints
for f in os.listdir(save_dir):
if f != save_file:
os.remove(os.path.join(save_dir, f))
# GPU Model interface
class IGPUModel:
def __init__(self, model_name, op, load_dic, filename_options=[], dp_params={}):
# these are input parameters
self.model_name = model_name
self.op = op
self.options = op.options
self.load_dic = load_dic
self.filename_options = filename_options
self.dp_params = dp_params
self.device_ids = self.op.get_value('gpu')
self.fill_excused_options()
self.checkpoint_writer = None
#assert self.op.all_values_given()
for o in op.get_options_list():
setattr(self, o.name, o.value)
self.loaded_from_checkpoint = load_dic is not None
# these are things that the model must remember but they're not input parameters
if self.loaded_from_checkpoint:
self.model_state = load_dic["model_state"]
self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else self.options['load_file'].value
if not os.path.isdir(self.save_file) and os.path.exists(self.save_file):
self.save_file = os.path.dirname(self.save_file)
# print self.options["save_file_override"].value, self.save_file
else:
self.model_state = {}
self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else os.path.join(self.options['save_path'].value, model_name + "_" + '_'.join(['%s_%s' % (char, self.options[opt].get_str_value()) for opt, char in filename_options]) + '_' + strftime('%Y-%m-%d_%H.%M.%S'))
self.model_state["train_outputs"] = []
self.model_state["test_outputs"] = []
self.model_state["epoch"] = 1
self.model_state["batchnum"] = self.train_batch_range[0]
# print self.save_file
self.init_data_providers()
if load_dic:
self.train_data_provider.advance_batch()
        # model state often requires knowledge of data provider, so it's initialized after
try:
self.init_model_state()
except ModelStateException, e:
print e
sys.exit(1)
for var, val in self.model_state.iteritems():
setattr(self, var, val)
self.import_model()
self.init_model_lib()
def import_model(self):
print "========================="
print "Importing %s C++ module" % ('_' + self.model_name)
self.libmodel = __import__('_' + self.model_name)
def fill_excused_options(self):
pass
def init_data_providers(self):
self.dp_params['convnet'] = self
try:
self.test_data_provider = DataProvider.get_instance(self.data_path, self.test_batch_range,
type=self.dp_type, dp_params=self.dp_params, test=True)
self.train_data_provider = DataProvider.get_instance(self.data_path, self.train_batch_range,
self.model_state["epoch"], self.model_state["batchnum"],
type=self.dp_type, dp_params=self.dp_params, test=False)
except DataProviderException, e:
print "Unable to create data provider: %s" % e
self.print_data_providers()
sys.exit()
def init_model_state(self):
pass
def init_model_lib(self):
pass
def start(self):
if self.test_only:
self.test_outputs += [self.get_test_error()]
self.print_test_results()
else:
self.train()
self.cleanup()
if self.force_save:
self.save_state().join()
sys.exit(0)
def train(self):
print "========================="
print "Training %s" % self.model_name
self.op.print_values()
print "========================="
self.print_model_state()
print "Running on CUDA device(s) %s" % ", ".join("%d" % d for d in self.device_ids)
print "Current time: %s" % asctime(localtime())
print "Saving checkpoints to %s" % self.save_file
print "========================="
next_data = self.get_next_batch()
while self.epoch <= self.num_epochs:
data = next_data
self.epoch, self.batchnum = data[0], data[1]
self.print_iteration()
sys.stdout.flush()
compute_time_py = time()
self.start_batch(data)
# load the next batch while the current one is computing
next_data = self.get_next_batch()
batch_output = self.finish_batch()
self.train_outputs += [batch_output]
self.print_train_results()
if self.get_num_batches_done() % self.testing_freq == 0:
self.sync_with_host()
self.test_outputs += [self.get_test_error()]
self.print_test_results()
self.print_test_status()
self.conditional_save()
self.print_elapsed_time(time() - compute_time_py)
def cleanup(self):
if self.checkpoint_writer is not None:
self.checkpoint_writer.join()
self.checkpoint_writer = None
def print_model_state(self):
pass
def get_num_batches_done(self):
return len(self.train_batch_range) * (self.epoch - 1) + self.batchnum - self.train_batch_range[0] + 1
def get_next_batch(self, train=True):
dp = self.train_data_provider
if not train:
dp = self.test_data_provider
return self.parse_batch_data(dp.get_next_batch(), train=train)
def parse_batch_data(self, batch_data, train=True):
return batch_data[0], batch_data[1], batch_data[2]['data']
def start_batch(self, batch_data, train=True):
self.libmodel.startBatch(batch_data[2], not train)
def finish_batch(self):
return self.libmodel.finishBatch()
def print_iteration(self):
print "\t%d.%d..." % (self.epoch, self.batchnum),
def print_elapsed_time(self, compute_time_py):
print "(%.3f sec)" % (compute_time_py)
def print_train_results(self):
batch_error = self.train_outputs[-1][0]
if not (batch_error > 0 and batch_error < 2e20):
print "Crazy train error: %.6f" % batch_error
self.cleanup()
print "Train error: %.6f " % (batch_error),
def print_test_results(self):
batch_error = self.test_outputs[-1][0]
print "%s\t\tTest error: %.6f" % (NL, batch_error),
def print_test_status(self):
status = (len(self.test_outputs) == 1 or self.test_outputs[-1][0] < self.test_outputs[-2][0]) and "ok" or "WORSE"
print status,
def sync_with_host(self):
if self.checkpoint_writer is not None:
self.checkpoint_writer.join()
self.checkpoint_writer = None
self.libmodel.syncWithHost()
def conditional_save(self):
batch_error = self.test_outputs[-1][0]
if batch_error > 0 and batch_error < self.max_test_err:
self.save_state()
else:
print "\tTest error > %g, not saving." % self.max_test_err,
    def aggregate_test_outputs(self, test_outputs):
        num_batches = 1 if self.test_one else len(self.test_batch_range)
        test_error = tuple(sum(t[r] for t in test_outputs) / num_batches
                           for r in range(len(test_outputs[-1])))
        return test_error
def get_test_error(self):
next_data = self.get_next_batch(train=False)
test_outputs = []
while True:
data = next_data
start_time_test = time()
self.start_batch(data, train=False)
load_next = (not self.test_one or self.test_only) and data[1] < self.test_batch_range[-1]
if load_next: # load next batch
next_data = self.get_next_batch(train=False)
test_outputs += [self.finish_batch()]
if self.test_only: # Print the individual batch results for safety
print "batch %d: %s" % (data[1], str(test_outputs[-1])),
self.print_elapsed_time(time() - start_time_test)
if not load_next:
break
sys.stdout.flush()
return self.aggregate_test_outputs(test_outputs)
def set_var(self, var_name, var_val):
setattr(self, var_name, var_val)
self.model_state[var_name] = var_val
return var_val
def get_var(self, var_name):
return self.model_state[var_name]
def has_var(self, var_name):
return var_name in self.model_state
def save_state(self):
for att in self.model_state:
if hasattr(self, att):
self.model_state[att] = getattr(self, att)
dic = {"model_state": self.model_state,
"op": self.op}
checkpoint_file = "%d.%d" % (self.epoch, self.batchnum)
checkpoint_file_full_path = os.path.join(self.save_file, checkpoint_file)
if not os.path.exists(self.save_file):
os.makedirs(self.save_file)
assert self.checkpoint_writer is None
self.checkpoint_writer = CheckpointWriter(checkpoint_file_full_path, dic)
self.checkpoint_writer.start()
print "-------------------------------------------------------"
print "Saved checkpoint to %s" % self.save_file
print "=======================================================",
return self.checkpoint_writer
def get_progress(self):
num_batches_total = self.num_epochs * len(self.train_batch_range)
return min(1.0, max(0.0, float(self.get_num_batches_done()-1) / num_batches_total))
@staticmethod
def load_checkpoint(load_dir):
if os.path.isdir(load_dir):
return unpickle(os.path.join(load_dir, sorted(os.listdir(load_dir), key=alphanum_key)[-1]))
return unpickle(load_dir)
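    # Illustration for load_checkpoint (assuming alphanum_key, defined
    # elsewhere in this module, yields natural ordering): with checkpoints
    # named "1.6", "2.6" and "10.6" in load_dir, a plain lexicographic sort
    # would rank "10.6" before "2.6", while the natural sort picks "10.6",
    # the latest epoch.batchnum pair, as the file to unpickle.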
@staticmethod
def get_options_parser():
op = OptionsParser()
op.add_option("load-file", "load_file", StringOptionParser, "Load file", default="", excuses=OptionsParser.EXCUSE_ALL)
op.add_option("save-path", "save_path", StringOptionParser, "Save path", excuses=['save_file_override'])
op.add_option("save-file", "save_file_override", StringOptionParser, "Save file override", excuses=['save_path'])
op.add_option("train-range", "train_batch_range", RangeOptionParser, "Data batch range: training")
op.add_option("test-range", "test_batch_range", RangeOptionParser, "Data batch range: testing")
op.add_option("data-provider", "dp_type", StringOptionParser, "Data provider", default="default")
op.add_option("test-freq", "testing_freq", IntegerOptionParser, "Testing frequency", default=25)
op.add_option("epochs", "num_epochs", IntegerOptionParser, "Number of epochs", default=500)
op.add_option("data-path", "data_path", StringOptionParser, "Data path")
op.add_option("max-test-err", "max_test_err", FloatOptionParser, "Maximum test error for saving")
op.add_option("test-only", "test_only", BooleanOptionParser, "Test and quit?", default=0)
op.add_option("test-one", "test_one", BooleanOptionParser, "Test on one batch at a time?", default=1)
op.add_option("force-save", "force_save", BooleanOptionParser, "Force save before quitting", default=0)
op.add_option("gpu", "gpu", ListOptionParser(IntegerOptionParser), "GPU override")
return op
@staticmethod
def print_data_providers():
print "Available data providers:"
for dp, desc in dp_types.iteritems():
print " %s: %s" % (dp, desc)
@staticmethod
def parse_options(op):
try:
load_dic = None
options = op.parse()
load_location = None
# print options['load_file'].value_given, options['save_file_override'].value_given
# print options['save_file_override'].value
if options['load_file'].value_given:
load_location = options['load_file'].value
elif options['save_file_override'].value_given and os.path.exists(options['save_file_override'].value):
load_location = options['save_file_override'].value
if load_location is not None:
load_dic = IGPUModel.load_checkpoint(load_location)
old_op = load_dic["op"]
old_op.merge_from(op)
op = old_op
op.eval_expr_defaults()
return op, load_dic
except OptionMissingException, e:
print e
op.print_usage()
except OptionException, e:
print e
except UnpickleError, e:
print "Error loading checkpoint:"
print e
sys.exit()
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
# This script makes batches suitable for training from raw ILSVRC 2012 tar files.
import tarfile
from StringIO import StringIO
from random import shuffle
import sys
from time import time
from pyext._MakeDataPyExt import resizeJPEG
import itertools
import os
import cPickle
import scipy.io
import math
import argparse as argp
# Set this to True to crop images to square. In this case each image will be
# resized such that its shortest edge is OUTPUT_IMAGE_SIZE pixels, and then the
# center OUTPUT_IMAGE_SIZE x OUTPUT_IMAGE_SIZE patch will be extracted.
#
# Set this to False to preserve image borders. In this case each image will be
# resized such that its shortest edge is OUTPUT_IMAGE_SIZE pixels. This was
# demonstrated to be superior by Andrew Howard in his very nice paper:
# http://arxiv.org/abs/1312.5402
CROP_TO_SQUARE = True
OUTPUT_IMAGE_SIZE = 256
# Number of threads to use for JPEG decompression and image resizing.
NUM_WORKER_THREADS = 8
# Don't worry about these.
OUTPUT_BATCH_SIZE = 3072
OUTPUT_SUB_BATCH_SIZE = 1024
def pickle(filename, data):
    # open in binary mode: cPickle.HIGHEST_PROTOCOL is a binary protocol and
    # text mode would corrupt the stream on Windows
    with open(filename, "wb") as fo:
        cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
def unpickle(filename):
    with open(filename, 'rb') as fo:
        contents = cPickle.load(fo)
    return contents
def partition_list(l, partition_size):
    divup = lambda a, b: (a + b - 1) // b
return [l[i*partition_size:(i+1)*partition_size] for i in xrange(divup(len(l),partition_size))]
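# A quick sketch of partition_list: divup rounds up, so the final chunk may be
# shorter than partition_size. For example:
#   partition_list([1, 2, 3, 4, 5], 2)  ->  [[1, 2], [3, 4], [5]]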
def open_tar(path, name):
if not os.path.exists(path):
print "ILSVRC 2012 %s not found at %s. Make sure to set ILSVRC_SRC_DIR correctly at the top of this file (%s)." % (name, path, sys.argv[0])
sys.exit(1)
return tarfile.open(path)
def makedir(path):
if not os.path.exists(path):
os.makedirs(path)
def parse_devkit_meta(ILSVRC_DEVKIT_TAR):
tf = open_tar(ILSVRC_DEVKIT_TAR, 'devkit tar')
fmeta = tf.extractfile(tf.getmember('ILSVRC2012_devkit_t12/data/meta.mat'))
meta_mat = scipy.io.loadmat(StringIO(fmeta.read()))
labels_dic = dict((m[0][1][0], m[0][0][0][0]-1) for m in meta_mat['synsets'] if m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000)
label_names_dic = dict((m[0][1][0], m[0][2][0]) for m in meta_mat['synsets'] if m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000)
label_names = [tup[1] for tup in sorted([(v,label_names_dic[k]) for k,v in labels_dic.items()], key=lambda x:x[0])]
fval_ground_truth = tf.extractfile(tf.getmember('ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt'))
validation_ground_truth = [[int(line.strip()) - 1] for line in fval_ground_truth.readlines()]
tf.close()
return labels_dic, label_names, validation_ground_truth
def write_batches(target_dir, name, start_batch_num, labels, jpeg_files):
jpeg_files = partition_list(jpeg_files, OUTPUT_BATCH_SIZE)
labels = partition_list(labels, OUTPUT_BATCH_SIZE)
makedir(target_dir)
print "Writing %s batches..." % name
for i,(labels_batch, jpeg_file_batch) in enumerate(zip(labels, jpeg_files)):
t = time()
jpeg_strings = list(itertools.chain.from_iterable(resizeJPEG([jpeg.read() for jpeg in jpeg_file_batch], OUTPUT_IMAGE_SIZE, NUM_WORKER_THREADS, CROP_TO_SQUARE)))
batch_path = os.path.join(target_dir, 'data_batch_%d' % (start_batch_num + i))
makedir(batch_path)
for j in xrange(0, len(labels_batch), OUTPUT_SUB_BATCH_SIZE):
pickle(os.path.join(batch_path, 'data_batch_%d.%d' % (start_batch_num + i, j/OUTPUT_SUB_BATCH_SIZE)),
{'data': jpeg_strings[j:j+OUTPUT_SUB_BATCH_SIZE],
'labels': labels_batch[j:j+OUTPUT_SUB_BATCH_SIZE]})
print "Wrote %s (%s batch %d of %d) (%.2f sec)" % (batch_path, name, i+1, len(jpeg_files), time() - t)
return i + 1
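# With the defaults above (3072 images per batch, 1024 per sub-batch), each
# batch directory written by write_batches ends up laid out like:
#   <tgt-dir>/data_batch_0/data_batch_0.0   # images 0-1023
#   <tgt-dir>/data_batch_0/data_batch_0.1   # images 1024-2047
#   <tgt-dir>/data_batch_0/data_batch_0.2   # images 2048-3071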
if __name__ == "__main__":
parser = argp.ArgumentParser()
parser.add_argument('--src-dir', help='Directory containing ILSVRC2012_img_train.tar, ILSVRC2012_img_val.tar, and ILSVRC2012_devkit_t12.tar.gz', required=True)
parser.add_argument('--tgt-dir', help='Directory to output ILSVRC 2012 batches suitable for cuda-convnet to train on.', required=True)
args = parser.parse_args()
print "CROP_TO_SQUARE: %s" % CROP_TO_SQUARE
print "OUTPUT_IMAGE_SIZE: %s" % OUTPUT_IMAGE_SIZE
print "NUM_WORKER_THREADS: %s" % NUM_WORKER_THREADS
ILSVRC_TRAIN_TAR = os.path.join(args.src_dir, 'ILSVRC2012_img_train.tar')
ILSVRC_VALIDATION_TAR = os.path.join(args.src_dir, 'ILSVRC2012_img_val.tar')
ILSVRC_DEVKIT_TAR = os.path.join(args.src_dir, 'ILSVRC2012_devkit_t12.tar.gz')
assert OUTPUT_BATCH_SIZE % OUTPUT_SUB_BATCH_SIZE == 0
labels_dic, label_names, validation_labels = parse_devkit_meta(ILSVRC_DEVKIT_TAR)
with open_tar(ILSVRC_TRAIN_TAR, 'training tar') as tf:
synsets = tf.getmembers()
synset_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in synsets]
print "Loaded synset tars."
print "Building training set image list (this can take 10-20 minutes)..."
sys.stdout.flush()
train_jpeg_files = []
for i,st in enumerate(synset_tars):
if i % 100 == 0:
print "%d%% ..." % int(round(100.0 * float(i) / len(synset_tars))),
sys.stdout.flush()
train_jpeg_files += [st.extractfile(m) for m in st.getmembers()]
st.close()
shuffle(train_jpeg_files)
train_labels = [[labels_dic[jpeg.name[:9]]] for jpeg in train_jpeg_files]
print "done"
# Write training batches
i = write_batches(args.tgt_dir, 'training', 0, train_labels, train_jpeg_files)
# Write validation batches
val_batch_start = int(math.ceil((i / 1000.0))) * 1000
with open_tar(ILSVRC_VALIDATION_TAR, 'validation tar') as tf:
validation_jpeg_files = sorted([tf.extractfile(m) for m in tf.getmembers()], key=lambda x:x.name)
write_batches(args.tgt_dir, 'validation', val_batch_start, validation_labels, validation_jpeg_files)
# Write meta file
meta = unpickle('input_meta')
meta_file = os.path.join(args.tgt_dir, 'batches.meta')
meta.update({'batch_size': OUTPUT_BATCH_SIZE,
'num_vis': OUTPUT_IMAGE_SIZE**2 * 3,
'label_names': label_names})
pickle(meta_file, meta)
print "Wrote %s" % meta_file
print "All done! ILSVRC 2012 batches are in %s" % args.tgt_dir
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
## @package process
# Module doxygen.process
# Script to insert preamble for doxygen and regen API docs
import glob, os, shutil
def insert(originalfile, first_line, description):
    with open(originalfile, 'r') as f:
        f1 = f.readline()
        if f1.find(first_line) < 0:
            docs = first_line + description + f1
            with open('newfile.txt', 'w') as f2:
                f2.write(docs)
                f2.write(f.read())
            os.rename('newfile.txt', originalfile)
        else:
            print('already inserted')
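# Sketch of the effect of insert() (hypothetical file name): for a file
# caffe2/python/foo.py the loop below builds first_line = "## @package foo"
# and description = "\n# Module caffe2.python.foo\n", so the preamble
#   ## @package foo
#   # Module caffe2.python.foo
# is prepended before the file's first line, unless the marker already exists.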
# move up from /caffe2_root/doxygen
os.chdir("..")
os.system("git checkout caffe2/contrib/.")
os.system("git checkout caffe2/distributed/.")
os.system("git checkout caffe2/experiments/.")
os.system("git checkout caffe2/python/.")
for root, dirs, files in os.walk("."):
for file in files:
if (file.endswith(".py") and not file.endswith("_test.py") and not file.endswith("__.py")):
filepath = os.path.join(root, file)
print("filepath: " + filepath)
directory = os.path.dirname(filepath)[2:]
directory = directory.replace("/",".")
print "directory: " + directory
name = os.path.splitext(file)[0]
first_line = "## @package " + name
description = "\n# Module " + directory + "." + name + "\n"
print first_line,description
insert(filepath,first_line,description)
if os.path.exists("doxygen/doxygen-python"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("doxygen/doxygen-python")
else:
os.makedirs("doxygen/doxygen-python")
if os.path.exists("doxygen/doxygen-c"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("doxygen/doxygen-c")
else:
os.makedirs("doxygen/doxygen-c")
os.system("doxygen .Doxyfile-python")
os.system("doxygen .Doxyfile-c")
|
## @package diagnose_protobuf
# Module scripts.diagnose_protobuf
"""Diagnoses the current protobuf situation.
Protocol buffer needs to be properly installed for Caffe2 to work, and
sometimes this is rather tricky: specifically, the C++ and Python versions
have to be consistent with each other. This is a convenience script to
quickly check whether that is the case on one's local machine.
Usage:
[set your environmental variables like PATH and PYTHONPATH]
python scripts/diagnose_protobuf.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
from subprocess import Popen, PIPE
# Get python protobuf version.
try:
import google.protobuf
python_version = google.protobuf.__version__
python_protobuf_installed = True
except ImportError:
print("DEBUG: cannot find python protobuf install.")
python_protobuf_installed = False
if os.name == 'nt':
protoc_name = 'protoc.exe'
else:
protoc_name = 'protoc'
try:
    p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
except OSError as e:
    # out/err never get assigned when Popen itself fails, so report the
    # exception instead of referencing undefined names
    print('DEBUG: did not find the protoc binary.')
    print('DEBUG: exception: ' + str(e))
    native_protobuf_installed = False
else:
    # communicate() returns bytes; decode so the prints and the regex below
    # behave the same on Python 2 and 3
    out = out.decode('utf-8')
    err = err.decode('utf-8')
    if p.returncode:
        print('DEBUG: protoc returned a non-zero return code.')
        print('DEBUG: out: ' + out)
        print('DEBUG: err: ' + err)
        native_protobuf_installed = False
    else:
        tmp = re.search(r'\d\.\d\.\d', out)
        if tmp:
            native_version = tmp.group(0)
            native_protobuf_installed = True
        else:
            print('DEBUG: cannot parse protoc version string.')
            print('DEBUG: out: ' + out)
            native_protobuf_installed = False
PYTHON_PROTOBUF_NOT_INSTALLED = """
You have not installed python protobuf. Protobuf is needed to run caffe2. You
can install protobuf via pip or conda (if you are using anaconda python).
"""
NATIVE_PROTOBUF_NOT_INSTALLED = """
You have not installed the protoc binary. Protoc is needed to compile Caffe2
protobuf source files. Depending on the platform you are on, you can install
protobuf via:
(1) Mac: using homebrew, do brew install protobuf.
(2) Linux: using apt, do apt-get install protobuf-compiler libprotobuf-dev
(3) Windows: install from source, or from the releases here:
https://github.com/google/protobuf/releases/
"""
VERSION_MISMATCH = """
Your python protobuf is of version {py_ver} but your native protoc version is of
version {native_ver}. This will cause the installation to produce incompatible
protobuf files. This is bad in general - consider installing the same version.
""".format(py_ver=python_version, native_ver=native_version)
# Now, give actual recommendations
if not python_protobuf_installed:
print(PYTHON_PROTOBUF_NOT_INSTALLED)
if not native_protobuf_installed:
print(NATIVE_PROTOBUF_NOT_INSTALLED)
if python_protobuf_installed and native_protobuf_installed:
    if python_version != native_version:
        print(VERSION_MISMATCH.format(
            py_ver=python_version, native_ver=native_version))
else:
print('All looks good.')
|
## @package get_python_cmake_flags
# Module scripts.get_python_cmake_flags
##############################################################################
# Use this script to find your preferred python installation.
##############################################################################
#
# You can use the following to build with your preferred version of python
# if your installation is not being properly detected by CMake.
#
# mkdir -p build && cd build
#  cmake $(python ../scripts/get_python_cmake_flags.py) ..
# make
#
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils import sysconfig
import os
import sys
# Flags to print to stdout
flags = ''
inc = sysconfig.get_python_inc()
lib = sysconfig.get_config_var("LIBDIR")
# macOS specific
if sys.platform == "darwin":
lib = os.path.dirname(lib) + '/Python'
if os.path.isfile(lib):
flags += '-DPYTHON_LIBRARY={lib} '.format(lib=lib)
if os.path.isfile(inc + '/Python.h'):
flags += '-DPYTHON_INCLUDE_DIR={inc} '.format(inc=inc)
print(flags, end='')
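# Hedged example of the output (paths vary per install): on a typical Linux
# system only the include flag passes the isfile() checks above, so this
# prints something like
#   -DPYTHON_INCLUDE_DIR=/usr/include/python2.7
# while macOS framework builds may additionally emit -DPYTHON_LIBRARY=... .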
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name="torchaudio",
version="0.1",
description="An audio package for PyTorch",
url="https://github.com/pytorch/audio",
author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough",
author_email="[email protected]",
install_requires=["torch>=0.4"],
setup_requires=["torch>=0.4"],
# Exclude the build files.
packages=find_packages(exclude=["build"]),
ext_modules=[
CppExtension(
'_torch_sox', ['torchaudio/torch_sox.cpp'], libraries=['sox']),
],
cmdclass={'build_ext': BuildExtension})
|
import unittest
import torch
import torchaudio
import math
import os
class Test_LoadSave(unittest.TestCase):
test_dirpath = os.path.dirname(os.path.realpath(__file__))
test_filepath = os.path.join(test_dirpath, "assets",
"steam-train-whistle-daniel_simon.mp3")
def test_load(self):
# check normal loading
x, sr = torchaudio.load(self.test_filepath)
self.assertEqual(sr, 44100)
self.assertEqual(x.size(), (278756, 2))
self.assertGreater(x.sum(), 0)
# check normalizing
x, sr = torchaudio.load(self.test_filepath, normalization=True)
self.assertTrue(x.min() >= -1.0)
self.assertTrue(x.max() <= 1.0)
# check raising errors
with self.assertRaises(OSError):
torchaudio.load("file-does-not-exist.mp3")
with self.assertRaises(OSError):
tdir = os.path.join(
os.path.dirname(self.test_dirpath), "torchaudio")
torchaudio.load(tdir)
def test_save(self):
# load signal
x, sr = torchaudio.load(self.test_filepath)
# check save
new_filepath = os.path.join(self.test_dirpath, "test.wav")
torchaudio.save(new_filepath, x, sr)
self.assertTrue(os.path.isfile(new_filepath))
os.unlink(new_filepath)
# check automatic normalization
x /= 1 << 31
torchaudio.save(new_filepath, x, sr)
self.assertTrue(os.path.isfile(new_filepath))
os.unlink(new_filepath)
# test save 1d tensor
x = x[:, 0] # get mono signal
x.squeeze_() # remove channel dim
torchaudio.save(new_filepath, x, sr)
self.assertTrue(os.path.isfile(new_filepath))
os.unlink(new_filepath)
# don't allow invalid sizes as inputs
with self.assertRaises(ValueError):
x.unsqueeze_(0) # N x L not L x N
torchaudio.save(new_filepath, x, sr)
with self.assertRaises(ValueError):
x.squeeze_()
x.unsqueeze_(1)
x.unsqueeze_(0) # 1 x L x 1
torchaudio.save(new_filepath, x, sr)
# automatically convert sr from floating point to int
x.squeeze_(0)
torchaudio.save(new_filepath, x, float(sr))
self.assertTrue(os.path.isfile(new_filepath))
os.unlink(new_filepath)
# don't allow uneven integers
with self.assertRaises(TypeError):
torchaudio.save(new_filepath, x, float(sr) + 0.5)
self.assertTrue(os.path.isfile(new_filepath))
os.unlink(new_filepath)
# don't save to folders that don't exist
with self.assertRaises(OSError):
new_filepath = os.path.join(self.test_dirpath, "no-path",
"test.wav")
torchaudio.save(new_filepath, x, sr)
# save created file
sinewave_filepath = os.path.join(self.test_dirpath, "assets",
"sinewave.wav")
sr = 16000
freq = 440
volume = 0.3
y = (torch.cos(
2 * math.pi * torch.arange(0, 4 * sr) * freq / sr)).float()
y.unsqueeze_(1)
# y is between -1 and 1, so must scale
y = (y * volume * 2**31).long()
torchaudio.save(sinewave_filepath, y, sr)
self.assertTrue(os.path.isfile(sinewave_filepath))
def test_load_and_save_is_identity(self):
input_path = os.path.join(self.test_dirpath, 'assets', 'sinewave.wav')
tensor, sample_rate = torchaudio.load(input_path)
output_path = os.path.join(self.test_dirpath, 'test.wav')
torchaudio.save(output_path, tensor, sample_rate)
tensor2, sample_rate2 = torchaudio.load(output_path)
self.assertTrue(tensor.allclose(tensor2))
self.assertEqual(sample_rate, sample_rate2)
os.unlink(output_path)
if __name__ == '__main__':
unittest.main()
|
from __future__ import print_function
import torch
import torchaudio
import torchaudio.transforms as transforms
import numpy as np
import unittest
class Tester(unittest.TestCase):
sr = 16000
freq = 440
volume = .3
sig = (torch.cos(2 * np.pi * torch.arange(0, 4 * sr) * freq / sr)).float()
# sig = (torch.cos((1+torch.arange(0, 4 * sr) * 2) / sr * 2 * np.pi * torch.arange(0, 4 * sr) * freq / sr)).float()
sig.unsqueeze_(1)
sig = (sig * volume * 2**31).long()
def test_scale(self):
audio_orig = self.sig.clone()
result = transforms.Scale()(audio_orig)
        self.assertTrue(result.min() >= -1. and result.max() <= 1.,
                        "min: {}, max: {}".format(result.min(), result.max()))
        maxminmax = np.abs(
            [audio_orig.min(), audio_orig.max()]).max().astype(np.float)
        result = transforms.Scale(factor=maxminmax)(audio_orig)
        self.assertTrue((result.min() == -1. or result.max() == 1.) and
                        result.min() >= -1. and result.max() <= 1.,
                        "min: {}, max: {}".format(result.min(), result.max()))
repr_test = transforms.Scale()
repr_test.__repr__()
def test_pad_trim(self):
audio_orig = self.sig.clone()
length_orig = audio_orig.size(0)
length_new = int(length_orig * 1.2)
result = transforms.PadTrim(max_len=length_new)(audio_orig)
        self.assertTrue(result.size(0) == length_new,
                        "old size: {}, new size: {}".format(audio_orig.size(0), result.size(0)))
audio_orig = self.sig.clone()
length_orig = audio_orig.size(0)
length_new = int(length_orig * 0.8)
result = transforms.PadTrim(max_len=length_new)(audio_orig)
        self.assertTrue(result.size(0) == length_new,
                        "old size: {}, new size: {}".format(audio_orig.size(0), result.size(0)))
repr_test = transforms.PadTrim(max_len=length_new)
repr_test.__repr__()
def test_downmix_mono(self):
audio_L = self.sig.clone()
audio_R = self.sig.clone()
R_idx = int(audio_R.size(0) * 0.1)
audio_R = torch.cat((audio_R[R_idx:], audio_R[:R_idx]))
audio_Stereo = torch.cat((audio_L, audio_R), dim=1)
self.assertTrue(audio_Stereo.size(1) == 2)
result = transforms.DownmixMono()(audio_Stereo)
self.assertTrue(result.size(1) == 1)
repr_test = transforms.DownmixMono()
repr_test.__repr__()
def test_lc2cl(self):
audio = self.sig.clone()
result = transforms.LC2CL()(audio)
self.assertTrue(result.size()[::-1] == audio.size())
repr_test = transforms.LC2CL()
repr_test.__repr__()
def test_mel(self):
audio = self.sig.clone()
audio = transforms.Scale()(audio)
self.assertTrue(audio.dim() == 2)
result = transforms.MEL()(audio)
self.assertTrue(result.dim() == 3)
result = transforms.BLC2CBL()(result)
self.assertTrue(result.dim() == 3)
repr_test = transforms.MEL()
repr_test.__repr__()
repr_test = transforms.BLC2CBL()
repr_test.__repr__()
def test_compose(self):
audio_orig = self.sig.clone()
length_orig = audio_orig.size(0)
length_new = int(length_orig * 1.2)
maxminmax = np.abs(
[audio_orig.min(), audio_orig.max()]).max().astype(np.float)
tset = (transforms.Scale(factor=maxminmax),
transforms.PadTrim(max_len=length_new))
result = transforms.Compose(tset)(audio_orig)
self.assertTrue(np.abs([result.min(), result.max()]).max() == 1.)
self.assertTrue(result.size(0) == length_new)
repr_test = transforms.Compose(tset)
repr_test.__repr__()
def test_mu_law_companding(self):
sig = self.sig.clone()
quantization_channels = 256
sig = self.sig.numpy()
sig = sig / np.abs(sig).max()
self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)
sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
        self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)
sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)
sig = self.sig.clone()
sig = sig / torch.abs(sig).max()
self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)
sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
        self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)
sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)
repr_test = transforms.MuLawEncoding(quantization_channels)
repr_test.__repr__()
repr_test = transforms.MuLawExpanding(quantization_channels)
repr_test.__repr__()
def test_mel2(self):
audio_orig = self.sig.clone() # (16000, 1)
audio_scaled = transforms.Scale()(audio_orig) # (16000, 1)
audio_scaled = transforms.LC2CL()(audio_scaled) # (1, 16000)
spectrogram_torch = transforms.MEL2()(audio_scaled) # (1, 319, 40)
self.assertTrue(spectrogram_torch.dim() == 3)
self.assertTrue(spectrogram_torch.max() <= 0.)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import torch
import torchaudio
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinxcontrib.googleanalytics',
]
napoleon_use_ivar = True
googleanalytics_id = 'UA-90545585-1'
googleanalytics_enabled = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Torchaudio'
copyright = '2017, Torch Contributors'
author = 'Torch Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'master'
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
}
html_logo = '_static/img/pytorch-logo-dark.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_style_path = 'css/pytorch_theme.css'
html_context = {
'css_files': [
'https://fonts.googleapis.com/css?family=Lato',
'_static/css/pytorch_theme.css'
],
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch.tex', 'torchaudio Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'torchaudio', 'torchaudio Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'torchaudio', 'torchaudio Documentation',
     author, 'torchaudio', 'An audio package for PyTorch.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# type: (List, unicode, Tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
typename = typename.replace('int', 'python:int')
typename = typename.replace('long', 'python:long')
typename = typename.replace('float', 'python:float')
typename = typename.replace('type', 'python:type')
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
|
from __future__ import division, print_function
import torch
from torch.autograd import Variable
import numpy as np
try:
import librosa
except ImportError:
librosa = None
def _check_is_variable(tensor):
if isinstance(tensor, torch.Tensor):
is_variable = False
tensor = Variable(tensor, requires_grad=False)
elif isinstance(tensor, Variable):
is_variable = True
else:
raise TypeError("tensor should be a Variable or Tensor, but is {}".format(type(tensor)))
return tensor, is_variable
def _tlog10(x):
"""Pytorch Log10
"""
return torch.log(x) / torch.log(x.new([10]))
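# _tlog10 relies on the change-of-base identity log10(x) = ln(x) / ln(10);
# e.g. for x = 100 it computes 4.6052 / 2.3026 = 2.0.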
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.Scale(),
>>> transforms.PadTrim(max_len=16000),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, audio):
for t in self.transforms:
audio = t(audio)
return audio
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Scale(object):
"""Scale audio tensor from a 16-bit integer (represented as a FloatTensor)
to a floating point number between -1.0 and 1.0. Note the 16-bit number is
called the "bit depth" or "precision", not to be confused with "bit rate".
Args:
factor (int): maximum value of input tensor. default: 16-bit depth
"""
def __init__(self, factor=2**31):
self.factor = factor
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of audio of size (Samples x Channels)
Returns:
Tensor: Scaled by the scale factor. (default between -1.0 and 1.0)
"""
if isinstance(tensor, (torch.LongTensor, torch.IntTensor)):
tensor = tensor.float()
return tensor / self.factor
def __repr__(self):
return self.__class__.__name__ + '()'
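# Hedged usage sketch for Scale: sox-decoded audio arrives as 32-bit integer
# samples, so the default factor of 2**31 maps them into [-1.0, 1.0]:
#   sig, sr = torchaudio.load("foo.wav")  # integer-valued samples
#   sig = Scale()(sig)                    # FloatTensor in [-1.0, 1.0]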
class PadTrim(object):
"""Pad/Trim a 1d-Tensor (Signal or Labels)
Args:
tensor (Tensor): Tensor of audio of size (Samples x Channels)
max_len (int): Length to which the tensor will be padded
"""
def __init__(self, max_len, fill_value=0):
self.max_len = max_len
self.fill_value = fill_value
def __call__(self, tensor):
"""
Returns:
Tensor: (max_len x Channels)
"""
if self.max_len > tensor.size(0):
pad = torch.ones((self.max_len - tensor.size(0),
tensor.size(1))) * self.fill_value
pad = pad.type_as(tensor)
tensor = torch.cat((tensor, pad), dim=0)
elif self.max_len < tensor.size(0):
tensor = tensor[:self.max_len, :]
return tensor
def __repr__(self):
return self.__class__.__name__ + '(max_len={0})'.format(self.max_len)
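# Shape sketch for PadTrim: given a (1000 x 2) signal,
#   PadTrim(max_len=1200)(sig)  ->  (1200 x 2), last 200 rows = fill_value
#   PadTrim(max_len=800)(sig)   ->  (800 x 2), tail trimmed away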
class DownmixMono(object):
"""Downmix any stereo signals to mono
Inputs:
tensor (Tensor): Tensor of audio of size (Samples x Channels)
Returns:
tensor (Tensor) (Samples x 1):
"""
def __init__(self):
pass
def __call__(self, tensor):
if isinstance(tensor, (torch.LongTensor, torch.IntTensor)):
tensor = tensor.float()
if tensor.size(1) > 1:
tensor = torch.mean(tensor.float(), 1, True)
return tensor
def __repr__(self):
return self.__class__.__name__ + '()'
class LC2CL(object):
"""Permute a 2d tensor from samples (Length) x Channels to Channels x
samples (Length)
"""
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of audio signal with shape (LxC)
Returns:
tensor (Tensor): Tensor of audio signal with shape (CxL)
"""
return tensor.transpose(0, 1).contiguous()
def __repr__(self):
return self.__class__.__name__ + '()'
class SPECTROGRAM(object):
"""Create a spectrogram from a raw audio signal
Args:
sr (int): sample rate of audio signal
ws (int): window size, often called the fft size as well
hop (int, optional): length of hop between STFT windows. default: ws // 2
n_fft (int, optional): number of fft bins. default: ws // 2 + 1
pad (int): two sided padding of signal
window (torch windowing function): default: torch.hann_window
wkwargs (dict, optional): arguments for window function
"""
def __init__(self, sr=16000, ws=400, hop=None, n_fft=None,
pad=0, window=torch.hann_window, wkwargs=None):
if isinstance(window, Variable):
self.window = window
else:
self.window = window(ws) if wkwargs is None else window(ws, **wkwargs)
self.window = Variable(self.window, volatile=True)
self.sr = sr
self.ws = ws
self.hop = hop if hop is not None else ws // 2
self.n_fft = n_fft # number of fft bins
self.pad = pad
self.wkwargs = wkwargs
def __call__(self, sig):
"""
Args:
sig (Tensor or Variable): Tensor of audio of size (c, n)
Returns:
spec_f (Tensor or Variable): channels x hops x n_fft (c, l, f), where channels
is unchanged, hops is the number of hops, and n_fft is the
number of fourier bins, which should be the window size divided
by 2 plus 1.
"""
sig, is_variable = _check_is_variable(sig)
assert sig.dim() == 2
spec_f = torch.stft(sig, self.ws, self.hop, self.n_fft,
True, True, self.window, self.pad) # (c, l, n_fft, 2)
spec_f /= self.window.pow(2).sum().sqrt()
spec_f = spec_f.pow(2).sum(-1) # get power of "complex" tensor (c, l, n_fft)
return spec_f if is_variable else spec_f.data
class F2M(object):
"""This turns a normal STFT into a MEL Frequency STFT, using a conversion
matrix. This uses triangular filter banks.
Args:
n_mels (int): number of MEL bins
sr (int): sample rate of audio signal
f_max (float, optional): maximum frequency. default: sr // 2
f_min (float): minimum frequency. default: 0
"""
def __init__(self, n_mels=40, sr=16000, f_max=None, f_min=0.):
self.n_mels = n_mels
self.sr = sr
self.f_max = f_max if f_max is not None else sr // 2
self.f_min = f_min
def __call__(self, spec_f):
spec_f, is_variable = _check_is_variable(spec_f)
n_fft = spec_f.size(2)
m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
m_max = 2595 * np.log10(1. + (self.f_max / 700))
m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
f_pts = (700 * (10**(m_pts / 2595) - 1))
bins = torch.floor(((n_fft - 1) * 2) * f_pts / self.sr).long()
fb = torch.zeros(n_fft, self.n_mels)
for m in range(1, self.n_mels + 1):
f_m_minus = bins[m - 1].item()
f_m = bins[m].item()
f_m_plus = bins[m + 1].item()
if f_m_minus != f_m:
fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
if f_m != f_m_plus:
fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)
fb = Variable(fb)
spec_m = torch.matmul(spec_f, fb) # (c, l, n_fft) dot (n_fft, n_mels) -> (c, l, n_mels)
return spec_m if is_variable else spec_m.data
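# The mel scale used by F2M is m = 2595 * log10(1 + f / 700). As a worked
# check, f = 1000 Hz gives m = 2595 * log10(1 + 1000 / 700), which is about
# 2595 * 0.3854, i.e. roughly 1000 mels, the scale's defining anchor point.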
class SPEC2DB(object):
"""Turns a spectrogram from the power/amplitude scale to the decibel scale.
Args:
stype (str): scale of input spectrogram ("power" or "magnitude"). The
power being the elementwise square of the magnitude. default: "power"
top_db (float, optional): minimum negative cut-off in decibels. A reasonable number
is -80.
"""
def __init__(self, stype="power", top_db=None):
self.stype = stype
self.top_db = -top_db if top_db > 0 else top_db
self.multiplier = 10. if stype == "power" else 20.
def __call__(self, spec):
spec, is_variable = _check_is_variable(spec)
spec_db = self.multiplier * _tlog10(spec / spec.max()) # power -> dB
if self.top_db is not None:
spec_db = torch.max(spec_db, spec_db.new([self.top_db]))
return spec_db if is_variable else spec_db.data
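# Worked example for SPEC2DB: on the "power" scale the multiplier is 10, so a
# bin at 0.001 of the spectrogram's maximum maps to 10 * log10(0.001) = -30 dB,
# and with top_db=80 anything below -80 dB is clamped up to -80.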
class MEL2(object):
"""Create MEL Spectrograms from a raw audio signal using the stft
function in PyTorch. Hopefully this solves the speed issue of using
librosa.
Sources:
* https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
* https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html
* http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
Args:
sr (int): sample rate of audio signal
ws (int): window size, often called the fft size as well
hop (int, optional): length of hop between STFT windows. default: ws // 2
n_fft (int, optional): number of fft bins. default: ws // 2 + 1
pad (int): two sided padding of signal
n_mels (int): number of MEL bins
window (torch windowing function): default: torch.hann_window
wkwargs (dict, optional): arguments for window function
Example:
>>> sig, sr = torchaudio.load("test.wav", normalization=True)
>>> sig = transforms.LC2CL()(sig) # (n, c) -> (c, n)
>>> spec_mel = transforms.MEL2(sr)(sig) # (c, l, m)
"""
def __init__(self, sr=16000, ws=400, hop=None, n_fft=None,
pad=0, n_mels=40, window=torch.hann_window, wkwargs=None):
self.window = window(ws) if wkwargs is None else window(ws, **wkwargs)
self.window = Variable(self.window, requires_grad=False)
self.sr = sr
self.ws = ws
self.hop = hop if hop is not None else ws // 2
self.n_fft = n_fft # number of fourier bins (ws // 2 + 1 by default)
self.pad = pad
self.n_mels = n_mels # number of mel frequency bins
self.wkwargs = wkwargs
self.top_db = -80.
self.f_max = None
self.f_min = 0.
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of audio of size (channels [c], samples [n])
Returns:
spec_mel_db (Tensor): channels x hops x n_mels (c, l, m), where channels
is unchanged, hops is the number of hops, and n_mels is the
number of mel bins.
"""
sig, is_variable = _check_is_variable(sig)
transforms = Compose([
SPECTROGRAM(self.sr, self.ws, self.hop, self.n_fft,
self.pad, self.window),
F2M(self.n_mels, self.sr, self.f_max, self.f_min),
SPEC2DB("power", self.top_db),
])
spec_mel_db = transforms(sig)
return spec_mel_db if is_variable else spec_mel_db.data
class MEL(object):
"""Create MEL Spectrograms from a raw audio signal. Relatively pretty slow.
Usage (see librosa.feature.melspectrogram docs):
MEL(sr=16000, n_fft=1600, hop_length=800, n_mels=64)
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of audio of size (samples [n] x channels [c])
Returns:
tensor (Tensor): n_mels x hops x channels (BxLxC), where n_mels is
the number of mel bins, hops is the number of hops, and channels
is unchanged.
"""
if librosa is None:
print("librosa not installed, cannot create spectrograms")
return tensor
L = []
for i in range(tensor.size(1)):
nparr = tensor[:, i].numpy() # (samples, )
sgram = librosa.feature.melspectrogram(
nparr, **self.kwargs) # (n_mels, hops)
L.append(sgram)
L = np.stack(L, 2) # (n_mels, hops, channels)
tensor = torch.from_numpy(L).type_as(tensor)
return tensor
def __repr__(self):
return self.__class__.__name__ + '()'
class BLC2CBL(object):
"""Permute a 3d tensor from Bands x samples (Length) x Channels to Channels x
Bands x samples (Length)
"""
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of spectrogram with shape (BxLxC)
Returns:
tensor (Tensor): Tensor of spectrogram with shape (CxBxL)
"""
return tensor.permute(2, 0, 1).contiguous()
def __repr__(self):
return self.__class__.__name__ + '()'
class MuLawEncoding(object):
"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
Args:
quantization_channels (int): Number of channels. default: 256
"""
def __init__(self, quantization_channels=256):
self.qc = quantization_channels
def __call__(self, x):
"""
Args:
x (FloatTensor/LongTensor or ndarray)
Returns:
x_mu (LongTensor or ndarray)
"""
mu = self.qc - 1.
if isinstance(x, np.ndarray):
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
elif isinstance(x, (torch.Tensor, torch.LongTensor)):
if isinstance(x, torch.LongTensor):
x = x.float()
mu = torch.FloatTensor([mu])
x_mu = torch.sign(x) * torch.log1p(mu *
torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
return x_mu
def __repr__(self):
return self.__class__.__name__ + '()'
class MuLawExpanding(object):
"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
quantization_channels (int): Number of channels. default: 256
"""
def __init__(self, quantization_channels=256):
self.qc = quantization_channels
def __call__(self, x_mu):
"""
Args:
x_mu (FloatTensor/LongTensor or ndarray)
Returns:
x (FloatTensor or ndarray)
"""
mu = self.qc - 1.
if isinstance(x_mu, np.ndarray):
x = ((x_mu) / mu) * 2 - 1.
x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
if isinstance(x_mu, torch.LongTensor):
x_mu = x_mu.float()
mu = torch.FloatTensor([mu])
x = ((x_mu) / mu) * 2 - 1.
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
return x
def __repr__(self):
return self.__class__.__name__ + '()'
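# A hedged self-check (not part of the library API): the mu-law pair above
# should round-trip a [-1, 1] signal up to quantization error, which is on
# the order of 1e-2 near full scale for 256 channels.
if __name__ == '__main__':
    _x = torch.rand(1000) * 2 - 1  # float signal in [-1, 1)
    _x_rt = MuLawExpanding(256)(MuLawEncoding(256)(_x))
    print('mu-law round-trip max error:', float((_x - _x_rt).abs().max()))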
|
import os.path
import torch
import _torch_sox
from torchaudio import transforms
from torchaudio import datasets
def get_tensor_type_name(tensor):
return tensor.type().replace('torch.', '').replace('Tensor', '')
def check_input(src):
if not torch.is_tensor(src):
raise TypeError('Expected a tensor, got %s' % type(src))
if src.is_cuda:
raise TypeError('Expected a CPU based tensor, got %s' % type(src))
def load(filepath, out=None, normalization=None):
"""Loads an audio file from disk into a Tensor
Args:
filepath (string): path to audio file
out (Tensor, optional): an output Tensor to use instead of creating one
normalization (bool or number, optional): If boolean `True`, then output is divided by `1 << 31`
            (assumes 32-bit depth audio, and normalizes to `[-1, 1]`).
If `number`, then output is divided by that number
Returns: tuple(Tensor, int)
- Tensor: output Tensor of size `[L x C]` where L is the number of audio frames, C is the number of channels
- int: the sample-rate of the audio (as listed in the metadata of the file)
Example::
>>> data, sample_rate = torchaudio.load('foo.mp3')
>>> print(data.size())
torch.Size([278756, 2])
>>> print(sample_rate)
44100
"""
# check if valid file
if not os.path.isfile(filepath):
raise OSError("{} not found or is a directory".format(filepath))
# initialize output tensor
if out is not None:
check_input(out)
else:
out = torch.FloatTensor()
sample_rate = _torch_sox.read_audio_file(filepath, out)
# normalize if needed
if isinstance(normalization, bool) and normalization:
        out /= 1 << 31 # assuming 32-bit depth
elif isinstance(normalization, (float, int)):
out /= normalization # normalize with custom value
return out, sample_rate
def save(filepath, src, sample_rate):
"""Saves a Tensor with audio signal to disk as a standard format like mp3, wav, etc.
Args:
filepath (string): path to audio file
src (Tensor): an input 2D Tensor of shape `[L x C]` where L is
the number of audio frames, C is the number of channels
sample_rate (int): the sample-rate of the audio to be saved
Example::
>>> data, sample_rate = torchaudio.load('foo.mp3')
>>> torchaudio.save('foo.wav', data, sample_rate)
"""
# check if save directory exists
abs_dirpath = os.path.dirname(os.path.abspath(filepath))
if not os.path.isdir(abs_dirpath):
raise OSError("Directory does not exist: {}".format(abs_dirpath))
# Check/Fix shape of source data
if len(src.size()) == 1:
        # 1d tensors are assumed to be mono signals
src.unsqueeze_(1)
elif len(src.size()) > 2 or src.size(1) > 2:
raise ValueError(
"Expected format (L x N), N = 1 or 2, but found {}".format(src.size()))
# check if sample_rate is an integer
if not isinstance(sample_rate, int):
if int(sample_rate) == sample_rate:
sample_rate = int(sample_rate)
else:
            raise TypeError('Sample rate should be an integer')
    # programs such as librosa normalize the signal; undo that if detected
    if src.min() >= -1.0 and src.max() <= 1.0:
        src = src * (1 << 31)  # assuming 32-bit depth
src = src.long()
# save data to file
extension = os.path.splitext(filepath)[1]
check_input(src)
_torch_sox.write_audio_file(filepath, src, extension[1:], sample_rate)
|
from .yesno import YESNO
from .vctk import VCTK
__all__ = ('YESNO', 'VCTK')
|
from __future__ import print_function
import torch.utils.data as data
import os
import os.path
import shutil
import errno
import torch
import torchaudio
class YESNO(data.Dataset):
"""`YesNo Hebrew <http://www.openslr.org/1/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
        transform (callable, optional): A function/transform that takes in a
            raw audio tensor and returns a transformed version. E.g., ``transforms.Scale``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
dev_mode(bool, optional): if true, clean up is not performed on downloaded
files. Useful to keep raw audio and transcriptions.
"""
raw_folder = 'yesno/raw'
processed_folder = 'yesno/processed'
url = 'http://www.openslr.org/resources/1/waves_yesno.tar.gz'
dset_path = 'waves_yesno'
processed_file = 'yesno.pt'
def __init__(self, root, transform=None, target_transform=None, download=False, dev_mode=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.dev_mode = dev_mode
self.data = []
self.labels = []
self.num_samples = 0
self.max_len = 0
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
self.data, self.labels = torch.load(os.path.join(
self.root, self.processed_folder, self.processed_file))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            tuple: (audio, target) where target is the list of labels parsed from the file name.
"""
audio, target = self.data[index], self.labels[index]
if self.transform is not None:
audio = self.transform(audio)
if self.target_transform is not None:
target = self.target_transform(target)
return audio, target
def __len__(self):
return len(self.data)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.processed_file))
def download(self):
"""Download the yesno data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import tarfile
if self._check_exists():
return
raw_abs_dir = os.path.join(self.root, self.raw_folder)
processed_abs_dir = os.path.join(self.root, self.processed_folder)
dset_abs_path = os.path.join(
self.root, self.raw_folder, self.dset_path)
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
url = self.url
print('Downloading ' + url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
if not os.path.isfile(file_path):
urllib.request.urlretrieve(url, file_path)
else:
print("Tar file already downloaded")
if not os.path.exists(dset_abs_path):
with tarfile.open(file_path) as zip_f:
zip_f.extractall(raw_abs_dir)
else:
print("Tar file already extracted")
if not self.dev_mode:
os.unlink(file_path)
# process and save as torch files
print('Processing...')
shutil.copyfile(
os.path.join(dset_abs_path, "README"),
os.path.join(processed_abs_dir, "YESNO_README")
)
audios = [x for x in os.listdir(dset_abs_path) if ".wav" in x]
print("Found {} audio files".format(len(audios)))
tensors = []
labels = []
lengths = []
for i, f in enumerate(audios):
full_path = os.path.join(dset_abs_path, f)
sig, sr = torchaudio.load(full_path)
tensors.append(sig)
lengths.append(sig.size(0))
labels.append(os.path.basename(f).split(".", 1)[0].split("_"))
# sort sigs/labels: longest -> shortest
tensors, labels = zip(*[(b, c) for (a, b, c) in sorted(
zip(lengths, tensors, labels), key=lambda x: x[0], reverse=True)])
self.max_len = tensors[0].size(0)
torch.save(
(tensors, labels),
os.path.join(
self.root,
self.processed_folder,
self.processed_file
)
)
if not self.dev_mode:
shutil.rmtree(raw_abs_dir, ignore_errors=True)
print('Done!')
|
from __future__ import print_function
import torch.utils.data as data
import os
import os.path
import shutil
import errno
import torch
import torchaudio
AUDIO_EXTENSIONS = [
'.wav', '.mp3', '.flac', '.sph', '.ogg', '.opus',
'.WAV', '.MP3', '.FLAC', '.SPH', '.OGG', '.OPUS',
]
def is_audio_file(filename):
return any(filename.endswith(extension) for extension in AUDIO_EXTENSIONS)
def make_manifest(dir):
audios = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in fnames:
if is_audio_file(fname):
path = os.path.join(root, fname)
item = path
audios.append(item)
return audios
def read_audio(fp, downsample=True):
sig, sr = torchaudio.load(fp)
if downsample:
        # 48 kHz -> 16 kHz
if sig.size(0) % 3 == 0:
sig = sig[::3].contiguous()
else:
sig = sig[:-(sig.size(0) % 3):3].contiguous()
return sig, sr
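# Worked example of the stride-3 downsampling above: for a length-10 signal,
# 10 % 3 == 1, so sig[:-1:3] keeps indices 0, 3 and 6 (length 3); for a
# length-9 signal the remainder is 0 and sig[::3] keeps 0, 3 and 6 directly.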
def load_txts(dir):
"""Create a dictionary with all the text of the audio transcriptions."""
utterences = dict()
txts = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in fnames:
if fname.endswith(".txt"):
with open(os.path.join(root, fname), "r") as f:
fname_no_ext = os.path.basename(
fname).rsplit(".", 1)[0]
utterences[fname_no_ext] = f.readline()
return utterences
class VCTK(data.Dataset):
"""`VCTK <http://homepages.inf.ed.ac.uk/jyamagis/page3/page58/page58.html>`_ Dataset.
`alternate url <http://datashare.is.ed.ac.uk/handle/10283/2651>`
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
        transform (callable, optional): A function/transform that takes in a
            raw audio tensor and returns a transformed version. E.g., ``transforms.Scale``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
dev_mode(bool, optional): if true, clean up is not performed on downloaded
files. Useful to keep raw audio and transcriptions.
"""
raw_folder = 'vctk/raw'
processed_folder = 'vctk/processed'
url = 'http://homepages.inf.ed.ac.uk/jyamagis/release/VCTK-Corpus.tar.gz'
dset_path = 'VCTK-Corpus'
def __init__(self, root, downsample=True, transform=None, target_transform=None, download=False, dev_mode=False):
self.root = os.path.expanduser(root)
self.downsample = downsample
self.transform = transform
self.target_transform = target_transform
self.dev_mode = dev_mode
self.data = []
self.labels = []
self.chunk_size = 1000
self.num_samples = 0
self.max_len = 0
self.cached_pt = 0
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
self._read_info()
self.data, self.labels = torch.load(os.path.join(
self.root, self.processed_folder, "vctk_{:04d}.pt".format(self.cached_pt)))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            tuple: (audio, target) where target is the transcription of the utterance.
"""
if self.cached_pt != index // self.chunk_size:
self.cached_pt = int(index // self.chunk_size)
self.data, self.labels = torch.load(os.path.join(
self.root, self.processed_folder, "vctk_{:04d}.pt".format(self.cached_pt)))
index = index % self.chunk_size
audio, target = self.data[index], self.labels[index]
if self.transform is not None:
audio = self.transform(audio)
if self.target_transform is not None:
target = self.target_transform(target)
return audio, target
def __len__(self):
return self.num_samples
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, "vctk_info.txt"))
def _write_info(self, num_items):
info_path = os.path.join(
self.root, self.processed_folder, "vctk_info.txt")
with open(info_path, "w") as f:
f.write("num_samples,{}\n".format(num_items))
f.write("max_len,{}\n".format(self.max_len))
def _read_info(self):
info_path = os.path.join(
self.root, self.processed_folder, "vctk_info.txt")
with open(info_path, "r") as f:
self.num_samples = int(f.readline().split(",")[1])
self.max_len = int(f.readline().split(",")[1])
def download(self):
"""Download the VCTK data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import tarfile
if self._check_exists():
return
raw_abs_dir = os.path.join(self.root, self.raw_folder)
processed_abs_dir = os.path.join(self.root, self.processed_folder)
dset_abs_path = os.path.join(
self.root, self.raw_folder, self.dset_path)
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
url = self.url
print('Downloading ' + url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
if not os.path.isfile(file_path):
urllib.request.urlretrieve(url, file_path)
if not os.path.exists(dset_abs_path):
with tarfile.open(file_path) as zip_f:
zip_f.extractall(raw_abs_dir)
else:
print("Using existing raw folder")
if not self.dev_mode:
os.unlink(file_path)
# process and save as torch files
print('Processing...')
shutil.copyfile(
os.path.join(dset_abs_path, "COPYING"),
os.path.join(processed_abs_dir, "VCTK_COPYING")
)
audios = make_manifest(dset_abs_path)
utterences = load_txts(dset_abs_path)
self.max_len = 0
print("Found {} audio files and {} utterences".format(
len(audios), len(utterences)))
for n in range(len(audios) // self.chunk_size + 1):
tensors = []
labels = []
lengths = []
st_idx = n * self.chunk_size
end_idx = st_idx + self.chunk_size
for i, f in enumerate(audios[st_idx:end_idx]):
txt_dir = os.path.dirname(f).replace("wav48", "txt")
if os.path.exists(txt_dir):
f_rel_no_ext = os.path.basename(f).rsplit(".", 1)[0]
sig = read_audio(f, downsample=self.downsample)[0]
tensors.append(sig)
lengths.append(sig.size(0))
labels.append(utterences[f_rel_no_ext])
self.max_len = sig.size(0) if sig.size(
0) > self.max_len else self.max_len
            if len(tensors) == 0:
                break  # the final chunk is empty when chunk_size divides len(audios) evenly
            # sort sigs/labels: longest -> shortest
tensors, labels = zip(*[(b, c) for (a, b, c) in sorted(
zip(lengths, tensors, labels), key=lambda x: x[0], reverse=True)])
data = (tensors, labels)
torch.save(
data,
os.path.join(
self.root,
self.processed_folder,
"vctk_{:04d}.pt".format(n)
)
)
self._write_info((n * self.chunk_size) + i + 1)
if not self.dev_mode:
shutil.rmtree(raw_abs_dir, ignore_errors=True)
print('Done!')
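# Minimal usage sketch (assumes the download URL above is still live and
# torchaudio can decode the corpus):
#
#   dset = VCTK("./data", download=True)
#   waveform, transcript = dset[0]
#   print(len(dset), waveform.size())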
|
#! /usr/bin/env python
# MEGA TEST
#
# usage: mega-test.py <config>
#
# This runs several tests in parallel and shows progress bars for each, based on a config file.
#
# <config> is a file containing a list of commands to run along with the expected number of lines
# they will output (to stdout and stderr combined), which is how the progress bar is calculated.
# The format of the file is simply one test per line, with the line containing the test name,
# the number of output lines expected, and the test command. Example:
#
# mytest 1523 ./my-test --foo bar
# another 862 ./another-test --baz
#
# Each command is interpreted by `sh -euc`, therefore it is acceptable to use environment
# variables and other shell syntax.
#
# After all tests complete, the config file will be rewritten to update the line counts to the
# actual number of lines seen for all passing tests (failing tests are not updated).
import sys
import re
import os
from errno import EAGAIN
from fcntl import fcntl, F_GETFL, F_SETFL
from select import poll, POLLIN, POLLHUP
from subprocess import Popen, PIPE, STDOUT
CONFIG_LINE = re.compile("^([^ ]+) +([0-9]+) +(.*)$")
if len(sys.argv) != 2:
sys.stderr.write("Wrong number of arguments.\n");
sys.exit(1)
if not os.access("/tmp/test-output", os.F_OK):
os.mkdir("/tmp/test-output")
config = open(sys.argv[1], 'r')
tests = []
class Test:
def __init__(self, name, command, lines):
self.name = name
self.command = command
self.lines = lines
self.count = 0
self.done = False
def start(self, poller):
        self.proc = Popen(["sh", "-euc", self.command], stdin=dev_null, stdout=PIPE, stderr=STDOUT)
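        # Switch the child's combined stdout/stderr pipe to non-blocking
        # mode so update() can drain whatever is available without
        # stalling the other tests.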
fd = self.proc.stdout.fileno()
flags = fcntl(fd, F_GETFL)
fcntl(fd, F_SETFL, flags | os.O_NONBLOCK)
poller.register(self.proc.stdout, POLLIN)
self.log = open("/tmp/test-output/" + self.name + ".log", "w")
def update(self):
try:
while True:
text = self.proc.stdout.read()
if text == "":
self.proc.wait()
self.done = True
self.log.close()
return True
self.count += text.count("\n")
self.log.write(text)
except IOError as e:
if e.errno == EAGAIN:
return False
raise
def print_bar(self):
percent = self.count * 100 / self.lines
status = "(%3d%%)" % percent
color_on = ""
color_off = ""
if self.done:
if self.proc.returncode == 0:
color_on = "\033[0;32m"
status = "PASS"
else:
color_on = "\033[0;31m"
status = "FAIL: /tmp/test-output/%s.log" % self.name
color_off = "\033[0m"
print "%s%-16s |%-25s| %6d/%6d %s%s " % (
color_on, self.name, '=' * min(percent / 4, 25), self.count, self.lines, status, color_off)
def passed(self):
return self.proc.returncode == 0
for line in config:
    line = line.strip()
    if len(line) > 0 and not line.startswith("#"):
        match = CONFIG_LINE.match(line)
if not match:
sys.stderr.write("Invalid config syntax: %s\n" % line);
sys.exit(1)
test = Test(match.group(1), match.group(3), int(match.group(2)))
tests.append(test)
config.close()
dev_null = open("/dev/null", "r")  # used only as the children's stdin
poller = poll()
fd_map = {}
for test in tests:
test.start(poller)
fd_map[test.proc.stdout.fileno()] = test
active_count = len(tests)
def print_bars():
for test in tests:
test.print_bar()
print_bars()
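# Event loop: block until any test's pipe is readable, drain it, then
# redraw every progress bar in place (the ANSI escape "\033[<n>A" moves
# the cursor back up over the bars).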
while active_count > 0:
for (fd, event) in poller.poll():
if fd_map[fd].update():
active_count -= 1
poller.unregister(fd)
sys.stdout.write("\033[%dA\r" % len(tests))
print_bars()
new_config = open(sys.argv[1], "w")
for test in tests:
if test.passed():
new_config.write("%-16s %6d %s\n" % (test.name, test.count, test.command))
else:
new_config.write("%-16s %6d %s\n" % (test.name, test.lines, test.command))
for test in tests:
if not test.passed():
sys.exit(1)
sys.exit(0)
|
#! /usr/bin/env python
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
name = "Cap'n Proto lexer"
aliases = ['capnp']
filenames = ['*.capnp']
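    # One state per lexical context. The empty-pattern rules (r'', ...,
    # '#pop') always match and act as unconditional fallbacks back to the
    # enclosing state -- equivalent to Pygments' default('#pop').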
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed|bulk|realtime)\b',
Token.Keyword),
(r'[a-zA-Z0-9_.]+', Token.Name),
(r'[^#@=:$a-zA-Z0-9_]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
(r'', Name.Class, '#pop')
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
(r'', Name.Class, '#pop')
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
(r'', Literal, '#pop')
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
(r'', Literal, '#pop')
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
(r'', Name.Attribute, '#pop')
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
(r'', Name.Attribute, '#pop')
],
}
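# Installing this module (e.g. `python capnp_lexer.py install`) registers
# the lexer with Pygments through the setuptools entry point below, after
# which `pygmentize -l capnp` should pick it up.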
if __name__ == "__main__":
from setuptools import setup, find_packages
    setup(name="CapnpPygmentsLexer",
          version="0.1",
          packages=find_packages(),
          py_modules=['capnp_lexer'],
          entry_points={'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
|
# Frequent English words, frequencies per billion words
# obtained from http://en.wiktionary.org/
frequencies = {
"the" : 56271872,
"of" : 33950064,
"and" : 29944184,
"to" : 25956096,
"in" : 17420636,
"i" : 11764797,
"that" : 11073318,
"was" : 10078245,
"his" : 8799755,
"he" : 8397205,
"it" : 8058110,
"with" : 7725512,
"is" : 7557477,
"for" : 7097981,
"as" : 7037543,
"had" : 6139336,
"you" : 6048903,
"not" : 5741803,
"be" : 5662527,
"her" : 5202501,
"on" : 5113263,
"at" : 5091841,
"by" : 5061050,
"which" : 4580906,
"have" : 4346500,
"or" : 4228287,
"from" : 4108111,
"this" : 4015425,
"him" : 3971997,
"but" : 3894211,
"all" : 3703342,
"she" : 3415846,
"they" : 3340398,
"were" : 3323884,
"my" : 3277699,
"are" : 3224178,
"me" : 3027134,
"one" : 2832569,
"their" : 2820265,
"so" : 2802481,
"an" : 2641417,
"said" : 2637136,
"them" : 2509917,
"we" : 2491655,
"who" : 2472663,
"would" : 2400858,
"been" : 2357654,
"will" : 2320022,
"no" : 2241145,
"when" : 1980046,
"there" : 1961200,
"if" : 1951102,
"more" : 1899787,
"out" : 1875351,
"up" : 1792712,
"into" : 1703963,
"do" : 1680164,
"any" : 1665366,
"your" : 1658553,
"what" : 1605908,
"has" : 1602329,
"man" : 1573117,
"could" : 1571110,
"other" : 1533530,
"than" : 1508779,
"our" : 1498473,
"some" : 1476767,
"very" : 1462382,
"time" : 1449681,
"upon" : 1424595,
"about" : 1414687,
"may" : 1400642,
"its" : 1373270,
"only" : 1318367,
"now" : 1317723,
"like" : 1280625,
"little" : 1273589,
"then" : 1255636,
"can" : 1210074,
"should" : 1192154,
"made" : 1188501,
"did" : 1185720,
"us" : 1171742,
"such" : 1136757,
"a" : 1135294,
"great" : 1120163,
"before" : 1117089,
"must" : 1108116,
"two" : 1093366,
"these" : 1090510,
"see" : 1084286,
"know" : 1075612,
"over" : 1056659,
"much" : 1021822,
"down" : 989808,
"after" : 978575,
"first" : 978196,
"mr|mr" : 974419,
"good" : 966602,
"men" : 923053,
"own" : 922130,
"never" : 899673,
"most" : 889691,
"old" : 887917,
"shall" : 883846,
"day" : 882331,
"where" : 881975,
"those" : 878621,
"came" : 873144,
"come" : 873007,
"himself" : 863478,
"way" : 860027,
"work" : 829823,
"life" : 825485,
"without" : 819684,
"go" : 816536,
"make" : 807600,
"well" : 799596,
"through" : 792925,
"being" : 792220,
"long" : 791686,
"say" : 788124,
"might" : 787455,
"how" : 770603,
"am" : 761957,
"too" : 758856,
"even" : 750750,
"def" : 748992,
"again" : 745230,
"many" : 744168,
"back" : 740270,
"here" : 729829,
"think" : 715780,
"every" : 704444,
"people" : 701741,
"went" : 690186,
"same" : 689376,
"last" : 680833,
"thought" : 674623,
"away" : 673810,
"under" : 671168,
"take" : 656486,
"found" : 654512,
"hand" : 648227,
"eyes" : 647788,
"still" : 640067,
"place" : 621773,
"while" : 613918,
"just" : 610168,
"also" : 608042,
"young" : 591821,
"yet" : 588615,
"though" : 570877,
"against" : 569459,
"things" : 567559,
"get" : 564674,
"ever" : 559207,
"give" : 554003,
"god" : 552668,
"years" : 547420,
"off" : 545832,
"face" : 544251,
"nothing" : 541692,
"right" : 536737,
"once" : 534154,
"another" : 533985,
"left" : 531797,
"part" : 526137,
"saw" : 520922,
"house" : 517564,
"world" : 517557,
"head" : 512481,
"three" : 502146,
"took" : 501669,
"new" : 498040,
"love" : 496496,
"always" : 495834,
"mrs" : 495443,
"put" : 495189,
"night" : 484878,
"each" : 484599,
"king" : 479849,
"between" : 479034,
"tell" : 475277,
"mind" : 470313,
"heart" : 467157,
"few" : 466338,
"because" : 465587,
"thing" : 461472,
"whom" : 458312,
"far" : 456267,
"seemed" : 447884,
"looked" : 447491,
"called" : 445602,
"whole" : 435059,
"de" : 433393,
"set" : 432637,
"both" : 432491,
"got" : 432016,
"find" : 431120,
"done" : 430389,
"heard" : 429972,
"look" : 428871,
"name" : 427021,
"days" : 426104,
"told" : 424696,
"let" : 424320,
"lord" : 422407,
"country" : 420788,
"asked" : 420044,
"going" : 419315,
"seen" : 418862,
"better" : 416463,
"p" : 415673,
"having" : 415355,
"home" : 413499,
"knew" : 413101,
"side" : 405810,
"something" : 398727,
"moment" : 390988,
"father" : 387790,
"among" : 387549,
"course" : 385303,
"hands" : 385081,
"woman" : 384156,
"enough" : 382266,
"words" : 380328,
"mother" : 373898,
"soon" : 373813,
"full" : 371831,
"end" : 369761,
"gave" : 369036,
"room" : 366719,
"almost" : 366630,
"small" : 359970,
"thou" : 355857,
"cannot" : 355656,
"water" : 355467,
"want" : 354212,
"however" : 352828,
"light" : 351253,
"quite" : 350537,
"brought" : 349925,
"nor" : 349691,
"word" : 349685,
"whose" : 344377,
"given" : 344141,
"door" : 342388,
"best" : 337544,
"turned" : 337367,
"taken" : 335210,
"does" : 334332,
"use" : 333883,
"morning" : 330567,
"myself" : 328630,
"gutenberg" : 328324,
"felt" : 326524,
"until" : 326391,
"since" : 326386,
"power" : 326243,
"themselves" : 325793,
"used" : 325791,
"rather" : 325719,
"began" : 325327,
"present" : 324509,
"voice" : 322870,
"others" : 322643,
"white" : 322465,
"works" : 318937,
"less" : 316490,
"money" : 315642,
"next" : 313167,
"poor" : 311818,
"death" : 309653,
"stood" : 308025,
"form" : 307506,
"within" : 307223,
"together" : 304955,
"till" : 304735,
"thy" : 304489,
"large" : 304240,
"matter" : 301283,
"kind" : 298191,
"often" : 296798,
"certain" : 296795,
"herself" : 295916,
"year" : 295745,
"friend" : 295078,
"half" : 293866,
"order" : 293593,
"round" : 291647,
"true" : 291427,
"anything" : 289997,
"keep" : 289304,
"sent" : 287876,
"wife" : 286847,
"means" : 284431,
"believe" : 281965,
"passed" : 279864,
"feet" : 279821,
"near" : 278870,
"public" : 278365,
"state" : 277682,
"son" : 277227,
"hundred" : 275990,
"children" : 275607,
"thus" : 275221,
"hope" : 273746,
"alone" : 272173,
"above" : 271641,
"case" : 271588,
"dear" : 270503,
"thee" : 269414,
"says" : 268542,
"person" : 267878,
"high" : 266672,
"read" : 265947,
"city" : 265138,
"already" : 264662,
"received" : 264606,
"fact" : 263613,
"gone" : 263585,
"girl" : 262689,
"known" : 262571,
"hear" : 260746,
"times" : 260596,
"least" : 259916,
"perhaps" : 257964,
"sure" : 255885,
"indeed" : 255789,
"english" : 255212,
"open" : 254373,
"body" : 252812,
"itself" : 251252,
"along" : 251163,
"land" : 249677,
"return" : 249533,
"leave" : 249063,
"air" : 247480,
"nature" : 246792,
"answered" : 246251,
"either" : 244426,
"law" : 244138,
"help" : 243712,
"lay" : 242753,
"point" : 242269,
"child" : 242201,
"letter" : 242178,
"four" : 242099,
"wish" : 241091,
"fire" : 240652,
"cried" : 240280,
"2" : 240009,
"women" : 239735,
"speak" : 239025,
"number" : 238734,
"therefore" : 238281,
"hour" : 237964,
"friends" : 237481,
"held" : 235474,
"free" : 235012,
"war" : 234544,
"during" : 233771,
"several" : 233197,
"business" : 233158,
"whether" : 230819,
"er" : 230485,
"manner" : 230401,
"second" : 230300,
"reason" : 229940,
"replied" : 229913,
"united" : 226953,
"call" : 226661,
"general" : 226391,
"why" : 226216,
"behind" : 226205,
"became" : 224811,
"john" : 224569,
"become" : 224326,
"dead" : 224049,
"earth" : 222546,
"boy" : 222315,
"lost" : 222264,
"forth" : 220598,
"thousand" : 218623,
"looking" : 218510,
"i'll" : 218372,
"family" : 218118,
"soul" : 217840,
"feel" : 216356,
"coming" : 215147,
"england" : 214339,
"spirit" : 213257,
"question" : 213124,
"care" : 213072,
"truth" : 212548,
"ground" : 212369,
"really" : 211722,
"rest" : 211668,
"mean" : 211299,
"different" : 211043,
"making" : 210031,
"possible" : 209099,
"fell" : 208344,
"towards" : 208199,
"human" : 206740,
"kept" : 206329,
"short" : 206216,
"town" : 205687,
"following" : 205653,
"need" : 204955,
"cause" : 204686,
"met" : 203956,
"evening" : 203331,
"returned" : 202041,
"five" : 201451,
"strong" : 200224,
"able" : 200145,
"french" : 199969,
"live" : 199658,
"lady" : 199560,
"subject" : 198566,
"sn" : 198498,
"answer" : 198187,
"sea" : 198128,
"fear" : 196739,
"understand" : 196729,
"hard" : 196458,
"terms" : 196252,
"doubt" : 195905,
"around" : 195594,
"ask" : 194903,
"arms" : 194298,
"turn" : 192763,
"sense" : 192719,
"seems" : 192229,
"black" : 191272,
"bring" : 191148,
"followed" : 190649,
"beautiful" : 190563,
"close" : 188915,
"dark" : 188316,
"hold" : 186609,
"character" : 186256,
"sort" : 186136,
"sight" : 185862,
"ten" : 184612,
"show" : 184074,
"party" : 184068,
"fine" : 183059,
"ye" : 182978,
"ready" : 181866,
"story" : 180998,
"common" : 180061,
"book" : 179739,
"electronic" : 179347,
"talk" : 178877,
"account" : 178452,
"mark" : 178084,
"interest" : 178001,
"written" : 177232,
"can't" : 176728,
"bed" : 176635,
"necessary" : 176467,
"age" : 176320,
"else" : 175980,
"force" : 175520,
"idea" : 174236,
"longer" : 173897,
"art" : 173544,
"spoke" : 172990,
"across" : 172901,
"brother" : 172692,
"early" : 172467,
"ought" : 171690,
"sometimes" : 171309,
"line" : 170962,
"saying" : 170695,
"table" : 170143,
"appeared" : 169913,
"river" : 169470,
"continued" : 169086,
"eye" : 168723,
"ety" : 168713,
"sun" : 168545,
"information" : 168408,
"later" : 167805,
"everything" : 166395,
"reached" : 165752,
"suddenly" : 164850,
"past" : 164703,
"hours" : 164326,
"strange" : 164147,
"deep" : 163819,
"change" : 163514,
"miles" : 163341,
"feeling" : 163269,
"act" : 162869,
"meet" : 162687,
"paid" : 162605,
"further" : 162327,
"purpose" : 162154,
"happy" : 162105,
"added" : 161953,
"seem" : 161549,
"taking" : 160626,
"blood" : 160547,
"rose" : 159794,
"south" : 158664,
"beyond" : 158344,
"cold" : 158204,
"neither" : 158200,
"forward" : 157578,
"view" : 157416,
"i've" : 157210,
"position" : 156851,
"sound" : 156616,
"none" : 155743,
"entered" : 155480,
"clear" : 155472,
"road" : 154977,
"late" : 154840,
"stand" : 154582,
"suppose" : 154536,
"la" : 154457,
"daughter" : 154261,
"real" : 154046,
"nearly" : 154001,
"mine" : 153940,
"laws" : 153830,
"knowledge" : 153829,
"comes" : 153299,
"toward" : 152972,
"bad" : 152889,
"cut" : 152625,
"copy" : 151661,
"husband" : 151651,
"six" : 151612,
"france" : 151108,
"living" : 151043,
"peace" : 150281,
"didn't" : 149696,
"low" : 149690,
"north" : 149601,
"remember" : 149323,
"effect" : 148795,
"natural" : 148744,
"pretty" : 148124,
"fall" : 147435,
"fair" : 147401,
"service" : 146483,
"below" : 146062,
"except" : 145998,
"american" : 145980,
"hair" : 145817,
"london" : 145606,
"laid" : 145490,
"pass" : 145440,
"led" : 145393,
"copyright" : 145244,
"doing" : 145131,
"army" : 144925,
"run" : 144688,
"horse" : 144022,
"future" : 143658,
"opened" : 143625,
"pleasure" : 142952,
"history" : 141958,
"west" : 141745,
"pay" : 141597,
"red" : 141588,
"an'" : 141517,
"4" : 141402,
"hath" : 141246,
"note" : 140679,
"although" : 140667,
"wanted" : 140608,
"gold" : 139711,
"makes" : 139167,
"desire" : 138288,
"play" : 138228,
"master" : 137871,
"office" : 136616,
"tried" : 136507,
"front" : 136296,
"big" : 136265,
"dr" : 135902,
"lived" : 135512,
"certainly" : 135386,
"wind" : 134689,
"receive" : 134351,
"attention" : 134257,
"government" : 134075,
"unto" : 134048,
"church" : 133975,
"strength" : 133771,
"length" : 133663,
"company" : 133159,
"placed" : 133084,
"paper" : 133030,
"letters" : 132785,
"probably" : 132560,
"glad" : 132368,
"important" : 132288,
"especially" : 132096,
"greater" : 132045,
"yourself" : 131617,
"fellow" : 131509,
"bear" : 131397,
"opinion" : 130867,
"window" : 130590,
"ran" : 130394,
"faith" : 130376,
"ago" : 130255,
"agreement" : 130079,
"charge" : 129644,
"beauty" : 129586,
"lips" : 129473,
"remained" : 129411,
"arm" : 129325,
"latter" : 129276,
"duty" : 129116,
"send" : 129075,
"distance" : 129046,
"silence" : 128067,
"foot" : 128053,
"wild" : 127758,
"object" : 127468,
"die" : 127167,
"save" : 126799,
"gentleman" : 126761,
"trees" : 126469,
"green" : 126431,
"trouble" : 125885,
"smile" : 125830,
"books" : 125827,
"wrong" : 125401,
"various" : 125006,
"sleep" : 124634,
"persons" : 123820,
"blockquote" : 123703,
"happened" : 123421,
"particular" : 123182,
"drew" : 122264,
"minutes" : 122201,
"hardly" : 121603,
"walked" : 121276,
"chief" : 121207,
"chance" : 120739,
"according" : 120733,
"beginning" : 120733,
"action" : 120590,
"deal" : 120186,
"loved" : 120145,
"visit" : 119807,
"thinking" : 119753,
"follow" : 119666,
"standing" : 119506,
"knows" : 119114,
"try" : 118860,
"presence" : 118852,
"heavy" : 118834,
"sweet" : 118754,
"plain" : 118641,
"donations" : 118443,
"immediately" : 118250,
"wrote" : 118114,
"mouth" : 117921,
"rich" : 117386,
"thoughts" : 117251,
"months" : 116955,
"u" : 116586,
"won't" : 116568,
"afraid" : 116467,
"paris" : 116402,
"single" : 115905,
"joy" : 115788,
"enemy" : 115195,
"broken" : 115006,
"unless" : 114054,
"states" : 113807,
"ship" : 113611,
"condition" : 113578,
"carry" : 113357,
"exclaimed" : 113352,
"including" : 113104,
"filled" : 112921,
"seeing" : 112889,
"influence" : 112447,
"write" : 112285,
"boys" : 112125,
"appear" : 112044,
"outside" : 111849,
"secret" : 111678,
"parts" : 111194,
"please" : 111114,
"appearance" : 110932,
"evil" : 110898,
"march" : 110834,
"george" : 110754,
"whatever" : 110549,
"slowly" : 110500,
"tears" : 110314,
"horses" : 110296,
"places" : 110250,
"caught" : 110061,
"stay" : 109894,
"instead" : 109837,
"struck" : 109662,
"blue" : 109449,
"york" : 109354,
"impossible" : 109330,
"period" : 109160,
"sister" : 108983,
"battle" : 108781,
"school" : 108701,
"mary" : 108633,
"raised" : 108580,
"occasion" : 108544,
"married" : 108419,
"man's" : 108346,
"former" : 108299,
"food" : 108140,
"youth" : 108097,
"learned" : 108072,
"merely" : 108034,
"reach" : 107787,
"system" : 107496,
"twenty" : 107475,
"dinner" : 107414,
"quiet" : 107167,
"easily" : 107012,
"moved" : 106996,
"afterwards" : 106992,
"giving" : 106981,
"walk" : 106858,
"stopped" : 106661,
"laughed" : 106591,
"language" : 106445,
"expression" : 106415,
"week" : 106184,
"hall" : 106108,
"danger" : 105775,
"property" : 105765,
"wonder" : 105588,
"usual" : 105412,
"figure" : 105403,
"born" : 104938,
"court" : 104606,
"generally" : 104448,
"grew" : 104326,
"showed" : 104205,
"getting" : 103981,
"ancient" : 103755,
"respect" : 103497,
"third" : 103468,
"worth" : 103346,
"simple" : 102885,
"tree" : 102872,
"leaving" : 102830,
"remain" : 102801,
"society" : 102355,
"fight" : 102206,
"wall" : 102124,
"result" : 102039,
"heaven" : 101875,
"william" : 101780,
"started" : 101771,
"command" : 101717,
"tone" : 101569,
"regard" : 101139,
"expected" : 101117,
"mere" : 101061,
"month" : 101037,
"beside" : 100710,
"silent" : 100695,
"perfect" : 100522,
"experience" : 100504,
"street" : 100499,
"writing" : 100292,
"goes" : 100235,
"circumstances" : 100166,
"entirely" : 99803.2,
"fresh" : 99654.4,
"duke" : 99561.9,
"covered" : 99439.2,
"bound" : 99304.7,
"east" : 99220.9,
"wood" : 99157.6,
"stone" : 99073.7,
"quickly" : 98994.6,
"notice" : 98872.0,
"bright" : 98773.9,
"christ" : 98758.1,
"boat" : 98756.5,
"noble" : 98714.6,
"meant" : 98705.1,
"somewhat" : 98651.3,
"sudden" : 98497.8,
"value" : 98232.8,
"c." : 97991.5,
"direction" : 97628.3,
"chair" : 97567.4,
"due" : 97353.0,
"support" : 97334.0,
"tom" : 97093.5,
"date" : 96908.4,
"waiting" : 96834.8,
"christian" : 96735.9,
"village" : 96681.3,
"lives" : 96547.6,
"reading" : 96465.4,
"agree" : 96413.9,
"lines" : 96198.0,
"considered" : 96182.9,
"field" : 96170.3,
"observed" : 96110.1,
"scarcely" : 95979.6,
"wished" : 95538.2,
"wait" : 95523.9,
"greatest" : 95398.1,
"permission" : 95391.0,
"success" : 95371.2,
"piece" : 95303.2,
"british" : 95149.7,
"ex" : 95127.6,
"charles" : 95049.2,
"formed" : 94978.0,
"speaking" : 94833.3,
"trying" : 94706.7,
"conversation" : 94578.5,
"proper" : 94480.4,
"hill" : 94379.1,
"music" : 94284.2,
"opportunity" : 94123.6,
"that's" : 93912.4,
"german" : 93866.5,
"afternoon" : 93839.6,
"cry" : 93675.0,
"cost" : 93470.1,
"allowed" : 93398.1,
"girls" : 93384.7,
"considerable" : 93235.2,
"c" : 92990.7,
"broke" : 92800.8,
"honour" : 92446.4,
"seven" : 92292.1,
"private" : 92260.5,
"sit" : 92217.0,
"news" : 92204.3,
"top" : 91995.5,
"scene" : 91985.2,
"discovered" : 91968.6,
"marriage" : 91937.7,
"step" : 91592.0,
"garden" : 91392.6,
"race" : 91305.6,
"begin" : 91207.5,
"per" : 91115.7,
"individual" : 90923.5,
"sitting" : 90632.3,
"learn" : 90513.7,
"political" : 90412.4,
"difficult" : 90385.5,
"bit" : 90352.3,
"speech" : 90319.0,
"henry" : 90034.2,
"lie" : 89756.5,
"cast" : 89723.3,
"eat" : 89650.5,
"authority" : 89562.7,
"etc." : 89480.4,
"floor" : 89151.3,
"ill" : 89125.2,
"ways" : 88381.5,
"officers" : 88207.5,
"offered" : 88164.0,
"original" : 88133.9,
"happiness" : 88005.0,
"flowers" : 87879.2,
"produced" : 87876.0,
"summer" : 87793.7,
"provide" : 87573.0,
"study" : 87538.2,
"religion" : 87445.6,
"picture" : 87340.4,
"walls" : 87300.1,
"personal" : 87235.2,
"america" : 87195.6,
"watch" : 87073.8,
"pleased" : 86744.7,
"leaves" : 86702.0,
"declared" : 86560.4,
"hot" : 86478.9,
"understood" : 86271.6,
"effort" : 86256.6,
"prepared" : 86240.7,
"escape" : 86157.7,
"attempt" : 86132.4,
"supposed" : 86122.1,
"killed" : 86020.0,
"fast" : 86008.1,
"author" : 85992.3,
"indian" : 85940.9,
"brown" : 85875.2,
"determined" : 85873.7,
"pain" : 85833.3,
"spring" : 85777.9,
"takes" : 85761.3,
"drawn" : 85701.2,
"soldiers" : 85694.1,
"houses" : 85488.4,
"beneath" : 85453.6,
"talking" : 85391.1,
"turning" : 85325.4,
"century" : 85256.6,
"steps" : 84796.1,
"intended" : 84789.0,
"soft" : 84783.5,
"straight" : 84750.2,
"matters" : 84637.1,
"likely" : 84636.3,
"corner" : 84584.1,
"trademark" : 84518.4,
"justice" : 84500.2,
"simply" : 84365.0,
"produce" : 84323.0,
"trust" : 84298.5,
"appears" : 84153.7,
"rome" : 84141.1,
"laugh" : 84072.2,
"forget" : 84004.2,
"europe" : 83940.9,
"passage" : 83908.5,
"eight" : 83853.9,
"closed" : 83814.3,
"ourselves" : 83716.2,
"gives" : 83639.5,
"dress" : 83486.0,
"passing" : 83482.1,
"terrible" : 83421.9,
"required" : 83248.7,
"medium" : 83151.4,
"efforts" : 83147.4,
"sake" : 83124.5,
"breath" : 83075.4,
"wise" : 83039.0,
"ladies" : 82918.0,
"possession" : 82883.2,
"pleasant" : 82839.7,
"perfectly" : 82830.9,
"o'" : 82761.3,
"memory" : 82725.7,
"usually" : 82644.2,
"grave" : 82623.7,
"fixed" : 82566.7,
"modern" : 82562.0,
"spot" : 82338.9,
"troops" : 82223.4,
"rise" : 82126.8,
"break" : 82118.1,
"fifty" : 82035.1,
"island" : 81974.9,
"meeting" : 81962.3,
"camp" : 81899.8,
"nation" : 81890.3,
"existence" : 81804.1,
"reply" : 81671.1,
"i'd" : 81624.5,
"copies" : 81479.7,
"sky" : 81457.5,
"touch" : 81396.6,
"equal" : 81392.7,
"fortune" : 81232.1,
"v." : 81207.5,
"shore" : 81109.4,
"domain" : 81056.4,
"named" : 80756.6,
"situation" : 80651.4,
"looks" : 80463.1,
"promise" : 80426.7,
"orders" : 80294.6,
"degree" : 80293.8,
"middle" : 80239.2,
"winter" : 80239.2,
"plan" : 80069.9,
"spent" : 80046.2,
"allow" : 79876.1,
"pale" : 79860.2,
"conduct" : 79819.9,
"running" : 79752.6,
"religious" : 79751.1,
"surprise" : 79635.6,
"minute" : 79605.5,
"roman" : 79482.1,
"cases" : 79432.2,
"shot" : 79425.1,
"lead" : 79418.8,
"move" : 79376.1,
"names" : 79366.6,
"stop" : 79301.7,
"higher" : 79280.3,
"et" : 79225.0,
"father's" : 79201.2,
"threw" : 79179.1,
"worse" : 79163.2,
"built" : 79110.2,
"spoken" : 79085.7,
"glass" : 79053.3,
"board" : 78858.7,
"vain" : 78809.6,
"affairs" : 78788.3,
"instance" : 78564.4,
"safe" : 78317.5,
"loss" : 78305.7,
"doctor" : 78281.9,
"offer" : 78246.3,
"class" : 78173.5,
"complete" : 78039.8,
"access" : 77716.3,
"lower" : 77368.2,
"wouldn't" : 77327.0,
"repeated" : 77300.1,
"forms" : 77286.7,
"darkness" : 77261.4,
"military" : 77249.5,
"warm" : 77220.2,
"drink" : 77183.8,
"passion" : 77028.0,
"ones" : 76976.6,
"physical" : 76974.2,
"example" : 76922.8,
"ears" : 76672.0,
"questions" : 76596.8,
"start" : 76592.9,
"lying" : 76560.4,
"smiled" : 76547.8,
"keeping" : 76541.4,
"spite" : 76495.5,
"shown" : 76402.2,
"directly" : 76333.4,
"james" : 76067.5,
"hart" : 76059.6,
"serious" : 75990.8,
"hat" : 75918.0,
"dog" : 75874.5,
"silver" : 75859.5,
"sufficient" : 75856.3,
"main" : 75850.0,
"mentioned" : 75813.6,
"servant" : 75473.4,
"pride" : 75429.9,
"crowd" : 75422.0,
"train" : 75404.6,
"wonderful" : 75347.6,
"moral" : 75253.5,
"instant" : 75209.9,
"associated" : 75153.0,
"path" : 75150.6,
"greek" : 75128.5,
"meaning" : 74981.3,
"fit" : 74906.9,
"ordered" : 74834.2,
"lot" : 74745.6,
"he's" : 74505.0,
"proved" : 74438.6,
"obliged" : 74413.3,
"enter" : 74391.9,
"rule" : 74378.5,
"sword" : 74342.9,
"attack" : 74293.0,
"seat" : 74266.1,
"game" : 74220.2,
"health" : 74210.7,
"paragraph" : 74195.7,
"statement" : 74053.3,
"social" : 74035.9,
"refund" : 73914.1,
"sorry" : 73906.2,
"courage" : 73882.4,
"members" : 73833.4,
"grace" : 73826.3,
"official" : 73755.1,
"dream" : 73726.6,
"worthy" : 73622.1,
"rock" : 73581.8,
"jack" : 73527.2,
"provided" : 73443.3,
"special" : 73437.8,
"shook" : 73338.9,
"request" : 73327.8,
"mighty" : 73313.6,
"glance" : 73302.5,
"heads" : 73281.2,
"movement" : 73266.9,
"fee" : 73232.9,
"share" : 73182.3,
"expect" : 73178.3,
"couldn't" : 73076.3,
"dollars" : 72925.9,
"spread" : 72890.3,
"opposite" : 72815.2,
"glory" : 72644.3,
"twelve" : 72570.7,
"space" : 72550.2,
"engaged" : 72545.4,
"peter" : 72509.0,
"wine" : 72450.5,
"ordinary" : 72404.6,
"mountains" : 72333.4,
"taste" : 72268.5,
"iron" : 72252.7,
"isn't" : 72166.5,
"distribute" : 72160.9,
"trade" : 72098.4,
"consider" : 72033.5,
"greatly" : 71894.3,
"accepted" : 71888.0,
"forced" : 71884.8,
"advantage" : 71878.5,
"ideas" : 71849.2,
"decided" : 71766.1,
"using" : 71751.9,
"officer" : 71744.0,
"rate" : 71688.6,
"clothes" : 71654.6,
"sign" : 71582.6,
"feelings" : 71558.9,
"native" : 71399.1,
"promised" : 71195.7,
"judge" : 71190.2,
"difference" : 71072.3,
"working" : 71042.3,
"anxious" : 71040.7,
"marry" : 70948.9,
"captain" : 70861.9,
"finished" : 70763.8,
"extent" : 70649.8,
"watched" : 70643.5,
"curious" : 70641.1,
"foreign" : 70609.5,
"besides" : 70524.9,
"method" : 70513.0,
"excellent" : 70452.9,
"confidence" : 70414.1,
"marked" : 70264.6,
"'em" : 70256.7,
"jesus" : 70228.2,
"exactly" : 70213.1,
"importance" : 70066.8,
"finally" : 70018.5,
"bill" : 69973.4,
"vast" : 69861.1,
"prove" : 69854.0,
"fancy" : 69771.7,
"quick" : 69761.4,
"yes" : 69731.3,
"sought" : 69666.5,
"prevent" : 69664.1,
"neck" : 69634.8,
"hearts" : 69628.5,
"liberty" : 69603.2,
"interesting" : 69601.6,
"sides" : 69594.5,
"legal" : 69532.8,
"gentlemen" : 69507.5,
"dry" : 69442.6,
"serve" : 69414.9,
"aside" : 69368.2,
"pure" : 69287.5,
"concerning" : 69279.6,
"forgotten" : 69212.4,
"lose" : 69115.8,
"powers" : 69073.1,
"possessed" : 68995.6,
"thrown" : 68931.5,
"evidence" : 68904.6,
"distant" : 68882.5,
"michael" : 68874.5,
"progress" : 68830.2,
"similar" : 68603.2,
"narrow" : 68505.9,
"altogether" : 68471.9,
"building" : 68394.3,
"page" : 68234.5,
"particularly" : 68193.4,
"knowing" : 68050.2,
"weeks" : 67942.6,
"settled" : 67880.1,
"holding" : 67815.2,
"mountain" : 67762.2,
"search" : 67660.2,
"sad" : 67596.1,
"sin" : 67589.7,
"lies" : 67551.0,
"proud" : 67492.4,
"pieces" : 67423.6,
"clearly" : 67318.4,
"price" : 67277.2,
"ships" : 67259.0,
"thirty" : 67229.0,
"sick" : 67152.2,
"honest" : 67088.2,
"shut" : 67063.6,
"talked" : 66998.0,
"bank" : 66981.4,
"fate" : 66946.5,
"dropped" : 66940.2,
"judgment" : 66827.9,
"conditions" : 66803.4,
"king's" : 66590.5,
"accept" : 66498.8,
"hills" : 66475.8,
"removed" : 66429.1,
"forest" : 66263.0,
"measure" : 66262.2,
"species" : 66220.3,
"seek" : 66141.2,
"highest" : 66066.0,
"otherwise" : 66054.9,
"stream" : 66003.5,
"honor" : 65994.0,
"carefully" : 65960.0,
"obtained" : 65884.1,
"ear" : 65819.2,
"bread" : 65708.4,
"bottom" : 65645.1,
"additional" : 65640.4,
"presented" : 65615.9,
"aid" : 65581.1,
"fingers" : 65564.4,
"q" : 65556.5,
"remembered" : 65554.2,
"choose" : 65396.7,
"agreed" : 65284.4,
"animal" : 65255.1,
"events" : 65118.2,
"there's" : 65017.8,
"fully" : 64982.2,
"delight" : 64971.9,
"rights" : 64836.6,
"amount" : 64720.3,
"obtain" : 64706.1,
"tax" : 64658.6,
"servants" : 64642.0,
"sons" : 64592.9,
"cross" : 64403.8,
"shoulders" : 64397.5,
"thick" : 64364.3,
"points" : 64311.3,
"stranger" : 64270.9,
"woods" : 64227.4,
"facts" : 64155.4,
"dare" : 64055.0,
"grow" : 64005.1,
"creature" : 63988.5,
"hung" : 63988.5,
"rain" : 63905.4,
"false" : 63861.1,
"tall" : 63854.8,
"gate" : 63851.6,
"nations" : 63793.9,
"created" : 63774.9,
"refused" : 63774.9,
"quietly" : 63767.8,
"surface" : 63717.1,
"freely" : 63711.6,
"holy" : 63709.2,
"streets" : 63695.0,
"blow" : 63577.9,
"july" : 63558.1,
"regarded" : 63536.0,
"fashion" : 63526.5,
"report" : 63492.5,
"coast" : 63479.8,
"daily" : 63422.8,
"file" : 63420.5,
"shoulder" : 63340.6,
"surprised" : 63312.9,
"faces" : 63252.7,
"succeeded" : 63179.2,
"birds" : 63162.6,
"distribution" : 63155.4,
"royal" : 63124.6,
"song" : 63090.6,
"wealth" : 63067.6,
"comfort" : 63064.5,
"failed" : 63013.0,
"freedom" : 62979.8,
"peculiar" : 62951.3,
"anyone" : 62941.8,
"advance" : 62895.1,
"gentle" : 62856.4,
"surely" : 62831.1,
"animals" : 62815.2,
"waited" : 62808.1,
"secure" : 62770.9,
"desired" : 62740.1,
"grass" : 62726.6,
"touched" : 62649.9,
"occupied" : 62645.1,
"draw" : 62587.4,
"stage" : 62536.0,
"portion" : 62479.8,
"expressed" : 62460.0,
"opening" : 62316.0,
"june" : 62288.3,
"spirits" : 62147.5,
"fish" : 62138.0,
"tongue" : 62119.8,
"capital" : 62114.3,
"angry" : 61966.4,
"growing" : 61954.5,
"served" : 61954.5,
"carriage" : 61920.5,
"weather" : 61892.8,
"breast" : 61786.0,
"presently" : 61503.5,
"snow" : 61384.9,
"david" : 61377.8,
"papers" : 61338.2,
"necessity" : 61325.5,
"practice" : 61313.7,
"claim" : 61248.0,
"hast" : 61220.3,
"education" : 61160.2,
"sharp" : 61096.9,
"prince" : 60998.8,
"permitted" : 60987.7,
"group" : 60983.0,
"enemies" : 60982.2,
"robert" : 60967.2,
"played" : 60926.0,
"throughout" : 60896.7,
"pity" : 60858.0,
"expense" : 60836.6,
"yours" : 60770.2,
"million" : 60752.8,
"add" : 60647.5,
"pray" : 60553.4,
"taught" : 60553.4,
"explained" : 60525.7,
"tired" : 60525.7,
"leading" : 60475.9,
"kill" : 60435.5,
"shadow" : 60402.3,
"companion" : 60384.9,
"weight" : 60339.8,
"mass" : 60298.6,
"established" : 60226.7,
"suffered" : 60210.0,
"gray" : 60200.5,
"brave" : 60085.0,
"thin" : 60068.4,
"satisfied" : 60044.7,
"check" : 60002.0,
"virtue" : 59979.0,
"golden" : 59969.5,
"numerous" : 59925.2,
"frequently" : 59873.8,
"famous" : 59843.0,
"telling" : 59831.9,
"powerful" : 59634.9,
"alive" : 59632.5,
"waters" : 59569.2,
"national" : 59565.3,
"weak" : 59510.7,
"divine" : 59479.8,
"material" : 59413.4,
"principal" : 59406.2,
"gathered" : 59324.8,
"suggested" : 59261.5,
"frank" : 59180.8,
"valley" : 59172.1,
"guess" : 59171.3,
"finding" : 59160.2,
"yellow" : 59051.8,
"heat" : 58979.8,
"remains" : 58941.1,
"bent" : 58939.5,
"seized" : 58899.1,
"guard" : 58892.8,
"equally" : 58840.6,
"naturally" : 58811.3,
"box" : 58805.8,
"remarkable" : 58805.8,
"gods" : 58789.2,
"moon" : 58760.7,
"slight" : 58710.8,
"style" : 58699.8,
"pointed" : 58669.7,
"saved" : 58632.5,
"windows" : 58556.6,
"crossed" : 58551.8,
"louis" : 58498.8,
"pounds" : 58458.5,
"ain't" : 58451.3,
"evidently" : 58378.6,
"principle" : 58369.9,
"immediate" : 58268.6,
"willing" : 58218.0,
"consequence" : 58195.8,
"richard" : 58188.7,
"principles" : 58148.3,
"characters" : 58140.4,
"paul" : 58106.4,
"season" : 58088.2,
"remarked" : 58020.2,
"science" : 57960.1,
"tender" : 57959.3,
"worked" : 57946.6,
"grown" : 57772.6,
"whispered" : 57721.1,
"interested" : 57714.8,
"quarter" : 57661.0,
"midst" : 57518.6,
"liked" : 57509.9,
"advanced" : 57495.7,
"apparently" : 57471.1,
"bore" : 57458.5,
"pwh" : 57456.9,
"active" : 57449.0,
"noticed" : 57389.6,
"aware" : 57388.9,
"thomas" : 57341.4,
"uncle" : 57326.4,
"list" : 57295.5,
"dangerous" : 57292.3,
"august" : 57283.6,
"calm" : 57251.2,
"genius" : 57225.1,
"sacred" : 57211.6,
"kingdom" : 57199.0,
"entire" : 57187.9,
"popular" : 57115.1,
"unknown" : 57040.8,
"nice" : 57038.4,
"habit" : 57035.2,
"spanish" : 56981.4,
"familiar" : 56835.1,
"reader" : 56832.7,
"published" : 56807.4,
"direct" : 56755.2,
"handsome" : 56748.8,
"you'll" : 56740.1,
"joined" : 56737.0,
"actually" : 56699.0,
"kings" : 56630.2,
"sd" : 56603.3,
"posted" : 56561.3,
"approach" : 56555.8,
"washington" : 56527.3,
"hearing" : 56509.9,
"needed" : 56455.3,
"increased" : 56113.5,
"walking" : 56094.6,
"twice" : 55893.6,
"throw" : 55850.1,
"intellectual" : 55849.3,
"appointed" : 55847.7,
"wisdom" : 55827.2,
"ceased" : 55810.5,
"truly" : 55776.5,
"numbers" : 55771.8,
"demanded" : 55718.0,
"priest" : 55667.3,
"wounded" : 55666.6,
"sorrow" : 55657.1,
"drive" : 55626.2,
"fault" : 55610.4,
"listened" : 55604.1,
"palace" : 55599.3,
"affair" : 55588.2,
"contact" : 55576.4,
"distinguished" : 55568.5,
"station" : 55551.0,
"beat" : 55452.9,
"distributed" : 55431.6,
"e" : 55405.5,
"listen" : 55400.7,
"italy" : 55327.9,
"fool" : 55324.0,
"becomes" : 55307.4,
"watching" : 55264.7,
"hurt" : 55227.5,
"wants" : 55209.3,
"express" : 55193.5,
"occurred" : 55186.3,
"favour" : 55161.8,
"height" : 55161.8,
"size" : 55123.0,
"edge" : 55074.8,
"subjects" : 55021.8,
"task" : 54999.6,
"follows" : 54945.8,
"interests" : 54938.7,
"nine" : 54922.9,
"sympathy" : 54921.3,
"burst" : 54920.5,
"putting" : 54835.9,
"dressed" : 54816.9,
"lifted" : 54796.3,
"hopes" : 54772.6,
"suffer" : 54764.7,
"noise" : 54731.4,
"smiling" : 54720.4,
"rode" : 54627.8,
"tells" : 54559.0,
"minds" : 54524.9,
"farther" : 54471.1,
"literature" : 54426.1,
"vessel" : 54407.9,
"affection" : 54381.0,
"suffering" : 54363.6,
"proceeded" : 54343.8,
"flesh" : 54333.5,
"advice" : 54172.9,
"grand" : 54172.9,
"carrying" : 54168.9,
"legs" : 54149.2,
"spain" : 54147.6,
"post" : 54054.2,
"collection" : 54031.3,
"empty" : 54020.2,
"rank" : 54009.9,
"storm" : 54009.1,
"god's" : 53975.9,
"imagine" : 53930.8,
"wore" : 53915.0,
"duties" : 53904.7,
"admitted" : 53846.2,
"countries" : 53834.3,
"pocket" : 53783.7,
"arrival" : 53776.5,
"imagination" : 53768.6,
"driven" : 53753.6,
"loud" : 53742.5,
"sentence" : 53699.0,
"lovely" : 53673.7,
"extraordinary" : 53589.0,
"november" : 53551.8,
"december" : 53541.6,
"happen" : 53527.3,
"absence" : 53502.8,
"breakfast" : 53488.6,
"population" : 53423.7,
"thank" : 53413.4,
"rules" : 53386.5,
"inhabitants" : 53168.1,
"series" : 53157.9,
"laughing" : 53113.6,
"address" : 53096.2,
"relief" : 53061.3,
"bird" : 52958.5,
"owner" : 52920.5,
"impression" : 52901.5,
"satisfaction" : 52896.8,
"coat" : 52869.9,
"prepare" : 52846.2,
"relations" : 52841.4,
"shape" : 52710.9,
"birth" : 52703.0,
"rapidly" : 52677.6,
"smoke" : 52676.9,
"january" : 52657.1,
"mother's" : 52657.1,
"machine" : 52618.3,
"content" : 52615.1,
"consideration" : 52606.4,
"accompanied" : 52581.1,
"regular" : 52519.4,
"moving" : 52512.3,
"stands" : 52512.3,
"wholly" : 52482.2,
"teeth" : 52449.0,
"busy" : 52407.1,
"treated" : 52378.6,
"burning" : 52322.4,
"shame" : 52312.9,
"quality" : 52292.4,
"bay" : 52212.5,
"discover" : 52201.4,
"inside" : 52123.1,
"brain" : 52105.7,
"soil" : 52100.9,
"completely" : 52050.3,
"message" : 52049.5,
"ring" : 52000.4,
"resolved" : 51981.4,
"calling" : 51950.6,
"phrase" : 51940.3,
"acts" : 51907.1,
"mention" : 51888.9,
"square" : 51832.7,
"pair" : 51818.5,
"won" : 51798.7,
"title" : 51777.3,
"understanding" : 51713.3,
"sunday" : 51687.9,
"fruit" : 51674.5,
"mad" : 51668.9,
"forces" : 51593.8,
"included" : 51591.4,
"tea" : 51547.1,
"rocks" : 51540.0,
"nearer" : 51456.1,
"slaves" : 51427.7,
"falling" : 51396.0,
"absolutely" : 51394.4,
"slow" : 51380.2,
"bearing" : 51315.3,
"mercy" : 51306.6,
"larger" : 51297.9,
"explain" : 51285.2,
"contain" : 51210.9,
"grief" : 51205.3,
"soldier" : 51200.6,
"wasn't" : 51135.7,
"countenance" : 51077.2,
"previous" : 51055.8,
"explanation" : 51050.3,
"welcome" : 51009.1,
"proposed" : 51006.8,
"prayer" : 50987.8,
"stars" : 50977.5,
"germany" : 50905.5,
"belief" : 50725.9,
"informed" : 50691.1,
"moments" : 50676.1,
"poetry" : 50673.7,
"constant" : 50650.8,
"buy" : 50619.9,
"final" : 50604.9,
"faithful" : 50587.5,
"ride" : 50551.9,
"policy" : 50464.8,
"supper" : 50460.9,
"drawing" : 50447.4,
"excitement" : 50435.6,
"dying" : 50335.1,
"demand" : 50257.6,
"fighting" : 50245.7,
"fields" : 50201.4,
"drove" : 50184.8,
"upper" : 50168.2,
"sum" : 50041.6,
"philip" : 49999.7,
"motion" : 49955.4,
"assistance" : 49929.2,
"forty" : 49903.1,
"april" : 49899.2,
"stones" : 49887.3,
"edward" : 49840.6,
"fees" : 49820.1,
"kindly" : 49809.8,
"dignity" : 49803.5,
"catch" : 49718.8,
"october" : 49715.6,
"seated" : 49654.7,
"knees" : 49642.1,
"amongst" : 49628.6,
"current" : 49580.4,
"sending" : 49578.8,
"parties" : 49574.0,
"objects" : 49571.7,
"gained" : 49550.3,
"bitter" : 49544.8,
"possibly" : 49520.2,
"slave" : 49489.4,
"separate" : 49480.7,
"loose" : 49427.7,
"text" : 49423.7,
"receiving" : 49391.3,
"worst" : 49369.1,
"sold" : 49339.1,
"don" : 49305.0,
"credit" : 49262.3,
"chosen" : 49225.9,
"hoped" : 49174.5,
"printed" : 49171.3,
"terror" : 49161.1,
"features" : 49124.7,
"fond" : 49101.7,
"control" : 49074.0,
"capable" : 49070.1,
"fifteen" : 49002.8,
"doesn't" : 48968.0,
"firm" : 48962.5,
"superior" : 48948.2,
"cruel" : 48839.1,
"spiritual" : 48809.8,
"harry" : 48809.0,
"splendid" : 48752.0,
"proof" : 48702.2,
"pressed" : 48635.7,
"sooner" : 48619.1,
"join" : 48614.4,
"process" : 48561.4,
"crime" : 48528.9,
"dust" : 48502.8,
"instantly" : 48494.9,
"lands" : 48475.9,
"relation" : 48472.8,
"doors" : 48413.4,
"concerned" : 48411.1,
"deeply" : 48404.7,
"practical" : 48402.4,
"colour" : 48371.5,
"sing" : 48331.2,
"destroy" : 48261.5,
"anger" : 48238.6,
"distributing" : 48223.6,
"results" : 48202.2,
"increase" : 48197.5,
"reasons" : 48062.2,
"nose" : 48059.8,
"friendly" : 48021.0,
"entrance" : 47983.8,
"rooms" : 47962.5,
"admit" : 47956.2,
"supply" : 47924.5,
"clean" : 47921.3,
"useful" : 47849.4,
"yesterday" : 47824.0,
"delicate" : 47818.5,
"fail" : 47778.1,
"continue" : 47737.8,
"remove" : 47699.0,
"addressed" : 47673.7,
"choice" : 47671.3,
"huge" : 47619.9,
"needs" : 47619.1,
"wear" : 47608.8,
"blind" : 47599.4,
"unable" : 47589.1,
"cover" : 47574.0,
"double" : 47550.3,
"victory" : 47530.5,
"dozen" : 47519.4,
"constantly" : 47490.2,
"level" : 47487.8,
"india" : 47392.1,
"release" : 47389.7,
"rough" : 47332.7,
"ended" : 47325.6,
"shows" : 47324.0,
"fly" : 47316.9,
"praise" : 47267.9,
"devil" : 47254.4,
"ahead" : 47194.3,
"smith" : 47190.3,
"connected" : 47128.6,
"degrees" : 47110.4,
"gain" : 47094.6,
"addition" : 47084.3,
"committed" : 47063.0,
"chamber" : 47055.8,
"notes" : 47051.9,
"italian" : 47021.8,
"gradually" : 46902.4,
"acquaintance" : 46877.8,
"bought" : 46854.1,
"souls" : 46847.0,
"mission" : 46837.5,
"sacrifice" : 46810.6,
"cities" : 46797.1,
"mistake" : 46768.7,
"exercise" : 46752.0,
"conscience" : 46747.3,
"based" : 46717.2,
"car" : 46691.1,
"buried" : 46646.8,
"theory" : 46621.5,
"commanded" : 46561.4,
"nobody" : 46560.6,
"minister" : 46464.9,
"closely" : 46446.7,
"energy" : 46407.9,
"dick" : 46399.2,
"bare" : 46386.5,
"fought" : 46382.6,
"partly" : 46369.9,
"mistress" : 46298.7,
"hate" : 46287.7,
"arose" : 46271.0,
"playing" : 46253.6,
"color" : 46252.1,
"lake" : 46240.2,
"safety" : 46193.5,
"provisions" : 46145.2,
"description" : 46112.0,
"asleep" : 46108.9,
"centre" : 46093.8,
"faint" : 46085.1,
"thinks" : 46053.5,
"parents" : 46044.8,
"escaped" : 46024.2,
"careful" : 45995.7,
"enjoy" : 45987.8,
"drop" : 45918.2,
"brilliant" : 45799.5,
"brief" : 45788.4,
"bringing" : 45758.4,
"worship" : 45744.1,
"goods" : 45735.4,
"tale" : 45733.1,
"skin" : 45696.7,
"roof" : 45695.9,
"grey" : 45689.6,
"highly" : 45680.1,
"crown" : 45638.1,
"castle" : 45581.2,
"excited" : 45567.7,
"throne" : 45558.2,
"stated" : 45524.2,
"despair" : 45463.3,
"ease" : 45457.8,
"attached" : 45447.5,
"total" : 45438.8,
"kindness" : 45424.5,
"mile" : 45375.5,
"citizens" : 45309.8,
"circle" : 45281.3,
"dull" : 45281.3,
"extreme" : 45269.5,
"clouds" : 45264.7,
"figures" : 45251.3,
"intention" : 45248.1,
"prison" : 45241.0,
"term" : 45215.7,
"assured" : 45181.6,
"hidden" : 45123.9,
"thoroughly" : 45123.1,
"cup" : 45107.3,
"member" : 45100.2,
"civil" : 44958.5,
"apply" : 44943.5,
"labor" : 44925.3,
"everywhere" : 44877.1,
"intelligence" : 44856.5,
"strike" : 44825.6,
"fairly" : 44822.5,
"comply" : 44817.7,
"fellows" : 44813.0,
"haven't" : 44742.6,
"event" : 44675.3,
"gently" : 44659.5,
"connection" : 44629.4,
"protection" : 44625.5,
"conscious" : 44568.5,
"edition" : 44551.1,
"directed" : 44487.0,
"pulled" : 44473.6,
"flight" : 44454.6,
"evident" : 44453.8,
"surrounded" : 44441.1,
"wishes" : 44411.9,
"yards" : 44411.9,
"voices" : 44401.6,
"weary" : 44397.6,
"couple" : 44368.4,
"variety" : 44354.1,
"whilst" : 44316.1,
"volume" : 44293.2,
"details" : 44279.0,
"older" : 44260.0,
"requirements" : 44256.0,
"custom" : 44253.6,
"apart" : 44248.1,
"bow" : 44208.5,
"awful" : 44175.3,
"everybody" : 44135.8,
"labour" : 44127.9,
"asking" : 44097.8,
"lover" : 44066.9,
"showing" : 44052.7,
"introduced" : 44021.8,
"suit" : 43998.9,
"becoming" : 43985.4,
"composed" : 43982.3,
"plans" : 43978.3,
"rendered" : 43966.5,
"pictures" : 43960.1,
"lest" : 43949.8,
"volunteers" : 43949.8,
"singing" : 43942.7,
"eager" : 43919.0,
"precious" : 43892.9,
"paused" : 43885.0,
"require" : 43847.0,
"meat" : 43838.3,
"whenever" : 43837.5,
"milk" : 43784.5,
"dogs" : 43682.4,
"successful" : 43611.2,
"plants" : 43609.7,
"vision" : 43605.7,
"rare" : 43603.3,
"granted" : 43561.4,
"raise" : 43533.7,
"egypt" : 43529.8,
"manners" : 43503.6,
"cousin" : 43494.9,
"you've" : 43454.6,
"development" : 43431.7,
"arthur" : 43405.5,
"obs" : 43402.4,
"cool" : 43305.1,
"trial" : 43233.9,
"learning" : 43177.7,
"approached" : 43176.9,
"bridge" : 43143.7,
"abroad" : 43081.2,
"devoted" : 43071.7,
"paying" : 43055.9,
"literary" : 43044.0,
"writer" : 42938.0,
"fn" : 42935.6,
"israel" : 42903.2,
"disappeared" : 42899.2,
"interrupted" : 42888.9,
"stock" : 42862.8,
"readers" : 42829.6,
"dreadful" : 42801.9,
"female" : 42767.9,
"protect" : 42699.1,
"accustomed" : 42696.7,
"virginia" : 42695.1,
"type" : 42657.9,
"recognized" : 42656.3,
"salt" : 42649.2,
"destroyed" : 42641.3,
"signs" : 42635.0,
"innocent" : 42613.6,
"temper" : 42601.8,
"plenty" : 42597.0,
"pope" : 42596.2,
"avoid" : 42559.0,
"hurried" : 42480.7,
"represented" : 42461.7,
"favor" : 42434.8,
"mental" : 42347.0,
"attitude" : 42329.6,
"returning" : 42271.9,
"admiration" : 42255.2,
"brothers" : 42245.0,
"anxiety" : 42233.9,
"queen" : 42223.6,
"teach" : 42216.5,
"count" : 42207.8,
"curiosity" : 42203.8,
"solemn" : 42188.8,
"causes" : 42152.4,
"vessels" : 42102.5,
"compelled" : 42082.0,
"dance" : 42075.7,
"hotel" : 42072.5,
"wicked" : 42017.9,
"fled" : 41992.6,
"kissed" : 41987.0,
"guns" : 41921.4,
"fill" : 41911.9,
"visible" : 41884.2,
"younger" : 41825.7,
"guide" : 41750.5,
"earnest" : 41745.7,
"actual" : 41729.1,
"companions" : 41728.3,
"prisoner" : 41725.2,
"miserable" : 41704.6,
"lad" : 41692.0,
"harm" : 41617.6,
"views" : 41499.7,
"irish" : 41490.2,
"utterly" : 41479.9,
"ends" : 41431.7,
"shop" : 41349.4,
"stairs" : 41339.1,
"pardon" : 41335.1,
"gay" : 41319.3,
"beg" : 41312.2,
"seldom" : 41303.5,
"kinds" : 41287.7,
"record" : 41281.4,
"fat" : 41278.2,
"sand" : 41232.3,
"violent" : 41203.8,
"branches" : 41195.1,
"inquired" : 41185.6,
"iv" : 41152.4,
"september" : 41145.3,
"worn" : 41117.6,
"ireland" : 41105.7,
"flat" : 41104.1,
"departure" : 41101.0,
"delivered" : 41099.4,
"gift" : 41080.4,
"ruin" : 41073.3,
"skill" : 41029.8,
"cattle" : 40983.1,
"equipment" : 40935.6,
"temple" : 40926.1,
"calls" : 40860.5,
"earlier" : 40843.1,
"license" : 40827.2,
"visited" : 40818.5,
"en" : 40812.2,
"consent" : 40810.6,
"sufficiently" : 40787.7,
"natives" : 40772.7,
"wound" : 40752.1,
"laughter" : 40713.3,
"contained" : 40691.2,
"perceived" : 40691.2,
"scattered" : 40669.8,
"whence" : 40661.1,
"rushed" : 40657.9,
"chiefly" : 40639.0,
"bold" : 40614.4,
"anywhere" : 40597.0,
"witness" : 40545.6,
"foolish" : 40523.4,
"helped" : 40487.8,
"kitchen" : 40483.9,
"sell" : 40483.9,
"anybody" : 40479.1,
"self" : 40477.6,
"extremely" : 40476.0,
"treatment" : 40449.1,
"throat" : 40414.3,
"dreams" : 40383.4,
"patient" : 40380.2,
"speed" : 40372.3,
"growth" : 40351.0,
"quantity" : 40347.8,
"latin" : 40314.6,
"immense" : 40278.2,
"conclusion" : 40262.4,
"computer" : 40222.0,
"affected" : 40214.1,
"severe" : 40200.7,
"excuse" : 40180.9,
"triumph" : 40169.8,
"origin" : 40165.1,
"joseph" : 40161.1,
"slept" : 40160.3,
"eternal" : 40157.1,
"thine" : 40157.1,
"audience" : 40127.9,
"pages" : 40117.6,
"sounds" : 40089.1,
"swift" : 40080.4,
"limited" : 40060.6,
"wings" : 40014.0,
"stepped" : 39957.0,
"services" : 39933.3,
"library" : 39915.9,
"remaining" : 39911.9,
"containing" : 39908.7,
"base" : 39900.8,
"confusion" : 39871.6,
"win" : 39858.9,
"maid" : 39847.8,
"charming" : 39846.2,
"editions" : 39843.1,
"attended" : 39842.3,
"softly" : 39831.2,
"reality" : 39813.0,
"performed" : 39808.3,
"glorious" : 39774.2,
"likewise" : 39757.6,
"site" : 39722.8,
"sail" : 39713.3,
"frightened" : 39697.5,
"acquainted" : 39660.3,
"unhappy" : 39660.3,
"feared" : 39591.5,
"article" : 39586.7,
"prisoners" : 39576.5,
"store" : 39566.2,
"adopted" : 39561.4,
"shalt" : 39534.5,
"remark" : 39519.5,
"cook" : 39479.1,
"thousands" : 39463.3,
"pause" : 39426.1,
"inclined" : 39411.1,
"convinced" : 39401.6,
"band" : 39397.7,
"valuable" : 39324.1,
"hence" : 39316.2,
"desert" : 39278.2,
"effects" : 39257.6,
"kiss" : 39251.3,
"plant" : 39201.5,
"ice" : 39198.3,
"ball" : 39180.9,
"stick" : 39108.1,
"absolute" : 39093.1,
"readily" : 39085.2,
"behold" : 39051.1,
"fierce" : 39048.0,
"argument" : 38991.0,
"observe" : 38990.2,
"blessed" : 38967.3,
"bosom" : 38956.2,
"rage" : 38949.1,
"striking" : 38941.2,
"discovery" : 38883.4,
"creatures" : 38875.5,
"shouted" : 38859.7,
"guilty" : 38836.7,
"related" : 38810.6,
"setting" : 38686.4,
"forgot" : 38668.2,
"punishment" : 38650.8,
"gun" : 38632.6,
"slightly" : 38629.5,
"articles" : 38575.7,
"police" : 38557.5,
"mysterious" : 38550.4,
"extended" : 38536.1,
"confess" : 38535.3,
"shade" : 38527.4,
"murder" : 38525.0,
"emotion" : 38510.0,
"destruction" : 38505.3,
"wondered" : 38474.4,
"increasing" : 38463.3,
"hide" : 38423.8,
"expedition" : 38402.4,
"horror" : 38361.3,
"local" : 38355.7,
"expenses" : 38353.4,
"ignorant" : 38321.7,
"doctrine" : 38303.5,
"generous" : 38301.1,
"range" : 38284.5,
"host" : 38240.2,
"wet" : 38201.5,
"cloud" : 38187.2,
"mystery" : 38185.6,
"ed" : 38165.9,
"waste" : 38164.3,
"changes" : 38156.4,
"possess" : 38151.6,
"consciousness" : 38142.1,
"february" : 38131.1,
"trembling" : 38110.5,
"disease" : 38039.3,
"formerly" : 38037.7,
"spend" : 38033.7,
"production" : 38021.1,
"source" : 38004.5,
"mankind" : 37996.6,
"universal" : 37991.8,
"deck" : 37965.7,
"sees" : 37923.0,
"habits" : 37913.5,
"estate" : 37878.7,
"aunt" : 37832.8,
"reign" : 37816.2,
"humble" : 37795.6,
"compliance" : 37787.7,
"delay" : 37771.1,
"shining" : 37764.0,
"reported" : 37740.2,
"hers" : 37736.3,
"unfortunate" : 37731.5,
"midnight" : 37703.8,
"listening" : 37700.7,
"flower" : 37688.8,
"hero" : 37673.8,
"accomplished" : 37666.7,
"doth" : 37665.1,
"classes" : 37658.7,
"thanks" : 37642.1,
"banks" : 37623.1,
"philosophy" : 37611.3,
"belong" : 37593.9,
"finger" : 37580.4,
"comfortable" : 37542.5,
"market" : 37529.0,
"cap" : 37514.8,
"waves" : 37462.5,
"woman's" : 37437.2,
"glanced" : 37392.9,
"troubled" : 37376.3,
"difficulties" : 37338.3,
"picked" : 37330.4,
"european" : 37324.9,
"purposes" : 37314.6,
"somewhere" : 37273.5,
"delighted" : 37264.0,
"pushed" : 37259.2,
"press" : 37241.8,
"household" : 37219.7,
"fleet" : 37214.9,
"baby" : 37208.6,
"region" : 37161.9,
"lately" : 37158.7,
"uttered" : 37142.1,
"exact" : 37133.4,
"image" : 37131.9,
"ages" : 37127.1,
"murmured" : 37113.7,
"melancholy" : 37112.9,
"suspicion" : 37107.3,
"bowed" : 37089.9,
"refuse" : 37051.9,
"elizabeth" : 37040.9,
"staff" : 37036.1,
"liability" : 37032.2,
"we'll" : 37019.5,
"enjoyed" : 36985.5,
"stretched" : 36973.6,
"gaze" : 36916.7,
"belonged" : 36888.2,
"ashamed" : 36880.3,
"reward" : 36873.2,
"meal" : 36778.2,
"blame" : 36775.0,
"nodded" : 36767.1,
"status" : 36747.4,
"opinions" : 36725.2,
"indicate" : 36699.1,
"poem" : 36654.0,
"savage" : 36642.9,
"arise" : 36635.0,
"voyage" : 36628.7,
"misery" : 36563.8,
"guests" : 36562.2,
"painted" : 36515.6,
"attend" : 36513.2,
"afford" : 36482.3,
"donate" : 36471.3,
"job" : 36450.7,
"proceed" : 36439.6,
"loves" : 36414.3,
"forehead" : 36382.6,
"regret" : 36381.9,
"plainly" : 36355.7,
"risk" : 36336.8,
"ad" : 36312.2,
"lighted" : 36312.2,
"angel" : 36288.5,
"rapid" : 36285.3,
"distinct" : 36267.9,
"doubtless" : 36256.1,
"properly" : 36256.1,
"wit" : 36238.7,
"fame" : 36237.9,
"singular" : 36221.3,
"error" : 36182.5,
"utmost" : 36158.0,
"methods" : 36155.6,
"reputation" : 36155.6,
"appeal" : 36123.2,
"she's" : 36120.8,
"w" : 36088.3,
"strongly" : 36080.4,
"margaret" : 36006.9,
"lack" : 35996.6,
"breaking" : 35977.6,
"dawn" : 35959.4,
"violence" : 35957.8,
"fatal" : 35917.5,
"render" : 35896.1,
"career" : 35881.1,
"design" : 35854.2,
"displayed" : 35848.6,
"gets" : 35843.1,
"commercial" : 35830.4,
"forgive" : 35809.9,
"lights" : 35807.5,
"agreeable" : 35805.1,
"suggestion" : 35777.4,
"utter" : 35777.4,
"sheep" : 35725.2,
"resolution" : 35703.9,
"spare" : 35671.4,
"patience" : 35631.9,
"domestic" : 35618.4,
"concluded" : 35542.5,
"'tis" : 35536.9,
"farm" : 35520.3,
"reference" : 35509.2,
"chinese" : 35506.1,
"exist" : 35494.2,
"corn" : 35491.8,
"approaching" : 35449.1,
"alike" : 35448.3,
"mounted" : 35384.2,
"jane" : 35352.6,
"issue" : 35310.7,
"key" : 35281.4,
"providing" : 35281.4,
"majority" : 35276.6,
"measures" : 35270.3,
"towns" : 35240.2,
"flame" : 35233.1,
"boston" : 35180.9,
"dared" : 35177.7,
"ignorance" : 35152.4,
"reduced" : 35122.4,
"occasionally" : 35100.2,
"y" : 35094.7,
"weakness" : 35090.7,
"furnished" : 35089.1,
"china" : 35058.3,
"priests" : 35025.1,
"flying" : 34986.3,
"cloth" : 34966.5,
"gazed" : 34953.9,
"profit" : 34932.5,
"fourth" : 34923.8,
"bell" : 34920.6,
"hitherto" : 34911.1,
"benefit" : 34854.2,
"movements" : 34847.1,
"eagerly" : 34842.3,
"acted" : 34841.5,
"urged" : 34841.5,
"ascii" : 34791.7,
"disposed" : 34790.9,
"electronically" : 34761.6,
"atmosphere" : 34748.2,
"chapter" : 34699.9,
"begged" : 34626.3,
"helen" : 34619.2,
"hole" : 34616.8,
"invited" : 34593.1,
"borne" : 34548.8,
"departed" : 34546.4,
"catholic" : 34522.7,
"files" : 34514.0,
"reasonable" : 34512.4,
"sugar" : 34501.3,
"replacement" : 34483.9,
"sigh" : 34454.7,
"humanity" : 34419.1,
"thrust" : 34395.3,
"frame" : 34362.9,
"opposition" : 34350.2,
"disk" : 34347.1,
"haste" : 34328.9,
"lonely" : 34328.1,
"artist" : 34290.1,
"knight" : 34282.2,
"quarters" : 34277.4,
"charm" : 34223.6,
"substance" : 34163.5,
"rolled" : 34154.0,
"email" : 34145.3,
"flung" : 34144.5,
"celebrated" : 34143.7,
"division" : 34136.6,
"slavery" : 34114.5,
"verse" : 34107.3,
"decision" : 34067.8,
"probable" : 34063.8,
"painful" : 34042.5,
"governor" : 34023.5,
"forever" : 33999.7,
"turns" : 33988.7,
"branch" : 33961.0,
"ocean" : 33958.6,
"rear" : 33947.5,
"leader" : 33937.2,
"delightful" : 33915.1,
"stared" : 33876.3,
"boats" : 33808.3,
"keen" : 33788.5,
"disposition" : 33779.8,
"senses" : 33722.1,
"occasions" : 33700.7,
"readable" : 33692.8,
"beloved" : 33687.3,
"inches" : 33684.1,
"bones" : 33679.3,
"enthusiasm" : 33648.5,
"materials" : 33589.9,
"luck" : 33588.4,
"derived" : 33575.7,
"managed" : 33479.2,
"community" : 33464.9,
"apparent" : 33464.2,
"preserved" : 33454.7,
"magnificent" : 33434.9,
"hurry" : 33396.1,
"scheme" : 33393.7,
"oil" : 33392.2,
"thence" : 33382.7,
"reaching" : 33374.8,
"dim" : 33362.1,
"wretched" : 33290.1,
"hanging" : 33275.9,
"pipe" : 33218.1,
"useless" : 33215.7,
"nevertheless" : 33203.1,
"print" : 33203.1,
"smooth" : 33167.5,
"solid" : 33148.5,
"pursued" : 33135.0,
"necessarily" : 33108.1,
"build" : 33084.4,
"attempted" : 33080.5,
"centuries" : 33059.9,
"eggs" : 33059.9,
"equivalent" : 33033.8,
"hastily" : 33032.2,
"burned" : 33008.5,
"you'd" : 33008.5,
"recent" : 33000.5,
"oh" : 32957.0,
"travel" : 32951.5,
"cries" : 32925.4,
"noon" : 32919.9,
"crying" : 32901.7,
"generations" : 32858.9,
"located" : 32835.2,
"cabin" : 32809.1,
"announcement" : 32782.2,
"britain" : 32752.9,
"compared" : 32731.6,
"handed" : 32720.5,
"cease" : 32714.2,
"smaller" : 32668.3,
"circumstance" : 32666.7,
"tent" : 32665.1,
"frequent" : 32616.8,
"alarm" : 32602.6,
"nervous" : 32602.6,
"beast" : 32586.8,
"what's" : 32580.5,
"aloud" : 32565.4,
"independent" : 32519.5,
"gates" : 32509.3,
"distinction" : 32502.9,
"essential" : 32487.9,
"observation" : 32472.9,
"stronger" : 32455.5,
"recovered" : 32450.7,
"belonging" : 32404.8,
"loving" : 32402.4,
"masters" : 32388.2,
"writers" : 32341.5,
"cf." : 32326.5,
"permanent" : 32324.9,
"mortal" : 32284.6,
"stern" : 32284.6,
"gratitude" : 32278.2,
"preserve" : 32248.2,
"burden" : 32242.6,
"aspect" : 32240.3,
"millions" : 32199.1,
"merry" : 32182.5,
"knife" : 32177.8,
"dread" : 32133.5,
"clever" : 32114.5,
"applicable" : 32052.0,
"district" : 32050.4,
"shadows" : 32038.5,
"jim" : 32031.4,
"silk" : 32029.8,
"failure" : 32022.7,
"links" : 32009.3,
"cent" : 31995.0,
"sentiment" : 31990.3,
"amid" : 31980.0,
"profits" : 31968.9,
"agent" : 31949.1,
"finds" : 31934.9,
"russia" : 31928.6,
"bade" : 31920.6,
"russian" : 31857.4,
"desperate" : 31847.9,
"union" : 31834.4,
"imagined" : 31804.4,
"contempt" : 31802.0,
"raising" : 31786.2,
"lords" : 31775.1,
"hell" : 31770.3,
"separated" : 31752.1,
"grant" : 31743.4,
"seriously" : 31736.3,
"tribes" : 31726.8,
"hit" : 31725.2,
"enormous" : 31700.7,
"defective" : 31696.0,
"conviction" : 31688.8,
"secured" : 31688.8,
"mixed" : 31679.4,
"insisted" : 31661.2,
"wooden" : 31632.7,
"prefer" : 31621.6,
"prayers" : 31606.6,
"fever" : 31586.8,
"selected" : 31568.6,
"daughters" : 31553.6,
"treat" : 31550.4,
"warning" : 31522.7,
"flew" : 31518.0,
"speaks" : 31489.5,
"developed" : 31472.9,
"impulse" : 31472.9,
"slipped" : 31472.1,
"ours" : 31465.0,
"johnson" : 31408.8,
"mistaken" : 31390.6,
"damages" : 31382.7,
"ambition" : 31356.6,
"resumed" : 31351.8,
"christmas" : 31347.9,
"yield" : 31347.1,
"ideal" : 31343.1,
"schools" : 31335.2,
"confirmed" : 31324.1,
"descended" : 31322.5,
"rush" : 31290.9,
"falls" : 31268.0,
"deny" : 31254.5,
"calculated" : 31233.2,
"correct" : 31211.8,
"perform" : 31210.2,
"hadn't" : 31207.0,
"somehow" : 31184.9,
"accordingly" : 31181.7,
"stayed" : 31165.9,
"acquired" : 31144.5,
"counsel" : 31135.1,
"distress" : 31131.9,
"sins" : 31105.0,
"notion" : 31102.6,
"discussion" : 31093.9,
"constitution" : 31085.2,
"anne" : 31055.9,
"hundreds" : 31006.1,
"instrument" : 31001.3,
"firmly" : 30976.0,
"actions" : 30971.3,
"steady" : 30965.7,
"remarks" : 30933.3,
"empire" : 30925.4,
"elements" : 30909.6,
"idle" : 30891.4,
"pen" : 30885.8,
"entering" : 30872.4,
"online" : 30872.4,
"africa" : 30843.1,
"permit" : 30835.2,
"th'" : 30812.3,
"tide" : 30803.6,
"vol" : 30800.4,
"leaned" : 30798.8,
"college" : 30750.6,
"maintain" : 30737.9,
"sovereign" : 30706.3,
"tail" : 30699.9,
"generation" : 30695.2,
"crowded" : 30692.8,
"fears" : 30679.4,
"nights" : 30650.9,
"limitation" : 30643.8,
"tied" : 30625.6,
"horrible" : 30622.4,
"cat" : 30600.2,
"displaying" : 30545.7,
"port" : 30537.7,
"male" : 30525.9,
"experienced" : 30487.9,
"opposed" : 30485.5,
"treaty" : 30483.2,
"contents" : 30481.6,
"rested" : 30450.7,
"mode" : 30448.3,
"poured" : 30431.7,
"les" : 30400.1,
"occur" : 30399.3,
"seeking" : 30389.0,
"practically" : 30323.3,
"abandoned" : 30312.3,
"reports" : 30295.7,
"eleven" : 30294.1,
"sank" : 30292.5,
"begins" : 30288.5,
"founded" : 30249.0,
"brings" : 30244.2,
"trace" : 30238.7,
"instinct" : 30233.9,
"collected" : 30227.6,
"scotland" : 30202.3,
"characteristic" : 30186.5,
"chose" : 30184.1,
"cheerful" : 30177.0,
"tribe" : 30165.9,
"costs" : 30125.6,
"threatened" : 30120.8,
"arrangement" : 30110.5,
"western" : 30106.6,
"sang" : 30102.6,
"beings" : 30093.1,
"sam" : 30027.5,
"pressure" : 30019.6,
"politics" : 30010.8,
"sorts" : 29999.8,
"shelter" : 29992.7,
"rude" : 29987.1,
"scientific" : 29984.0,
"revealed" : 29965.0,
"winds" : 29930.9,
"riding" : 29915.1,
"scenes" : 29905.6,
"shake" : 29900.9,
"industry" : 29891.4,
"claims" : 29884.3,
"pp." : 29884.3,
"merit" : 29862.9,
"profession" : 29855.0,
"lamp" : 29830.5,
"interview" : 29817.0,
"territory" : 29813.1,
"sleeping" : 29806.7,
"sex" : 29802.8,
"coffee" : 29791.7,
"devotion" : 29791.7,
"thereof" : 29758.5,
"creation" : 29755.3,
"trail" : 29747.4,
"romans" : 29746.6,
"supported" : 29741.9,
"requires" : 29704.7,
"fathers" : 29703.9,
"prospect" : 29680.9,
"obey" : 29679.4,
"alexander" : 29667.5,
"shone" : 29662.0,
"operation" : 29642.2,
"northern" : 29598.7,
"nurse" : 29567.8,
"profound" : 29544.1,
"hungry" : 29534.6,
"scott" : 29470.5,
"sisters" : 29453.1,
"assure" : 29451.5,
"exceedingly" : 29445.2,
"match" : 29445.2,
"wrath" : 29406.4,
"continually" : 29405.6,
"rest." : 29404.0,
"gifts" : 29381.1,
"folly" : 29369.2,
"chain" : 29362.9,
"uniform" : 29359.0,
"debt" : 29346.3,
"teaching" : 29345.5,
"venture" : 29333.6,
"execution" : 29331.3,
"shoes" : 29324.1,
"mood" : 29319.4,
"crew" : 29317.0,
"perceive" : 29302.0,
"accounts" : 29288.5,
"eating" : 29284.6,
"multitude" : 29272.7,
"declare" : 29260.1,
"yard" : 29253.7,
"o'er" : 29218.1,
"astonishment" : 29188.1,
"version" : 29178.6,
"vague" : 29158.0,
"odd" : 29121.6,
"grateful" : 29103.4,
"nearest" : 29094.7,
"infinite" : 29093.9,
"elsewhere" : 29086.0,
"copying" : 29053.6,
"apartment" : 29045.7,
"activity" : 29037.0,
"wives" : 29022.7,
"parted" : 29014.0,
"security" : 29002.2,
"cared" : 28977.6,
"sensible" : 28974.5,
"owing" : 28972.1,
"martin" : 28942.0,
"saturday" : 28923.8,
"cottage" : 28919.1,
"jews" : 28880.3,
"leaning" : 28852.6,
"capacity" : 28840.8,
"joe" : 28833.6,
"settle" : 28821.0,
"referred" : 28805.2,
"francis" : 28802.8,
"holder" : 28802.0,
"involved" : 28795.7,
"sunshine" : 28794.1,
"dutch" : 28790.1,
"council" : 28741.1,
"princes" : 28733.2,
"ate" : 28730.0,
"examination" : 28722.1,
"steel" : 28720.5,
"strangers" : 28717.3,
"beheld" : 28689.7,
"test" : 28684.1,
"noted" : 28678.6,
"slightest" : 28678.6,
"widow" : 28660.4,
"charity" : 28623.2,
"realized" : 28608.2,
"element" : 28584.4,
"shed" : 28565.4,
"errors" : 28560.7,
"communication" : 28547.2,
"reflection" : 28544.9,
"attacked" : 28530.6,
"organization" : 28527.5,
"maintained" : 28505.3,
"restored" : 28491.1,
"folks" : 28478.4,
"concealed" : 28468.1,
"accordance" : 28450.7,
"heavens" : 28450.7,
"star" : 28433.3,
"examined" : 28407.2,
"deeds" : 28369.2,
"wordforms" : 28357.4,
"somebody" : 28352.6,
"incident" : 28345.5,
"oath" : 28324.1,
"guest" : 28321.0,
"bar" : 28297.3,
"row" : 28295.7,
"poverty" : 28267.2,
"bottle" : 28240.3,
"prevented" : 28237.9,
"bless" : 28226.8,
"stir" : 28214.2,
"intense" : 28207.1,
"completed" : 28162.0,
"quarrel" : 28149.3,
"touching" : 28146.9,
"inner" : 28146.1,
"available" : 28136.7,
"fix" : 28136.7,
"resistance" : 28131.9,
"unusual" : 28120.8,
"deed" : 28114.5,
"derive" : 28109.8,
"hollow" : 28106.6,
"suspected" : 28105.8,
"contains" : 28103.4,
"sighed" : 28101.8,
"province" : 28082.9,
"deserted" : 28082.1,
"establishment" : 28072.6,
"vote" : 28063.9,
"muttered" : 28043.3,
"thither" : 28028.3,
"oxford" : 28020.4,
"cavalry" : 28018.8,
"lofty" : 28017.2,
"endure" : 27969.7,
"succeed" : 27952.3,
"leg" : 27931.0,
"bid" : 27910.4,
"alice" : 27885.9,
"hated" : 27883.5,
"civilization" : 27878.7,
"u.s." : 27877.2,
"acting" : 27874.0,
"landed" : 27866.9,
"christians" : 27862.1,
"passions" : 27855.0,
"interior" : 27851.1,
"scarce" : 27839.2,
"lightly" : 27830.5,
"disturbed" : 27825.7,
"rev" : 27821.8,
"supreme" : 27819.4,
"hang" : 27813.9,
"notwithstanding" : 27812.3,
"shock" : 27777.5,
"exception" : 27775.9,
"offering" : 27766.4,
"display" : 27765.6,
"strain" : 27765.6,
"drank" : 27750.6,
"confined" : 27737.9,
"o" : 27727.6,
"exhausted" : 27713.4,
"poets" : 27693.6,
"sounded" : 27690.5,
"aim" : 27679.4,
"critical" : 27665.9,
"jerusalem" : 27662.0,
"directions" : 27660.4,
"negro" : 27637.4,
"fearful" : 27624.0,
"standard" : 27611.3,
"studied" : 27586.0,
"bag" : 27579.7,
"n" : 27563.9,
"buildings" : 27546.5,
"consequences" : 27544.1,
"commenced" : 27534.6,
"deeper" : 27506.1,
"repeat" : 27504.5,
"driving" : 27495.8,
"beasts" : 27484.8,
"track" : 27484.0,
"rid" : 27457.1,
"holds" : 27452.3,
"residence" : 27445.2,
"steadily" : 27404.1,
"intimate" : 27403.3,
"drinking" : 27385.1,
"swear" : 27380.3,
"treasure" : 27351.1,
"fun" : 27345.5,
"throwing" : 27340.0,
"apt" : 27321.0,
"enterprise" : 27321.0,
"queer" : 27302.0,
"seed" : 27296.5,
"tower" : 27285.4,
"runs" : 27276.7,
"defend" : 27261.7,
"favourite" : 27258.5,
"desires" : 27249.0,
"heavily" : 27238.7,
"assembled" : 27230.0,
"existed" : 27221.3,
"depends" : 27216.6,
"poems" : 27210.2,
"hesitated" : 27208.7,
"stuff" : 27203.9,
"section" : 27202.3,
"settlement" : 27201.5,
"staring" : 27199.2,
"sole" : 27191.2,
"roads" : 27188.9,
"plate" : 27174.6,
"mexico" : 27162.8,
"overcome" : 27160.4,
"pains" : 27159.6,
"performing" : 27141.4,
"dwell" : 27132.7,
"grounds" : 27110.5,
"taxes" : 27100.3,
"marble" : 27081.3,
"recently" : 27076.5,
"tones" : 27064.7,
"ability" : 27037.8,
"awake" : 27037.8,
"walter" : 27037.8,
"wave" : 27034.6,
"shaking" : 27018.0,
"folk" : 26996.6,
"possibility" : 26961.0,
"butter" : 26960.2,
"fury" : 26960.2,
"marched" : 26915.9,
"moses" : 26908.8,
"writes" : 26901.7,
"issued" : 26897.7,
"sailed" : 26858.2,
"instructions" : 26838.4,
"hatred" : 26834.4,
"pursuit" : 26832.9,
"pull" : 26815.5,
"furniture" : 26789.3,
"additions" : 26788.6,
"hid" : 26783.0,
"rope" : 26781.4,
"vi" : 26770.4,
"adventure" : 26767.2,
"royalty" : 26757.7,
"vanished" : 26753.7,
"arts" : 26748.2,
"elder" : 26727.6,
"signal" : 26715.8,
"wanting" : 26696.0,
"supplied" : 26694.4,
"feast" : 26689.7,
"safely" : 26650.1,
"burn" : 26627.2,
"describe" : 26605.8,
"references" : 26602.6,
"lesson" : 26592.4,
"annual" : 26582.9,
"card" : 26574.2,
"passes" : 26556.0,
"application" : 26542.5,
"intelligent" : 26535.4,
"county" : 26518.8,
"beaten" : 26513.2,
"presents" : 26511.7,
"format" : 26507.7,
"flow" : 26504.5,
"sixty" : 26488.7,
"scale" : 26480.8,
"damage" : 26479.2,
"marks" : 26478.4,
"obtaining" : 26468.9,
"moreover" : 26447.6,
"commerce" : 26403.3,
"startled" : 26381.1,
"southern" : 26375.6,
"consequently" : 26362.1,
"outer" : 26352.6,
"belongs" : 26350.3,
"ben" : 26346.3,
"wrought" : 26343.9,
"average" : 26342.4,
"naked" : 26341.6,
"conducted" : 26318.6,
"rivers" : 26306.8,
"songs" : 26299.6,
"obvious" : 26283.8,
"foundation" : 26269.6,
"concern" : 26239.5,
"ceremony" : 26230.0,
"magic" : 26210.2,
"campaign" : 26203.9,
"hunting" : 26202.3,
"carolina" : 26199.2,
"liberal" : 26184.1,
"whisper" : 26184.1,
"largely" : 26177.0,
"commonly" : 26158.8,
"torn" : 26151.7,
"exists" : 26145.4,
"contributions" : 26143.0,
"hunt" : 26139.8,
"teacher" : 26128.8,
"christianity" : 26112.9,
"lawyer" : 26093.2,
"operations" : 26091.6,
"detail" : 26089.2,
"shortly" : 26070.2,
"caesar" : 26043.3,
"wondering" : 26040.9,
"leaders" : 26037.0,
"blessing" : 26023.5,
"princess" : 26022.7,
"he'd" : 26014.8,
"altar" : 26013.2,
"tenderness" : 26013.2,
"tiny" : 25980.0,
"web" : 25971.3,
"cardinal" : 25968.9,
"sharply" : 25962.6,
"regiment" : 25926.2,
"chest" : 25915.1,
"distinctly" : 25914.4,
"purple" : 25912.0,
"creating" : 25910.4,
"gather" : 25905.7,
"depth" : 25902.5,
"indignation" : 25876.4,
"performance" : 25870.8,
"election" : 25863.7,
"prosperity" : 25843.9,
"gloomy" : 25825.7,
"conception" : 25822.6,
"clerk" : 25785.4,
"decide" : 25779.1,
"drunk" : 25774.3,
"victim" : 25770.4,
"reflected" : 25768.8,
"pour" : 25765.6,
"preceding" : 25764.0,
"individuals" : 25759.3,
"gazing" : 25737.9,
"absurd" : 25718.9,
"lift" : 25715.0,
"gesture" : 25714.2,
"armies" : 25710.2,
"limbs" : 25667.5,
"manage" : 25667.5,
"brethren" : 25642.2,
"hugh" : 25616.1,
"plays" : 25582.1,
"hastened" : 25580.5,
"dragged" : 25533.0,
"motive" : 25512.5,
"whatsoever" : 25421.5,
"pointing" : 25416.7,
"verses" : 25411.2,
"pronounced" : 25410.4,
"exchange" : 25408.0,
"definite" : 25393.8,
"emperor" : 25366.9,
"tendency" : 25363.7,
"remote" : 25339.2,
"finish" : 25337.6,
"flag" : 25313.9,
"boots" : 25307.6,
"enabled" : 25290.1,
"administration" : 25289.4,
"denied" : 25283.8,
"churches" : 25279.1,
"rarely" : 25251.4,
"earnestly" : 25212.6,
"considering" : 25210.2,
"previously" : 25203.9,
"ugly" : 25169.9,
"bears" : 25165.9,
"signed" : 25156.4,
"genuine" : 25153.3,
"harmless" : 25150.1,
"mingled" : 25120.1,
"obedience" : 25107.4,
"walks" : 25103.4,
"training" : 25071.0,
"badly" : 25045.7,
"feed" : 25028.3,
"central" : 25010.1,
"contrast" : 25009.3,
"relieved" : 24995.8,
"romance" : 24988.7,
"mississippi" : 24987.9,
"structure" : 24976.1,
"payment" : 24963.4,
"pace" : 24960.2,
"passages" : 24957.9,
"succession" : 24955.5,
"persuaded" : 24952.3,
"sources" : 24952.3,
"inquiry" : 24950.0,
"inspired" : 24932.6,
"angels" : 24925.4,
"roll" : 24922.3,
"wilt" : 24907.2,
"inch" : 24887.5,
"troubles" : 24878.8,
"perfection" : 24870.1,
"lee" : 24850.3,
"wherever" : 24845.5,
"owe" : 24817.8,
"handle" : 24805.2,
"advantages" : 24802.8,
"trip" : 24790.2,
"shoot" : 24775.9,
"fortunate" : 24755.3,
"newspaper" : 24752.2,
"employment" : 24726.1,
"fitted" : 24721.3,
"refuge" : 24717.4,
"misfortune" : 24711.0,
"providence" : 24702.3,
"owns" : 24692.8,
"cutting" : 24671.5,
"beard" : 24658.8,
"stirred" : 24654.1,
"tear" : 24653.3,
"dan" : 24639.8,
"resist" : 24634.3,
"bob" : 24620.1,
"depths" : 24612.9,
"maiden" : 24599.5,
"determine" : 24596.3,
"commission" : 24594.0,
"merchant" : 24578.9,
"whereas" : 24576.5,
"crossing" : 24575.0,
"independence" : 24561.5,
"lively" : 24553.6,
"breeze" : 24552.0,
"provinces" : 24533.8,
"jean" : 24520.4,
"virtues" : 24516.4,
"conceived" : 24510.1,
"relative" : 24507.7,
"solitary" : 24481.6,
"smell" : 24476.1,
"wandering" : 24466.6,
"thereby" : 24465.0,
"eighteen" : 24454.7,
"locked" : 24438.1,
"provision" : 24386.7,
"courts" : 24372.4,
"eaten" : 24366.9,
"historical" : 24366.9,
"regarding" : 24365.3,
"florence" : 24349.5,
"preferred" : 24349.5,
"pick" : 24343.2,
"ruined" : 24340.0,
"wherein" : 24337.6,
"vanity" : 24324.2,
"condemned" : 24297.3,
"deliver" : 24296.5,
"unexpected" : 24270.4,
"desk" : 24263.3,
"gross" : 24261.7,
"lane" : 24259.3,
"happens" : 24249.0,
"represent" : 24244.3,
"billy" : 24241.9,
"root" : 24241.9,
"holland" : 24220.5,
"mud" : 24215.0,
"respectable" : 24197.6,
"cleared" : 24196.8,
"feels" : 24189.7,
"fruits" : 24181.8,
"testimony" : 24178.6,
"milton" : 24177.8,
"existing" : 24176.2,
"bride" : 24170.7,
"rang" : 24165.2,
"ranks" : 24146.2,
"responsibility" : 24110.6,
"beating" : 24105.8,
"disappointed" : 24102.7,
"suitable" : 24099.5,
"depend" : 24080.5,
"judges" : 24065.5,
"giant" : 24050.4,
"grasp" : 24039.4,
"arrive" : 24036.2,
"simplicity" : 24033.8,
"autumn" : 24030.7,
"absent" : 24029.9,
"legally" : 24015.6,
"veil" : 24014.0,
"gloom" : 23980.8,
"doubtful" : 23956.3,
"suspect" : 23953.1,
"weapons" : 23950.8,
"limits" : 23919.9,
"determination" : 23918.3,
"feeble" : 23902.5,
"prophet" : 23900.1,
"shak" : 23892.2,
"gathering" : 23868.5,
"basis" : 23866.1,
"examine" : 23826.5,
"corrupt" : 23785.4,
"payments" : 23779.9,
"returns" : 23775.1,
"laying" : 23756.9,
"prize" : 23749.0,
"instances" : 23733.2,
"greeks" : 23730.8,
"d" : 23730.0,
"they're" : 23719.0,
"theatre" : 23711.0,
"purchase" : 23704.7,
"comparison" : 23702.3,
"composition" : 23694.4,
"rival" : 23688.9,
"someone" : 23680.2,
"realize" : 23669.1,
"defeat" : 23665.2,
"demands" : 23635.9,
"foe" : 23628.8,
"shared" : 23620.1,
"consists" : 23616.9,
"studies" : 23614.5,
"balance" : 23609.8,
"intercourse" : 23609.0,
"id" : 23605.0,
"forming" : 23571.0,
"slender" : 23570.2,
"coach" : 23561.5,
"criminal" : 23556.8,
"knocked" : 23553.6,
"silly" : 23553.6,
"humour" : 23550.4,
"masses" : 23542.5,
"indifferent" : 23541.0,
"recall" : 23534.6,
"occupation" : 23529.1,
"discourse" : 23524.3,
"keeps" : 23523.5,
"regions" : 23516.4,
"intervals" : 23510.9,
"assist" : 23508.5,
"novel" : 23506.1,
"intellect" : 23493.5,
"leads" : 23492.7,
"hither" : 23489.5,
"tales" : 23483.2,
"sale" : 23476.9,
"revenge" : 23465.8,
"lucy" : 23459.5,
"yonder" : 23446.8,
"resources" : 23435.7,
"jealous" : 23421.5,
"we're" : 23376.4,
"wheel" : 23376.4,
"invitation" : 23368.5,
"narrative" : 23365.3,
"risen" : 23364.5,
"burnt" : 23335.3,
"sentiments" : 23335.3,
"inferior" : 23327.3,
"amusement" : 23320.2,
"marie" : 23309.9,
"flash" : 23304.4,
"recognize" : 23302.8,
"swiftly" : 23299.7,
"portrait" : 23294.9,
"create" : 23259.3,
"summoned" : 23245.9,
"suggest" : 23241.1,
"induced" : 23229.2,
"conflict" : 23227.7,
"fed" : 23220.5,
"curse" : 23215.8,
"disappointment" : 23215.8,
"helpless" : 23213.4,
"preparing" : 23208.7,
"construction" : 23205.5,
"lincoln" : 23183.4,
"zeal" : 23168.3,
"responsible" : 23160.4,
"indicated" : 23152.5,
"groups" : 23150.1,
"positive" : 23135.1,
"germans" : 23128.8,
"attracted" : 23124.0,
"vengeance" : 23121.6,
"fort" : 23113.7,
"club" : 23099.5,
"cure" : 23094.0,
"stout" : 23094.0,
"missed" : 23090.0,
"gracious" : 23082.1,
"include" : 23071.0,
"flood" : 23042.5,
"satisfy" : 23040.2,
"agony" : 23033.0,
"respects" : 23020.4,
"ventured" : 23020.4,
"implied" : 23018.8,
"maria" : 23010.1,
"stupid" : 22992.7,
"seas" : 22991.9,
"spaniards" : 22987.9,
"grain" : 22985.6,
"enjoyment" : 22982.4,
"wearing" : 22981.6,
"indifference" : 22936.5,
"conceal" : 22933.4,
"horizon" : 22922.3,
"pleasures" : 22903.3,
"therein" : 22901.7,
"precisely" : 22900.1,
"canada" : 22872.4,
"day's" : 22868.5,
"assume" : 22844.8,
"registered" : 22844.8,
"estimate" : 22841.6,
"steep" : 22823.4,
"route" : 22808.4,
"gardens" : 22789.4,
"visitor" : 22780.7,
"closer" : 22770.4,
"harmony" : 22753.0,
"non" : 22749.0,
"thunder" : 22749.0,
"wire" : 22741.1,
"graceful" : 22736.4,
"crept" : 22735.6,
"greece" : 22734.0,
"childhood" : 22726.9,
"knee" : 22696.0,
"saddle" : 22691.3,
"supplies" : 22683.4,
"weeping" : 22673.1,
"mostly" : 22658.0,
"paragraphs" : 22647.8,
"unconscious" : 22647.0,
"mutual" : 22639.8,
"scorn" : 22638.3,
"grows" : 22628.0,
"external" : 22592.4,
"agents" : 22586.8,
"software" : 22586.1,
"institutions" : 22584.5,
"losing" : 22575.0,
"universe" : 22574.2,
"clock" : 22548.9,
"attempts" : 22547.3,
"instruction" : 22538.6,
"injury" : 22525.9,
"roots" : 22522.8,
"receipt" : 22519.6,
"jumped" : 22518.8,
"dearest" : 22502.2,
"sore" : 22499.8,
"earliest" : 22496.7,
"finest" : 22488.0,
"enable" : 22482.4,
"discipline" : 22480.8,
"motives" : 22465.8,
"fastened" : 22454.7,
"introduction" : 22446.0,
"converted" : 22419.9,
"wilderness" : 22419.1,
"confused" : 22416.0,
"fancied" : 22410.4,
"offices" : 22410.4,
"slip" : 22402.5,
"revolution" : 22396.2,
"wedding" : 22374.8,
"girl's" : 22366.1,
"farmer" : 22364.5,
"silently" : 22363.7,
"fires" : 22362.2,
"wept" : 22355.0,
"behalf" : 22346.3,
"reckon" : 22346.3,
"responded" : 22333.7,
"uncertain" : 22328.9,
"neglected" : 22328.1,
"stroke" : 22326.6,
"exquisite" : 22305.2,
"engagement" : 22298.9,
"dirty" : 22289.4,
"rolling" : 22286.2,
"platform" : 22282.3,
"messenger" : 22272.0,
"privilege" : 22260.9,
"admirable" : 22255.4,
"offers" : 22252.2,
"mischief" : 22247.4,
"physician" : 22245.1,
"imposed" : 22228.5,
"organized" : 22222.1,
"covering" : 22208.7,
"student" : 22207.1,
"daring" : 22198.4,
"cave" : 22174.7,
"wars" : 22170.7,
"convey" : 22166.7,
"he'll" : 22163.6,
"sincere" : 22162.0,
"tradition" : 22160.4,
"gravely" : 22153.3,
"combined" : 22149.3,
"gallant" : 22122.4,
"sensation" : 22120.9,
"travelling" : 22120.1,
"charges" : 22089.2,
"submit" : 22087.6,
"tragedy" : 22082.1,
"specific" : 22075.0,
"commander" : 22064.7,
"inn" : 22059.2,
"stiff" : 22057.6,
"accompany" : 22056.8,
"score" : 22052.0,
"virgin" : 22052.0,
"farewell" : 22033.0,
"paradise" : 22014.9,
"villages" : 22012.5,
"hunger" : 21975.3,
"trembled" : 21947.6,
"favorite" : 21929.4,
"criticism" : 21925.5,
"proprietary" : 21921.5,
"customs" : 21915.2,
"cotton" : 21910.4,
"ruth" : 21907.3,
"hospital" : 21895.4,
"restrictions" : 21881.2,
"outward" : 21868.5,
"impressed" : 21857.4,
"blows" : 21851.1,
"plains" : 21847.1,
"flashed" : 21836.8,
"rent" : 21836.8,
"prey" : 21827.4,
"owed" : 21813.1,
"longing" : 21804.4,
"musical" : 21803.6,
"satisfactory" : 21803.6,
"ridiculous" : 21802.8,
"sheet" : 21798.1,
"disgrace" : 21776.7,
"colored" : 21769.6,
"shouldn't" : 21767.2,
"originally" : 21762.5,
"samuel" : 21760.1,
"wages" : 21755.4,
"papa" : 21749.8,
"gas" : 21748.2,
"inevitable" : 21741.1,
"extensive" : 21735.6,
"leisure" : 21729.3,
"deadly" : 21718.2,
"chin" : 21717.4,
"claimed" : 21713.4,
"glow" : 21707.1,
"husband's" : 21703.9,
"emotions" : 21695.2,
"adam" : 21681.8,
"jealousy" : 21661.2,
"leaf" : 21659.6,
"publication" : 21649.3,
"englishman" : 21647.8,
"allah" : 21647.0,
"jones" : 21635.1,
"hostile" : 21631.9,
"wandered" : 21621.7,
"railway" : 21615.3,
"translation" : 21607.4,
"procession" : 21575.8,
"betrayed" : 21573.4,
"pound" : 21572.6,
"admired" : 21561.5,
"elected" : 21552.0,
"pierre" : 21522.8,
"sunk" : 21519.6,
"ruins" : 21516.4,
"eastern" : 21515.6,
"roses" : 21511.7,
"citizen" : 21498.2,
"reminded" : 21497.4,
"deceived" : 21495.9,
"tables" : 21432.6,
"beach" : 21431.8,
"starting" : 21427.8,
"funeral" : 21414.4,
"arrested" : 21412.0,
"flour" : 21409.6,
"feature" : 21404.1,
"correspondence" : 21403.3,
"consisted" : 21398.6,
"counted" : 21397.8,
"reserve" : 21391.4,
"proceedings" : 21381.2,
"roar" : 21378.8,
"romantic" : 21377.2,
"twenty-five" : 21374.8,
"hut" : 21374.0,
"strangely" : 21361.4,
"absorbed" : 21357.4,
"propose" : 21352.7,
"seats" : 21348.7,
"bark" : 21347.1,
"reception" : 21339.2,
"pleasing" : 21334.5,
"attained" : 21329.7,
"wake" : 21326.6,
"research" : 21323.4,
"prayed" : 21309.2,
"monarch" : 21306.8,
"clothing" : 21300.5,
"dollar" : 21293.3,
"illness" : 21291.8,
"calmly" : 21282.3,
"obeyed" : 21275.9,
"heartily" : 21268.8,
"pressing" : 21264.9,
"daylight" : 21264.1,
"warriors" : 21243.5,
"jest" : 21238.7,
"abruptly" : 21222.1,
"washed" : 21200.8,
"comment" : 21197.6,
"metal" : 21196.0,
"preparations" : 21193.7,
"nerves" : 21177.8,
"solution" : 21177.8,
"pretended" : 21173.1,
"sixteen" : 21166.0,
"assembly" : 21155.7,
"tobacco" : 21154.1,
"entity" : 21142.2,
"dwelling" : 21136.7,
"depart" : 21135.9,
"swung" : 21134.3,
"bitterly" : 21130.4,
"alteration" : 21128.0,
"colony" : 21126.4,
"disclaimers" : 21124.8,
"wing" : 21122.5,
"peaceful" : 21120.9,
"lion" : 21119.3,
"opportunities" : 21097.9,
"alarmed" : 21090.8,
"furnish" : 21090.0,
"resting" : 21086.9,
"accused" : 21084.5,
"culture" : 21083.7,
"writings" : 21073.4,
"dwelt" : 21071.0,
"conquered" : 21067.1,
"trick" : 21052.8,
"trusted" : 21049.7,
"column" : 21046.5,
"financial" : 21028.3,
"cunning" : 21027.5,
"preparation" : 21022.0,
"drama" : 21014.9,
"joke" : 21010.1,
"entertained" : 21003.0,
"mist" : 20999.0,
"hypertext" : 20994.3,
"shell" : 20981.6,
"medicine" : 20972.9,
"proofread" : 20964.2,
"nest" : 20950.0,
"reverence" : 20947.6,
"situated" : 20945.2,
"yielded" : 20942.9,
"conceive" : 20938.9,
"appointment" : 20934.2,
"lessons" : 20897.8,
"fetch" : 20895.4,
"tomb" : 20895.4,
"candle" : 20894.6,
"offence" : 20882.7,
"coarse" : 20870.1,
"heap" : 20866.1,
"mixture" : 20863.8,
"homes" : 20847.9,
"model" : 20843.2,
"men's" : 20819.4,
"defect" : 20818.7,
"destined" : 20814.7,
"occasional" : 20813.1,
"fourteen" : 20803.6,
"hint" : 20802.0,
"knights" : 20795.7,
"solicit" : 20783.8,
"dreamed" : 20774.4,
"objection" : 20756.2,
"craft" : 20753.8,
"acid" : 20752.2,
"namely" : 20747.5,
"asia" : 20744.3,
"neglect" : 20734.8,
"data" : 20722.9,
"weapon" : 20717.4,
"confessed" : 20715.0,
"arrangements" : 20709.5,
"repose" : 20701.6,
"complying" : 20700.0,
"copied" : 20700.0,
"pink" : 20698.4,
"user" : 20696.0,
"heels" : 20695.2,
"grandfather" : 20692.1,
"other's" : 20690.5,
"income" : 20679.4,
"i.e." : 20661.2,
"regards" : 20650.1,
"streams" : 20649.4,
"vigorous" : 20633.5,
"accepting" : 20628.8,
"bishop" : 20628.8,
"lightning" : 20622.5,
"authors" : 20613.8,
"flames" : 20613.8,
"observations" : 20602.7,
"compressed" : 20599.5,
"sport" : 20588.4,
"powder" : 20587.6,
"beds" : 20576.6,
"orange" : 20573.4,
"painting" : 20559.2,
"shout" : 20548.9,
"austria" : 20499.0,
"bath" : 20495.9,
"careless" : 20495.1,
"chap" : 20493.5,
"derivative" : 20492.7,
"roused" : 20491.1,
"primitive" : 20488.8,
"doorway" : 20487.2,
"climbed" : 20484.8,
"volumes" : 20479.3,
"vulgar" : 20448.4,
"arguments" : 20441.3,
"1st" : 20440.5,
"sunset" : 20435.0,
"convenient" : 20431.0,
"mail" : 20423.9,
"recalled" : 20419.1,
"wrapped" : 20419.1,
"abode" : 20409.6,
"planted" : 20405.7,
"paint" : 20381.2,
"surrender" : 20362.2,
"establish" : 20357.4,
"mild" : 20353.5,
"promptly" : 20342.4,
"appearing" : 20328.2,
"department" : 20315.5,
"parish" : 20302.8,
"stephen" : 20288.6,
"nay" : 20246.7,
"lit" : 20243.5,
"handkerchief" : 20229.3,
"basket" : 20219.8,
"easier" : 20216.6,
"deserve" : 20213.4,
"quit" : 20213.4,
"assurance" : 20212.6,
"mirror" : 20210.3,
"plot" : 20208.7,
"yer" : 20196.8,
"upward" : 20192.9,
"sadly" : 20191.3,
"secretary" : 20190.5,
"adding" : 20187.3,
"modest" : 20185.7,
"dish" : 20176.3,
"cares" : 20161.2,
"straw" : 20158.9,
"net" : 20152.5,
"advised" : 20146.2,
"heavenly" : 20124.0,
"largest" : 20119.3,
"proceeding" : 20107.4,
"impatient" : 20085.3,
"wounds" : 20076.6,
"warmth" : 20071.8,
"certainty" : 20053.6,
"restless" : 20048.1,
"meantime" : 20043.3,
"rays" : 20040.2,
"salvation" : 20031.5,
"lovers" : 20030.7,
"experiment" : 20024.4,
"shores" : 20017.2,
"today" : 20016.4,
"tremendous" : 20006.2,
"afforded" : 20004.6,
"moonlight" : 20002.2,
"intend" : 19999.0,
"california" : 19991.1,
"cultivated" : 19976.9,
"flushed" : 19971.4,
"shakespeare" : 19966.6,
"newspapers" : 19953.2,
"rocky" : 19916.8,
"pious" : 19900.9,
"wont" : 19900.9,
"steam" : 19892.2,
"improvement" : 19883.5,
"garments" : 19878.8,
"ned" : 19874.8,
"treasury" : 19873.3,
"merchants" : 19868.5,
"perpetual" : 19866.9,
"trained" : 19866.9,
"products" : 19866.1,
"affectionate" : 19858.2,
"dispute" : 19854.3,
"visitors" : 19847.9,
"poison" : 19814.7,
"proposition" : 19793.3,
"maybe" : 19771.2,
"rifle" : 19769.6,
"warned" : 19767.2,
"parting" : 19751.4,
"shield" : 19736.4,
"erected" : 19722.9,
"employ" : 19722.1,
"prevailed" : 19721.4,
"talent" : 19714.2,
"rises" : 19713.4,
"climate" : 19693.7,
"chairs" : 19690.5,
"searched" : 19689.7,
"unlike" : 19687.3,
"recover" : 19681.0,
"mate" : 19655.7,
"arrange" : 19648.6,
"fortunes" : 19647.0,
"puzzled" : 19639.1,
"committee" : 19637.5,
"aged" : 19628.8,
"ohio" : 19605.1,
"ashes" : 19593.2,
"ghost" : 19563.9,
"b" : 19549.7,
"promises" : 19547.3,
"bushes" : 19541.0,
"effective" : 19525.9,
"distinguish" : 19508.5,
"manifest" : 19496.7,
"comparatively" : 19486.4,
"esteem" : 19478.5,
"blew" : 19452.4,
"revelation" : 19451.6,
"wash" : 19425.5,
"recognition" : 19417.6,
"confession" : 19405.7,
"clay" : 19398.6,
"nonsense" : 19375.6,
"trunk" : 19374.8,
"management" : 19371.7,
"undoubtedly" : 19371.7,
"dried" : 19369.3,
"dorothy" : 19360.6,
"chiefs" : 19347.1,
"coal" : 19337.7,
"stolen" : 19337.7,
"earthly" : 19335.3,
"restore" : 19332.1,
"indirectly" : 19327.4,
"lasted" : 19324.2,
"selfish" : 19316.3,
"renewed" : 19313.9,
"canoe" : 19310.8,
"protest" : 19307.6,
"vice" : 19294.1,
"races" : 19291.0,
"deemed" : 19279.1,
"temporary" : 19278.3,
"pile" : 19276.7,
"frederick" : 19263.3,
"chapel" : 19251.4,
"moderate" : 19245.9,
"spell" : 19236.4,
"massachusetts" : 19235.6,
"upright" : 19228.5,
"quoted" : 19226.9,
"area" : 19226.1,
"bone" : 19221.4,
"solitude" : 19215.8,
"instruments" : 19215.0,
"formal" : 19211.9,
"students" : 19209.5,
"greatness" : 19206.3,
"struggling" : 19192.1,
"monday" : 19175.5,
"reproach" : 19173.1,
"altered" : 19167.6,
"grim" : 19161.2,
"leaped" : 19157.3,
"venice" : 19153.3,
"federal" : 19149.4,
"questioned" : 19147.0,
"editor" : 19143.0,
"desirable" : 19141.5,
"acknowledge" : 19137.5,
"motionless" : 19137.5,
"remedy" : 19135.9,
"bestowed" : 19130.4,
"pursue" : 19121.7,
"representative" : 19117.7,
"pole" : 19116.9,
"gladly" : 19111.4,
"linen" : 19109.8,
"vital" : 19099.5,
"sink" : 19096.4,
"pacific" : 19093.2,
"hopeless" : 19090.0,
"dangers" : 19087.7,
"gratefully" : 19087.7,
"president" : 19068.7,
"travelled" : 19061.5,
"ward" : 19060.8,
"nephew" : 19041.8,
"ms" : 19038.6,
"cheer" : 19029.9,
"bloody" : 19023.6,
"siege" : 19020.4,
"commands" : 19005.4,
"justified" : 19003.8,
"atlantic" : 18995.1,
"stomach" : 18991.9,
"improved" : 18971.4,
"admire" : 18965.0,
"openly" : 18962.7,
"sailors" : 18961.1,
"abide" : 18940.5,
"advancing" : 18937.3,
"forests" : 18927.1,
"records" : 18920.7,
"polly" : 18909.6,
"recorded" : 18908.9,
"modification" : 18904.9,
"dramatic" : 18897.8,
"statements" : 18891.5,
"upstairs" : 18873.3,
"varied" : 18858.2,
"letting" : 18857.4,
"wilson" : 18857.4,
"comrades" : 18855.9,
"sets" : 18848.7,
"descent" : 18847.9,
"whither" : 18847.9,
"envy" : 18837.7,
"load" : 18836.9,
"pretend" : 18834.5,
"folded" : 18829.0,
"brass" : 18824.2,
"internal" : 18823.4,
"furious" : 18820.3,
"curtain" : 18816.3,
"healthy" : 18814.7,
"obscure" : 18810.0,
"summit" : 18806.0,
"alas" : 18755.4,
"fifth" : 18753.0,
"center" : 18747.5,
"faced" : 18734.8,
"cheap" : 18723.7,
"saints" : 18722.9,
"colonel" : 18721.4,
"egyptian" : 18719.0,
"contest" : 18707.9,
"owned" : 18702.4,
"adventures" : 18700.8,
"exclusion" : 18688.1,
"seize" : 18670.7,
"chances" : 18669.9,
"springs" : 18661.2,
"alter" : 18660.4,
"landing" : 18659.7,
"fence" : 18652.5,
"leagues" : 18642.2,
"glimpse" : 18634.3,
"statue" : 18632.0,
"contract" : 18625.6,
"luxury" : 18614.6,
"artillery" : 18598.7,
"doubts" : 18597.2,
"saving" : 18594.8,
"fro" : 18586.9,
"string" : 18586.1,
"combination" : 18582.9,
"awakened" : 18575.8,
"faded" : 18573.4,
"arrest" : 18570.3,
"protected" : 18568.7,
"temperature" : 18567.1,
"strict" : 18564.7,
"contented" : 18563.9,
"professional" : 18563.9,
"intent" : 18559.2,
"brother's" : 18548.9,
"injured" : 18548.9,
"neighborhood" : 18546.5,
"andrew" : 18543.4,
"abundance" : 18541.0,
"smoking" : 18541.0,
"yourselves" : 18529.9,
"medical" : 18527.5,
"garrison" : 18525.9,
"likes" : 18508.5,
"corps" : 18503.8,
"heroic" : 18497.5,
"inform" : 18494.3,
"wife's" : 18488.8,
"retained" : 18477.7,
"agitation" : 18469.0,
"nobles" : 18462.7,
"prominent" : 18457.1,
"institution" : 18454.0,
"judged" : 18441.3,
"embrace" : 18431.0,
"wheels" : 18427.8,
"closing" : 18397.8,
"damaged" : 18391.5,
"pack" : 18390.7,
"affections" : 18389.1,
"eldest" : 18389.1,
"anguish" : 18382.8,
"surrounding" : 18381.2,
"obviously" : 18376.4,
"strictly" : 18366.1,
"capture" : 18344.0,
"drops" : 18339.2,
"inquire" : 18336.1,
"ample" : 18328.2,
"remainder" : 18325.8,
"justly" : 18324.2,
"recollection" : 18324.2,
"deer" : 18321.0,
"answers" : 18317.9,
"bedroom" : 18311.6,
"purely" : 18310.0,
"bush" : 18306.8,
"plunged" : 18299.7,
"thyself" : 18296.5,
"joint" : 18291.8,
"refer" : 18275.9,
"expecting" : 18274.4,
"madam" : 18271.2,
"railroad" : 18264.9,
"spake" : 18253.8,
"respecting" : 18252.2,
"jan" : 18250.6,
"columns" : 18248.3,
"weep" : 18242.7,
"identify" : 18232.4,
"discharge" : 18215.8,
"bench" : 18207.1,
"ralph" : 18196.0,
"heir" : 18192.9,
"oak" : 18186.6,
"rescue" : 18177.1,
"limit" : 18168.4,
"unpleasant" : 18163.6,
"anxiously" : 18159.7,
"innocence" : 18153.3,
"awoke" : 18151.7,
"expectation" : 18145.4,
"incomplete" : 18143.8,
"program" : 18109.8,
"reserved" : 18096.4,
"secretly" : 18083.7,
"we've" : 18079.7,
"invention" : 18079.0,
"faults" : 18075.8,
"disagreeable" : 18074.2,
"piano" : 18074.2,
"defeated" : 18073.4,
"charms" : 18072.6,
"purse" : 18067.9,
"persuade" : 18066.3,
"deprived" : 18063.1,
"electric" : 18058.4,
"endless" : 18058.4,
"interval" : 18054.4,
"chase" : 18052.8,
"heroes" : 18052.1,
"invisible" : 18048.9,
"well-known" : 18044.9,
"occupy" : 18038.6,
"jacob" : 18036.2,
"gown" : 18026.0,
"cruelty" : 18021.2,
"lock" : 18018.0,
"lowest" : 18007.8,
"hesitation" : 18003.8,
"withdrew" : 18003.8,
"proposal" : 18001.4,
"destiny" : 17999.1,
"recognised" : 17998.3,
"commons" : 17995.1,
"foul" : 17994.3,
"loaded" : 17991.9,
"amidst" : 17989.6,
"titles" : 17984.8,
"ancestors" : 17984.0,
"types" : 17972.2,
"commanding" : 17959.5,
"madness" : 17954.7,
"happily" : 17954.0,
"assigned" : 17951.6,
"declined" : 17932.6,
"temptation" : 17932.6,
"lady's" : 17929.4,
"subsequent" : 17917.6,
"jewels" : 17912.8,
"breathed" : 17910.4,
"willingly" : 17906.5,
"youthful" : 17906.5,
"bells" : 17904.1,
"spectacle" : 17903.3,
"uneasy" : 17897.8,
"shine" : 17896.2,
"formidable" : 17893.0,
"stately" : 17892.2,
"machinery" : 17886.7,
"fragments" : 17875.6,
"rushing" : 17859.0,
"attractive" : 17857.4,
"product" : 17857.4,
"economic" : 17854.3,
"sickness" : 17847.9,
"uses" : 17822.6,
"dashed" : 17819.5,
"engine" : 17817.1,
"ashore" : 17813.9,
"dates" : 17805.2,
"theirs" : 17793.4,
"adv" : 17783.1,
"clasped" : 17773.6,
"international" : 17772.8,
"leather" : 17768.8,
"spared" : 17758.5,
"crushed" : 17753.0,
"interfere" : 17748.3,
"subtle" : 17745.9,
"waved" : 17743.5,
"slope" : 17739.6,
"floating" : 17737.2,
"worry" : 17730.1,
"effected" : 17725.3,
"passengers" : 17723.7,
"violently" : 17715.8,
"donation" : 17715.0,
"steamer" : 17714.2,
"witnesses" : 17712.7,
"specified" : 17710.3,
"learnt" : 17698.4,
"stores" : 17677.9,
"designed" : 17666.8,
"guessed" : 17654.9,
"roger" : 17654.9,
"timber" : 17654.1,
"talents" : 17639.9,
"heed" : 17636.7,
"jackson" : 17631.2,
"murdered" : 17629.6,
"vivid" : 17628.0,
"woe" : 17614.6,
"calculate" : 17612.2,
"killing" : 17608.2,
"laura" : 17605.9,
"savages" : 17603.5,
"wasted" : 17600.3,
"trifle" : 17584.5,
"funny" : 17576.6,
"pockets" : 17560.0,
"philosopher" : 17552.9,
"insult" : 17544.2,
"den" : 17543.4,
"representation" : 17541.0,
"incapable" : 17538.6,
"eloquence" : 17537.0,
"dine" : 17526.7,
"temples" : 17526.0,
"ann" : 17521.2,
"sensitive" : 17519.6,
"robin" : 17518.8,
"appetite" : 17507.0,
"wishing" : 17501.4,
"picturesque" : 17495.9,
"douglas" : 17494.3,
"courtesy" : 17472.2,
"flowing" : 17467.4,
"remembrance" : 17465.8,
"lawyers" : 17464.2,
"sphere" : 17464.2,
"murmur" : 17463.5,
"elegant" : 17460.3,
"honourable" : 17453.2,
"stopping" : 17452.4,
"guilt" : 17440.5,
"welfare" : 17437.3,
"avoided" : 17429.4,
"fishing" : 17423.9,
"perish" : 17420.7,
"sober" : 17416.0,
"steal" : 17415.2,
"delicious" : 17401.0,
"infant" : 17393.8,
"lip" : 17393.0,
"norman" : 17392.3,
"offended" : 17392.3,
"dost" : 17373.3,
"memories" : 17358.2,
"wheat" : 17354.3,
"japanese" : 17343.2,
"humor" : 17342.4,
"exhibited" : 17329.0,
"encounter" : 17321.8,
"footsteps" : 17314.7,
"marquis" : 17308.4,
"smiles" : 17302.1,
"amiable" : 17294.9,
"twilight" : 17290.2,
"arrows" : 17287.8,
"consisting" : 17287.0,
"park" : 17285.4,
"retire" : 17280.7,
"economy" : 17278.3,
"sufferings" : 17276.7,
"secrets" : 17276.0,
"na" : 17275.2,
"halted" : 17268.0,
"govern" : 17262.5,
"favourable" : 17260.9,
"colors" : 17248.3,
"translated" : 17245.9,
"stretch" : 17245.1,
"formation" : 17240.4,
"immortal" : 17234.8,
"gallery" : 17233.2,
"parallel" : 17233.2,
"lean" : 17230.1,
"tempted" : 17229.3,
"frontier" : 17228.5,
"continent" : 17226.1,
"knock" : 17221.4,
"impatience" : 17210.3,
"unity" : 17210.3,
"dealing" : 17192.9,
"prohibition" : 17169.9,
"decent" : 17166.8,
"fiery" : 17163.6,
"images" : 17163.6,
"tie" : 17162.8,
"punished" : 17161.2,
"submitted" : 17161.2,
"julia" : 17140.7,
"albert" : 17127.2,
"rejoined" : 17126.4,
"speedily" : 17125.6,
"consented" : 17122.5,
"major" : 17113.8,
"preliminary" : 17113.8,
"cell" : 17112.2,
"void" : 17112.2,
"placing" : 17111.4,
"prudence" : 17103.5,
"egg" : 17096.4,
"amazement" : 17090.0,
"border" : 17086.9,
"artificial" : 17080.5,
"hereafter" : 17075.8,
"fanny" : 17065.5,
"crimes" : 17063.1,
"breathe" : 17060.0,
"exempt" : 17056.8,
"anchor" : 17037.8,
"chicago" : 17037.8,
"sits" : 17036.2,
"purchased" : 17032.3,
"eminent" : 17031.5,
"neighbors" : 17029.1,
"glowing" : 17016.5,
"sunlight" : 17008.6,
"examples" : 17006.2,
"exercised" : 17005.4,
"wealthy" : 17003.8,
"seeming" : 16999.1,
"bonaparte" : 16995.1,
"shouting" : 16994.3,
"thanked" : 16992.7,
"illustrious" : 16985.6,
"curiously" : 16984.8,
"inspiration" : 16984.0,
"seeds" : 16983.2,
"naval" : 16971.4,
"foes" : 16954.8,
"everyone" : 16950.8,
"longed" : 16944.5,
"abundant" : 16940.5,
"doubted" : 16936.6,
"painter" : 16933.4,
"greeted" : 16931.0,
"erect" : 16929.4,
"glasses" : 16926.3,
"meanwhile" : 16926.3,
"shooting" : 16918.4,
"athens" : 16914.4,
"wagon" : 16913.6,
"lend" : 16903.3,
"lent" : 16901.7,
"crisis" : 16892.3,
"undertake" : 16885.9,
"particulars" : 16872.5,
"eh" : 16870.9,
"veins" : 16869.3,
"polite" : 16864.6,
"anna" : 16863.0,
"experiences" : 16859.0,
"seal" : 16858.2,
"header" : 16850.3,
"clergy" : 16848.7,
"mount" : 16843.2,
"array" : 16834.5,
"corners" : 16834.5,
"magazine" : 16833.7,
"loudly" : 16832.1,
"bitterness" : 16825.8,
"texas" : 16816.3,
"guardian" : 16813.1,
"searching" : 16811.6,
"rejected" : 16802.1,
"harsh" : 16801.3,
"includes" : 16800.5,
"boldly" : 16791.8,
"maurice" : 16791.8,
"kate" : 16790.2,
"lunch" : 16786.2,
"pine" : 16785.5,
"shells" : 16783.9,
"seconds" : 16783.1,
"despite" : 16779.1,
"hoping" : 16776.7,
"injustice" : 16768.0,
"expressions" : 16766.5,
"flies" : 16766.5,
"push" : 16757.0,
"tight" : 16756.2,
"problems" : 16753.0,
"landscape" : 16749.1,
"sue" : 16742.7,
"protested" : 16739.6,
"scarlet" : 16734.8,
"abandon" : 16734.0,
"artistic" : 16721.4,
"mainly" : 16720.6,
"measured" : 16688.9,
"loyal" : 16683.4,
"boiling" : 16681.8,
"desirous" : 16644.6,
"suited" : 16644.6,
"alliance" : 16641.5,
"advise" : 16635.1,
"waist" : 16634.3,
"sinking" : 16631.2,
"apprehension" : 16616.1,
"stable" : 16611.4,
"gregory" : 16607.4,
"maximum" : 16592.4,
"commit" : 16590.0,
"hideous" : 16575.0,
"hamilton" : 16571.1,
"sweetness" : 16566.3,
"dismissed" : 16563.9,
"tore" : 16560.8,
"affect" : 16558.4,
"shaken" : 16556.0,
"evils" : 16552.1,
"unworthy" : 16547.3,
"significance" : 16544.9,
"modified" : 16531.5,
"miracle" : 16529.9,
"lieu" : 16522.0,
"peasant" : 16519.6,
"considerably" : 16501.4,
"observing" : 16495.9,
"conveyed" : 16494.3,
"resemblance" : 16475.3,
"extend" : 16473.0,
"riches" : 16471.4,
"personally" : 16466.6,
"morality" : 16455.5,
"rebellion" : 16450.8,
"thread" : 16435.8,
"dumb" : 16431.8,
"inclination" : 16427.9,
"forbidden" : 16427.1,
"copper" : 16426.3,
"differences" : 16418.4,
"sailor" : 16411.2,
"requested" : 16408.1,
"alfred" : 16401.8,
"response" : 16401.8,
"promoting" : 16398.6,
"imperial" : 16392.3,
"blank" : 16390.7,
"purity" : 16382.8,
"victor" : 16382.8,
"bending" : 16376.4,
"solemnly" : 16363.0,
"twenty-four" : 16363.0,
"minor" : 16356.7,
"del" : 16353.5,
"crimson" : 16348.7,
"republic" : 16343.2,
"teachers" : 16332.9,
"ma'am" : 16329.8,
"danced" : 16326.6,
"bargain" : 16325.8,
"dealt" : 16321.8,
"fatigue" : 16319.5,
"telephone" : 16317.9,
"cents" : 16317.1,
"whip" : 16310.8,
"adams" : 16303.7,
"dislike" : 16280.7,
"witnessed" : 16265.7,
"infantry" : 16260.9,
"acres" : 16254.6,
"checked" : 16253.8,
"countrymen" : 16252.2,
"enemy's" : 16249.9,
"companies" : 16249.1,
"normal" : 16248.3,
"shirt" : 16247.5,
"addresses" : 16243.5,
"introduce" : 16242.7,
"sofa" : 16237.2,
"mothers" : 16235.6,
"sweep" : 16222.2,
"conversion" : 16219.8,
"sketch" : 16215.0,
"african" : 16205.6,
"deserved" : 16204.0,
"answering" : 16200.0,
"virtuous" : 16198.4,
"persian" : 16176.3,
"anyway" : 16169.9,
"thief" : 16152.5,
"driver" : 16147.8,
"retain" : 16145.4,
"constructed" : 16144.6,
"daniel" : 16143.8,
"ut" : 16132.8,
"philadelphia" : 16130.4,
"conspicuous" : 16129.6,
"channel" : 16126.4,
"nobility" : 16124.1,
"edith" : 16114.6,
"berlin" : 16113.8,
"editing" : 16105.9,
"cambridge" : 16103.5,
"declaration" : 16094.8,
"guards" : 16094.8,
"personality" : 16088.5,
"smallest" : 16080.6,
"excess" : 16077.4,
"separation" : 16075.8,
"disgust" : 16071.8,
"ha" : 16062.4,
"accomplish" : 16054.4,
"speeches" : 16054.4,
"herbert" : 16053.7,
"convent" : 16048.1,
"rightly" : 16044.2,
"suspended" : 16042.6,
"reform" : 16035.5,
"mob" : 16028.3,
"thirst" : 16027.5,
"unnecessary" : 16021.2,
"treasures" : 16018.8,
"asks" : 16018.1,
"viewed" : 16016.5,
"designs" : 16004.6,
"gleam" : 16000.6,
"threatening" : 15999.1,
"palm" : 15994.3,
"missouri" : 15992.7,
"filling" : 15984.0,
"quoth" : 15980.9,
"fur" : 15978.5,
"fortnight" : 15973.0,
"holes" : 15972.2,
"addressing" : 15970.6,
"frightful" : 15954.8,
"encourage" : 15954.0,
"speaker" : 15945.3,
"tribute" : 15944.5,
"procure" : 15935.8,
"frankly" : 15931.0,
"recommended" : 15913.6,
"relieve" : 15912.0,
"intentions" : 15910.5,
"unjust" : 15909.7,
"legislation" : 15904.1,
"project" : 15894.6,
"threshold" : 15894.6,
"merits" : 15893.8,
"morrow" : 15885.1,
"traces" : 15885.1,
"induce" : 15883.6,
"spear" : 15882.0,
"inward" : 15873.3,
"pupils" : 15863.8,
"corresponding" : 15859.0,
"fairy" : 15854.3,
"conclude" : 15850.3,
"clung" : 15846.4,
"neat" : 15845.6,
"lucky" : 15834.5,
"lap" : 15823.4,
"session" : 15822.6,
"torture" : 15818.7,
"damp" : 15814.7,
"ridge" : 15802.1,
"spoil" : 15785.5,
"liable" : 15784.7,
"swords" : 15780.7,
"hearty" : 15778.3,
"bc" : 15774.4,
"abraham" : 15769.6,
"thoughtful" : 15768.8,
"traveller" : 15764.9,
"chains" : 15760.9,
"favorable" : 15760.1,
"tin" : 15755.4,
"imp." : 15748.3,
"strongest" : 15748.3,
"horace" : 15745.1,
"dependent" : 15738.0,
"couch" : 15736.4,
"bills" : 15732.5,
"warrant" : 15731.7,
"complaint" : 15722.2,
"endeavour" : 15721.4,
"sails" : 15716.6,
"dined" : 15705.6,
"convention" : 15701.6,
"guarded" : 15696.1,
"angle" : 15694.5,
"widely" : 15692.1,
"illinois" : 15677.9,
"charlotte" : 15677.1,
"endeavoured" : 15677.1,
"ardent" : 15674.7,
"cow" : 15670.0,
"mill" : 15668.4,
"victims" : 15667.6,
"prejudice" : 15666.0,
"foremost" : 15665.2,
"map" : 15665.2,
"probability" : 15656.5,
"porch" : 15645.4,
"lieutenant" : 15641.5,
"surprising" : 15633.6,
"fountain" : 15632.0,
"sustained" : 15630.4,
"appropriate" : 15626.4,
"ford" : 15620.9,
"clara" : 15616.2,
"assisted" : 15615.4,
"lewis" : 15615.4,
"rejoice" : 15609.8,
"extending" : 15608.2,
"marvellous" : 15601.1,
"clothed" : 15597.2,
"jew" : 15579.8,
"collar" : 15568.7,
"bands" : 15567.9,
"confident" : 15563.1,
"hasty" : 15560.0,
"nigh" : 15557.6,
"organ" : 15557.6,
"prose" : 15545.0,
"privileges" : 15533.1,
"selection" : 15523.6,
"inquiries" : 15522.8,
"codes" : 15510.1,
"replace" : 15496.7,
"saint" : 15492.7,
"districts" : 15491.9,
"deliberately" : 15481.7,
"awe" : 15474.5,
"beforehand" : 15470.6,
"strife" : 15470.6,
"released" : 15468.2,
"compare" : 15466.6,
"beer" : 15465.0,
"retorted" : 15461.9,
"relate" : 15459.5,
"cheerfully" : 15448.4,
"pistol" : 15448.4,
"presume" : 15447.6,
"velvet" : 15446.1,
"wretch" : 15446.1,
"susan" : 15438.2,
"pennsylvania" : 15432.6,
"stirring" : 15430.2,
"righteousness" : 15429.4,
"missing" : 15428.7,
"fain" : 15427.1,
"facing" : 15425.5,
"fashionable" : 15425.5,
"producing" : 15420.7,
"peoples" : 15416.8,
"positively" : 15408.9,
"reasoning" : 15402.5,
"gravity" : 15401.8,
"disturb" : 15393.1,
"sermon" : 15389.1,
"exchanged" : 15385.1,
"partner" : 15379.6,
"brains" : 15374.1,
"lowered" : 15372.5,
"association" : 15365.4,
"estates" : 15352.7,
"abuse" : 15338.5,
"flock" : 15329.8,
"niece" : 15329.8,
"languages" : 15325.0,
"asserted" : 15321.1,
"bodily" : 15318.7,
"notions" : 15315.5,
"oliver" : 15313.2,
"faculty" : 15304.4,
"cannon" : 15302.1,
"thirteen" : 15290.2,
"sailing" : 15289.4,
"rings" : 15285.5,
"smart" : 15284.7,
"possessions" : 15280.7,
"disciples" : 15272.0,
"petty" : 15271.2,
"widest" : 15253.0,
"divisions" : 15252.2,
"prudent" : 15248.3,
"caution" : 15246.7,
"justify" : 15239.6,
"awhile" : 15236.4,
"boxes" : 15233.2,
"manuscript" : 15228.5,
"cigar" : 15225.3,
"warrior" : 15223.0,
"impressions" : 15221.4,
"aught" : 15217.4,
"lifting" : 15217.4,
"inaccurate" : 15212.7,
"tidings" : 15207.9,
"friday" : 15206.3,
"liquid" : 15202.4,
"staying" : 15202.4,
"concept" : 15191.3,
"creek" : 15186.6,
"lo" : 15173.1,
"brush" : 15172.3,
"download" : 15171.5,
"specially" : 15168.4,
"cream" : 15166.8,
"meetings" : 15162.0,
"jump" : 15159.7,
"unwilling" : 15158.9,
"adapted" : 15157.3,
"practised" : 15157.3,
"combat" : 15151.8,
"subdued" : 15151.8,
"jewish" : 15144.6,
"innumerable" : 15141.5,
"blowing" : 15139.9,
"extra" : 15138.3,
"civilized" : 15130.4,
"invented" : 15123.3,
"japan" : 15120.9,
"pitch" : 15109.8,
"cliff" : 15107.5,
"crowned" : 15092.4,
"portions" : 15090.8,
"awkward" : 15086.9,
"horrid" : 15085.3,
"pulling" : 15067.1,
"appreciate" : 15065.5,
"communicated" : 15060.8,
"kentucky" : 15056.8,
"jury" : 15050.5,
"encountered" : 15046.5,
"attacks" : 15041.8,
"monster" : 15031.5,
"simon" : 15022.0,
"maintaining" : 15021.2,
"sites" : 15019.6,
"frozen" : 15015.7,
"invariably" : 15010.9,
"dies" : 15009.4,
"survive" : 15007.8,
"literally" : 15005.4,
"consolation" : 14994.3,
"m" : 14994.3,
"phenomena" : 14994.3,
"pot" : 14993.5,
"ellen" : 14984.0,
"briefly" : 14979.3,
"rice" : 14975.3,
"planned" : 14974.5,
"barbara" : 14966.6,
"respected" : 14958.7,
"sublime" : 14954.0,
"dropping" : 14948.4,
"guy" : 14945.3,
"behaviour" : 14940.5,
"desolate" : 14938.9,
"penny" : 14935.0,
"adopt" : 14934.2,
"replaced" : 14933.4,
"revenue" : 14923.1,
"formats" : 14918.4,
"hired" : 14912.8,
"regularly" : 14904.9,
"infringement" : 14893.8,
"curtains" : 14892.3,
"eagerness" : 14883.6,
"helping" : 14882.8,
"investigation" : 14882.8,
"constitutional" : 14878.8,
"insist" : 14878.8,
"occurs" : 14875.7,
"fools" : 14871.7,
"inheritance" : 14870.9,
"latest" : 14868.5,
"leap" : 14866.2,
"games" : 14846.4,
"apple" : 14840.1,
"visiting" : 14840.1,
"travellers" : 14836.1,
"experiments" : 14830.6,
"hasn't" : 14830.6,
"pupil" : 14830.6,
"enjoying" : 14829.0,
"totally" : 14821.9,
"twisted" : 14821.9,
"discuss" : 14813.9,
"firing" : 14802.9,
"background" : 14801.3,
"subscribe" : 14795.0,
"tenderly" : 14786.3,
"transcribe" : 14783.1,
"descend" : 14779.9,
"differ" : 14772.0,
"majesty's" : 14763.3,
"avail" : 14758.6,
"disaster" : 14756.2,
"bet" : 14753.8,
"periodic" : 14750.7,
"bull" : 14749.1,
"entertainment" : 14748.3,
"computers" : 14743.5,
"cursed" : 14738.0,
"raw" : 14734.0,
"fulfilled" : 14732.5,
"georgia" : 14728.5,
"virus" : 14726.9,
"log" : 14726.1,
"skies" : 14707.9,
"scotch" : 14706.4,
"embraced" : 14705.6,
"hospitality" : 14703.2,
"faintly" : 14701.6,
"solomon" : 14690.5,
"robbed" : 14689.7,
"cart" : 14688.9,
"influences" : 14686.6,
"ascended" : 14682.6,
"incidents" : 14682.6,
"childish" : 14674.7,
"robe" : 14668.4,
"aboard" : 14658.1,
"resembling" : 14657.3,
"reflect" : 14652.6,
"dominion" : 14649.4,
"dreary" : 14644.6,
"serving" : 14639.1,
"complexion" : 14636.7,
"engage" : 14624.1,
"tents" : 14620.9,
"herd" : 14619.3,
"attain" : 14618.5,
"collect" : 14617.0,
"disclaims" : 14617.0,
"pan" : 14614.6,
"relatives" : 14613.8,
"borrowed" : 14610.6,
"convert" : 14601.9,
"outline" : 14598.8,
"blown" : 14594.8,
"comprehend" : 14578.2,
"peasants" : 14575.8,
"opera" : 14571.9,
"assault" : 14568.7,
"deceive" : 14566.3,
"doctrines" : 14560.0,
"representatives" : 14555.2,
"dedicated" : 14553.7,
"struggled" : 14548.1,
"officials" : 14545.8,
"hiding" : 14540.2,
"paths" : 14537.8,
"backs" : 14533.9,
"prominently" : 14518.1,
"prices" : 14517.3,
"procured" : 14517.3,
"mourning" : 14510.1,
"compliment" : 14505.4,
"heights" : 14503.0,
"approval" : 14502.2,
"gasped" : 14495.1,
"breadth" : 14492.0,
"withdraw" : 14488.8,
"tune" : 14473.8,
"compassion" : 14470.6,
"polished" : 14468.2,
"latitude" : 14463.5,
"dishes" : 14461.1,
"parent" : 14461.1,
"contrived" : 14459.5,
"delicacy" : 14459.5,
"projected" : 14456.4,
"akin" : 14454.0,
"f" : 14452.4,
"betray" : 14448.4,
"traced" : 14446.9,
"resentment" : 14432.6,
"indemnify" : 14431.8,
"pseud" : 14428.7,
"sacrifices" : 14418.4,
"disguise" : 14416.0,
"transcription" : 14410.5,
"document" : 14408.1,
"neighbour" : 14405.7,
"squire" : 14402.6,
"punish" : 14393.9,
"bars" : 14391.5,
"glittering" : 14390.7,
"tossed" : 14388.3,
"block" : 14383.6,
"lots" : 14375.7,
"worldly" : 14370.1,
"muscles" : 14367.0,
"elbow" : 14365.4,
"obligation" : 14359.8,
"trifling" : 14359.8,
"decline" : 14357.5,
"attachment" : 14356.7,
"ambitious" : 14355.9,
"filename" : 14351.1,
"artists" : 14341.6,
"bloom" : 14341.6,
"holiday" : 14340.8,
"brute" : 14335.3,
"repair" : 14329.8,
"fist" : 14325.8,
"recollect" : 14324.2,
"eagle" : 14313.9,
"honorable" : 14313.9,
"significant" : 14309.2,
"barren" : 14302.9,
"functions" : 14296.5,
"guided" : 14289.4,
"dense" : 14283.9,
"fiction" : 14277.6,
"viz." : 14273.6,
"adds" : 14270.4,
"rows" : 14270.4,
"recommend" : 14264.1,
"suspicious" : 14261.7,
"resulting" : 14257.8,
"seventy" : 14257.0,
"shillings" : 14253.8,
"educational" : 14252.2,
"duly" : 14247.5,
"governed" : 14246.7,
"scripture" : 14237.2,
"upwards" : 14235.6,
"sworn" : 14234.0,
"nicholas" : 14226.9,
"horn" : 14226.1,
"brook" : 14225.3,
"fund" : 14222.2,
"vienna" : 14213.5,
"lodge" : 14209.5,
"infinitely" : 14207.9,
"clergyman" : 14207.1,
"marshal" : 14206.4,
"ruled" : 14205.6,
"fiercely" : 14200.0,
"portuguese" : 14200.0,
"costume" : 14192.9,
"pit" : 14192.1,
"disorder" : 14188.2,
"sheer" : 14184.2,
"exalted" : 14179.5,
"fare" : 14175.5,
"applause" : 14173.9,
"chaucer" : 14166.8,
"remind" : 14161.3,
"binary" : 14155.7,
"packed" : 14141.5,
"pillow" : 14140.7,
"jersey" : 14139.1,
"abbey" : 14136.7,
"nowhere" : 14135.2,
"anyhow" : 14125.7,
"agitated" : 14122.5,
"marching" : 14122.5,
"catching" : 14121.7,
"el" : 14119.3,
"grasped" : 14117.7,
"arrow" : 14115.4,
"tend" : 14113.8,
"carved" : 14109.8,
"fitting" : 14108.3,
"bonds" : 14105.9,
"instructed" : 14105.9,
"elaborate" : 14101.1,
"corpse" : 14095.6,
"bewildered" : 14088.5,
"essence" : 14071.9,
"positions" : 14071.9,
"emily" : 14067.1,
"edited" : 14063.9,
"continues" : 14062.4,
"harold" : 14056.8,
"elevation" : 14055.2,
"realm" : 14041.8,
"debts" : 14032.3,
"glancing" : 14027.6,
"shops" : 14013.3,
"complained" : 14008.6,
"loyalty" : 14001.4,
"coin" : 13996.7,
"clad" : 13992.0,
"staircase" : 13988.8,
"documents" : 13976.9,
"interpreted" : 13969.8,
"4th" : 13969.0,
"extremity" : 13966.6,
"accord" : 13965.1,
"sally" : 13963.5,
"lace" : 13960.3,
"tremble" : 13957.9,
"exile" : 13955.6,
"gospel" : 13947.7,
"mechanical" : 13947.7,
"successfully" : 13943.7,
"scholar" : 13932.6,
"wonders" : 13923.9,
"arab" : 13920.0,
"temperament" : 13920.0,
"expressing" : 13918.4,
"fred" : 13917.6,
"trap" : 13917.6,
"spots" : 13914.4,
"awaiting" : 13910.5,
"potatoes" : 13909.7,
"likeness" : 13902.6,
"harbour" : 13898.6,
"proofs" : 13885.2,
"jolly" : 13883.6,
"contributed" : 13879.6,
"wits" : 13876.5,
"generosity" : 13869.3,
"ruler" : 13866.2,
"lawrence" : 13858.3,
"cake" : 13856.7,
"lamps" : 13855.9,
"crazy" : 13852.7,
"sincerity" : 13852.7,
"entertain" : 13851.1,
"madame" : 13849.6,
"sir" : 13849.6,
"faculties" : 13845.6,
"hesitate" : 13840.1,
"deepest" : 13836.9,
"seventeen" : 13830.6,
"lordship" : 13817.9,
"greeting" : 13808.4,
"feminine" : 13805.2,
"monstrous" : 13802.9,
"tongues" : 13799.7,
"barely" : 13795.8,
"3d" : 13785.5,
"mansion" : 13784.7,
"facility" : 13783.9,
"praised" : 13779.1,
"warranties" : 13776.0,
"sarah" : 13775.2,
"happier" : 13772.8,
"indicating" : 13772.0,
"rob" : 13764.1,
"gigantic" : 13763.3,
"honey" : 13760.9,
"ladder" : 13757.8,
"ending" : 13754.6,
"wales" : 13754.6,
"swallowed" : 13751.5,
"sunny" : 13747.5,
"knelt" : 13743.5,
"tyranny" : 13742.7,
"decree" : 13739.6,
"stake" : 13738.0,
"divide" : 13734.0,
"dreaming" : 13734.0,
"proclaimed" : 13732.5,
"dignified" : 13730.9,
"tread" : 13729.3,
"mines" : 13724.6,
"viewing" : 13723.8,
"defense" : 13723.0,
"oldest" : 13720.6,
"incredible" : 13718.2,
"bidding" : 13711.9,
"brick" : 13711.9,
"arch" : 13707.1,
"everlasting" : 13703.2,
"elect" : 13700.0,
"sprung" : 13696.9,
"harder" : 13688.2,
"winding" : 13686.6,
"deductible" : 13684.2,
"magistrate" : 13681.8,
"respective" : 13679.5,
"liquor" : 13676.3,
"imitation" : 13670.0,
"shy" : 13670.0,
"perished" : 13669.2,
"prime" : 13666.0,
"studying" : 13662.8,
"eighty" : 13661.3,
"hebrew" : 13658.1,
"unfortunately" : 13656.5,
"licensed" : 13654.9,
"fog" : 13651.0,
"coloured" : 13648.6,
"bits" : 13647.0,
"consult" : 13642.3,
"moves" : 13642.3,
"r" : 13642.3,
"warn" : 13639.9,
"taylor" : 13638.3,
"vile" : 13616.2,
"depended" : 13612.2,
"phil" : 13609.8,
"legend" : 13609.0,
"locations" : 13604.3,
"shallow" : 13602.7,
"doom" : 13601.1,
"dreaded" : 13595.6,
"encouragement" : 13592.4,
"impatiently" : 13575.8,
"scent" : 13567.9,
"varieties" : 13567.1,
"irregular" : 13555.2,
"battles" : 13554.5,
"compass" : 13543.4,
"neighbouring" : 13538.6,
"bliss" : 13536.3,
"harvest" : 13533.9,
"promotion" : 13533.1,
"stove" : 13519.6,
"faithfully" : 13518.9,
"anthony" : 13517.3,
"excellence" : 13515.7,
"transfer" : 13515.7,
"awaited" : 13511.7,
"heathen" : 13510.9,
"poetic" : 13510.9,
"consulted" : 13509.4,
"illustrated" : 13507.0,
"gilbert" : 13501.5,
"br" : 13497.5,
"fundamental" : 13496.7,
"bundle" : 13492.0,
"rebel" : 13481.7,
"cultivation" : 13478.5,
"joys" : 13478.5,
"rigid" : 13476.9,
"tragic" : 13468.2,
"review" : 13462.7,
"representing" : 13459.5,
"flowed" : 13455.6,
"brows" : 13454.0,
"whereupon" : 13451.6,
"terribly" : 13438.2,
"melted" : 13435.8,
"venerable" : 13435.8,
"towers" : 13434.2,
"cooking" : 13432.6,
"mustn't" : 13422.3,
"suspicions" : 13420.8,
"old-fashioned" : 13419.2,
"oppressed" : 13418.4,
"australia" : 13413.6,
"friend's" : 13412.1,
"revolt" : 13411.3,
"swell" : 13407.3,
"improve" : 13405.7,
"williams" : 13405.7,
"describes" : 13403.4,
"goddess" : 13401.8,
"wreck" : 13393.1,
"tennessee" : 13392.3,
"convince" : 13384.4,
"sentences" : 13377.2,
"bowl" : 13376.5,
"radiant" : 13367.8,
"prussia" : 13364.6,
"westward" : 13357.5,
"indignant" : 13355.9,
"refined" : 13345.6,
"unseen" : 13344.8,
"illustration" : 13340.9,
"pertaining" : 13333.7,
"swamp" : 13330.6,
"austrian" : 13329.0,
"saxon" : 13325.0,
"congregation" : 13323.4,
"nerve" : 13321.1,
"undertaking" : 13321.1,
"disclaimer" : 13317.1,
"characteristics" : 13302.9,
"stare" : 13291.8,
"specimens" : 13291.0,
"ascertain" : 13288.6,
"pledge" : 13287.1,
"earn" : 13286.3,
"warfare" : 13285.5,
"supposing" : 13279.9,
"subsequently" : 13279.1,
"attending" : 13278.4,
"angrily" : 13273.6,
"select" : 13268.9,
"animated" : 13267.3,
"industrial" : 13267.3,
"hurriedly" : 13259.4,
"manhood" : 13257.0,
"quantities" : 13246.7,
"interpretation" : 13245.9,
"dressing" : 13242.0,
"rejoiced" : 13241.2,
"edinburgh" : 13238.8,
"catherine" : 13236.4,
"challenge" : 13236.4,
"produces" : 13236.4,
"forbid" : 13235.6,
"gang" : 13234.8,
"boiled" : 13233.3,
"shouts" : 13231.7,
"so-called" : 13229.3,
"theme" : 13229.3,
"thankful" : 13227.7,
"admission" : 13226.9,
"enters" : 13218.2,
"elevated" : 13217.4,
"frenchman" : 13208.7,
"pool" : 13204.8,
"terrified" : 13203.2,
"lads" : 13202.4,
"persisted" : 13189.7,
"conference" : 13185.0,
"equality" : 13183.4,
"genus" : 13180.3,
"didst" : 13176.3,
"newly" : 13172.3,
"generals" : 13171.5,
"surroundings" : 13162.1,
"sorrows" : 13158.9,
"occasioned" : 13154.1,
"invasion" : 13151.0,
"workmen" : 13147.0,
"monks" : 13145.4,
"sends" : 13145.4,
"turkish" : 13144.7,
"discretion" : 13141.5,
"pattern" : 13139.9,
"reveal" : 13139.1,
"endured" : 13128.8,
"resolve" : 13128.0,
"columbia" : 13121.7,
"preach" : 13120.9,
"exceeding" : 13119.3,
"ringing" : 13117.0,
"triumphant" : 13117.0,
"defiance" : 13113.8,
"errand" : 13105.1,
"woke" : 13104.3,
"grandmother" : 13103.5,
"weighed" : 13095.6,
"wool" : 13092.4,
"orleans" : 13080.6,
"communicate" : 13078.2,
"strikes" : 13075.8,
"promising" : 13066.3,
"scenery" : 13066.3,
"righteous" : 13065.5,
"essentially" : 13064.0,
"oppose" : 13060.8,
"joyous" : 13053.7,
"specimen" : 13052.9,
"doctors" : 13049.7,
"eloquent" : 13045.0,
"manager" : 13045.0,
"organs" : 13043.4,
"sticks" : 13042.6,
"drag" : 13041.0,
"haunted" : 13041.0,
"chorus" : 13040.2,
"rational" : 13025.2,
"crop" : 13023.6,
"processing" : 13023.6,
"accurate" : 13018.9,
"wolf" : 13010.9,
"adorned" : 13009.4,
"sheets" : 13007.8,
"resort" : 13006.2,
"refusal" : 13002.2,
"bond" : 12999.1,
"vicinity" : 12992.8,
"preacher" : 12990.4,
"sympathetic" : 12988.0,
"casting" : 12987.2,
"opens" : 12982.5,
"prophets" : 12980.1,
"horns" : 12978.5,
"warmly" : 12976.1,
"salary" : 12970.6,
"continuous" : 12965.9,
"satan" : 12962.7,
"continual" : 12959.5,
"defended" : 12959.5,
"breaks" : 12958.7,
"workers" : 12957.9,
"lantern" : 12957.2,
"balls" : 12955.6,
"rod" : 12955.6,
"blaze" : 12952.4,
"examining" : 12951.6,
"naples" : 12951.6,
"peculiarly" : 12950.0,
"vegetables" : 12950.0,
"ingenious" : 12948.4,
"excite" : 12942.1,
"howard" : 12937.4,
"horseback" : 12935.8,
"re-use" : 12923.1,
"louisiana" : 12921.6,
"farmers" : 12920.0,
"wildly" : 12919.2,
"mouths" : 12916.8,
"carpet" : 12912.8,
"sadness" : 12910.5,
"customary" : 12904.1,
"circles" : 12903.4,
"aren't" : 12895.4,
"wonderfully" : 12891.5,
"max" : 12889.9,
"juan" : 12885.9,
"successor" : 12871.7,
"allied" : 12869.3,
"ceiling" : 12863.8,
"confirmation" : 12855.9,
"glances" : 12855.1,
"diamonds" : 12851.1,
"goal" : 12848.0,
"representations" : 12845.6,
"cash" : 12840.1,
"vacant" : 12837.7,
"antiquity" : 12829.8,
"despise" : 12826.6,
"lawn" : 12817.9,
"they'll" : 12817.9,
"appealed" : 12814.0,
"turkey" : 12811.6,
"texts" : 12809.2,
"neighbor" : 12805.3,
"spreading" : 12802.9,
"discharged" : 12792.6,
"phrases" : 12791.8,
"ultimate" : 12785.5,
"tastes" : 12781.5,
"submission" : 12779.1,
"entry" : 12775.2,
"rachel" : 12769.7,
"blush" : 12760.2,
"monument" : 12757.8,
"hardy" : 12756.2,
"thorough" : 12755.4,
"ein" : 12753.8,
"ecclesiastical" : 12751.5,
"fertile" : 12745.1,
"exciting" : 12744.3,
"captive" : 12738.8,
"severity" : 12736.4,
"considerations" : 12735.6,
"shew" : 12734.8,
"faster" : 12730.9,
"louise" : 12726.1,
"grandeur" : 12723.0,
"winning" : 12716.6,
"solely" : 12713.5,
"globe" : 12709.5,
"malice" : 12708.7,
"echoed" : 12706.4,
"lodging" : 12692.9,
"conservative" : 12692.1,
"throng" : 12691.3,
"prosperous" : 12688.2,
"whistle" : 12685.0,
"floated" : 12671.6,
"transferred" : 12667.6,
"declaring" : 12662.1,
"reckoned" : 12655.7,
"cheese" : 12654.9,
"bite" : 12653.4,
"thoughtfully" : 12652.6,
"breach" : 12643.1,
"enthusiastic" : 12642.3,
"cars" : 12638.3,
"downstairs" : 12638.3,
"allowing" : 12631.2,
"invite" : 12629.6,
"adjoining" : 12620.1,
"dusk" : 12618.5,
"cathedral" : 12617.0,
"truths" : 12616.2,
"plague" : 12612.2,
"sandy" : 12609.8,
"boil" : 12606.7,
"caroline" : 12603.5,
"beautifully" : 12600.4,
"inhabited" : 12600.4,
"tomorrow" : 12600.4,
"exclamation" : 12599.6,
"finishing" : 12590.9,
"shocked" : 12589.3,
"escort" : 12588.5,
"forgetting" : 12584.5,
"hanged" : 12582.9,
"hats" : 12576.6,
"mirth" : 12576.6,
"uncomfortable" : 12574.2,
"connecticut" : 12571.1,
"bows" : 12568.7,
"pierced" : 12562.4,
"harbor" : 12561.6,
"tricks" : 12560.0,
"rubbed" : 12559.2,
"apparatus" : 12556.0,
"mysteries" : 12541.0,
"honesty" : 12537.9,
"negroes" : 12535.5,
"concerns" : 12529.9,
"wander" : 12529.9,
"assert" : 12528.4,
"ceremonies" : 12528.4,
"sacrificed" : 12520.4,
"utterance" : 12518.1,
"dismay" : 12513.3,
"fright" : 12510.2,
"rail" : 12509.4,
"reflections" : 12508.6,
"crops" : 12501.5,
"pushing" : 12498.3,
"proves" : 12496.7,
"jimmy" : 12495.1,
"pathetic" : 12493.5,
"imperfect" : 12487.2,
"haughty" : 12481.7,
"navy" : 12481.7,
"fortress" : 12478.5,
"hurrying" : 12476.9,
"x" : 12474.6,
"blessings" : 12471.4,
"attempting" : 12466.6,
"insects" : 12465.9,
"selling" : 12456.4,
"appreciation" : 12455.6,
"suppressed" : 12446.1,
"acquire" : 12444.5,
"offensive" : 12443.7,
"ripe" : 12435.0,
"dresses" : 12432.6,
"reigned" : 12427.9,
"coldly" : 12417.6,
"candles" : 12412.1,
"km" : 12409.7,
"sixth" : 12398.6,
"blazing" : 12397.8,
"youngest" : 12395.4,
"mask" : 12394.7,
"florida" : 12393.9,
"lecture" : 12393.9,
"parlor" : 12393.9,
"decidedly" : 12392.3,
"whereby" : 12390.7,
"gordon" : 12386.0,
"reverend" : 12386.0,
"successive" : 12385.2,
"perception" : 12383.6,
"buffalo" : 12381.2,
"sire" : 12378.8,
"quitted" : 12375.7,
"keys" : 12374.9,
"develop" : 12373.3,
"function" : 12358.3,
"morals" : 12356.7,
"damned" : 12355.1,
"vexed" : 12345.6,
"2d" : 12344.8,
"pouring" : 12340.9,
"bullet" : 12333.7,
"excessive" : 12332.9,
"bind" : 12325.8,
"identical" : 12325.0,
"cliffs" : 12317.9,
"tools" : 12312.4,
"byron" : 12309.2,
"mexican" : 12308.4,
"piety" : 12308.4,
"superstition" : 12304.5,
"git" : 12302.9,
"substantial" : 12302.9,
"bulk" : 12293.4,
"prevail" : 12293.4,
"wiser" : 12291.8,
"preaching" : 12284.7,
"prolonged" : 12284.7,
"annoyed" : 12276.0,
"westminster" : 12275.2,
"splendour" : 12273.6,
"remembering" : 12272.0,
"richmond" : 12259.4,
"upset" : 12251.5,
"cab" : 12250.7,
"bunch" : 12244.3,
"pencil" : 12243.5,
"subjected" : 12243.5,
"vegetable" : 12241.2,
"exhibit" : 12238.8,
"emerged" : 12237.2,
"cooked" : 12235.6,
"hay" : 12233.3,
"kansas" : 12233.3,
"gale" : 12226.9,
"preached" : 12226.9,
"arnold" : 12219.0,
"trousers" : 12217.4,
"debate" : 12213.5,
"dated" : 12204.0,
"tumult" : 12204.0,
"corruption" : 12202.4,
"summons" : 12201.6,
"comrade" : 12193.7,
"eternity" : 12193.7,
"hears" : 12193.7,
"lingered" : 12188.2,
"propriety" : 12187.4,
"stillness" : 12185.8,
"partial" : 12182.6,
"welcomed" : 12182.6,
"cabinet" : 12178.7,
"proceeds" : 12177.9,
"vow" : 12176.3,
"quaint" : 12174.7,
"soup" : 12173.9,
"beef" : 12170.8,
"rests" : 12165.2,
"slay" : 12164.4,
"surgeon" : 12163.6,
"irresistible" : 12158.9,
"sealed" : 12149.4,
"repeating" : 12146.2,
"needn't" : 12144.7,
"allowance" : 12143.1,
"undertaken" : 12136.7,
"treachery" : 12135.2,
"posts" : 12131.2,
"borders" : 12128.8,
"attendant" : 12127.3,
"unite" : 12123.3,
"murderer" : 12120.9,
"owners" : 12116.2,
"nm" : 12115.4,
"sweeping" : 12114.6,
"unconsciously" : 12108.3,
"blade" : 12101.9,
"saviour" : 12099.6,
"theories" : 12099.6,
"graham" : 12098.8,
"behaved" : 12096.4,
"pleaded" : 12095.6,
"spy" : 12094.8,
"possesses" : 12094.0,
"lawful" : 12091.7,
"tommy" : 12091.7,
"seasons" : 12090.1,
"withdrawn" : 12090.1,
"reckless" : 12086.9,
"factory" : 12086.1,
"shades" : 12083.7,
"gossip" : 12080.6,
"seventh" : 12079.0,
"attendance" : 12075.0,
"robes" : 12071.9,
"journal" : 12065.5,
"systems" : 12063.2,
"dryden" : 12060.0,
"maine" : 12059.2,
"token" : 12054.5,
"intimacy" : 12049.7,
"abstract" : 12048.9,
"machines" : 12048.1,
"bestow" : 12037.1,
"chanced" : 12036.3,
"locks" : 12027.6,
"honestly" : 12026.0,
"legitimate" : 12026.0,
"accent" : 12023.6,
"symptoms" : 12017.3,
"votes" : 12011.7,
"ragged" : 12010.2,
"thursday" : 12009.4,
"manifested" : 12008.6,
"fidelity" : 12006.2,
"swinging" : 12000.7,
"descending" : 11999.9,
"sincerely" : 11999.9,
"bred" : 11995.9,
"whereof" : 11995.9,
"indies" : 11994.3,
"novels" : 11990.4,
"league" : 11988.8,
"failing" : 11984.1,
"succeeding" : 11981.7,
"santa" : 11979.3,
"approve" : 11976.9,
"cautiously" : 11976.1,
"miller" : 11974.6,
"afflicted" : 11972.2,
"lodgings" : 11972.2,
"petition" : 11965.1,
"traffic" : 11963.5,
"sparkling" : 11957.2,
"limb" : 11955.6,
"architecture" : 11951.6,
"disposal" : 11936.6,
"carriages" : 11929.5,
"crack" : 11929.5,
"kindred" : 11927.1,
"naught" : 11927.1,
"ornament" : 11921.6,
"slew" : 11916.8,
"steward" : 11911.3,
"fantastic" : 11905.7,
"evolution" : 11901.0,
"patiently" : 11901.0,
"reverse" : 11896.2,
"survey" : 11890.7,
"dug" : 11889.1,
"amuse" : 11881.2,
"stretching" : 11879.6,
"isaac" : 11874.1,
"forthwith" : 11873.3,
"contemporary" : 11869.3,
"foliage" : 11867.8,
"receives" : 11865.4,
"scandal" : 11861.4,
"donors" : 11858.3,
"deliberate" : 11855.9,
"influenced" : 11855.1,
"intolerable" : 11851.1,
"hearth" : 11849.6,
"symbol" : 11847.2,
"governments" : 11844.8,
"repaired" : 11844.8,
"pleasantly" : 11837.7,
"homage" : 11836.1,
"victorious" : 11835.3,
"columbus" : 11831.4,
"recovery" : 11830.6,
"defined" : 11829.8,
"attendants" : 11828.2,
"modesty" : 11824.2,
"diana" : 11821.1,
"washing" : 11817.9,
"pavement" : 11815.5,
"unnatural" : 11814.8,
"decisive" : 11811.6,
"wisely" : 11806.1,
"precise" : 11799.7,
"negative" : 11798.9,
"occurrence" : 11798.1,
"snatched" : 11796.6,
"shaft" : 11795.0,
"linked" : 11793.4,
"festival" : 11792.6,
"exclusively" : 11788.6,
"jove" : 11788.6,
"wickedness" : 11785.5,
"visions" : 11782.3,
"maggie" : 11779.9,
"rosy" : 11777.6,
"carelessly" : 11776.0,
"stem" : 11775.2,
"corporation" : 11772.0,
"dec" : 11771.2,
"feeding" : 11771.2,
"allen" : 11767.3,
"cows" : 11762.5,
"schemes" : 11754.6,
"preference" : 11753.8,
"urge" : 11753.8,
"husbands" : 11752.3,
"labours" : 11751.5,
"shrill" : 11750.7,
"exercises" : 11746.7,
"sovereignty" : 11745.1,
"reduce" : 11740.4,
"distressed" : 11731.7,
"clearing" : 11730.1,
"removal" : 11727.7,
"dean" : 11725.4,
"scottish" : 11722.2,
"assertion" : 11719.8,
"accessible" : 11718.2,
"comedy" : 11718.2,
"flush" : 11718.2,
"code" : 11715.9,
"philosophers" : 11713.5,
"adequate" : 11711.1,
"vaguely" : 11711.1,
"treason" : 11709.5,
"hunter" : 11702.4,
"chambers" : 11697.7,
"split" : 11695.3,
"yielding" : 11692.9,
"newsletter" : 11691.3,
"snake" : 11689.0,
"pub" : 11685.8,
"historian" : 11681.8,
"ass" : 11678.7,
"intensity" : 11678.7,
"democracy" : 11669.2,
"battery" : 11668.4,
"draws" : 11666.8,
"netherlands" : 11666.8,
"creed" : 11666.0,
"liking" : 11666.0,
"luke" : 11655.7,
"tyrant" : 11654.2,
"strove" : 11647.8,
"attraction" : 11647.0,
"slaughter" : 11647.0,
"dismal" : 11639.1,
"deposited" : 11636.0,
"assent" : 11632.8,
"cups" : 11632.8,
"concert" : 11628.8,
"downward" : 11620.1,
"canal" : 11616.2,
"evenings" : 11612.2,
"wax" : 11612.2,
"detective" : 11609.1,
"fancies" : 11604.3,
"spoiled" : 11604.3,
"revolver" : 11599.6,
"murray" : 11598.0,
"earned" : 11586.1,
"analysis" : 11582.2,
"finer" : 11579.0,
"paces" : 11575.8,
"roaring" : 11573.5,
"prompt" : 11570.3,
"paperwork" : 11568.7,
"wherefore" : 11567.9,
"emphasis" : 11560.8,
"sharing" : 11551.3,
"delayed" : 11550.5,
"inherited" : 11549.7,
"bronze" : 11545.0,
"waking" : 11542.6,
"garment" : 11541.8,
"redistributing" : 11541.8,
"wholesome" : 11537.9,
"remorse" : 11537.1,
"plato" : 11535.5,
"morris" : 11533.1,
"stooped" : 11527.6,
"dew" : 11524.4,
"monk" : 11522.8,
"thrill" : 11522.0,
"hue" : 11515.7,
"exclusive" : 11507.8,
"funds" : 11507.8,
"porter" : 11503.0,
"uncommon" : 11502.3,
"dash" : 11496.7,
"strained" : 11494.3,
"confounded" : 11492.8,
"swim" : 11492.8,
"strip" : 11488.0,
"middle-aged" : 11483.3,
"ultimately" : 11481.7,
"team" : 11477.7,
"missionary" : 11476.9,
"esteemed" : 11461.9,
"tracks" : 11446.9,
"envelope" : 11444.5,
"whoever" : 11443.7,
"expensive" : 11442.9,
"headquarters" : 11442.1,
"cherished" : 11440.5,
"brandy" : 11429.5,
"startling" : 11427.1,
"homer" : 11426.3,
"talks" : 11425.5,
"acute" : 11422.4,
"cigarette" : 11417.6,
"motor" : 11417.6,
"embarrassed" : 11413.6,
"janet" : 11407.3,
"volunteer" : 11402.6,
"offspring" : 11401.8,
"network" : 11397.8,
"reaches" : 11397.8,
"indispensable" : 11397.0,
"plane" : 11393.9,
"reaction" : 11392.3,
"regiments" : 11385.2,
"g" : 11383.6,
"sums" : 11380.4,
"partially" : 11379.6,
"prejudices" : 11375.7,
"proudly" : 11370.1,
"baggage" : 11364.6,
"terrace" : 11360.6,
"deaf" : 11358.3,
"allusion" : 11357.5,
"grip" : 11357.5,
"juice" : 11354.3,
"isabel" : 11346.4,
"resigned" : 11346.4,
"humility" : 11343.2,
"benjamin" : 11330.6,
"blast" : 11330.6,
"ministry" : 11329.8,
"sexual" : 11329.8,
"nile" : 11329.0,
"diameter" : 11327.4,
"troop" : 11325.8,
"onward" : 11316.3,
"crowds" : 11313.2,
"marrying" : 11309.2,
"tightly" : 11309.2,
"sullen" : 11302.1,
"brutal" : 11301.3,
"axe" : 11300.5,
"holmes" : 11293.4,
"penalty" : 11292.6,
"tops" : 11290.2,
"diamond" : 11283.1,
"boards" : 11274.4,
"corridor" : 11274.4,
"endowed" : 11266.5,
"strengthened" : 11266.5,
"cells" : 11248.3,
"proportions" : 11246.7,
"alternate" : 11242.8,
"echo" : 11242.0,
"restraint" : 11241.2,
"trials" : 11240.4,
"reads" : 11239.6,
"identity" : 11238.8,
"headed" : 11238.0,
"softened" : 11237.2,
"quivering" : 11231.7,
"stages" : 11230.1,
"sway" : 11225.4,
"poetical" : 11224.6,
"objected" : 11222.2,
"screen" : 11220.6,
"professed" : 11216.7,
"dirt" : 11215.9,
"ascertained" : 11215.1,
"era" : 11213.5,
"wider" : 11208.0,
"ambassador" : 11205.6,
"constituted" : 11205.6,
"breed" : 11204.0,
"interference" : 11204.0,
"eyebrows" : 11197.7,
"shapes" : 11197.7,
"afar" : 11192.9,
"consist" : 11183.4,
"acceptance" : 11180.3,
"displays" : 11176.3,
"flashing" : 11176.3,
"hunted" : 11173.1,
"beauties" : 11172.4,
"lazy" : 11172.4,
"shrewd" : 11170.0,
"extravagant" : 11169.2,
"momentary" : 11169.2,
"cordial" : 11166.8,
"engineer" : 11166.0,
"rapidity" : 11166.0,
"nov" : 11163.6,
"halt" : 11158.9,
"alternative" : 11158.1,
"devils" : 11156.5,
"stamp" : 11154.9,
"compact" : 11152.6,
"whites" : 11147.0,
"breathless" : 11146.2,
"encoding" : 11146.2,
"drift" : 11145.5,
"disappear" : 11141.5,
"roared" : 11138.3,
"revived" : 11136.8,
"counter" : 11134.4,
"venus" : 11128.8,
"imaginary" : 11128.0,
"diminished" : 11127.3,
"honoured" : 11123.3,
"5th" : 11111.4,
"despatched" : 11111.4,
"objections" : 11111.4,
"ray" : 11110.6,
"climbing" : 11105.1,
"attract" : 11103.5,
"astonishing" : 11099.6,
"competition" : 11097.2,
"suggestions" : 11096.4,
"ink" : 11082.2,
"oft" : 11076.6,
"crystal" : 11074.3,
"shower" : 11074.3,
"diseases" : 11067.1,
"ferdinand" : 11065.5,
"obedient" : 11062.4,
"draught" : 11061.6,
"wondrous" : 11057.6,
"await" : 11049.7,
"armour" : 11048.9,
"massive" : 11048.1,
"bottles" : 11047.4,
"kin" : 11045.8,
"cellar" : 11045.0,
"falsehood" : 11041.8,
"pillars" : 11041.0,
"edgar" : 11038.7,
"philosophical" : 11033.1,
"martha" : 11031.5,
"worlds" : 11022.0,
"memorable" : 11015.7,
"jacques" : 11011.0,
"detected" : 11007.8,
"stealing" : 11006.2,
"noisy" : 11004.6,
"henceforth" : 10996.7,
"cicero" : 10995.1,
"laden" : 10992.0,
"frost" : 10986.4,
"device" : 10984.9,
"glare" : 10984.9,
"touches" : 10984.1,
"senate" : 10981.7,
"lasting" : 10979.3,
"communion" : 10976.9,
"transport" : 10976.2,
"constantinople" : 10975.4,
"coffin" : 10973.8,
"eventually" : 10973.0,
"johnny" : 10973.0,
"enclosed" : 10970.6,
"forgiveness" : 10970.6,
"awfully" : 10966.7,
"clinging" : 10947.7,
"darkened" : 10946.9,
"contemplation" : 10944.5,
"termed" : 10944.5,
"manufacture" : 10942.1,
"swallow" : 10935.0,
"commonplace" : 10931.1,
"nancy" : 10929.5,
"resembled" : 10927.9,
"she'd" : 10927.1,
"labors" : 10922.4,
"contracted" : 10921.6,
"inscription" : 10921.6,
"comfortably" : 10905.7,
"indulge" : 10901.0,
"indulgence" : 10898.6,
"bravely" : 10892.3,
"kneeling" : 10890.7,
"yea" : 10888.3,
"keenly" : 10886.8,
"exhibition" : 10884.4,
"agricultural" : 10881.2,
"enlightened" : 10879.6,
"quest" : 10878.1,
"compliments" : 10874.9,
"crest" : 10868.6,
"extension" : 10865.4,
"uneasiness" : 10856.7,
"constitute" : 10851.2,
"inflicted" : 10850.4,
"lakes" : 10848.0,
"swing" : 10845.6,
"meadow" : 10836.1,
"noblest" : 10836.1,
"downloading" : 10834.5,
"complex" : 10826.6,
"controversy" : 10824.3,
"freed" : 10819.5,
"resignation" : 10819.5,
"tempest" : 10818.7,
"guidance" : 10817.9,
"prospects" : 10816.3,
"humbly" : 10811.6,
"lined" : 10809.2,
"serene" : 10804.5,
"shrugged" : 10797.4,
"honours" : 10796.6,
"roughly" : 10796.6,
"checks" : 10795.0,
"remarkably" : 10789.4,
"dainty" : 10784.7,
"overhead" : 10783.1,
"commencement" : 10778.4,
"singularly" : 10776.8,
"brightness" : 10761.0,
"oppression" : 10760.2,
"repeatedly" : 10759.4,
"conspiracy" : 10755.4,
"restrain" : 10751.5,
"splendor" : 10742.8,
"preservation" : 10740.4,
"pub" : 10739.6,
"pepper" : 10738.0,
"basin" : 10736.4,
"creeping" : 10735.6,
"matthew" : 10730.9,
"publicly" : 10729.3,
"percy" : 10725.4,
"continuing" : 10723.8,
"grove" : 10721.4,
"calamity" : 10719.8,
"pony" : 10715.1,
"vigour" : 10715.1,
"melody" : 10711.1,
"profitable" : 10711.1,
"descendants" : 10710.3,
"hire" : 10704.8,
"speculation" : 10704.0,
"discoveries" : 10699.3,
"accepts" : 10698.5,
"drunken" : 10698.5,
"candidate" : 10696.9,
"principally" : 10694.5,
"worried" : 10692.1,
"obstinate" : 10689.0,
"hasten" : 10686.6,
"foreigners" : 10682.6,
"elderly" : 10681.1,
"overwhelmed" : 10681.1,
"instincts" : 10677.9,
"telegraph" : 10670.8,
"russell" : 10662.9,
"university" : 10661.3,
"ghastly" : 10660.5,
"patron" : 10659.7,
"varying" : 10658.1,
"barbarous" : 10655.7,
"celestial" : 10655.7,
"t'" : 10654.2,
"patriotism" : 10653.4,
"modify" : 10649.4,
"earnestness" : 10647.0,
"exertion" : 10646.2,
"fox" : 10645.5,
"refusing" : 10643.1,
"horsemen" : 10639.1,
"inspection" : 10634.4,
"stations" : 10633.6,
"grieved" : 10632.0,
"louder" : 10632.0,
"bursting" : 10625.7,
"regretted" : 10620.9,
"mournful" : 10615.4,
"pursuing" : 10612.2,
"traitor" : 10609.1,
"associations" : 10601.9,
"cautious" : 10598.0,
"foundations" : 10598.0,
"stamped" : 10598.0,
"prior" : 10597.2,
"undertook" : 10594.0,
"telegram" : 10592.5,
"beggar" : 10591.7,
"chimney" : 10590.9,
"complicated" : 10586.9,
"davis" : 10586.1,
"striving" : 10586.1,
"magistrates" : 10584.5,
"converse" : 10582.2,
"graces" : 10581.4,
"wiped" : 10575.8,
"oars" : 10572.7,
"apology" : 10571.1,
"scared" : 10571.1,
"imprisonment" : 10569.5,
"eastward" : 10567.9,
"substitute" : 10565.6,
"yahweh" : 10560.8,
"handful" : 10557.6,
"usage" : 10556.1,
"lodged" : 10551.3,
"of." : 10551.3,
"villain" : 10551.3,
"banished" : 10541.8,
"restoration" : 10541.0,
"serpent" : 10538.7,
"hedge" : 10534.7,
"k" : 10534.7,
"jurisdiction" : 10532.3,
"captains" : 10530.7,
"settlers" : 10530.7,
"gaining" : 10530.0,
"valiant" : 10530.0,
"primary" : 10525.2,
"storms" : 10525.2,
"beam" : 10522.8,
"victoria" : 10522.8,
"tour" : 10512.5,
"prophecy" : 10510.2,
"spectacles" : 10510.2,
"obsolete" : 10507.0,
"buying" : 10502.3,
"shepherd" : 10500.7,
"wells" : 10500.7,
"harriet" : 10496.7,
"exaggerated" : 10495.1,
"heated" : 10494.4,
"penetrated" : 10493.6,
"travels" : 10492.0,
"earl" : 10489.6,
"hereditary" : 10488.8,
"ali" : 10488.0,
"supernatural" : 10486.4,
"competent" : 10482.5,
"piled" : 10481.7,
"hostess" : 10480.1,
"agriculture" : 10478.5,
"boughs" : 10476.2,
"urgent" : 10476.2,
"gratified" : 10475.4,
"suffice" : 10474.6,
"ports" : 10473.0,
"drifted" : 10470.6,
"accuracy" : 10465.9,
"deceased" : 10465.9,
"circular" : 10463.5,
"securing" : 10458.7,
"possibilities" : 10455.6,
"rhine" : 10454.8,
"alert" : 10451.6,
"neighboring" : 10442.9,
"democratic" : 10440.6,
"quebec" : 10440.6,
"bud" : 10439.8,
"accounted" : 10431.9,
"aided" : 10427.1,
"augustus" : 10425.5,
"blanket" : 10425.5,
"hail" : 10423.9,
"pretence" : 10422.4,
"beams" : 10417.6,
"andy" : 10416.8,
"pig" : 10416.0,
"shaped" : 10408.1,
"oven" : 10407.3,
"rounded" : 10406.5,
"ivory" : 10400.2,
"northward" : 10395.5,
"isolated" : 10390.7,
"policeman" : 10382.0,
"aug" : 10381.2,
"conventional" : 10380.4,
"babylon" : 10378.1,
"dusty" : 10378.1,
"bishops" : 10374.1,
"complaints" : 10372.5,
"stripped" : 10370.1,
"plead" : 10367.8,
"hinder" : 10365.4,
"8vo" : 10363.0,
"cord" : 10355.1,
"flows" : 10351.9,
"personage" : 10351.9,
"classical" : 10351.2,
"alongside" : 10349.6,
"wrongs" : 10348.0,
"extract" : 10345.6,
"rewarded" : 10343.2,
"lungs" : 10334.5,
"lighter" : 10332.2,
"kisses" : 10331.4,
"serves" : 10329.8,
"pint" : 10324.3,
"forgiven" : 10322.7,
"sternly" : 10321.1,
"proclamation" : 10320.3,
"realised" : 10320.3,
"pipes" : 10316.3,
"arising" : 10314.0,
"pitched" : 10314.0,
"tube" : 10310.8,
"observer" : 10308.4,
"smote" : 10308.4,
"avenue" : 10302.9,
"elephant" : 10298.9,
"burke" : 10297.4,
"footing" : 10295.8,
"statesman" : 10295.8,
"rebels" : 10292.6,
"nails" : 10291.0,
"wears" : 10290.2,
"doomed" : 10289.4,
"edges" : 10283.9,
"esther" : 10278.4,
"indiana" : 10276.0,
"affecting" : 10274.4,
"stormy" : 10274.4,
"bee" : 10268.9,
"bury" : 10267.3,
"efficient" : 10266.5,
"mix" : 10266.5,
"supporting" : 10266.5,
"actor" : 10261.8,
"disturbance" : 10261.0,
"sweat" : 10260.2,
"executive" : 10258.6,
"seemingly" : 10258.6,
"tenth" : 10252.3,
"blossoms" : 10250.7,
"ethel" : 10247.5,
"folds" : 10245.1,
"painfully" : 10245.1,
"polish" : 10245.1,
"shudder" : 10240.4,
"oe." : 10235.6,
"roofs" : 10229.3,
"comparative" : 10222.2,
"begging" : 10216.7,
"imposing" : 10216.7,
"notable" : 10213.5,
"invested" : 10209.5,
"imprisoned" : 10208.0,
"mute" : 10199.3,
"amy" : 10196.9,
"cage" : 10195.3,
"esq" : 10195.3,
"pg" : 10192.1,
"cured" : 10190.6,
"cargo" : 10187.4,
"prof." : 10185.8,
"negotiations" : 10181.9,
"assented" : 10180.3,
"jail" : 10180.3,
"skilful" : 10180.3,
"ideals" : 10178.7,
"conferred" : 10177.1,
"resulted" : 10171.6,
"illusion" : 10169.2,
"torment" : 10167.6,
"troublesome" : 10162.1,
"crowns" : 10158.9,
"feb" : 10155.7,
"repentance" : 10152.6,
"blankets" : 10151.0,
"proprietor" : 10144.7,
"uncertainty" : 10144.7,
"concentrated" : 10143.9,
"mediterranean" : 10143.9,
"covers" : 10141.5,
"scream" : 10140.7,
"compromise" : 10137.5,
"respectful" : 10137.5,
"chariot" : 10136.8,
"ammunition" : 10136.0,
"bonnet" : 10135.2,
"secondary" : 10132.8,
"persia" : 10126.5,
"persecution" : 10122.5,
"lesser" : 10120.1,
"assistant" : 10119.4,
"saluted" : 10116.2,
"fits" : 10112.2,
"indulged" : 10111.4,
"springing" : 10109.1,
"cane" : 10106.7,
"fold" : 10106.7,
"boundary" : 10105.1,
"valued" : 10101.9,
"she'll" : 10099.6,
"rugged" : 10098.8,
"aloft" : 10098.0,
"thieves" : 10093.2,
"parlour" : 10091.7,
"indebted" : 10090.1,
"tons" : 10088.5,
"processes" : 10085.3,
"dave" : 10078.2,
"moore" : 10076.6,
"argue" : 10074.3,
"dearly" : 10056.1,
"logic" : 10054.5,
"panic" : 10047.4,
"restrained" : 10045.8,
"lb" : 10043.4,
"vainly" : 10043.4,
"weariness" : 10041.0,
"enlarged" : 10036.3,
"franklin" : 10035.5,
"tasted" : 10033.9,
"rural" : 10030.0,
"torrent" : 10030.0,
"resolute" : 10026.8,
"refrain" : 10024.4,
"kissing" : 10021.3,
"gorgeous" : 10015.7,
"meets" : 10015.7,
"circulation" : 10014.9,
"passionately" : 10009.4,
"inasmuch" : 10005.4,
"unexpectedly" : 10004.6,
"stress" : 10002.3,
"consumption" : 9999.94,
"groan" : 9995.98,
"suits" : 9994.40,
"sustain" : 9993.61,
"hosts" : 9987.28,
"crash" : 9985.70,
"resemble" : 9984.11,
"epoch" : 9979.37,
"quote" : 9975.41,
"lacking" : 9971.46,
"nominally" : 9971.46,
"choked" : 9970.66,
"aristocracy" : 9969.87,
"granite" : 9969.08,
"gradual" : 9967.50,
"delights" : 9964.33,
"hurled" : 9964.33,
"joyful" : 9961.96,
"sack" : 9960.38,
"slumber" : 9958.01,
"detached" : 9956.42,
"snapped" : 9956.42,
"shadowy" : 9954.05,
"accompanying" : 9950.09,
"annoyance" : 9948.51,
"crush" : 9947.72,
"needle" : 9941.39,
"repent" : 9934.27,
"phenomenon" : 9927.94,
"execute" : 9926.36,
"canst" : 9920.82,
"smoked" : 9914.49,
"greet" : 9911.33,
"monarchy" : 9908.16,
"behave" : 9905.00,
"richly" : 9905.00,
"controlled" : 9904.21,
"strive" : 9902.63,
"endeavor" : 9901.84,
"barrier" : 9897.88,
"canadian" : 9897.88,
"curve" : 9890.76,
"politeness" : 9889.18,
"flora" : 9883.64,
"rely" : 9875.73,
"flank" : 9872.56,
"convenience" : 9870.98,
"courteous" : 9866.23,
"logs" : 9866.23,
"lamb" : 9863.86,
"effectually" : 9859.11,
"robinson" : 9856.74,
"logical" : 9855.16,
"shan't" : 9855.16,
"dimly" : 9853.58,
"withered" : 9851.99,
"diet" : 9851.20,
"praises" : 9851.20,
"fulfil" : 9849.62,
"mantle" : 9848.83,
"ne'er" : 9848.83,
"discussing" : 9843.29,
"chicken" : 9838.54,
"judicial" : 9838.54,
"consistent" : 9836.17,
"ridicule" : 9835.38,
"as." : 9834.59,
"reins" : 9834.59,
"barrel" : 9833.01,
"distrust" : 9832.22,
"trunks" : 9829.84,
"verily" : 9829.05,
"hunters" : 9827.47,
"feather" : 9825.10,
"desperately" : 9820.35,
"goodly" : 9817.18,
"habitual" : 9815.60,
"voluntary" : 9815.60,
"luncheon" : 9812.44,
"eighteenth" : 9808.48,
"exertions" : 9807.69,
"expert" : 9806.90,
"coolly" : 9806.11,
"mistakes" : 9804.53,
"tedious" : 9802.94,
"contemplated" : 9796.61,
"clark" : 9795.82,
"jacket" : 9795.82,
"gleaming" : 9792.66,
"shrank" : 9787.91,
"swimming" : 9787.12,
"kent" : 9785.54,
"perplexed" : 9782.37,
"impressive" : 9780.79,
"universally" : 9780.00,
"displeasure" : 9776.84,
"maids" : 9772.09,
"rates" : 9772.09,
"underneath" : 9771.30,
"expedient" : 9767.34,
"emma" : 9758.64,
"impress" : 9755.48,
"bees" : 9751.52,
"bounded" : 9751.52,
"worshipped" : 9750.73,
"resisted" : 9749.94,
"provincial" : 9749.15,
"popularity" : 9748.36,
"baker" : 9747.56,
"shattered" : 9747.56,
"merciful" : 9745.98,
"olive" : 9745.19,
"tramp" : 9742.82,
"compensation" : 9740.44,
"ernest" : 9736.49,
"martial" : 9736.49,
"genial" : 9735.70,
"syria" : 9735.70,
"conjecture" : 9734.91,
"van" : 9734.91,
"waiter" : 9734.11,
"detained" : 9730.95,
"items" : 9728.58,
"promote" : 9727.79,
"delaware" : 9726.99,
"covenant" : 9725.41,
"nought" : 9722.25,
"interposed" : 9721.46,
"seizing" : 9721.46,
"sinner" : 9719.08,
"vigor" : 9715.92,
"devote" : 9715.13,
"decorated" : 9705.63,
"sentimental" : 9700.10,
"yoke" : 9700.10,
"properties" : 9698.51,
"warlike" : 9696.93,
"perilous" : 9694.56,
"threats" : 9687.44,
"kindled" : 9684.27,
"lays" : 9684.27,
"hostility" : 9682.69,
"dragging" : 9679.53,
"mare" : 9677.94,
"regulations" : 9676.36,
"obstacle" : 9674.78,
"sage" : 9673.20,
"destitute" : 9672.41,
"pays" : 9671.62,
"sleepy" : 9669.24,
"dublin" : 9662.91,
"jonathan" : 9661.33,
"posterity" : 9661.33,
"they'd" : 9661.33,
"nod" : 9658.96,
"mason" : 9655.00,
"patriotic" : 9653.42,
"plantation" : 9650.25,
"pitiful" : 9649.46,
"foster" : 9648.67,
"requisite" : 9647.88,
"expose" : 9647.09,
"oxen" : 9647.09,
"patch" : 9647.09,
"anderson" : 9644.72,
"stuart" : 9643.13,
"interruption" : 9641.55,
"lance" : 9641.55,
"payable" : 9639.18,
"definition" : 9637.60,
"birthday" : 9636.81,
"thumb" : 9636.81,
"wolves" : 9635.22,
"hammer" : 9632.06,
"overwhelming" : 9631.27,
"intensely" : 9628.10,
"revolutionary" : 9627.31,
"fragrant" : 9626.52,
"bleeding" : 9625.73,
"sheltered" : 9625.73,
"circuit" : 9624.15,
"dominions" : 9623.36,
"sales" : 9616.24,
"energetic" : 9615.44,
"insignificant" : 9612.28,
"repetition" : 9610.70,
"we'd" : 9602.00,
"amazing" : 9595.67,
"trains" : 9591.71,
"skirts" : 9590.13,
"tip" : 9589.34,
"trivial" : 9589.34,
"kick" : 9588.55,
"tended" : 9586.17,
"rejoicing" : 9581.43,
"dig" : 9579.05,
"pet" : 9575.89,
"skull" : 9575.89,
"lectures" : 9574.31,
"ness" : 9573.51,
"threat" : 9571.93,
"legislature" : 9570.35,
"plunder" : 9567.98,
"removing" : 9567.98,
"jungle" : 9567.19,
"ghosts" : 9566.39,
"numbered" : 9562.44,
"famine" : 9559.27,
"palaces" : 9552.94,
"sorrowful" : 9547.41,
"improvements" : 9543.45,
"coleridge" : 9542.66,
"fuller" : 9540.29,
"asp" : 9533.96,
"blocks" : 9531.58,
"darted" : 9526.05,
"shrine" : 9524.46,
"heel" : 9522.88,
"typical" : 9518.93,
"throws" : 9516.55,
"fortunately" : 9514.97,
"recognise" : 9512.60,
"fuel" : 9511.01,
"6th" : 9507.85,
"tranquil" : 9507.06,
"frown" : 9501.52,
"destination" : 9498.36,
"plunge" : 9494.40,
"moor" : 9488.86,
"pin" : 9485.70,
"mars" : 9484.91,
"associate" : 9484.12,
"here's" : 9480.95,
"owen" : 9480.95,
"10th" : 9480.16,
"arabic" : 9478.58,
"vicious" : 9478.58,
"framed" : 9477.79,
"banquet" : 9477.00,
"expressive" : 9474.62,
"instinctively" : 9461.96,
"lighting" : 9461.96,
"scanning" : 9461.96,
"subordinate" : 9461.17,
"jaws" : 9458.80,
"patent" : 9458.80,
"courtyard" : 9452.47,
"gulf" : 9449.31,
"destroying" : 9446.93,
"detailed" : 9439.81,
"regulating" : 9430.32,
"closet" : 9429.53,
"compel" : 9427.15,
"inland" : 9426.36,
"excepting" : 9423.20,
"pretext" : 9417.66,
"legislative" : 9414.50,
"stationed" : 9413.71,
"rash" : 9408.17,
"margin" : 9401.05,
"champion" : 9400.26,
"settling" : 9400.26,
"billion" : 9396.30,
"shorter" : 9393.93,
"betwixt" : 9393.14,
"admiring" : 9392.34,
"morgan" : 9390.76,
"nick" : 9390.76,
"chemical" : 9389.97,
"chapters" : 9389.18,
"worthless" : 9386.02,
"aristocratic" : 9382.85,
"nan" : 9382.06,
"especial" : 9378.10,
"hon" : 9378.10,
"attentive" : 9376.52,
"maintenance" : 9374.15,
"charlie" : 9372.57,
"explanatory" : 9371.78,
"differently" : 9367.82,
"furiously" : 9367.82,
"pulse" : 9367.03,
"scanty" : 9367.03,
"flee" : 9364.65,
"admiral" : 9363.86,
"clause" : 9360.70,
"resume" : 9359.12,
"compound" : 9358.33,
"pilot" : 9353.58,
"growled" : 9351.21,
"charmed" : 9344.09,
"imitate" : 9341.71,
"happening" : 9339.34,
"knot" : 9335.38,
"rags" : 9329.85,
"mock" : 9320.35,
"majestic" : 9316.40,
"messages" : 9314.02,
"prussian" : 9312.44,
"suspense" : 9312.44,
"clare" : 9310.07,
"relationship" : 9309.28,
"skirt" : 9308.48,
"agency" : 9303.74,
"arisen" : 9302.95,
"grin" : 9301.36,
"unusually" : 9300.57,
"michigan" : 9298.99,
"hoarse" : 9296.62,
"mills" : 9286.33,
"intently" : 9283.17,
"dining" : 9281.59,
"demonstration" : 9280.79,
"depression" : 9277.63,
"lain" : 9276.84,
"expectations" : 9272.88,
"joining" : 9272.09,
"weekly" : 9268.14,
"trenches" : 9267.35,
"technical" : 9262.60,
"vehicle" : 9262.60,
"aimed" : 9253.10,
"borrow" : 9253.10,
"flattering" : 9249.15,
"portugal" : 9248.36,
"prodigious" : 9246.78,
"scope" : 9245.98,
"vegetation" : 9245.98,
"switzerland" : 9243.61,
"arkansas" : 9242.82,
"swelling" : 9242.82,
"fortified" : 9241.24,
"favoured" : 9238.07,
"salute" : 9238.07,
"topic" : 9237.28,
"blushed" : 9233.33,
"superb" : 9231.74,
"strengthen" : 9230.16,
"confidential" : 9228.58,
"crow" : 9227.79,
"shawl" : 9226.21,
"sunrise" : 9226.21,
"sings" : 9224.62,
"coats" : 9220.67,
"sturdy" : 9219.88,
"dissolved" : 9218.30,
"lifetime" : 9218.30,
"dispersed" : 9217.50,
"sergeant" : 9216.71,
"contribute" : 9215.92,
"strode" : 9215.13,
"brigade" : 9214.34,
"verdict" : 9211.97,
"they've" : 9203.26,
"honors" : 9198.52,
"panting" : 9188.23,
"females" : 9187.44,
"richest" : 9187.44,
"attribute" : 9186.65,
"brighter" : 9184.28,
"hook" : 9183.49,
"discontent" : 9173.20,
"orderly" : 9172.41,
"airs" : 9165.29,
"tiger" : 9165.29,
"messengers" : 9163.71,
"penetrate" : 9157.38,
"sabbath" : 9157.38,
"identification" : 9156.59,
"holiness" : 9155.80,
"crooked" : 9154.21,
"housekeeper" : 9144.72,
"productions" : 9142.35,
"prescribed" : 9139.18,
"rector" : 9139.18,
"spark" : 9135.23,
"sleeve" : 9132.06,
"honored" : 9127.31,
"tame" : 9124.94,
"highway" : 9124.15,
"alabama" : 9123.36,
"edmund" : 9118.61,
"militia" : 9113.87,
"nobleman" : 9113.87,
"energies" : 9112.28,
"spacious" : 9109.12,
"tearing" : 9102.00,
"affliction" : 9099.62,
"photograph" : 9094.88,
"ally" : 9090.92,
"hampshire" : 9088.55,
"ascent" : 9086.18,
"ditch" : 9084.59,
"fishes" : 9083.80,
"jupiter" : 9080.64,
"rubbing" : 9071.94,
"tract" : 9069.56,
"standards" : 9064.81,
"afore" : 9064.02,
"ribbon" : 9063.23,
"cecilia" : 9058.49,
"oregon" : 9057.69,
"integrity" : 9054.53,
"plus" : 9051.37,
"transparent" : 9048.20,
"farms" : 9043.45,
"pulpit" : 9036.33,
"ropes" : 9034.75,
"nineteen" : 9033.96,
"rescued" : 9032.38,
"counting" : 9030.80,
"perfume" : 9030.80,
"socrates" : 9030.80,
"hounds" : 9028.42,
"solicited" : 9028.42,
"bother" : 9022.88,
"fascinating" : 9016.56,
"qualified" : 9016.56,
"desolation" : 9015.76,
"essay" : 9014.97,
"rains" : 9014.97,
"renew" : 9014.18,
"odious" : 9011.81,
"assuredly" : 9011.02,
"suggests" : 9005.48,
"rider" : 8996.78,
"loneliness" : 8992.82,
"pond" : 8992.03,
"activities" : 8982.54,
"dazzling" : 8981.75,
"leaping" : 8981.75,
"squadron" : 8980.16,
"bowing" : 8977.79,
"novelty" : 8977.79,
"wrist" : 8971.46,
"keeper" : 8968.30,
"homeward" : 8966.71,
"alexandria" : 8965.92,
"finely" : 8962.76,
"li" : 8960.39,
"efficiency" : 8958.80,
"marvel" : 8956.43,
"tranquillity" : 8954.85,
"agnes" : 8951.68,
"charities" : 8951.68,
"spenser" : 8950.10,
"condemn" : 8946.14,
"elephants" : 8945.35,
"elders" : 8942.98,
"julian" : 8942.98,
"tries" : 8942.98,
"2nd" : 8939.82,
"sweetly" : 8939.02,
"endurance" : 8937.44,
"bags" : 8936.65,
"reared" : 8932.70,
"jaw" : 8931.90,
"unique" : 8931.90,
"navigation" : 8931.11,
"inevitably" : 8928.74,
"admirably" : 8927.16,
"sect" : 8927.16,
"drum" : 8923.20,
"poles" : 8921.62,
"verge" : 8918.46,
"piercing" : 8912.13,
"sanction" : 8911.33,
"russians" : 8904.21,
"forlorn" : 8901.84,
"approbation" : 8899.47,
"organic" : 8895.51,
"stanley" : 8895.51,
"allegiance" : 8891.56,
"bin" : 8891.56,
"expressly" : 8879.69,
"ingenuity" : 8877.32,
"dispose" : 8873.36,
"stained" : 8873.36,
"theology" : 8870.20,
"withal" : 8870.20,
"duration" : 8868.61,
"fundraising" : 8851.21,
"adj." : 8846.46,
"collecting" : 8846.46,
"weigh" : 8841.71,
"sweetest" : 8840.13,
"float" : 8839.34,
"consul" : 8837.76,
"monastery" : 8837.76,
"raging" : 8836.18,
"publish" : 8833.80,
"knocking" : 8832.22,
"precaution" : 8832.22,
"privately" : 8832.22,
"aaron" : 8828.27,
"endeavored" : 8827.47,
"insight" : 8827.47,
"definitely" : 8825.89,
"stature" : 8825.10,
"troy" : 8825.10,
"miriam" : 8824.31,
"judah" : 8823.52,
"oblige" : 8822.73,
"urging" : 8816.40,
"shift" : 8810.86,
"mould" : 8808.49,
"courses" : 8807.70,
"countless" : 8806.11,
"associates" : 8798.20,
"hymn" : 8797.41,
"rapture" : 8797.41,
"tonight" : 8797.41,
"trumpet" : 8795.83,
"parker" : 8791.87,
"entrusted" : 8787.92,
"firmness" : 8787.92,
"comic" : 8780.80,
"breeding" : 8780.01,
"ken" : 8775.26,
"questioning" : 8772.89,
"factor" : 8772.10,
"monuments" : 8767.35,
"loveliness" : 8766.56,
"handled" : 8761.02,
"communities" : 8754.69,
"saloon" : 8754.69,
"stumbled" : 8749.15,
"witch" : 8748.36,
"confronted" : 8747.57,
"traveling" : 8747.57,
"seamen" : 8745.99,
"backed" : 8744.41,
"profoundly" : 8742.03,
"gladness" : 8738.87,
"pomp" : 8737.29,
"mess" : 8735.70,
"practise" : 8734.12,
"sanctuary" : 8734.12,
"superstitious" : 8734.12,
"casual" : 8732.54,
"iowa" : 8732.54,
"analyzed" : 8725.42,
"historic" : 8724.63,
"bored" : 8723.84,
"shrink" : 8723.84,
"judging" : 8723.04,
"treating" : 8717.51,
"expenditure" : 8716.72,
"encouraging" : 8715.13,
"diplomatic" : 8714.34,
"forcing" : 8713.55,
"studio" : 8712.76,
"exposure" : 8710.39,
"crude" : 8705.64,
"compilation" : 8704.85,
"vermont" : 8692.98,
"eve" : 8689.82,
"ascend" : 8689.03,
"unbroken" : 8689.03,
"apollo" : 8688.23,
"countess" : 8682.70,
"binding" : 8680.32,
"exceed" : 8677.95,
"frail" : 8677.16,
"hans" : 8676.37,
"champagne" : 8671.62,
"shuddered" : 8671.62,
"carter" : 8670.83,
"mule" : 8667.67,
"inserted" : 8666.87,
"parson" : 8666.08,
"rascal" : 8664.50,
"inspire" : 8660.55,
"banner" : 8657.38,
"divorce" : 8655.01,
"treacherous" : 8655.01,
"nineteenth" : 8651.84,
"invalid" : 8650.26,
"weaker" : 8650.26,
"organizations" : 8648.68,
"bolt" : 8646.30,
"ticket" : 8643.14,
"backwards" : 8642.35,
"captivity" : 8642.35,
"lame" : 8640.77,
"provoked" : 8639.18,
"vein" : 8636.02,
"lists" : 8625.74,
"gallop" : 8624.94,
"communications" : 8622.57,
"dagger" : 8619.41,
"passive" : 8618.62,
"shoe" : 8618.62,
"thrice" : 8613.08,
"corrected" : 8611.49,
"mystic" : 8605.17,
"infancy" : 8602.00,
"foam" : 8600.42,
"keith" : 8600.42,
"tavern" : 8599.63,
"fraud" : 8597.25,
"7th" : 8596.46,
"cradle" : 8594.09,
"rifles" : 8589.34,
"vigorously" : 8589.34,
"censure" : 8587.76,
"gentleness" : 8587.76,
"jr" : 8587.76,
"sobbing" : 8586.18,
"monotonous" : 8579.85,
"explosion" : 8578.27,
"catastrophe" : 8570.36,
"respectfully" : 8567.98,
"wearied" : 8565.61,
"cats" : 8564.82,
"blamed" : 8563.24,
"needful" : 8562.44,
"fireplace" : 8561.65,
"gravel" : 8560.86,
"affords" : 8560.07,
"discovering" : 8560.07,
"jar" : 8560.07,
"selfishness" : 8557.70,
"tolerably" : 8553.74,
"clerks" : 8551.37,
"ark" : 8549.00,
"moist" : 8549.00,
"wid" : 8544.25,
"sauce" : 8543.46,
"prompted" : 8534.75,
"exceptions" : 8532.38,
"bullets" : 8527.63,
"writ" : 8527.63,
"bruce" : 8526.84,
"insolent" : 8523.68,
"moisture" : 8523.68,
"thompson" : 8522.89,
"furnace" : 8519.72,
"healing" : 8519.72,
"fewer" : 8517.35,
"deem" : 8515.77,
"apron" : 8513.39,
"humiliation" : 8513.39,
"punctuation" : 8512.60,
"rolls" : 8511.81,
"doe" : 8509.44,
"rotten" : 8507.86,
"richer" : 8505.48,
"swiss" : 8505.48,
"behavior" : 8503.90,
"nowadays" : 8501.53,
"pamphlet" : 8499.15,
"loan" : 8497.57,
"beads" : 8494.41,
"divers" : 8493.62,
"unreasonable" : 8492.03,
"realise" : 8490.45,
"lust" : 8484.12,
"ah" : 8480.17,
"annually" : 8479.38,
"detach" : 8478.58,
"gaily" : 8477.00,
"shares" : 8477.00,
"gifted" : 8473.05,
"planet" : 8473.05,
"feverish" : 8466.72,
"resurrection" : 8466.72,
"saul" : 8464.34,
"consecrated" : 8461.97,
"enforced" : 8460.39,
"vincent" : 8453.27,
"shelf" : 8451.69,
"fan" : 8450.89,
"fluid" : 8449.31,
"brightly" : 8448.52,
"damsel" : 8448.52,
"gabriel" : 8447.73,
"kid" : 8446.94,
"frantic" : 8441.40,
"neatly" : 8441.40,
"anon" : 8435.86,
"ascribed" : 8435.07,
"insane" : 8435.07,
"tropical" : 8434.28,
"8th" : 8431.12,
"milan" : 8429.53,
"hardened" : 8427.95,
"overthrow" : 8427.16,
"phase" : 8427.16,
"achievement" : 8424.79,
"immortality" : 8424.79,
"obscurity" : 8421.62,
"assumption" : 8420.04,
"discern" : 8412.92,
"hopeful" : 8412.13,
"humorous" : 8410.55,
"composure" : 8408.17,
"turf" : 8408.17,
"poland" : 8407.38,
"dame" : 8406.59,
"missionaries" : 8406.59,
"orator" : 8405.01,
"perpetually" : 8405.01,
"arbitrary" : 8403.43,
"ecstasy" : 8397.89,
"retirement" : 8397.10,
"pronounce" : 8393.14,
"authorized" : 8387.60,
"familiarity" : 8387.60,
"nl" : 8386.02,
"hastings" : 8384.44,
"clubs" : 8383.65,
"reconciled" : 8383.65,
"grievous" : 8382.86,
"mercury" : 8381.27,
"elegance" : 8379.69,
"chivalry" : 8378.90,
"luminous" : 8377.32,
"beseech" : 8375.74,
"benevolent" : 8374.95,
"confided" : 8374.95,
"dances" : 8374.15,
"perplexity" : 8370.20,
"escaping" : 8369.41,
"terrific" : 8369.41,
"companionship" : 8365.45,
"commence" : 8364.66,
"daisy" : 8364.66,
"parliament" : 8361.50,
"9th" : 8353.58,
"creep" : 8352.79,
"pleading" : 8347.26,
"disdain" : 8345.67,
"pm" : 8345.67,
"sympathies" : 8341.72,
"guides" : 8340.93,
"emergency" : 8338.55,
"parcel" : 8337.76,
"suicide" : 8337.76,
"replies" : 8336.18,
"drawer" : 8335.39,
"contribution" : 8334.60,
"supposition" : 8331.43,
"vii" : 8329.85,
"weren't" : 8325.90,
"link" : 8325.10,
"homely" : 8321.94,
"pluck" : 8321.94,
"ruling" : 8317.19,
"patrick" : 8316.40,
"statesmen" : 8311.65,
"hannah" : 8310.86,
"printing" : 8310.86,
"joshua" : 8309.28,
"synonymous" : 8307.70,
"sinister" : 8306.91,
"advocate" : 8306.12,
"destructive" : 8304.53,
"environment" : 8304.53,
"blossom" : 8302.16,
"bridle" : 8296.62,
"yon" : 8295.83,
"waistcoat" : 8291.88,
"extends" : 8291.09,
"confirm" : 8289.50,
"listing" : 8287.13,
"solemnity" : 8287.13,
"projects" : 8284.76,
"reporter" : 8284.76,
"deprive" : 8283.17,
"detachment" : 8280.80,
"infernal" : 8270.52,
"traversed" : 8269.72,
"moss" : 8266.56,
"skilled" : 8264.98,
"announce" : 8263.40,
"hateful" : 8260.23,
"fugitive" : 8257.07,
"gothic" : 8257.07,
"coolness" : 8256.28,
"insurrection" : 8254.69,
"cum" : 8252.32,
"med" : 8251.53,
"coachman" : 8249.16,
"expend" : 8249.16,
"stepping" : 8248.36,
"julius" : 8242.83,
"resign" : 8237.29,
"despatch" : 8233.33,
"excluded" : 8233.33,
"reject" : 8233.33,
"tough" : 8230.17,
"plea" : 8228.59,
"roy" : 8227.79,
"fragment" : 8227.00,
"lacked" : 8227.00,
"wordsworth" : 8224.63,
"balcony" : 8223.84,
"darker" : 8222.26,
"mac" : 8222.26,
"nevada" : 8222.26,
"christopher" : 8219.88,
"fork" : 8219.88,
"flatter" : 8219.09,
"iniquity" : 8212.76,
"meditation" : 8212.76,
"disastrous" : 8211.18,
"stain" : 8209.60,
"patches" : 8208.02,
"hints" : 8203.27,
"ordained" : 8198.52,
"drinks" : 8197.73,
"whipped" : 8197.73,
"burial" : 8196.15,
"matt" : 8196.15,
"employee" : 8193.78,
"employer" : 8192.19,
"hypothesis" : 8192.19,
"steed" : 8191.40,
"width" : 8189.82,
"sweden" : 8187.45,
"transaction" : 8184.28,
"victories" : 8182.70,
"devout" : 8181.91,
"outrage" : 8181.91,
"vary" : 8179.54,
"attorney" : 8176.37,
"rouse" : 8175.58,
"doubled" : 8170.04,
"sidney" : 8168.46,
"schooner" : 8166.88,
"flaming" : 8165.29,
"offend" : 8161.34,
"sheriff" : 8161.34,
"encamped" : 8160.55,
"magnificence" : 8158.17,
"vent" : 8145.52,
"politely" : 8144.73,
"vines" : 8144.73,
"flags" : 8142.35,
"italians" : 8138.40,
"necessities" : 8136.81,
"austin" : 8135.23,
"nobler" : 8132.07,
"accusation" : 8117.04,
"impulses" : 8113.08,
"packet" : 8112.29,
"shabby" : 8111.50,
"irritated" : 8108.33,
"dakota" : 8107.54,
"industrious" : 8105.17,
"classic" : 8103.59,
"ranch" : 8102.80,
"ascending" : 8097.26,
"cruelly" : 8096.47,
"happiest" : 8095.68,
"antonio" : 8094.88,
"accuse" : 8092.51,
"insulted" : 8089.35,
"bridges" : 8086.97,
"players" : 8086.97,
"sixteenth" : 8084.60,
"solicitation" : 8083.81,
"embarked" : 8075.11,
"idol" : 8071.94,
"odds" : 8071.94,
"aims" : 8067.99,
"illuminated" : 8067.99,
"enchanted" : 8064.82,
"adversary" : 8060.87,
"pie" : 8060.87,
"reflecting" : 8059.28,
"pension" : 8057.70,
"luxurious" : 8056.12,
"pigs" : 8055.33,
"choir" : 8053.74,
"tumbled" : 8052.16,
"conqueror" : 8051.37,
"irritation" : 8049.00,
"verb" : 8049.00,
"monkey" : 8046.62,
"acceptable" : 8045.04,
"dynasty" : 8045.04,
"accurately" : 8043.46,
"divinity" : 8042.67,
"signature" : 8042.67,
"heretofore" : 8041.88,
"hazard" : 8041.09,
"dora" : 8040.30,
"sq." : 8039.50,
"stead" : 8037.92,
"attire" : 8037.13,
"fling" : 8036.34,
"marine" : 8034.76,
"occupations" : 8031.59,
"soothing" : 8031.59,
"devised" : 8030.80,
"singer" : 8028.43,
"spaces" : 8027.64,
"emerson" : 8026.06,
"disguised" : 8023.68,
"antique" : 8022.10,
"orthodox" : 8019.73,
"poisoned" : 8016.56,
"dove" : 8015.77,
"gratification" : 8014.98,
"sydney" : 8011.81,
"electricity" : 8008.65,
"alien" : 8002.32,
"sorely" : 8002.32,
"cracked" : 7994.41,
"supremacy" : 7994.41,
"summon" : 7991.25,
"depressed" : 7989.66,
"sexes" : 7988.87,
"offerings" : 7988.08,
"pledged" : 7988.08,
"irony" : 7987.29,
"recourse" : 7982.54,
"tortured" : 7982.54,
"thickly" : 7978.59,
"correspondent" : 7976.21,
"sounding" : 7975.42,
"64" : 7973.84,
"sombre" : 7969.88,
"brushed" : 7968.30,
"reasonably" : 7957.23,
"12th" : 7956.44,
"duel" : 7956.44,
"reluctantly" : 7956.44,
"implies" : 7955.64,
"cable" : 7954.85,
"ridden" : 7949.32,
"acre" : 7948.52,
"grieve" : 7945.36,
"inquiring" : 7944.57,
"colonists" : 7942.19,
"addison" : 7938.24,
"republican" : 7938.24,
"illustrate" : 7937.45,
"tim" : 7937.45,
"liverpool" : 7936.66,
"gilded" : 7935.87,
"clumsy" : 7935.07,
"satin" : 7931.12,
"displeased" : 7927.95,
"odor" : 7927.16,
"clearer" : 7926.37,
"prairie" : 7921.63,
"hudson" : 7919.25,
"feudal" : 7916.09,
"flint" : 7908.97,
"easter" : 7908.18,
"freshness" : 7908.18,
"nursery" : 7906.59,
"explanations" : 7905.01,
"adoption" : 7902.64,
"reluctance" : 7902.64,
"crosses" : 7898.68,
"blushing" : 7897.89,
"imported" : 7897.89,
"notorious" : 7895.52,
"equipped" : 7893.94,
"sinful" : 7890.77,
"starving" : 7890.77,
"eugene" : 7886.02,
"bedside" : 7884.44,
"sovereigns" : 7883.65,
"abrupt" : 7882.86,
"excused" : 7879.70,
"injure" : 7877.32,
"incessant" : 7876.53,
"correctly" : 7874.95,
"drooping" : 7872.58,
"adored" : 7871.78,
"embroidered" : 7871.78,
"pasture" : 7871.78,
"pillar" : 7870.20,
"import" : 7867.83,
"founder" : 7862.29,
"torch" : 7862.29,
"vault" : 7862.29,
"worm" : 7862.29,
"ay" : 7859.92,
"bravery" : 7859.13,
"confinement" : 7853.59,
"trusting" : 7846.47,
"butler" : 7844.89,
"rattle" : 7844.89,
"transported" : 7844.89,
"estimation" : 7844.09,
"edit" : 7840.93,
"gotten" : 7839.35,
"cuts" : 7838.56,
"headlong" : 7836.97,
"outfit" : 7836.18,
"insolence" : 7829.85,
"secrecy" : 7829.85,
"thereupon" : 7820.36,
"unlucky" : 7817.20,
"eighth" : 7816.40,
"valour" : 7815.61,
"grammar" : 7814.03,
"relaxed" : 7812.45,
"mentions" : 7804.54,
"adjacent" : 7802.96,
"knives" : 7802.16,
"attacking" : 7801.37,
"exceptional" : 7801.37,
"recollections" : 7800.58,
"deposit" : 7794.25,
"establishing" : 7794.25,
"muddy" : 7794.25,
"arches" : 7793.46,
"aspects" : 7790.30,
"senior" : 7788.71,
"fragrance" : 7785.55,
"colonial" : 7784.76,
"penetrating" : 7783.18,
"refinement" : 7779.22,
"te" : 7776.85,
"yacht" : 7776.85,
"intelligible" : 7776.06,
"stray" : 7772.89,
"forcibly" : 7771.31,
"jenny" : 7771.31,
"superficial" : 7771.31,
"tends" : 7767.35,
"identified" : 7766.56,
"wan" : 7766.56,
"choosing" : 7764.19,
"frighten" : 7762.61,
"grotesque" : 7762.61,
"reprinted" : 7761.82,
"tutor" : 7761.82,
"contributing" : 7761.03,
"welsh" : 7757.07,
"gaiety" : 7756.28,
"besieged" : 7753.90,
"robbery" : 7753.11,
"transmitted" : 7753.11,
"swam" : 7749.95,
"consequential" : 7746.78,
"slid" : 7743.62,
"stony" : 7742.83,
"donald" : 7741.25,
"gratify" : 7741.25,
"heavier" : 7741.25,
"confidently" : 7740.46,
"mabel" : 7739.66,
"demon" : 7734.92,
"treatise" : 7734.92,
"mechanically" : 7732.54,
"batteries" : 7728.59,
"trading" : 7728.59,
"cock" : 7727.80,
"pilgrimage" : 7727.80,
"extinct" : 7726.22,
"idleness" : 7725.42,
"sicily" : 7724.63,
"merrily" : 7723.84,
"excursion" : 7721.47,
"handling" : 7719.89,
"utah" : 7719.89,
"eminence" : 7718.30,
"lump" : 7714.35,
"boyhood" : 7713.56,
"montana" : 7713.56,
"superfluous" : 7713.56,
"wee" : 7711.97,
"dome" : 7709.60,
"shivering" : 7708.81,
"accidental" : 7708.02,
"thickness" : 7708.02,
"darwin" : 7706.44,
"continuance" : 7704.06,
"fixing" : 7703.27,
"harris" : 7703.27,
"rustic" : 7703.27,
"cheered" : 7697.73,
"vernon" : 7696.94,
"premises" : 7694.57,
"delivery" : 7687.45,
"nodding" : 7687.45,
"snowy" : 7681.91,
"curved" : 7680.33,
"productive" : 7679.54,
"discouraged" : 7677.96,
"variations" : 7677.16,
"shilling" : 7674.79,
"swollen" : 7674.79,
"miraculous" : 7673.21,
"stubborn" : 7673.21,
"belgium" : 7669.25,
"drives" : 7668.46,
"jerome" : 7667.67,
"orchard" : 7666.88,
"persuasion" : 7666.88,
"invaded" : 7666.09,
"alps" : 7661.34,
"ungrateful" : 7658.97,
"insensible" : 7658.18,
"muscle" : 7655.80,
"madrid" : 7655.01,
"flanders" : 7654.22,
"cultivate" : 7652.64,
"involuntarily" : 7652.64,
"speedy" : 7651.06,
"variation" : 7649.48,
"marian" : 7648.68,
"harp" : 7647.89,
"peaks" : 7643.94,
"daybreak" : 7642.35,
"magnitude" : 7642.35,
"precautions" : 7640.77,
"rub" : 7640.77,
"requiring" : 7638.40,
"coral" : 7636.03,
"grapes" : 7634.44,
"fairest" : 7628.91,
"locality" : 7628.91,
"opponent" : 7622.58,
"bondage" : 7621.79,
"beans" : 7620.99,
"cowardly" : 7619.41,
"grandson" : 7614.67,
"leo" : 7612.29,
"gertrude" : 7605.17,
"nail" : 7605.17,
"protecting" : 7604.38,
"hospitable" : 7603.59,
"proving" : 7603.59,
"benevolence" : 7594.89,
"brussels" : 7594.89,
"civilisation" : 7594.89,
"mounting" : 7591.72,
"desiring" : 7590.93,
"rushes" : 7588.56,
"precision" : 7587.77,
"watchful" : 7586.18,
"harness" : 7584.60,
"perchance" : 7584.60,
"forbade" : 7579.06,
"channels" : 7577.48,
"indication" : 7576.69,
"zealous" : 7576.69,
"tact" : 7574.32,
"seventeenth" : 7567.99,
"theodore" : 7565.61,
"stating" : 7564.82,
"toast" : 7564.03,
"dreadfully" : 7562.45,
"judith" : 7561.66,
"asterisk" : 7560.08,
"virgil" : 7559.29,
"edifice" : 7556.12,
"swelled" : 7556.12,
"accomplishment" : 7555.33,
"sundry" : 7550.58,
"reckoning" : 7548.21,
"mouse" : 7544.25,
"prostrate" : 7544.25,
"helm" : 7541.09,
"slim" : 7541.09,
"whistling" : 7537.93,
"syllable" : 7537.13,
"handwriting" : 7536.34,
"commissioners" : 7535.55,
"lime" : 7535.55,
"spur" : 7534.76,
"unfit" : 7532.39,
"gen." : 7531.60,
"relish" : 7529.22,
"reduction" : 7526.06,
"sown" : 7526.06,
"venetian" : 7525.27,
"cordially" : 7521.31,
"hush" : 7520.52,
"breasts" : 7515.77,
"slipping" : 7514.98,
"pat" : 7513.40,
"arabian" : 7512.61,
"dialogue" : 7511.82,
"forwards" : 7511.82,
"entreat" : 7511.03,
"fascination" : 7510.24,
"belly" : 7509.44,
"neutral" : 7509.44,
"grasping" : 7507.86,
"diligence" : 7505.49,
"disgusted" : 7504.70,
"retiring" : 7503.12,
"strokes" : 7500.74,
"sob" : 7497.58,
"vine" : 7496.00,
"compose" : 7495.20,
"valentine" : 7492.04,
"harvey" : 7489.67,
"icy" : 7488.08,
"inconvenience" : 7488.08,
"v" : 7483.34,
"pots" : 7482.55,
"dimensions" : 7480.96,
"abused" : 7479.38,
"armor" : 7478.59,
"detect" : 7478.59,
"contradiction" : 7473.84,
"banker" : 7468.31,
"infamous" : 7463.56,
"powerless" : 7461.19,
"passenger" : 7458.81,
"crust" : 7456.44,
"historians" : 7455.65,
"disclaim" : 7453.27,
"norway" : 7451.69,
"peculiarities" : 7450.90,
"sting" : 7450.90,
"simultaneously" : 7445.36,
"watches" : 7445.36,
"cong." : 7444.57,
"episode" : 7443.78,
"achieve" : 7439.82,
"populace" : 7439.03,
"sherman" : 7439.03,
"incense" : 7438.24,
"rebecca" : 7436.66,
"jordan" : 7435.08,
"persistent" : 7435.08,
"wisconsin" : 7435.08,
"ho" : 7428.75,
"ta" : 7428.75,
"fruitful" : 7427.17,
"scoundrel" : 7427.17,
"coasts" : 7424.00,
"starve" : 7419.26,
"denmark" : 7415.30,
"scots" : 7415.30,
"consultation" : 7414.51,
"habitation" : 7410.55,
"goat" : 7406.60,
"howling" : 7406.60,
"tailor" : 7406.60,
"flourish" : 7401.85,
"trifles" : 7394.73,
"dashing" : 7393.94,
"disappearance" : 7393.15,
"sour" : 7393.15,
"practicable" : 7390.77,
"shameful" : 7389.19,
"inviting" : 7386.03,
"criminals" : 7383.65,
"leisurely" : 7383.65,
"accumulated" : 7382.07,
"audible" : 7380.49,
"topics" : 7380.49,
"expends" : 7378.91,
"radiance" : 7377.32,
"underline" : 7375.74,
"parade" : 7374.95,
"spoils" : 7374.95,
"helmet" : 7365.46,
"consternation" : 7364.67,
"expenditures" : 7364.67,
"impose" : 7363.88,
"originator" : 7363.08,
"pa" : 7362.29,
"unequal" : 7362.29,
"wooded" : 7356.76,
"enduring" : 7352.01,
"ox" : 7349.64,
"valet" : 7349.64,
"proclaim" : 7348.05,
"carl" : 7346.47,
"impossibility" : 7346.47,
"lydia" : 7344.10,
"territories" : 7342.51,
"deference" : 7340.93,
"ravine" : 7340.93,
"geoffrey" : 7339.35,
"blanche" : 7336.98,
"accommodation" : 7333.02,
"boyish" : 7331.44,
"spray" : 7329.07,
"theological" : 7328.27,
"anonymous" : 7327.48,
"injurious" : 7326.69,
"formally" : 7324.32,
"sports" : 7324.32,
"ab" : 7322.74,
"scales" : 7322.74,
"wyoming" : 7321.95,
"discontinue" : 7321.15,
"calf" : 7319.57,
"manual" : 7318.78,
"disturbing" : 7317.99,
"potent" : 7317.20,
"anticipation" : 7316.41,
"melt" : 7314.83,
"tilde" : 7314.83,
"thames" : 7314.03,
"grade" : 7312.45,
"mischievous" : 7310.87,
"pang" : 7310.87,
"pathos" : 7308.50,
"alternately" : 7306.12,
"brisk" : 7305.33,
"stool" : 7304.54,
"justification" : 7299.79,
"foreigner" : 7298.21,
"endeavouring" : 7297.42,
"satire" : 7297.42,
"al" : 7295.84,
"delete" : 7294.26,
"masculine" : 7293.46,
"spies" : 7291.88,
"umbrella" : 7284.76,
"transportation" : 7283.18,
"yell" : 7281.60,
"remnant" : 7280.81,
"boot" : 7279.22,
"ignored" : 7276.06,
"thrilling" : 7276.06,
"ale" : 7270.52,
"mineral" : 7265.77,
"goose" : 7263.40,
"nebraska" : 7261.82,
"truce" : 7261.82,
"lastly" : 7260.24,
"airy" : 7254.70,
"sketches" : 7254.70,
"groves" : 7253.91,
"col." : 7253.12,
"11th" : 7250.74,
"comprehension" : 7250.74,
"cling" : 7247.58,
"duck" : 7247.58,
"abyss" : 7246.79,
"alaska" : 7246.79,
"baffled" : 7246.79,
"planning" : 7246.00,
"abominable" : 7235.71,
"aversion" : 7235.71,
"drawings" : 7234.13,
"customers" : 7233.34,
"weird" : 7230.96,
"stewart" : 7230.17,
"traveled" : 7230.17,
"alan" : 7228.59,
"incessantly" : 7226.22,
"flattery" : 7223.84,
"director" : 7221.47,
"improbable" : 7221.47,
"moderation" : 7219.89,
"awakening" : 7219.10,
"males" : 7219.10,
"pairs" : 7218.31,
"temporal" : 7217.52,
"con" : 7215.93,
"nicely" : 7215.93,
"lapse" : 7212.77,
"vitality" : 7211.98,
"soap" : 7208.02,
"patriot" : 7207.23,
"malicious" : 7206.44,
"eyed" : 7205.65,
"pirates" : 7205.65,
"enforce" : 7203.28,
"doll" : 7199.32,
"briskly" : 7195.36,
"sez" : 7191.41,
"skeleton" : 7189.83,
"comprehensive" : 7188.24,
"buttons" : 7187.45,
"crushing" : 7186.66,
"personages" : 7185.08,
"threaten" : 7184.29,
"nuts" : 7182.71,
"undone" : 7181.12,
"wright" : 7181.12,
"frankness" : 7179.54,
"hides" : 7177.96,
"progressive" : 7170.05,
"rogers" : 7170.05,
"villa" : 7163.72,
"aristotle" : 7160.55,
"resource" : 7160.55,
"irs" : 7155.81,
"confine" : 7154.22,
"sewing" : 7154.22,
"co" : 7148.69,
"congratulate" : 7144.73,
"walt" : 7141.57,
"reconcile" : 7139.19,
"insurance" : 7138.40,
"terminated" : 7137.61,
"dusky" : 7134.45,
"appoint" : 7133.66,
"pearl" : 7132.86,
"thrilled" : 7131.28,
"gains" : 7127.33,
"interrupt" : 7122.58,
"extravagance" : 7121.79,
"jokes" : 7121.79,
"suppress" : 7121.79,
"quod" : 7121.00,
"signify" : 7120.21,
"layer" : 7117.04,
"clue" : 7116.25,
"kettle" : 7115.46,
"contemplate" : 7113.09,
"aforesaid" : 7111.50,
"tooth" : 7109.13,
"sensibility" : 7106.76,
"boldness" : 7105.97,
"mature" : 7105.17,
"cuba" : 7098.05,
"tolerable" : 7096.47,
"rabbit" : 7095.68,
"befallen" : 7092.52,
"needless" : 7092.52,
"yankee" : 7091.73,
"awaken" : 7083.02,
"clasp" : 7083.02,
"lets" : 7080.65,
"blinded" : 7079.07,
"conductor" : 7078.28,
"dependence" : 7077.48,
"guarantee" : 7076.69,
"affectionately" : 7073.53,
"player" : 7072.74,
"wires" : 7072.74,
"thicket" : 7066.41,
"walker" : 7062.45,
"outstretched" : 7061.66,
"procedure" : 7061.66,
"wheeled" : 7060.87,
"aye" : 7059.29,
"oneself" : 7056.12,
"recommendation" : 7055.33,
"projecting" : 7054.54,
"shriek" : 7052.96,
"futile" : 7052.17,
"cheerfulness" : 7051.38,
"deity" : 7051.38,
"fifteenth" : 7045.84,
"gap" : 7045.05,
"muscular" : 7045.05,
"dripping" : 7044.26,
"insect" : 7041.88,
"values" : 7039.51,
"brooding" : 7038.72,
"restaurant" : 7037.14,
"baptism" : 7036.35,
"imaginative" : 7036.35,
"rhyme" : 7033.18,
"exhaustion" : 7031.60,
"intrigue" : 7031.60,
"senseless" : 7031.60,
"hercules" : 7030.81,
"yearly" : 7030.81,
"baron" : 7028.43,
"occupying" : 7026.85,
"imply" : 7022.90,
"absurdity" : 7020.52,
"launched" : 7020.52,
"resolutely" : 7015.78,
"vowed" : 7014.99,
"attach" : 7011.82,
"characterized" : 7010.24,
"fellowship" : 7010.24,
"posture" : 7006.28,
"caps" : 7005.49,
"leon" : 7004.70,
"demanding" : 7003.12,
"owl" : 7002.33,
"beset" : 7001.54,
"ensuring" : 7001.54,
"suite" : 6997.58,
"tennyson" : 6996.79,
"thereto" : 6996.00,
"heaped" : 6992.04,
"jewel" : 6992.04,
"regained" : 6991.25,
"voluntarily" : 6984.92,
"longitude" : 6977.01,
"permanently" : 6976.22,
"jumping" : 6974.64,
"babe" : 6973.85,
"secondly" : 6973.06,
"violin" : 6971.47,
"rogue" : 6969.10,
"rainy" : 6968.31,
"reconciliation" : 6968.31,
"emotional" : 6967.52,
"radical" : 6962.77,
"accursed" : 6958.81,
"tendencies" : 6958.02,
"concrete" : 6957.23,
"resident" : 6956.44,
"lustre" : 6954.86,
"hull" : 6954.07,
"ominous" : 6953.28,
"overboard" : 6952.49,
"uproar" : 6951.69,
"cavern" : 6950.90,
"combine" : 6950.11,
"respectively" : 6950.11,
"menace" : 6946.95,
"pilgrims" : 6945.37,
"jeff" : 6942.99,
"peak" : 6942.20,
"currency" : 6941.41,
"silken" : 6941.41,
"violet" : 6939.83,
"khan" : 6937.45,
"mastery" : 6937.45,
"objective" : 6929.54,
"plucked" : 6927.17,
"litter" : 6926.38,
"memorial" : 6925.59,
"bids" : 6923.21,
"fondly" : 6923.21,
"clapped" : 6920.84,
"tariff" : 6917.68,
"beneficial" : 6916.88,
"unsolicited" : 6916.88,
"reluctant" : 6914.51,
"separately" : 6906.60,
"patronage" : 6905.81,
"revenues" : 6904.23,
"dragon" : 6903.44,
"zeus" : 6901.85,
"mike" : 6899.48,
"ranges" : 6897.90,
"vexation" : 6897.11,
"indicates" : 6896.32,
"overheard" : 6895.52,
"tray" : 6894.73,
"raymond" : 6891.57,
"thereafter" : 6890.78,
"exporting" : 6889.99,
"mound" : 6889.99,
"taxation" : 6886.82,
"frenzy" : 6884.45,
"horizontal" : 6881.28,
"thirsty" : 6880.49,
"disputed" : 6879.70,
"charter" : 6876.54,
"redistribution" : 6876.54,
"boasted" : 6875.75,
"item" : 6875.75,
"moscow" : 6873.37,
"termination" : 6872.58,
"eminently" : 6871.79,
"suggestive" : 6871.00,
"linger" : 6869.42,
"shady" : 6868.63,
"calculation" : 6867.04,
"expansion" : 6864.67,
"mast" : 6859.92,
"confer" : 6859.13,
"sophia" : 6859.13,
"commanders" : 6853.59,
"pitied" : 6852.01,
"twist" : 6852.01,
"traditional" : 6851.22,
"involve" : 6850.43,
"interfered" : 6848.06,
"achilles" : 6846.47,
"wanton" : 6846.47,
"repay" : 6845.68,
"brother-in-law" : 6844.89,
"routine" : 6844.89,
"son-in-law" : 6842.52,
"gaul" : 6841.73,
"groom" : 6840.14,
"solve" : 6840.14,
"grassy" : 6836.98,
"tempt" : 6836.19,
"unsuccessful" : 6836.19,
"witty" : 6836.19,
"politician" : 6834.61,
"yearning" : 6834.61,
"lid" : 6833.02,
"noticing" : 6833.02,
"courtiers" : 6831.44,
"cheering" : 6829.86,
"bounty" : 6828.28,
"consequent" : 6826.70,
"renown" : 6824.32,
"regulation" : 6823.53,
"fowl" : 6820.37,
"mayor" : 6818.78,
"wrinkled" : 6817.99,
"defy" : 6817.20,
"threads" : 6817.20,
"violation" : 6817.20,
"junction" : 6816.41,
"boss" : 6814.83,
"particles" : 6814.04,
"glories" : 6810.87,
"signifies" : 6810.08,
"constrained" : 6806.92,
"paternal" : 6806.92,
"piles" : 6805.33,
"hardware" : 6804.54,
"engaging" : 6803.75,
"e.g." : 6802.17,
"peer" : 6802.17,
"counties" : 6801.38,
"mocking" : 6801.38,
"ch." : 6799.01,
"avoiding" : 6798.21,
"rebuke" : 6796.63,
"abolished" : 6793.47,
"cheers" : 6792.68,
"idiot" : 6791.09,
"3rd" : 6790.30,
"morbid" : 6790.30,
"wrung" : 6787.93,
"e-mail" : 6787.14,
"outcome" : 6782.39,
"gilt" : 6774.48,
"coldness" : 6768.94,
"applying" : 6768.15,
"strand" : 6761.82,
"renowned" : 6760.24,
"fishermen" : 6757.08,
"creative" : 6755.49,
"circus" : 6753.12,
"moustache" : 6753.12,
"proverb" : 6750.75,
"lowering" : 6746.79,
"biggest" : 6746.00,
"sly" : 6742.83,
"nursing" : 6741.25,
"boon" : 6739.67,
"weighing" : 6738.09,
"oklahoma" : 6735.71,
"brink" : 6734.13,
"degraded" : 6734.13,
"avenge" : 6731.76,
"hum" : 6730.97,
"minority" : 6730.97,
"spaniard" : 6730.97,
"ridges" : 6729.39,
"perils" : 6727.80,
"larry" : 6725.43,
"merchandise" : 6723.06,
"aloof" : 6721.47,
"despairing" : 6720.68,
"acquisition" : 6719.10,
"asylum" : 6718.31,
"chickens" : 6718.31,
"placid" : 6718.31,
"affirm" : 6715.94,
"trod" : 6715.94,
"gardener" : 6711.98,
"schedule" : 6711.19,
"calmness" : 6710.40,
"protector" : 6710.40,
"concealment" : 6707.23,
"trench" : 6704.86,
"fore" : 6699.32,
"accession" : 6689.83,
"h" : 6688.25,
"dey" : 6685.87,
"connexion" : 6685.08,
"cairo" : 6684.29,
"mend" : 6681.92,
"considers" : 6677.17,
"twenty-one" : 6677.17,
"municipal" : 6675.59,
"achievements" : 6674.80,
"cherish" : 6674.80,
"deserving" : 6674.80,
"exert" : 6672.42,
"riot" : 6672.42,
"veteran" : 6672.42,
"advancement" : 6670.05,
"inventor" : 6666.89,
"meek" : 6666.09,
"cameron" : 6662.93,
"hopelessly" : 6661.35,
"judicious" : 6661.35,
"tending" : 6658.18,
"testify" : 6657.39,
"governess" : 6656.60,
"orchestra" : 6655.81,
"garb" : 6655.02,
"condemnation" : 6653.44,
"foregoing" : 6652.65,
"bacon" : 6649.48,
"maternal" : 6648.69,
"wasting" : 6648.69,
"australian" : 6645.53,
"strata" : 6645.53,
"hushed" : 6644.73,
"maryland" : 6644.73,
"sculpture" : 6644.73,
"miniature" : 6640.78,
"corrections" : 6639.99,
"tangled" : 6638.41,
"completion" : 6631.28,
"regulated" : 6631.28,
"athenian" : 6629.70,
"flavor" : 6628.12,
"brand" : 6627.33,
"intimately" : 6625.75,
"unlimited" : 6625.75,
"dipped" : 6622.58,
"luggage" : 6621.79,
"inconsistent" : 6621.00,
"forsaken" : 6619.42,
"feebly" : 6618.63,
"woven" : 6617.84,
"lloyd" : 6617.04,
"rubbish" : 6617.04,
"tool" : 6617.04,
"spirited" : 6615.46,
"christendom" : 6614.67,
"chaos" : 6610.72,
"twinkling" : 6610.72,
"muffled" : 6609.13,
"accents" : 6607.55,
"accidentally" : 6603.60,
"degradation" : 6599.64,
"emancipation" : 6598.06,
"prosecution" : 6596.48,
"cleveland" : 6595.68,
"outbreak" : 6594.89,
"defending" : 6593.31,
"dwarf" : 6592.52,
"abundantly" : 6590.15,
"turner" : 6590.15,
"disadvantage" : 6586.19,
"abolition" : 6585.40,
"disregard" : 6585.40,
"deliberation" : 6584.61,
"filthy" : 6583.82,
"ak" : 6579.07,
"notifies" : 6577.49,
"dealings" : 6576.70,
"demonstrated" : 6576.70,
"paced" : 6575.91,
"tense" : 6575.91,
"drums" : 6573.53,
"interpreter" : 6573.53,
"vanish" : 6572.74,
"astray" : 6571.16,
"hen" : 6569.58,
"workman" : 6569.58,
"asunder" : 6566.41,
"baked" : 6566.41,
"baltimore" : 6566.41,
"bustle" : 6565.62,
"winged" : 6565.62,
"mentioning" : 6564.04,
"pastoral" : 6564.04,
"fabric" : 6563.25,
"trim" : 6563.25,
"musician" : 6558.50,
"twenty-two" : 6558.50,
"patty" : 6556.92,
"mentally" : 6553.75,
"wrecked" : 6553.75,
"discreet" : 6552.96,
"godfrey" : 6552.96,
"apostle" : 6552.17,
"ledge" : 6549.80,
"roast" : 6549.80,
"accessed" : 6547.42,
"preface" : 6546.63,
"convincing" : 6542.68,
"quiver" : 6537.93,
"stocks" : 6537.93,
"mourn" : 6534.77,
"commented" : 6533.98,
"redistribute" : 6532.39,
"precipice" : 6528.44,
"outdated" : 6527.65,
"juliet" : 6526.86,
"dialect" : 6526.06,
"elementary" : 6525.27,
"freight" : 6525.27,
"cowardice" : 6522.90,
"wipe" : 6522.90,
"deserts" : 6519.74,
"shelves" : 6517.36,
"denial" : 6514.20,
"1b" : 6504.70,
"traits" : 6503.91,
"denounced" : 6503.12,
"eric" : 6503.12,
"underground" : 6499.17,
"phantom" : 6498.37,
"whirling" : 6498.37,
"pecuniary" : 6494.42,
"dire" : 6493.63,
"hostilities" : 6493.63,
"gait" : 6492.84,
"it'll" : 6492.05,
"vividly" : 6490.46,
"instruct" : 6488.88,
"dickens" : 6486.51,
"puritan" : 6486.51,
"clutched" : 6484.93,
"acknowledgment" : 6484.13,
"conjunction" : 6481.76,
"oppressive" : 6480.97,
"intermediate" : 6480.18,
"formula" : 6478.60,
"hungary" : 6477.01,
"sneer" : 6469.10,
"ore" : 6468.31,
"plentiful" : 6468.31,
"plump" : 6467.52,
"combinations" : 6464.36,
"purest" : 6463.56,
"cheat" : 6462.77,
"doubly" : 6462.77,
"inadequate" : 6461.19,
"leslie" : 6459.61,
"blest" : 6458.82,
"forbear" : 6457.24,
"haunt" : 6454.07,
"treaties" : 6454.07,
"fearless" : 6453.28,
"constable" : 6452.49,
"enveloped" : 6450.91,
"enmity" : 6449.32,
"watson" : 6447.74,
"bridegroom" : 6446.16,
"curate" : 6445.37,
"developing" : 6445.37,
"frock" : 6442.99,
"mining" : 6439.83,
"audacity" : 6436.67,
"improper" : 6434.29,
"motto" : 6432.71,
"parisian" : 6431.92,
"faction" : 6424.80,
"architect" : 6422.43,
"melting" : 6421.63,
"delicately" : 6420.05,
"register" : 6419.26,
"heroine" : 6416.89,
"indefinite" : 6412.14,
"console" : 6408.19,
"defensive" : 6408.19,
"perceptible" : 6406.60,
"fruitless" : 6405.81,
"ransom" : 6401.06,
"surplus" : 6398.69,
"solicitude" : 6396.32,
"effectual" : 6393.15,
"shiver" : 6387.62,
"gal" : 6386.82,
"wed" : 6386.03,
"contemptuous" : 6385.24,
"plough" : 6382.87,
"snakes" : 6381.29,
"felicity" : 6380.50,
"reef" : 6380.50,
"outset" : 6379.70,
"constitutes" : 6378.91,
"lament" : 6378.91,
"tissue" : 6378.12,
"draft" : 6377.33,
"impelled" : 6376.54,
"epic" : 6374.96,
"fisherman" : 6369.42,
"hawaii" : 6368.63,
"obstinacy" : 6367.84,
"ulysses" : 6367.84,
"lemon" : 6367.05,
"voltaire" : 6365.46,
"hound" : 6364.67,
"measuring" : 6361.51,
"conscientious" : 6358.34,
"robber" : 6355.97,
"toy" : 6355.97,
"impart" : 6355.18,
"statute" : 6353.60,
"barry" : 6352.81,
"girdle" : 6352.01,
"basil" : 6348.06,
"rebellious" : 6348.06,
"stair" : 6346.48,
"biting" : 6344.89,
"consulting" : 6344.89,
"perseverance" : 6344.89,
"manila" : 6340.94,
"massacre" : 6339.36,
"cough" : 6338.57,
"blazed" : 6337.77,
"claude" : 6337.77,
"transition" : 6337.77,
"button" : 6334.61,
"headache" : 6332.24,
"tenant" : 6331.44,
"burns" : 6330.65,
"harmonious" : 6329.86,
"dreamy" : 6325.91,
"burgundy" : 6324.32,
"collections" : 6321.95,
"unkind" : 6321.16,
"inscribed" : 6319.58,
"cushions" : 6318.79,
"programme" : 6317.20,
"din" : 6316.41,
"laborious" : 6316.41,
"manufacturing" : 6315.62,
"markets" : 6312.46,
"zone" : 6308.50,
"humane" : 6306.92,
"ac" : 6306.13,
"fertility" : 6305.34,
"languid" : 6305.34,
"ninth" : 6304.55,
"curses" : 6303.76,
"introducing" : 6302.96,
"alcohol" : 6300.59,
"impending" : 6299.01,
"declining" : 6297.43,
"advantageous" : 6295.05,
"heal" : 6294.26,
"millennium" : 6294.26,
"karl" : 6293.47,
"ft." : 6291.10,
"staid" : 6289.51,
"planting" : 6287.93,
"theatrical" : 6287.93,
"spectator" : 6286.35,
"winchester" : 6283.19,
"greedy" : 6281.60,
"commonwealth" : 6280.81,
"suffrage" : 6280.81,
"tremulous" : 6280.81,
"commodities" : 6280.02,
"stuffed" : 6280.02,
"admitting" : 6275.27,
"aching" : 6273.69,
"ninety" : 6273.69,
"discomfort" : 6272.90,
"imperative" : 6272.90,
"montreal" : 6272.11,
"bobby" : 6271.32,
"bachelor" : 6269.74,
"geographical" : 6268.95,
"longest" : 6268.95,
"courageous" : 6266.57,
"carpenter" : 6259.45,
"sundays" : 6254.70,
"concluding" : 6253.12,
"danish" : 6252.33,
"steer" : 6251.54,
"influential" : 6249.96,
"surround" : 6249.96,
"random" : 6249.17,
"ounce" : 6248.38,
"afresh" : 6246.79,
"dictated" : 6239.67,
"ruddy" : 6239.67,
"rusty" : 6239.67,
"drown" : 6238.88,
"irving" : 6238.09,
"slide" : 6238.09,
"sow" : 6237.30,
"appalling" : 6236.51,
"profess" : 6234.93,
"sickly" : 6234.14,
"rides" : 6233.34,
"spoon" : 6233.34,
"imminent" : 6232.55,
"dominant" : 6230.97,
"leadership" : 6224.64,
"pinch" : 6223.06,
"wearily" : 6223.06,
"ducks" : 6222.27,
"diary" : 6219.10,
"duchess" : 6218.31,
"regain" : 6218.31,
"rum" : 6217.52,
"churchyard" : 6214.36,
"fondness" : 6214.36,
"apprehend" : 6212.77,
"ordinarily" : 6211.19,
"quicker" : 6211.19,
"thereon" : 6211.19,
"ni" : 6209.61,
"balloon" : 6208.03,
"individuality" : 6208.03,
"securely" : 6208.03,
"connecting" : 6207.24,
"celebrate" : 6206.45,
"bluff" : 6205.65,
"dawned" : 6205.65,
"amiss" : 6204.86,
"chalk" : 6203.28,
"sticking" : 6201.70,
"fuss" : 6200.91,
"dazed" : 6199.33,
"deputy" : 6199.33,
"forsake" : 6197.74,
"automobile" : 6196.95,
"discussions" : 6195.37,
"harrison" : 6195.37,
"refreshment" : 6195.37,
"amendment" : 6194.58,
"appealing" : 6189.04,
"eden" : 6189.04,
"vertical" : 6188.25,
"insufficient" : 6184.29,
"manchester" : 6182.71,
"hem" : 6179.55,
"gorge" : 6177.96,
"baptized" : 6175.59,
"damn" : 6174.01,
"silvery" : 6173.22,
"pastor" : 6171.64,
"inherent" : 6170.05,
"preventing" : 6169.26,
"inference" : 6168.47,
"advertisement" : 6167.68,
"mutton" : 6167.68,
"packing" : 6167.68,
"enclosure" : 6165.31,
"theft" : 6164.52,
"publisher" : 6162.93,
"spontaneous" : 6161.35,
"otto" : 6158.98,
"rats" : 6158.98,
"apparition" : 6158.19,
"refreshing" : 6158.19,
"irene" : 6156.60,
"sweetheart" : 6156.60,
"renounce" : 6155.02,
"lifeless" : 6154.23,
"adore" : 6153.44,
"vinegar" : 6149.48,
"normandy" : 6147.11,
"uncovered" : 6147.11,
"utility" : 6146.32,
"orphan" : 6144.74,
"symbols" : 6143.15,
"gracefully" : 6142.36,
"mightily" : 6142.36,
"peculiarity" : 6142.36,
"ash" : 6141.57,
"floods" : 6139.20,
"partake" : 6138.41,
"contemptible" : 6137.62,
"deities" : 6135.24,
"profane" : 6134.45,
"foreseen" : 6133.66,
"ti" : 6132.87,
"conceit" : 6132.08,
"commend" : 6129.71,
"twelfth" : 6129.71,
"bristol" : 6127.33,
"manifestation" : 6126.54,
"revive" : 6126.54,
"prone" : 6123.38,
"connect" : 6121.79,
"princely" : 6117.84,
"overtake" : 6117.05,
"improving" : 6115.47,
"downwards" : 6112.30,
"ferocious" : 6111.51,
"intervention" : 6110.72,
"subsistence" : 6109.14,
"susceptible" : 6109.14,
"tunnel" : 6109.14,
"disciple" : 6108.35,
"revival" : 6107.55,
"twins" : 6106.76,
"ivy" : 6105.97,
"puzzle" : 6103.60,
"citadel" : 6100.43,
"temporarily" : 6100.43,
"despotism" : 6099.64,
"internet" : 6099.64,
"mechanism" : 6098.85,
"stoop" : 6098.85,
"directors" : 6097.27,
"mathematics" : 6095.69,
"raft" : 6095.69,
"fade" : 6094.90,
"soothe" : 6093.31,
"pork" : 6092.52,
"substituted" : 6092.52,
"physically" : 6091.73,
"brilliancy" : 6086.98,
"dot" : 6086.98,
"loaf" : 6086.19,
"expanse" : 6079.86,
"shocking" : 6079.07,
"rudely" : 6075.12,
"isle" : 6074.33,
"balanced" : 6072.74,
"extracted" : 6071.95,
"fable" : 6071.95,
"matches" : 6071.95,
"index" : 6068.00,
"gerard" : 6066.41,
"cigars" : 6065.62,
"liver" : 6063.25,
"transmit" : 6063.25,
"dispatch" : 6061.67,
"onto" : 6056.13,
"veranda" : 6056.13,
"dip" : 6054.55,
"inexplicable" : 6052.97,
"liar" : 6052.17,
"diminish" : 6049.80,
"dungeon" : 6045.85,
"unit" : 6043.47,
"pagan" : 6042.68,
"phillips" : 6040.31,
"brig" : 6039.52,
"monopoly" : 6039.52,
"rim" : 6039.52,
"sordid" : 6031.60,
"complaining" : 6030.81,
"temperate" : 6030.81,
"chat" : 6030.02,
"gambling" : 6030.02,
"maps" : 6028.44,
"amber" : 6025.28,
"trot" : 6025.28,
"howl" : 6021.32,
"shipping" : 6021.32,
"ton" : 6021.32,
"magazines" : 6020.53,
"bricks" : 6017.36,
"submarine" : 6016.57,
"roberts" : 6015.78,
"cumberland" : 6012.62,
"cecil" : 6007.87,
"semblance" : 6007.08,
"palestine" : 6006.29,
"perpendicular" : 6005.50,
"regardless" : 6005.50,
"fervent" : 6004.71,
"sane" : 6004.71,
"wreath" : 6003.92,
"animation" : 6001.54,
"earthquake" : 5999.96,
"sloping" : 5998.38,
"smoothly" : 5998.38,
"tension" : 5998.38,
"intrigues" : 5996.00,
"fearfully" : 5995.21,
"macaulay" : 5994.42,
"laboratory" : 5992.05,
"cork" : 5991.26,
"comments" : 5986.51,
"whale" : 5986.51,
"wedded" : 5985.72,
"whiteness" : 5984.93,
"convicted" : 5984.14,
"deception" : 5982.55,
"paved" : 5982.55,
"scruple" : 5982.55,
"paintings" : 5981.76,
"therewith" : 5981.76,
"religions" : 5980.97,
"governing" : 5978.60,
"colleagues" : 5977.81,
"shrinking" : 5977.02,
"tickets" : 5975.43,
"prophetic" : 5974.64,
"undergo" : 5974.64,
"hare" : 5973.06,
"haze" : 5972.27,
"poisonous" : 5971.48,
"omit" : 5969.90,
"beware" : 5969.11,
"sagacity" : 5965.94,
"concession" : 5965.15,
"worker" : 5965.15,
"ted" : 5962.78,
"incline" : 5961.99,
"caste" : 5960.40,
"leapt" : 5960.40,
"dissatisfied" : 5955.66,
"hardest" : 5954.07,
"self-control" : 5954.07,
"toilet" : 5953.28,
"buddha" : 5951.70,
"offense" : 5951.70,
"woodland" : 5951.70,
"gentry" : 5950.91,
"starvation" : 5947.74,
"grudge" : 5946.95,
"penance" : 5946.16,
"tips" : 5946.16,
"rooted" : 5944.58,
"outburst" : 5943.00,
"fortitude" : 5939.83,
"turk" : 5939.04,
"devour" : 5937.46,
"malignant" : 5935.88,
"accorded" : 5932.71,
"brandon" : 5931.92,
"anticipate" : 5931.13,
"speechless" : 5931.13,
"inquisition" : 5929.55,
"eccentric" : 5927.97,
"anecdote" : 5927.18,
"annals" : 5927.18,
"scrutiny" : 5924.01,
"burroughs" : 5920.85,
"rhythm" : 5918.47,
"discord" : 5917.68,
"marius" : 5912.93,
"diversion" : 5908.98,
"archie" : 5905.02,
"rat" : 5905.02,
"knit" : 5902.65,
"correspond" : 5901.07,
"detain" : 5901.07,
"dis" : 5901.07,
"esp" : 5901.07,
"interpret" : 5900.28,
"vehement" : 5898.69,
"soda" : 5897.90,
"naughty" : 5894.74,
"salon" : 5893.16,
"operate" : 5890.78,
"idly" : 5889.99,
"imperious" : 5889.20,
"peru" : 5889.20,
"candid" : 5888.41,
"whig" : 5888.41,
"blooming" : 5886.83,
"wharf" : 5886.04,
"disgraceful" : 5883.66,
"stunned" : 5883.66,
"redemption" : 5881.29,
"drain" : 5878.12,
"wage" : 5878.12,
"cooper" : 5874.96,
"embassy" : 5874.96,
"unfinished" : 5874.17,
"nasty" : 5872.59,
"impetuous" : 5871.00,
"cemetery" : 5868.63,
"oblivion" : 5868.63,
"prohibited" : 5867.05,
"breeches" : 5866.26,
"abound" : 5864.68,
"christine" : 5860.72,
"frivolous" : 5855.97,
"hugo" : 5855.18,
"essays" : 5854.39,
"plaster" : 5852.81,
"tap" : 5850.44,
"chairman" : 5848.06,
"dismiss" : 5848.06,
"katherine" : 5848.06,
"provoke" : 5848.06,
"reside" : 5844.11,
"deficient" : 5842.52,
"decoration" : 5840.94,
"heroism" : 5840.15,
"toe" : 5840.15,
"wade" : 5839.36,
"apparel" : 5836.19,
"hazel" : 5836.19,
"inability" : 5836.19,
"farthest" : 5833.82,
"invent" : 5831.45,
"knave" : 5831.45,
"twain" : 5829.07,
"carelessness" : 5826.70,
"affectation" : 5822.75,
"connections" : 5821.16,
"climax" : 5820.37,
"avowed" : 5816.42,
"industries" : 5816.42,
"brood" : 5813.25,
"tempting" : 5812.46,
"define" : 5804.55,
"antwerp" : 5803.76,
"forefathers" : 5803.76,
"stretches" : 5802.18,
"gratifying" : 5801.38,
"plight" : 5800.59,
"restricted" : 5800.59,
"cupboard" : 5799.01,
"ludicrous" : 5798.22,
"alms" : 5797.43,
"colossal" : 5795.06,
"stupidity" : 5791.10,
"monotony" : 5790.31,
"stimulus" : 5790.31,
"vigilance" : 5788.73,
"digest" : 5784.77,
"vale" : 5784.77,
"overcoat" : 5783.19,
"colorado" : 5782.40,
"wink" : 5780.82,
"nous" : 5775.28,
"rack" : 5775.28,
"incomprehensible" : 5773.70,
"antagonist" : 5772.90,
"methinks" : 5767.37,
"barley" : 5764.20,
"plateau" : 5758.66,
"superintendent" : 5754.71,
"indescribable" : 5744.42,
"expanded" : 5743.63,
"presentation" : 5742.84,
"archbishop" : 5742.05,
"devise" : 5740.47,
"rubber" : 5738.89,
"adieu" : 5738.09,
"exclude" : 5737.30,
"carts" : 5736.51,
"lone" : 5734.93,
"whisky" : 5734.14,
"abuses" : 5732.56,
"inflict" : 5730.97,
"nightfall" : 5730.97,
"counts" : 5730.18,
"chocolate" : 5728.60,
"privileged" : 5728.60,
"hermit" : 5727.81,
"exultation" : 5727.02,
"overtook" : 5727.02,
"coincidence" : 5726.23,
"scratch" : 5726.23,
"screw" : 5723.85,
"caravan" : 5723.06,
"divert" : 5719.90,
"eliza" : 5719.90,
"comparing" : 5717.52,
"hood" : 5716.73,
"explore" : 5715.15,
"glove" : 5715.15,
"chaste" : 5713.57,
"whirl" : 5713.57,
"adventurous" : 5707.24,
"skipper" : 5703.28,
"tiresome" : 5702.49,
"implements" : 5701.70,
"recompense" : 5701.70,
"plank" : 5700.91,
"insure" : 5696.96,
"laboured" : 5696.16,
"exaggeration" : 5691.42,
"mi" : 5689.83,
"shepherds" : 5689.04,
"lilies" : 5688.25,
"ballad" : 5685.88,
"befall" : 5683.51,
"cylinder" : 5682.71,
"teddy" : 5676.39,
"summary" : 5671.64,
"daresay" : 5669.27,
"photographs" : 5669.27,
"colleges" : 5664.52,
"dissolution" : 5664.52,
"geneva" : 5662.94,
"marches" : 5662.15,
"instituted" : 5655.02,
"seals" : 5655.02,
"vehemence" : 5654.23,
"chaplain" : 5653.44,
"knots" : 5653.44,
"wail" : 5650.28,
"kneel" : 5647.11,
"unlikely" : 5644.74,
"deceit" : 5643.95,
"challenged" : 5640.78,
"geography" : 5639.20,
"herald" : 5637.62,
"lowly" : 5636.83,
"peep" : 5636.83,
"swarm" : 5636.04,
"clarke" : 5633.66,
"joyfully" : 5633.66,
"engraved" : 5632.08,
"ll" : 5632.08,
"bowels" : 5631.29,
"purposely" : 5629.71,
"blindness" : 5628.92,
"systematic" : 5626.54,
"virtually" : 5624.96,
"conformity" : 5621.80,
"remedies" : 5617.84,
"maxim" : 5617.05,
"indexes" : 5613.89,
"marshall" : 5613.89,
"baking" : 5613.09,
"invincible" : 5612.30,
"impertinent" : 5611.51,
"bust" : 5609.93,
"visage" : 5609.93,
"intuition" : 5609.14,
"mingle" : 5609.14,
"bathing" : 5607.56,
"arched" : 5606.77,
"investment" : 5605.97,
"tabernacle" : 5604.39,
"86" : 5601.23,
"client" : 5601.23,
"ghostly" : 5601.23,
"furs" : 5600.44,
"catalogue" : 5598.85,
"dock" : 5598.06,
"tenor" : 5598.06,
"arouse" : 5597.27,
"verbal" : 5592.53,
"excessively" : 5590.94,
"brazil" : 5588.57,
"strenuous" : 5587.78,
"irishman" : 5585.41,
"recess" : 5582.24,
"unclean" : 5581.45,
"psalms" : 5580.66,
"analogy" : 5579.08,
"chemistry" : 5579.08,
"peninsula" : 5579.08,
"infer" : 5578.28,
"maritime" : 5577.49,
"secular" : 5576.70,
"hawk" : 5574.33,
"rein" : 5573.54,
"averted" : 5572.75,
"bake" : 5572.75,
"constantine" : 5571.96,
"oracle" : 5571.96,
"alley" : 5568.00,
"softness" : 5568.00,
"pierce" : 5565.63,
"spinning" : 5564.84,
"snatch" : 5563.25,
"manufactured" : 5561.67,
"launch" : 5560.88,
"psychology" : 5560.88,
"worms" : 5560.88,
"regulate" : 5560.09,
"farming" : 5557.72,
"fasten" : 5556.92,
"actress" : 5553.76,
"etiquette" : 5551.39,
"theater" : 5551.39,
"thanksgiving" : 5550.60,
"valor" : 5549.01,
"untouched" : 5548.22,
"tactics" : 5547.43,
"drug" : 5546.64,
"adverse" : 5545.06,
"gaunt" : 5544.27,
"conducting" : 5542.68,
"veritable" : 5541.10,
"overtaken" : 5539.52,
"distorted" : 5538.73,
"rosa" : 5538.73,
"nina" : 5537.94,
"quart" : 5537.94,
"caprice" : 5536.35,
"candy" : 5531.61,
"obliging" : 5525.28,
"planets" : 5525.28,
"soothed" : 5524.49,
"sic" : 5523.70,
"opium" : 5520.53,
"pavilion" : 5520.53,
"strait" : 5518.16,
"sanguine" : 5516.58,
"cords" : 5512.62,
"odour" : 5512.62,
"trout" : 5510.25,
"paste" : 5509.46,
"regularity" : 5504.71,
"metallic" : 5497.59,
"scrap" : 5496.80,
"convict" : 5495.22,
"instructive" : 5494.42,
"investigate" : 5492.05,
"celtic" : 5490.47,
"package" : 5488.10,
"pirate" : 5486.51,
"fiend" : 5484.93,
"moan" : 5484.93,
"revealing" : 5484.93,
"trades" : 5483.35,
"rounds" : 5481.77,
"accomplishments" : 5479.39,
"crawl" : 5477.81,
"aft" : 5476.23,
"prevalent" : 5473.86,
"role" : 5473.86,
"dose" : 5471.48,
"evans" : 5471.48,
"hypocrisy" : 5470.69,
"l" : 5469.90,
"salmon" : 5468.32,
"snap" : 5468.32,
"alma" : 5465.94,
"magical" : 5464.36,
"tire" : 5463.57,
"hetty" : 5462.78,
"impenetrable" : 5462.78,
"geese" : 5461.99,
"madly" : 5460.41,
"manifold" : 5460.41,
"noticeable" : 5460.41,
"pudding" : 5460.41,
"volcanic" : 5459.61,
"locke" : 5458.82,
"magnetic" : 5458.82,
"deals" : 5458.03,
"core" : 5456.45,
"decency" : 5455.66,
"observance" : 5455.66,
"durst" : 5448.54,
"scratched" : 5448.54,
"predecessor" : 5446.96,
"diplomacy" : 5446.17,
"wert" : 5446.17,
"impartial" : 5444.58,
"disinterested" : 5440.63,
"wig" : 5440.63,
"pump" : 5439.05,
"swedish" : 5437.46,
"norfolk" : 5436.67,
"reigns" : 5433.51,
"similarly" : 5432.72,
"reap" : 5431.13,
"dam" : 5430.34,
"facilities" : 5430.34,
"slippery" : 5430.34,
"transformation" : 5427.97,
"oxygen" : 5427.18,
"suburbs" : 5427.18,
"dares" : 5426.39,
"ornamental" : 5425.60,
"pondered" : 5424.80,
"fringe" : 5423.22,
"raiment" : 5421.64,
"henrietta" : 5420.85,
"wellington" : 5420.85,
"foreman" : 5419.27,
"feat" : 5418.48,
"thirteenth" : 5418.48,
"sultan" : 5416.89,
"certificate" : 5416.10,
"rue" : 5415.31,
"heresy" : 5413.73,
"arabia" : 5410.56,
"medal" : 5409.77,
"location" : 5405.03,
"ether" : 5404.24,
"ruby" : 5401.86,
"heading" : 5396.32,
"subdue" : 5394.74,
"adorn" : 5391.58,
"ancestor" : 5391.58,
"warmer" : 5391.58,
"cluster" : 5389.99,
"quotation" : 5389.99,
"fullest" : 5389.20,
"exposition" : 5388.41,
"custody" : 5386.04,
"thermometer" : 5386.04,
"plausible" : 5383.67,
"toss" : 5381.29,
"desperation" : 5378.92,
"rhetoric" : 5378.92,
"scornful" : 5378.13,
"bailey" : 5376.55,
"rung" : 5376.55,
"civility" : 5375.75,
"dingy" : 5375.75,
"scaffold" : 5374.96,
"concentration" : 5374.17,
"avarice" : 5373.38,
"scrape" : 5373.38,
"pools" : 5371.80,
"oar" : 5370.22,
"flutter" : 5369.43,
"martyr" : 5369.43,
"handy" : 5368.63,
"montague" : 5368.63,
"bait" : 5367.84,
"login" : 5367.84,
"commotion" : 5367.05,
"congenial" : 5367.05,
"drawers" : 5365.47,
"telescope" : 5365.47,
"deposits" : 5363.10,
"edwards" : 5361.51,
"craving" : 5360.72,
"bureau" : 5359.14,
"oscar" : 5358.35,
"speculative" : 5358.35,
"huddled" : 5356.77,
"diverse" : 5355.18,
"slice" : 5354.39,
"renaissance" : 5352.81,
"angelo" : 5348.86,
"meg" : 5347.27,
"murderous" : 5347.27,
"serenity" : 5347.27,
"perspiration" : 5346.48,
"coventry" : 5344.90,
"impudent" : 5344.11,
"ardor" : 5343.32,
"necklace" : 5342.53,
"alight" : 5341.74,
"stimulated" : 5339.36,
"clifford" : 5337.78,
"steadfast" : 5333.82,
"genoa" : 5332.24,
"anglo-saxon" : 5330.66,
"courier" : 5328.29,
"inflamed" : 5328.29,
"xi" : 5328.29,
"drill" : 5325.91,
"spelling" : 5323.54,
"respond" : 5322.75,
"seriousness" : 5321.17,
"fourteenth" : 5319.58,
"womb" : 5319.58,
"literal" : 5317.21,
"singers" : 5317.21,
"usefulness" : 5315.63,
"cloudy" : 5314.84,
"mortality" : 5314.84,
"profusion" : 5314.84,
"fleeting" : 5314.05,
"twentieth" : 5314.05,
"maturity" : 5313.25,
"surf" : 5310.88,
"weed" : 5307.72,
"phases" : 5306.13,
"overcame" : 5304.55,
"womanhood" : 5304.55,
"envious" : 5302.97,
"tapped" : 5302.18,
"latent" : 5300.60,
"whiskey" : 5298.22,
"relatively" : 5292.69,
"forbidding" : 5290.31,
"cleopatra" : 5288.73,
"willow" : 5288.73,
"mathematical" : 5286.36,
"sojourn" : 5283.19,
"booty" : 5282.40,
"camel" : 5280.03,
"implore" : 5280.03,
"amateur" : 5279.24,
"morally" : 5276.07,
"qualifications" : 5273.70,
"gasp" : 5271.32,
"101" : 5270.53,
"gliding" : 5268.95,
"tested" : 5263.41,
"racing" : 5261.83,
"brightest" : 5261.04,
"joel" : 5260.25,
"extremes" : 5257.88,
"damascus" : 5257.08,
"labored" : 5256.29,
"peggy" : 5255.50,
"exit" : 5252.34,
"originality" : 5251.55,
"humming" : 5248.38,
"isolation" : 5247.59,
"sometime" : 5246.80,
"glee" : 5246.01,
"adult" : 5245.22,
"solace" : 5244.43,
"biography" : 5242.84,
"ff." : 5241.26,
"hardship" : 5241.26,
"lied" : 5241.26,
"donkey" : 5239.68,
"trader" : 5238.89,
"rumour" : 5234.93,
"amply" : 5231.77,
"confide" : 5231.77,
"favors" : 5231.77,
"perspective" : 5227.81,
"belgian" : 5226.23,
"withstand" : 5225.44,
"robust" : 5224.65,
"pro" : 5223.07,
"val" : 5222.27,
"eats" : 5221.48,
"snare" : 5220.69,
"monthly" : 5219.90,
"wines" : 5215.95,
"ignore" : 5215.15,
"envoy" : 5214.36,
"flown" : 5214.36,
"reverie" : 5213.57,
"jehovah" : 5207.24,
"contrive" : 5206.45,
"chatter" : 5205.66,
"judas" : 5205.66,
"nourishment" : 5204.87,
"reforms" : 5203.29,
"clatter" : 5201.70,
"adrian" : 5198.54,
"allude" : 5197.75,
"corrupted" : 5197.75,
"thorn" : 5196.17,
"junior" : 5193.79,
"tony" : 5187.46,
"calcutta" : 5185.88,
"re" : 5185.88,
"holt" : 5185.09,
"psychological" : 5182.72,
"constancy" : 5181.14,
"misunderstood" : 5180.34,
"signals" : 5177.97,
"drying" : 5175.60,
"harshly" : 5174.81,
"distressing" : 5170.85,
"novelist" : 5170.85,
"cyril" : 5169.27,
"editors" : 5168.48,
"intricate" : 5167.69,
"limestone" : 5166.10,
"forty-five" : 5165.31,
"collision" : 5164.52,
"pebbles" : 5163.73,
"willie" : 5163.73,
"knitting" : 5162.94,
"ordeal" : 5160.57,
"foresee" : 5158.98,
"peas" : 5157.40,
"repast" : 5156.61,
"supplying" : 5152.65,
"clan" : 5151.86,
"abject" : 5150.28,
"dart" : 5149.49,
"berth" : 5148.70,
"bridal" : 5148.70,
"indirect" : 5148.70,
"unnoticed" : 5148.70,
"tint" : 5147.91,
"insults" : 5145.53,
"precedent" : 5143.95,
"twisting" : 5142.37,
"bully" : 5139.21,
"vacation" : 5138.41,
"'ll" : 5133.67,
"canon" : 5132.88,
"aisle" : 5131.29,
"click" : 5131.29,
"inspiring" : 5131.29,
"oval" : 5130.50,
"impracticable" : 5128.92,
"delirium" : 5127.34,
"cedar" : 5126.55,
"contradict" : 5125.76,
"ingratitude" : 5125.76,
"soften" : 5125.76,
"bewilderment" : 5124.96,
"servitude" : 5123.38,
"comely" : 5122.59,
"stump" : 5121.80,
"redeem" : 5121.01,
"spun" : 5118.64,
"elastic" : 5117.84,
"poultry" : 5115.47,
"horseman" : 5114.68,
"dictionary" : 5112.31,
"prettiest" : 5112.31,
"adoration" : 5109.93,
"icel." : 5109.14,
"wager" : 5109.14,
"involving" : 5107.56,
"pathway" : 5104.40,
"essex" : 5102.81,
"wistful" : 5102.81,
"advent" : 5102.02,
"gear" : 5102.02,
"celebration" : 5100.44,
"conceivable" : 5100.44,
"drowning" : 5100.44,
"faintest" : 5097.28,
"acquiring" : 5094.90,
"befell" : 5092.53,
"good-looking" : 5092.53,
"wares" : 5092.53,
"rendezvous" : 5091.74,
"snug" : 5091.74,
"watery" : 5091.74,
"accompaniment" : 5090.95,
"chaps" : 5090.95,
"crawling" : 5088.57,
"lumber" : 5087.78,
"publishing" : 5087.78,
"customer" : 5086.99,
"mediaeval" : 5084.62,
"prints" : 5079.87,
"lavish" : 5078.29,
"md" : 5074.33,
"genesis" : 5069.59,
"rug" : 5068.79,
"analogous" : 5066.42,
"eleventh" : 5066.42,
"noah" : 5066.42,
"galley" : 5062.47,
"partition" : 5062.47,
"blunder" : 5061.67,
"glasgow" : 5061.67,
"fanciful" : 5060.09,
"ham" : 5057.72,
"rainbow" : 5056.14,
"sentinel" : 5055.34,
"hereby" : 5053.76,
"outlook" : 5053.76,
"smitten" : 5051.39,
"unmarried" : 5050.60,
"mice" : 5049.81,
"installed" : 5049.02,
"vivacity" : 5049.02,
"marking" : 5048.22,
"aesthetic" : 5045.85,
"consume" : 5045.85,
"resent" : 5044.27,
"pose" : 5041.90,
"contentment" : 5040.31,
"sterling" : 5039.52,
"veneration" : 5038.73,
"p.m." : 5037.15,
"barking" : 5034.78,
"bower" : 5034.78,
"organism" : 5034.78,
"unintelligible" : 5032.40,
"emphatic" : 5031.61,
"occurring" : 5030.03,
"factors" : 5029.24,
"guise" : 5027.66,
"editorial" : 5026.07,
"impudence" : 5025.28,
"midday" : 5022.91,
"corporal" : 5022.12,
"sg" : 5022.12,
"aright" : 5018.95,
"nigger" : 5015.79,
"lily" : 5012.62,
"noun" : 5007.09,
"scout" : 5007.09,
"spit" : 5007.09,
"cursing" : 5006.29,
"friedrich" : 5005.50,
"manifestly" : 5004.71,
"marco" : 5004.71,
"battalion" : 5000.76,
"heritage" : 5000.76,
"brotherhood" : 4999.17,
"nun" : 4999.17,
"wad" : 4997.59,
"folding" : 4995.22,
"discerned" : 4994.43,
"powerfully" : 4994.43,
"mitchell" : 4990.47,
"helpful" : 4989.68,
"persist" : 4989.68,
"ellis" : 4985.73,
"frigate" : 4984.93,
"spotted" : 4984.93,
"atoms" : 4983.35,
"curves" : 4983.35,
"outlet" : 4981.77,
"erroneous" : 4979.40,
"violated" : 4979.40,
"withheld" : 4978.60,
"fairies" : 4975.44,
"inherit" : 4975.44,
"sledge" : 4975.44,
"taller" : 4973.07,
"supervision" : 4972.28,
"butt" : 4971.48,
"handsomely" : 4971.48,
"tank" : 4965.16,
"velocity" : 4965.16,
"arctic" : 4963.57,
"colleague" : 4963.57,
"pins" : 4962.78,
"butcher" : 4961.99,
"drowsy" : 4961.99,
"butterfly" : 4960.41,
"chart" : 4957.24,
"twin" : 4957.24,
"sunken" : 4954.08,
"exasperated" : 4950.12,
"narrowly" : 4950.12,
"collins" : 4948.54,
"insulting" : 4946.96,
"deficiency" : 4945.38,
"operating" : 4943.79,
"overthrown" : 4942.21,
"gallows" : 4941.42,
"diligent" : 4940.63,
"hindu" : 4940.63,
"blunt" : 4939.84,
"omen" : 4939.05,
"bleak" : 4938.26,
"vehemently" : 4938.26,
"wretchedness" : 4935.88,
"e'er" : 4935.09,
"ensure" : 4931.93,
"denotes" : 4931.14,
"sentenced" : 4927.97,
"unfair" : 4927.18,
"encampment" : 4923.23,
"possessor" : 4923.23,
"absorbing" : 4921.64,
"descendant" : 4920.85,
"sub" : 4918.48,
"drugs" : 4917.69,
"engineers" : 4917.69,
"independently" : 4915.31,
"bucket" : 4914.52,
"clerical" : 4914.52,
"ache" : 4911.36,
"glitter" : 4911.36,
"ordinance" : 4906.61,
"bamboo" : 4905.82,
"amsterdam" : 4905.03,
"vocation" : 4899.49,
"admirer" : 4898.70,
"limp" : 4897.91,
"pallid" : 4897.91,
"mildly" : 4893.16,
"organisation" : 4891.58,
"timothy" : 4891.58,
"dealer" : 4890.79,
"yorkshire" : 4890.79,
"auspicious" : 4886.83,
"deuce" : 4882.88,
"emblem" : 4882.88,
"gibson" : 4882.88,
"primarily" : 4882.88,
"reducing" : 4881.30,
"ritual" : 4877.34,
"decorations" : 4876.55,
"thigh" : 4875.76,
"groaning" : 4874.18,
"scant" : 4871.80,
"fiscal" : 4871.01,
"mien" : 4871.01,
"charging" : 4867.85,
"cor" : 4867.85,
"railing" : 4867.85,
"peers" : 4866.26,
"inferred" : 4865.47,
"sanctity" : 4865.47,
"accumulation" : 4863.89,
"cynical" : 4861.52,
"inspector" : 4861.52,
"wardrobe" : 4859.14,
"jesuit" : 4854.40,
"texture" : 4853.61,
"adjustment" : 4852.02,
"epistle" : 4851.23,
"adventurer" : 4850.44,
"priesthood" : 4850.44,
"seaman" : 4849.65,
"turbulent" : 4849.65,
"chant" : 4844.90,
"marsh" : 4844.90,
"palmer" : 4844.90,
"unaware" : 4844.11,
"vase" : 4843.32,
"ty" : 4839.37,
"initial" : 4837.78,
"baths" : 4836.20,
"weighty" : 4836.20,
"minimum" : 4835.41,
"correction" : 4829.87,
"morsel" : 4829.08,
"overlook" : 4828.29,
"meagre" : 4827.50,
"unanimous" : 4826.71,
"magician" : 4824.33,
"mystical" : 4824.33,
"twenty-three" : 4823.54,
"inhabit" : 4822.75,
"shaggy" : 4822.75,
"unaccountable" : 4822.75,
"nightmare" : 4818.80,
"carbon" : 4818.00,
"coil" : 4818.00,
"lawless" : 4818.00,
"stairway" : 4818.00,
"willingness" : 4818.00,
"sarcasm" : 4815.63,
"crisp" : 4810.88,
"jerk" : 4810.09,
"tout" : 4808.51,
"vocabulary" : 4808.51,
"stroll" : 4806.14,
"poorly" : 4805.35,
"composing" : 4804.56,
"parliamentary" : 4804.56,
"controlling" : 4803.76,
"fitness" : 4803.76,
"thoughtless" : 4802.97,
"soames" : 4802.18,
"temperance" : 4802.18,
"illumination" : 4801.39,
"translations" : 4800.60,
"martyrdom" : 4799.02,
"mellow" : 4798.23,
"nationality" : 4795.06,
"jam" : 4794.27,
"austere" : 4792.69,
"shoots" : 4791.11,
"casually" : 4789.52,
"pensive" : 4788.73,
"flavour" : 4787.15,
"nets" : 4786.36,
"dice" : 4784.78,
"satisfactorily" : 4783.99,
"shrunk" : 4783.19,
"administer" : 4781.61,
"ante" : 4781.61,
"swine" : 4781.61,
"baptist" : 4780.03,
"listener" : 4778.45,
"unimportant" : 4778.45,
"genera" : 4776.87,
"contrivance" : 4776.07,
"deplorable" : 4776.07,
"museum" : 4776.07,
"benefactor" : 4773.70,
"tints" : 4772.12,
"alphabet" : 4770.54,
"rout" : 4768.16,
"scatter" : 4767.37,
"boer" : 4766.58,
"ftp" : 4766.58,
"steve" : 4766.58,
"extant" : 4765.79,
"bohemia" : 4765.00,
"misunderstanding" : 4765.00,
"universities" : 4765.00,
"dexterity" : 4762.63,
"rag" : 4759.46,
"inseparable" : 4758.67,
"punch" : 4757.09,
"brazen" : 4756.30,
"economical" : 4756.30,
"pernicious" : 4756.30,
"craig" : 4755.50,
"mythology" : 4755.50,
"drained" : 4754.71,
"bolted" : 4753.92,
"abel" : 4753.13,
"stride" : 4751.55,
"circumference" : 4749.18,
"meddle" : 4749.18,
"axis" : 4747.59,
"gum" : 4746.80,
"las" : 4746.01,
"kinder" : 4744.43,
"closes" : 4742.06,
"ferocity" : 4742.06,
"giddy" : 4740.47,
"secluded" : 4740.47,
"resisting" : 4737.31,
"satisfying" : 4735.73,
"reliable" : 4734.94,
"disgusting" : 4733.35,
"thirty-six" : 4733.35,
"ethical" : 4730.98,
"raleigh" : 4729.40,
"crouching" : 4728.61,
"lash" : 4728.61,
"recital" : 4727.02,
"buddhist" : 4726.23,
"collapse" : 4723.86,
"unsatisfactory" : 4721.49,
"lore" : 4718.32,
"varies" : 4718.32,
"mainland" : 4715.95,
"scot" : 4715.95,
"repute" : 4715.16,
"cushion" : 4714.37,
"confound" : 4712.78,
"scrub" : 4712.78,
"myth" : 4710.41,
"flights" : 4709.62,
"oats" : 4708.04,
"layers" : 4707.25,
"ownership" : 4707.25,
"cape" : 4706.45,
"glimmer" : 4704.87,
"scare" : 4704.87,
"waked" : 4704.87,
"bengal" : 4703.29,
"scrupulous" : 4703.29,
"equals" : 4700.92,
"redress" : 4700.13,
"brake" : 4698.54,
"nut" : 4698.54,
"stability" : 4698.54,
"crafty" : 4697.75,
"kirk" : 4697.75,
"bough" : 4696.17,
"momentous" : 4696.17,
"albeit" : 4695.38,
"enlarge" : 4694.59,
"hardness" : 4694.59,
"civilised" : 4693.80,
"dotted" : 4692.21,
"defiant" : 4691.42,
"timidity" : 4691.42,
"solar" : 4687.47,
"heartless" : 4683.51,
"thomson" : 4681.93,
"mat" : 4681.14,
"shun" : 4681.14,
"raid" : 4679.56,
"disclose" : 4678.76,
"suppression" : 4674.02,
"puff" : 4673.23,
"juncture" : 4670.85,
"beak" : 4670.06,
"unjustly" : 4668.48,
"foresaw" : 4664.52,
"rot" : 4662.15,
"aggressive" : 4661.36,
"predicted" : 4661.36,
"quaker" : 4661.36,
"grate" : 4659.78,
"lease" : 4657.40,
"ponderous" : 4656.61,
"maketh" : 4655.82,
"repaid" : 4655.82,
"charcoal" : 4652.66,
"chilly" : 4652.66,
"arrogance" : 4651.87,
"friction" : 4651.87,
"participation" : 4651.87,
"pier" : 4649.49,
"stale" : 4648.70,
"intoxicated" : 4644.75,
"commissioned" : 4643.16,
"ratio" : 4643.16,
"121" : 4640.79,
"comb" : 4640.00,
"masterpiece" : 4640.00,
"wholesale" : 4640.00,
"embraces" : 4639.21,
"trodden" : 4639.21,
"ephraim" : 4638.42,
"shaw" : 4634.46,
"translate" : 4631.30,
"mortar" : 4630.51,
"recreation" : 4629.71,
"rite" : 4628.13,
"truthful" : 4628.13,
"cavalier" : 4627.34,
"caress" : 4626.55,
"si" : 4624.97,
"curling" : 4624.18,
"rivalry" : 4623.39,
"whim" : 4623.39,
"abreast" : 4621.01,
"thebes" : 4620.22,
"faust" : 4619.43,
"peg" : 4619.43,
"wilhelm" : 4619.43,
"pestilence" : 4618.64,
"ceremonial" : 4617.85,
"receiver" : 4617.06,
"sample" : 4617.06,
"distinctive" : 4615.47,
"consummate" : 4614.68,
"matron" : 4610.73,
"claiming" : 4609.94,
"plural" : 4608.35,
"initiative" : 4607.56,
"inexhaustible" : 4606.77,
"a.m." : 4605.98,
"spider" : 4603.61,
"reed" : 4602.82,
"streak" : 4602.82,
"blocked" : 4601.23,
"titus" : 4601.23,
"smashed" : 4598.07,
"populous" : 4597.28,
"baronet" : 4596.49,
"commodore" : 4596.49,
"jelly" : 4596.49,
"advocates" : 4594.11,
"dinah" : 4592.53,
"salutation" : 4589.37,
"mutiny" : 4586.99,
"chronicles" : 4586.20,
"comforting" : 4585.41,
"serviceable" : 4584.62,
"parchment" : 4583.04,
"playful" : 4583.04,
"potato" : 4583.04,
"transient" : 4579.87,
"administrative" : 4579.08,
"anarchy" : 4579.08,
"barber" : 4579.08,
"revision" : 4579.08,
"operated" : 4578.29,
"farce" : 4577.50,
"germ" : 4576.71,
"profile" : 4576.71,
"provides" : 4576.71,
"noting" : 4575.13,
"disordered" : 4573.54,
"menacing" : 4573.54,
"heightened" : 4571.17,
"finance" : 4570.38,
"averse" : 4569.59,
"azure" : 4568.80,
"bathe" : 4568.80,
"campaigns" : 4564.84,
"lessen" : 4562.47,
"slate" : 4562.47,
"acquaint" : 4561.68,
"gin" : 4559.30,
"humiliating" : 4559.30,
"cleft" : 4556.93,
"conveyance" : 4556.93,
"chivalrous" : 4554.56,
"capricious" : 4553.77,
"tribune" : 4553.77,
"pilgrim" : 4552.97,
"entreaty" : 4551.39,
"womanly" : 4550.60,
"paltry" : 4549.81,
"sporting" : 4549.81,
"maker" : 4549.02,
"digestion" : 4545.85,
"bart" : 4544.27,
"infamy" : 4541.90,
"lambs" : 4541.90,
"gaping" : 4540.32,
"periodical" : 4540.32,
"standpoint" : 4540.32,
"amorous" : 4539.53,
"tub" : 4539.53,
"luxuriant" : 4538.73,
"basic" : 4536.36,
"mutually" : 4535.57,
"chris" : 4533.99,
"greed" : 4532.40,
"premature" : 4532.40,
"extinction" : 4531.61,
"boiler" : 4530.03,
"intimation" : 4529.24,
"scandalous" : 4527.66,
"separating" : 4526.87,
"oratory" : 4525.28,
"banish" : 4524.49,
"electrical" : 4524.49,
"herb" : 4523.70,
"multiply" : 4523.70,
"prosper" : 4522.91,
"friar" : 4522.12,
"nightly" : 4520.54,
"ole" : 4519.75,
"monkeys" : 4518.16,
"interminable" : 4516.58,
"enjoys" : 4515.79,
"similarity" : 4515.00,
"riddle" : 4514.21,
"cleaning" : 4512.63,
"subscription" : 4511.84,
"copious" : 4510.25,
"exclaim" : 4509.46,
"forged" : 4509.46,
"voting" : 4509.46,
"scourge" : 4508.67,
"darkly" : 4507.09,
"privacy" : 4506.30,
"arena" : 4503.92,
"bearded" : 4502.34,
"vera" : 4499.97,
"alacrity" : 4494.43,
"sensual" : 4493.64,
"spin" : 4493.64,
"neutrality" : 4492.85,
"flannel" : 4492.06,
"fasting" : 4491.27,
"trailer" : 4491.27,
"avert" : 4489.68,
"trustworthy" : 4489.68,
"jamaica" : 4488.10,
"unchanged" : 4485.73,
"traveler" : 4484.15,
"unfamiliar" : 4483.35,
"puffed" : 4482.56,
"mirrors" : 4480.98,
"phoebe" : 4480.19,
"father-in-law" : 4478.61,
"conform" : 4477.03,
"particle" : 4476.23,
"railways" : 4476.23,
"stupendous" : 4476.23,
"paddle" : 4474.65,
"innate" : 4473.86,
"reformation" : 4473.07,
"volley" : 4471.49,
"statistics" : 4470.70,
"agrees" : 4469.11,
"simpler" : 4469.11,
"padre" : 4468.32,
"congratulations" : 4467.53,
"lids" : 4466.74,
"muse" : 4466.74,
"inhabitant" : 4465.95,
"ishmael" : 4465.16,
"rustle" : 4465.16,
"clump" : 4464.37,
"calendar" : 4463.58,
"flute" : 4463.58,
"inaccessible" : 4461.99,
"yore" : 4461.20,
"jay" : 4459.62,
"repulsive" : 4459.62,
"fray" : 4458.04,
"po" : 4456.46,
"nomination" : 4454.08,
"conclusive" : 4453.29,
"peaceable" : 4453.29,
"beth" : 4452.50,
"inconceivable" : 4452.50,
"e'en" : 4450.92,
"emerald" : 4450.13,
"lava" : 4450.13,
"trillion" : 4448.54,
"uppermost" : 4448.54,
"arduous" : 4447.75,
"lyric" : 4446.96,
"downright" : 4446.17,
"reproduction" : 4444.59,
"foresight" : 4443.01,
"consistency" : 4442.22,
"ape" : 4441.42,
"senators" : 4439.05,
"pallor" : 4437.47,
"span" : 4436.68,
"salad" : 4431.93,
"snuff" : 4431.93,
"drooped" : 4431.14,
"greetings" : 4431.14,
"chestnut" : 4427.98,
"inquisitive" : 4427.98,
"vicar" : 4427.98,
"noel" : 4426.39,
"attic" : 4425.60,
"savings" : 4425.60,
"affirmative" : 4424.02,
"ills" : 4422.44,
"applications" : 4421.65,
"t" : 4421.65,
"dye" : 4420.06,
"gloucester" : 4420.06,
"nominal" : 4417.69,
"demonstrate" : 4414.53,
"dispense" : 4414.53,
"dissatisfaction" : 4414.53,
"merciless" : 4414.53,
"trusty" : 4414.53,
"coloring" : 4412.15,
"perusal" : 4412.15,
"plaintive" : 4412.15,
"discarded" : 4410.57,
"precarious" : 4408.20,
"infection" : 4406.61,
"ruinous" : 4405.03,
"bolts" : 4404.24,
"arithmetic" : 4402.66,
"considerate" : 4402.66,
"lark" : 4401.87,
"ethics" : 4401.08,
"conventions" : 4400.29,
"stumbling" : 4400.29,
"pitcher" : 4399.49,
"slips" : 4399.49,
"seine" : 4398.70,
"officially" : 4396.33,
"danube" : 4395.54,
"annoy" : 4393.96,
"glide" : 4392.37,
"impunity" : 4390.79,
"amends" : 4390.00,
"sol" : 4389.21,
"conveying" : 4386.05,
"abandonment" : 4385.25,
"mane" : 4384.46,
"tinge" : 4384.46,
"brim" : 4382.09,
"forenoon" : 4380.51,
"seventy-five" : 4380.51,
"sparkle" : 4380.51,
"syllables" : 4380.51,
"shrug" : 4377.34,
"enchantment" : 4375.76,
"franz" : 4375.76,
"trait" : 4375.76,
"bribe" : 4374.97,
"composer" : 4373.39,
"preparatory" : 4373.39,
"audacious" : 4372.60,
"outskirts" : 4372.60,
"soiled" : 4372.60,
"fiddle" : 4371.01,
"football" : 4370.22,
"isaiah" : 4370.22,
"partnership" : 4370.22,
"continuation" : 4368.64,
"pioneer" : 4368.64,
"vest" : 4367.85,
"bass" : 4367.06,
"derby" : 4367.06,
"quarry" : 4367.06,
"rigging" : 4367.06,
"dizzy" : 4366.27,
"abnormal" : 4365.48,
"omission" : 4364.68,
"idolatry" : 4363.89,
"sequence" : 4363.89,
"squeeze" : 4362.31,
"cabbage" : 4360.73,
"canopy" : 4360.73,
"athletic" : 4359.94,
"shirley" : 4359.94,
"drunkenness" : 4359.15,
"intrusion" : 4358.36,
"'cause" : 4355.98,
"assign" : 4355.19,
"tackle" : 4354.40,
"dreamt" : 4353.61,
"sceptre" : 4352.82,
"exacting" : 4352.03,
"parched" : 4349.65,
"eddy" : 4347.28,
"percentage" : 4346.49,
"twinkle" : 4342.53,
"curb" : 4340.95,
"sandstone" : 4340.16,
"invaluable" : 4337.79,
"fathom" : 4336.20,
"preferable" : 4336.20,
"adelaide" : 4334.62,
"advertising" : 4332.25,
"scraps" : 4330.67,
"lever" : 4329.08,
"muster" : 4328.29,
"cavity" : 4324.34,
"barbarian" : 4322.75,
"sleepless" : 4322.75,
"fried" : 4320.38,
"abstraction" : 4319.59,
"forefinger" : 4319.59,
"spade" : 4319.59,
"erection" : 4318.80,
"scorned" : 4318.80,
"pail" : 4317.22,
"withdrawal" : 4317.22,
"senator" : 4315.63,
"mortgage" : 4314.84,
"ancestral" : 4311.68,
"succour" : 4310.89,
"ma" : 4309.31,
"forbearance" : 4308.51,
"repress" : 4308.51,
"spouse" : 4305.35,
"valid" : 4304.56,
"witchcraft" : 4303.77,
"workmanship" : 4302.98,
"legacy" : 4300.60,
"proximity" : 4300.60,
"bombay" : 4299.81,
"paula" : 4299.81,
"incorporated" : 4298.23,
"muzzle" : 4297.44,
"reuben" : 4296.65,
"clusters" : 4293.48,
"valve" : 4291.11,
"compelling" : 4290.32,
"dissipated" : 4289.53,
"flickering" : 4287.15,
"guinea" : 4286.36,
"sup" : 4286.36,
"tarry" : 4286.36,
"derision" : 4285.57,
"vehicles" : 4283.20,
"accommodate" : 4282.41,
"glossy" : 4282.41,
"iris" : 4278.45,
"relic" : 4277.66,
"ant" : 4276.08,
"heath" : 4269.75,
"bug" : 4266.58,
"vocal" : 4265.00,
"downfall" : 4262.63,
"construct" : 4261.05,
"undue" : 4261.05,
"vapor" : 4261.05,
"bat" : 4259.46,
"whimsical" : 4259.46,
"contradictory" : 4255.51,
"unlocked" : 4255.51,
"foretold" : 4250.76,
"automatic" : 4249.97,
"explicit" : 4249.18,
"indolent" : 4248.39,
"mates" : 4247.60,
"artful" : 4243.64,
"downcast" : 4242.85,
"well-being" : 4241.27,
"winston" : 4241.27,
"ordinances" : 4240.48,
"catharine" : 4239.69,
"effectively" : 4239.69,
"missions" : 4239.69,
"stalk" : 4239.69,
"indistinct" : 4238.89,
"pregnant" : 4236.52,
"reddish" : 4236.52,
"coveted" : 4235.73,
"fret" : 4234.94,
"peeping" : 4234.15,
"buck" : 4233.36,
"sumptuous" : 4232.56,
"indefinitely" : 4231.77,
"reliance" : 4230.98,
"panama" : 4230.19,
"cocked" : 4229.40,
"dad" : 4226.24,
"everyday" : 4224.65,
"intoxication" : 4221.49,
"aghast" : 4219.12,
"subterranean" : 4218.32,
"turmoil" : 4218.32,
"forfeit" : 4215.16,
"chasm" : 4214.37,
"inspect" : 4212.79,
"perverse" : 4212.79,
"precipitate" : 4212.79,
"dover" : 4212.00,
"ambush" : 4210.41,
"evermore" : 4210.41,
"mass." : 4210.41,
"blot" : 4209.62,
"nook" : 4209.62,
"verdure" : 4209.62,
"parapet" : 4208.83,
"jake" : 4208.04,
"cessation" : 4207.25,
"ankle" : 4206.46,
"classification" : 4206.46,
"fervently" : 4206.46,
"oddly" : 4205.67,
"haul" : 4204.08,
"saxony" : 4203.29,
"embarrassing" : 4202.50,
"hairy" : 4200.92,
"northwest" : 4200.92,
"disabled" : 4199.34,
"laurel" : 4199.34,
"preston" : 4198.55,
"arrogant" : 4196.96,
"hurts" : 4196.96,
"demonstrations" : 4195.38,
"splash" : 4195.38,
"curl" : 4194.59,
"livelihood" : 4193.80,
"wary" : 4193.80,
"scattering" : 4193.01,
"brace" : 4192.22,
"converts" : 4190.63,
"detestable" : 4190.63,
"143" : 4189.05,
"abandoning" : 4189.05,
"somerset" : 4189.05,
"weakly" : 4189.05,
"clothe" : 4188.26,
"gem" : 4187.47,
"tremor" : 4185.89,
"surveying" : 4184.31,
"variable" : 4183.51,
"anniversary" : 4175.60,
"thirty-two" : 4174.81,
"wrap" : 4174.02,
"curly" : 4171.65,
"diversity" : 4170.86,
"prestige" : 4170.86,
"desertion" : 4170.07,
"freezing" : 4170.07,
"heedless" : 4170.07,
"sentry" : 4170.07,
"believer" : 4169.27,
"ram" : 4169.27,
"rowing" : 4169.27,
"negligence" : 4168.48,
"self-" : 4168.48,
"sulphur" : 4167.69,
"discrimination" : 4166.90,
"cooling" : 4162.95,
"millionaire" : 4162.95,
"flowering" : 4161.36,
"meridian" : 4161.36,
"wins" : 4161.36,
"awed" : 4159.78,
"beastly" : 4159.78,
"nuisance" : 4158.99,
"abstain" : 4158.20,
"continental" : 4158.20,
"stanza" : 4157.41,
"target" : 4156.62,
"unwonted" : 4156.62,
"whit" : 4155.82,
"jason" : 4153.45,
"stall" : 4152.66,
"sham" : 4151.87,
"dictate" : 4151.08,
"empress" : 4151.08,
"gout" : 4151.08,
"jobs" : 4151.08,
"manure" : 4151.08,
"nigel" : 4151.08,
"sidewalk" : 4150.29,
"sate" : 4148.70,
"grievance" : 4147.91,
"axes" : 4147.12,
"bony" : 4146.33,
"invest" : 4146.33,
"birmingham" : 4143.96,
"ebb" : 4143.96,
"rabble" : 4140.79,
"restlessness" : 4140.00,
"cruise" : 4137.63,
"rally" : 4136.84,
"rumor" : 4135.26,
"hysterical" : 4132.88,
"girlish" : 4130.51,
"actively" : 4129.72,
"shortest" : 4129.72,
"marseilles" : 4128.93,
"cheque" : 4128.14,
"disregarded" : 4127.34,
"retort" : 4127.34,
"rocking" : 4127.34,
"emerge" : 4125.76,
"perch" : 4124.18,
"flask" : 4123.39,
"ka" : 4123.39,
"countryman" : 4121.81,
"lonesome" : 4121.81,
"manned" : 4121.01,
"unarmed" : 4121.01,
"wast" : 4121.01,
"frog" : 4119.43,
"twenty-eight" : 4119.43,
"unscrupulous" : 4119.43,
"yarn" : 4119.43,
"victuals" : 4118.64,
"outrageous" : 4117.85,
"appropriation" : 4114.69,
"foolishness" : 4114.69,
"quickness" : 4114.69,
"adversity" : 4113.89,
"parma" : 4112.31,
"diseased" : 4111.52,
"iliad" : 4109.94,
"salutary" : 4109.94,
"smelt" : 4108.36,
"territorial" : 4108.36,
"hurricane" : 4107.57,
"irons" : 4106.77,
"canyon" : 4105.98,
"jeremiah" : 4105.98,
"brooklyn" : 4105.19,
"indulging" : 4105.19,
"vapour" : 4104.40,
"disobedience" : 4103.61,
"atrocious" : 4102.82,
"leaps" : 4102.03,
"tapestry" : 4099.65,
"provocation" : 4098.86,
"twenty-six" : 4098.86,
"impotent" : 4098.07,
"smite" : 4093.33,
"acquitted" : 4090.95,
"os" : 4090.16,
"tumultuous" : 4090.16,
"barge" : 4089.37,
"palpable" : 4087.79,
"apprentice" : 4087.00,
"lances" : 4086.21,
"compartment" : 4085.41,
"godly" : 4085.41,
"sarcastic" : 4085.41,
"therefrom" : 4085.41,
"specifically" : 4084.62,
"uniformity" : 4083.83,
"emerging" : 4083.04,
"atonement" : 4082.25,
"whereabouts" : 4082.25,
"davy" : 4081.46,
"framework" : 4081.46,
"sponge" : 4081.46,
"mountainous" : 4080.67,
"annoying" : 4079.08,
"cot" : 4079.08,
"squirrel" : 4079.08,
"wand" : 4076.71,
"grind" : 4075.92,
"bang" : 4075.13,
"unreal" : 4075.13,
"blacksmith" : 4074.34,
"injunction" : 4072.76,
"scarcity" : 4071.96,
"withhold" : 4071.96,
"outright" : 4070.38,
"bavaria" : 4069.59,
"cement" : 4068.80,
"growl" : 4067.22,
"aggregate" : 4066.43,
"fraction" : 4066.43,
"exaltation" : 4064.05,
"inexorable" : 4063.26,
"jug" : 4063.26,
"purer" : 4063.26,
"sap" : 4063.26,
"illegal" : 4062.47,
"sister-in-law" : 4061.68,
"presses" : 4060.89,
"stealthily" : 4060.89,
"dissolve" : 4060.10,
"volcano" : 4059.31,
"hungarian" : 4057.72,
"equilibrium" : 4056.93,
"obstinately" : 4056.14,
"sullenly" : 4056.14,
"assassination" : 4055.35,
"commissions" : 4054.56,
"respectability" : 4052.98,
"bases" : 4051.40,
"maxwell" : 4050.60,
"resounded" : 4050.60,
"closest" : 4049.81,
"embroidery" : 4049.02,
"gunpowder" : 4049.02,
"reproof" : 4049.02,
"yale" : 4045.07,
"combining" : 4043.48,
"weaving" : 4041.90,
"earnings" : 4041.11,
"hamburg" : 4039.53,
"indoors" : 4039.53,
"manufacturers" : 4039.53,
"pitiless" : 4039.53,
"scarf" : 4039.53,
"picnic" : 4037.95,
"misled" : 4035.57,
"pompous" : 4035.57,
"brian" : 4034.78,
"respite" : 4033.99,
"exploit" : 4033.20,
"tracing" : 4033.20,
"geological" : 4031.62,
"passport" : 4031.62,
"confines" : 4030.83,
"dishonour" : 4029.24,
"executioner" : 4029.24,
"township" : 4029.24,
"vacancy" : 4029.24,
"acquiescence" : 4026.87,
"cornwall" : 4026.87,
"crumbling" : 4026.08,
"three-quarters" : 4025.29,
"exploration" : 4022.91,
"needy" : 4021.33,
"stationary" : 4021.33,
"disconcerted" : 4020.54,
"wanderer" : 4019.75,
"beaver" : 4018.17,
"lookout" : 4015.79,
"onion" : 4015.79,
"depicted" : 4015.00,
"boisterous" : 4014.21,
"couples" : 4013.42,
"speakers" : 4013.42,
"woollen" : 4012.63,
"lightness" : 4011.05,
"bitten" : 4007.88,
"aux" : 4007.09,
"toleration" : 4005.51,
"lucia" : 4004.72,
"scar" : 4004.72,
"bohemian" : 4002.34,
"vested" : 4002.34,
"affinity" : 4001.55,
"carlo" : 4001.55,
"sous" : 4001.55,
"penitent" : 4000.76,
"simpson" : 4000.76,
"abiding" : 3997.60,
"ca" : 3996.02,
"immoral" : 3996.02,
"dishonest" : 3995.22,
"yawning" : 3994.43,
"mustache" : 3992.85,
"supplement" : 3992.85,
"whirlwind" : 3992.85,
"clash" : 3991.27,
"terence" : 3990.48,
"lamentable" : 3989.69,
"bennett" : 3988.90,
"farthing" : 3987.31,
"speck" : 3987.31,
"biscuit" : 3986.52,
"appellation" : 3985.73,
"gdp" : 3984.94,
"reserves" : 3983.36,
"uncouth" : 3982.57,
"birch" : 3980.98,
"armchair" : 3980.19,
"judy" : 3980.19,
"greasy" : 3978.61,
"leaden" : 3978.61,
"dough" : 3977.03,
"lining" : 3976.24,
"cleverness" : 3971.49,
"ascetic" : 3969.91,
"clutch" : 3969.12,
"krishna" : 3969.12,
"embark" : 3968.33,
"quotations" : 3968.33,
"friendliness" : 3967.53,
"liberally" : 3967.53,
"trance" : 3965.16,
"untrue" : 3965.16,
"rejection" : 3964.37,
"grating" : 3962.79,
"hanover" : 3961.21,
"inexperienced" : 3961.21,
"mon" : 3960.41,
"wintry" : 3960.41,
"stalwart" : 3958.83,
"meats" : 3958.04,
"stamping" : 3956.46,
"variance" : 3956.46,
"apiece" : 3954.88,
"firmament" : 3954.88,
"absorption" : 3953.29,
"apprehensive" : 3953.29,
"terminate" : 3953.29,
"wilful" : 3952.50,
"conveniently" : 3951.71,
"cleanliness" : 3950.92,
"collective" : 3950.92,
"angela" : 3950.13,
"filth" : 3950.13,
"philippines" : 3950.13,
"timely" : 3950.13,
"herein" : 3948.55,
"ignoble" : 3948.55,
"canton" : 3946.17,
"lamentations" : 3944.59,
"moslem" : 3944.59,
"ware" : 3943.80,
"adjective" : 3943.01,
"glen" : 3943.01,
"invade" : 3943.01,
"livid" : 3943.01,
"buggy" : 3941.43,
"prolong" : 3940.64,
"weaken" : 3937.47,
"folio" : 3935.10,
"dismissal" : 3934.31,
"quay" : 3934.31,
"enchanting" : 3933.52,
"heave" : 3931.93,
"purified" : 3931.14,
"syrian" : 3931.14,
"significantly" : 3929.56,
"experimental" : 3927.98,
"film" : 3926.40,
"repressed" : 3926.40,
"cooperation" : 3924.81,
"sequel" : 3924.02,
"wench" : 3924.02,
"calves" : 3923.23
}
def get_frequency(word):
    """Return the word frequency, or 0 if not found."""
    return frequencies.get(word, 0.0)
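# A minimal usage sketch (assuming this module is saved as frequency.py --
# the name the word-report script further down imports it under):
if __name__ == "__main__":
    print(get_frequency("twist"))  # 6852.01, as tabulated above
    print(get_frequency("zzz"))    # falls back to 0.0 for words not in the table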
|
#!/usr/bin/env python
import sys
import re
import frequency
max_occurrences = 1000
filter_re = sys.argv[1:] if len(sys.argv) > 1 else ['.*']
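# Each command-line argument is treated as a regular expression restricting
# which words are reported (e.g. "./words.py '^homotop'"; the script name is
# hypothetical here); with no arguments, every word matches.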
files = [
# "macros.tex",
"front.tex",
"preface.tex",
"introduction.tex",
"preliminaries.tex",
"basics.tex",
"logic.tex",
"equivalences.tex",
"induction.tex",
"hits.tex",
"hlevels.tex",
"homotopy.tex",
"categories.tex",
"setmath.tex",
"reals.tex",
"formal.tex"
]
words = {}
macros = set()
antimacros = set()
antifiles = ['symbols.tex',
'macros.tex',
'opt-letter.tex',
'opt-ustrade.tex',
'opt-color.tex',
'hott-ustrade.tex',
'hott-letter.tex',
'hott-online.tex']
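# Matches are replaced by equal-length runs of spaces (rather than deleted)
# so that character offsets computed later still point into the original
# .tex source when excerpt positions are reported.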
def matchtospaces(m):
    return ' ' * len(m.group(0))
for fn in files:
    with open(fn, "r") as f:
        text = f.read()
    # Remove environment names
    #text = re.sub(r'\\(begin|end){[^}]+}', ' ', text)
    # Remove all labels and refs
    #text = re.sub(r'(\\(label|cref|autoref|eqref|ref){[^}]+})', ' ', text)
    # Remove hyphenation hints
    #text = re.sub(r'\\-', '', text)
    # Remove quotes
    #text = re.sub(r"['`]", ' ', text)
    # Replace --- with space
    text = re.sub(r'---', ' ', text)
    # Replace punctuation with space
    #text = re.sub(r'[,.;:?!]', ' ', text)
    # Replace newlines with spaces
    #text = re.sub(r'\n', ' ', text)
    # Find macros
    for m in re.findall(r"\\[a-zA-Z]+\b", text):
        if fn in antifiles:
            antimacros.add(m)
        else:
            macros.add(m)
    # Delete macros
    #text = re.sub(r'\\[a-zA-Z]+\b', ' ', text)
    # Delete cross-references, labels, citations, urls, math terms, environments, index entries
    text = re.sub(r'\\(autoref|cref|cite|label|ref|eqref|mathsf|href|url|begin|end|index|indexdef|indexfoot|indexsee){[0-9a-zA-Z-_:,!@$* \\]*}', matchtospaces, text)
    # Find words, trying to include things like "$(n-2)$-connected"
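    # Group map, for reference: group(1)/group(5) hold the 20 characters of
    # context captured by the lookbehind/lookahead, group(2) optionally holds
    # a leading "$...$-" math prefix, and group(3) is the word itself.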
    for m in re.finditer(r"(?<=(.{20}))[^\\]\b(\$[^$]*\$-)?([a-zA-Z]([a-zA-Z-']|\\-)*[a-zA-Z-])\b(?=(.{20}))", text, re.DOTALL):
        key = str(m.group(3)).lower()
        key = re.sub(r'\\-', '', key)  # remove hyphenation hints
        pos = m.start(3)
        excerpt = str(m.group(1) + m.group(0) + m.group(5))
        excerpt = re.sub(r'\n', ' ', excerpt)  # replace newlines with spaces
        if key in words:
            words[key].append((excerpt, fn, pos))
        else:
            words[key] = [(excerpt, fn, pos)]
# Macros which appear somewhere but are not in the symbols index, macros.tex,
# or configuration files.
macros -= antimacros
# Uncomment to see the macros.
#for macro in sorted(macros):
#    print(macro)
def sortkey(word):
    return (frequency.get_frequency(word), word)
def filter_word(w, fs):
    for r in fs:
        if re.search(r, w, flags=re.IGNORECASE):
            return True
    return False
for key in sorted(words.keys(), key=sortkey):
    if filter_word(key, filter_re):
        freq = frequency.get_frequency(key)
        if freq > 1100000:
            continue
        print("\n\n======== %s [%d]\n\n" % (key, freq))
        for (excerpt, fn, pos) in words[key][:max_occurrences]:
            print(" ...%s... [%s @ %d]" % (excerpt, fn, pos))
        if len(words[key]) > max_occurrences:
            print("\n [[%d omitted occurrences]]" % (len(words[key]) - max_occurrences))
|
#!/usr/bin/env python
# This script generates symbol images from which the
# torus picture is then assembled.
import subprocess
import os
import os.path
symbols = [
r"$$\sum$$",
r"$$\prod$$",
r"$$\lambda$$",
r"$$\times$$",
r"$$\simeq$$"
]
ncols = 1
colors = [("gray", str(float(i)/ncols)) for i in range(0, ncols+1)]
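# With ncols = 1 this produces two values for xcolor's "gray" model, "0.0"
# (black) and "1.0" (white); the loop below emits one LaTeX page per
# (symbol, color) pair, which dvipng later turns into one image each.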
## Generate LaTeX
template = r"""
\documentclass{article}
\usepackage{palatino}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{xcolor}
\pagestyle{empty}
\begin{document}
%s
\end{document}"""
tex = ""
for (i, s) in enumerate(symbols):
    for (j, (m, c)) in enumerate(colors):
        tex = tex + (r"\definecolor{mycolor}{%s}{%s}\textcolor{mycolor}{%s}\newpage" % (m, c, s)) + "\n"
# Write LaTeX to file
with open("temp.tex", "w") as f:
f.write(template % tex)
# Process LaTeX and generate png files
subprocess.call(["latex", "temp.tex"])
subprocess.call(["dvipng", "-D", "1200", "-o", "preimg/image_%02d.png", "-T", "tight", "temp.dvi"])
# Convert png files to jpg
filelist = [f for f in os.listdir('preimg') if f.endswith(".png")]
for f in filelist:
    fin = os.path.join("preimg", f)
    fout = os.path.join("srcimg", os.path.splitext(f)[0] + ".jpg")
    subprocess.call(["convert", "-bordercolor", "white", "-border", "20x20", "-quality", "100", fin, fout])
# Remove auxiliary files
for f in filelist:
    os.remove(os.path.join("preimg", f))
for f in [f for f in os.listdir('.') if f.startswith("temp.")]:
    os.remove(f)
|
#!/usr/bin/python
try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages
setup(
name='BlueChips',
version='1.0.0',
description='BlueChips - finances for people with shared expenses',
long_description=open('README.rst').read(),
author='Residents of Blue Sun Corporate Headquarters',
author_email='[email protected]',
url='http://github.com/ebroder/bluechips',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Pylons',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Topic :: Home Automation',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Office/Business :: Financial :: Accounting',
],
install_requires=["Pylons>=0.9.6",
"WebHelpers==0.6.4",
"SQLAlchemy>=0.5",
"AuthKit>=0.4.0",
"FormEncode>=1.2.1",
"mailer>=0.5"],
setup_requires=["PasteScript==dev,>=1.6.3dev-r7326"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = bluechips.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
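# A note on the entry points above: PasteDeploy resolves "use = egg:BlueChips"
# in a deployment .ini via the [paste.app_factory] entry point, calling
# make_app() in bluechips.config.middleware to build the WSGI application.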
|
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c8"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
}
import sys, os
def _validate_md5(egg_name, data):
    if egg_name in md5_data:
        from md5 import md5
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            print >>sys.stderr, (
                "The required version of setuptools (>=%s) is not available, and\n"
                "can't be installed while this script is running. Please install\n"
                " a more recent version first, using 'easy_install -U setuptools'."
                "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources']  # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
                )
                from time import sleep
                sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
            dst = open(saveto, "wb")
            dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
                "You have an obsolete version of setuptools installed. Please\n"
                "remove it from your system entirely before rerunning this script."
            )
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
from md5 import md5
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb')
    src = f.read()
    f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
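# Illustrative usage sketch (an assumption, not part of the original
# script): a project's setup.py typically bootstraps setuptools like
# this; the project name and version below are hypothetical.
#
#     from ez_setup import use_setuptools
#     use_setuptools()
#
#     from setuptools import setup
#     setup(name='example-project', version='0.1')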
|
"""Setup the BlueChips application"""
import logging
from bluechips.config.environment import load_environment
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup bluechips here"""
load_environment(conf.global_conf, conf.local_conf)
# Load the models
from bluechips.model import meta
meta.metadata.bind = meta.engine
# Create the tables if they aren't there already
meta.metadata.create_all(checkfirst=True)
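# Usage note (an assumption, mirroring standard Pylons practice): this
# hook is normally run via paster rather than imported directly, e.g.
#
#     paster setup-app development.ini
#
# which calls setup_app() with the parsed config and creates any
# missing tables via create_all(checkfirst=True).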
|
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedence over the more generic routes. For more information,
refer to the routes manual at http://routes.groovie.org/docs/
"""
from pylons import config
from routes import Mapper
def make_map():
"""Create, configure and return the routes Mapper"""
map = Mapper(directory=config['pylons.paths']['controllers'],
always_scan=config['debug'])
map.minimization = False
# The ErrorController route (handles 404/500 error pages); it should
# likely stay at the top, ensuring it can always be resolved
map.connect('error/:action/:id', controller='error')
# CUSTOM ROUTES HERE
map.connect('/', controller='status', action='index')
map.connect('/:controller', action='index')
map.connect('/:controller/:action')
map.connect('/:controller/:action/:id')
return map
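# Illustrative examples of how the routes above resolve (the 'spend'
# controller name is hypothetical):
#
#     /              -> controller='status', action='index'
#     /spend         -> controller='spend',  action='index'
#     /spend/edit    -> controller='spend',  action='edit'
#     /spend/edit/3  -> controller='spend',  action='edit', id='3'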
|
"""Pylons environment configuration"""
import os
from mako.lookup import TemplateLookup
from pylons import config
from sqlalchemy import engine_from_config
from mailer import Mailer
import bluechips.lib.app_globals as app_globals
import bluechips.lib.helpers
from bluechips.config.routing import make_map
from bluechips.model import init_model
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package='bluechips', paths=paths)
config['routes.map'] = make_map()
config['pylons.app_globals'] = app_globals.Globals()
config['pylons.h'] = bluechips.lib.helpers
# Create the Mako TemplateLookup, with the default auto-escaping
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'],
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', output_encoding='utf-8',
imports=['from webhelpers.html import escape'],
default_filters=['escape'])
# Setup SQLAlchemy database engine
engine = engine_from_config(config, 'sqlalchemy.')
init_model(engine)
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
config['pylons.app_globals'].mailer = Mailer(config.get('mailer.host',
'127.0.0.1'))
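# Sketch of the ini options this function reads (values are examples
# only): engine_from_config() picks up keys prefixed 'sqlalchemy.',
# cache_dir feeds the Mako module directory, and mailer.host defaults
# to 127.0.0.1 when unset.
#
#     [app:main]
#     cache_dir = %(here)s/data
#     sqlalchemy.url = sqlite:///%(here)s/bluechips.db
#     mailer.host = 127.0.0.1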
|
"""Pylons middleware initialization"""
from beaker.middleware import CacheMiddleware, SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.auth.basic import AuthBasicHandler
from paste.deploy.converters import asbool
from pylons import config
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware import RoutesMiddleware
import authkit.authorize
from bluechips.config.environment import load_environment
from bluechips.lib.permissions import (BlueChipUser, DummyAuthenticate,
authenticate)
def make_app(global_conf, full_stack=True, **app_conf):
"""Create a Pylons WSGI application and return it
``global_conf``
The inherited configuration for this application. Normally from
the [DEFAULT] section of the Paste ini file.
``full_stack``
Whether or not this application provides a full WSGI stack (by
default, meaning it handles its own exceptions and errors).
Disable full_stack when this application is "managed" by
another WSGI middleware.
``app_conf``
The application's local configuration. Normally specified in the
[app:<name>] section of the Paste ini file (where <name>
defaults to main).
"""
# Configure the Pylons environment
load_environment(global_conf, app_conf)
# The Pylons WSGI app
app = PylonsApp()
# CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
app = authkit.authorize.middleware(app, BlueChipUser())
# Routing/Session/Cache Middleware
app = RoutesMiddleware(app, config['routes.map'])
app = SessionMiddleware(app, config)
app = CacheMiddleware(app, config)
if asbool(full_stack):
# Handle Python exceptions
app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
        # Display error documents for 400, 401, 403, 404 status codes (and
        # 500 when debug is disabled)
        status_codes = [400, 401, 403, 404]
if not asbool(config.get('debug')):
status_codes.append(500)
app = StatusCodeRedirect(app, status_codes)
# Establish the Registry for this application
app = RegistryManager(app)
    # Static files (If running in production, and Apache or another web
    # server is handling this static content, remove the following two lines)
static_app = StaticURLParser(config['pylons.paths']['static_files'])
app = Cascade([static_app, app])
app = AuthBasicHandler(app, 'BlueChips', authenticate)
app = DummyAuthenticate(app, app_conf)
return app
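# Hedged sketch of how this factory is usually wired up via PasteDeploy
# (the egg name is an assumption based on the package name):
#
#     [app:main]
#     use = egg:BlueChips
#     full_stack = true
#
# paster resolves the entry point to make_app() and builds the
# middleware stack above around the core PylonsApp.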
|
"""Pylons application test package
This package assumes the Pylons environment is already loaded, such as
when this script is imported from the `nosetests --with-pylons=test.ini`
command.
This module initializes the application via ``websetup`` (`paster
setup-app`) and provides the base testing objects.
"""
from unittest import TestCase
from paste.deploy import loadapp
from paste.fixture import TestApp
from paste.script.appinstall import SetupCommand
from pylons import config
from routes import url_for
import bluechips.model
from bluechips.model import meta
from bluechips.model.types import Currency
import random
__all__ = ['url_for', 'TestController',
'createUsers', 'createExpenditures',
'deleteUsers', 'deleteExpenditures']
sample_users = [u'Alice', u'Bob', u'Charlie', u'Dave', u'Eve']
def setUpPackage():
# Invoke websetup with the current config file
SetupCommand('setup-app').run([config['__file__']])
u1 = bluechips.model.User(u'root', u'Charlie Root', True)
u1.email = u'[email protected]'
u1.password = u'charliepass'
u2 = bluechips.model.User(u'ben', u'Ben Bitdiddle', True)
u3 = bluechips.model.User(u'gotta', u'Gotta Lisp', True)
u4 = bluechips.model.User(u'rich', u'Rich Scheme', True)
for u in (u1, u2, u3, u4):
meta.Session.add(u)
meta.Session.commit()
def tearDownPackage():
meta.metadata.drop_all()
class TestController(TestCase):
def __init__(self, *args, **kwargs):
wsgiapp = loadapp('config:%s' % config['__file__'])
self.app = TestApp(wsgiapp)
TestCase.__init__(self, *args, **kwargs)
def createUsers(n=None):
if n is None:
n = random.randint(2, 5)
for i in xrange(n):
u = bluechips.model.User(sample_users[i].lower(), resident=True)
meta.Session.add(u)
meta.Session.commit()
def createExpenditures(n=None):
if n is None:
n = random.randint(5, 20)
users = meta.Session.query(bluechips.model.User).all()
for i in xrange(n):
e = bluechips.model.Expenditure(random.choice(users),
Currency(random.randint(1000, 100000)))
meta.Session.add(e)
e.even_split()
meta.Session.commit()
def deleteUsers():
    for user in meta.Session.query(bluechips.model.User):
        meta.Session.delete(user)
def deleteExpenditures():
    for expenditure in meta.Session.query(bluechips.model.Expenditure):
        meta.Session.delete(expenditure)
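# Minimal sketch of how these helpers are meant to be used in a test
# (the 'status' controller comes from the routing config; the counts
# are arbitrary). Note the delete helpers do not commit on their own.
#
#     class TestStatus(TestController):
#         def setUp(self):
#             createUsers(3)
#             createExpenditures(5)
#         def tearDown(self):
#             deleteExpenditures()
#             deleteUsers()
#             meta.Session.commit()
#         def test_index(self):
#             response = self.app.get(url_for(controller='status'))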
|
from unittest import TestCase
from bluechips import model
from bluechips.model.types import Currency
class TestSplit(TestCase):
def setUp(self):
self.u = model.User('chaz', u'Charles Root', False)
self.e = model.Expenditure(self.u, Currency('12.34'),
u'A test expenditure')
self.sp = model.Split(self.e, self.u, Currency('5.55'))
def test_constructor(self):
assert self.sp.expenditure == self.e
assert self.sp.user == self.u
assert self.sp.share == Currency('5.55')
def test_repr(self):
assert (repr(self.sp) == '<Split: expense: %s user: %s share: %s>' %
(self.sp.expenditure, self.sp.user, self.sp.share))
|
from unittest import TestCase
from bluechips import model
class TestUser(TestCase):
def setUp(self):
self.u = model.User('chaz', u'Charles Root', False)
def test_constructor(self):
assert self.u.username == 'chaz'
assert self.u.name == u'Charles Root'
assert self.u.resident == False
def test_repr(self):
assert repr(self.u) == '<User: chaz>'
def test_str(self):
assert str(self.u) == 'Charles Root'
|
from unittest import TestCase
from bluechips.model import types
class TestCurrency(TestCase):
def setUp(self):
self.c = types.Currency('12.34')
def test_currency_float(self):
assert float(self.c) == 1234.
def test_currency_int(self):
val = int(self.c)
assert val == 1234
assert type(val) == int
def test_currency_long(self):
val = long(self.c)
assert val == 1234
assert type(val) == long
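# Inference from the assertions above (hedged, since the Currency
# implementation is not shown here): Currency appears to store whole
# hundredths internally, so Currency('12.34') behaves as 1234 under
# int(), long(), and float().
#
#     c = types.Currency('12.34')
#     assert int(c) == long(c) == 1234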
|