code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
# Fill this dict with config data
return {
'hash_name': self.hash_name,
'dim': self.dim,
'projection_count': self.projection_count,
'normals': self.normals,
'tree_root': self.tree_root,
'minimum_result_size': self.minimum_result_size
}
|
def get_config(self)
|
Returns pickle-serializable configuration struct for storage.
| 5.473358 | 4.984433 | 1.09809 |
self.hash_name = config['hash_name']
self.dim = config['dim']
self.projection_count = config['projection_count']
self.normals = config['normals']
self.tree_root = config['tree_root']
self.minimum_result_size = config['minimum_result_size']
|
def apply_config(self, config)
|
Applies config
| 4.075185 | 4.082365 | 0.998241 |
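The two methods above are mirror images: `get_config` collects the hash's attributes into a plain dict and `apply_config` writes them back. A minimal sketch of the pickle round-trip, using a hypothetical stand-in class with the same attributes:

```python
import pickle
import numpy

class DummyHash(object):
    """Hypothetical stand-in exposing the attributes used by get_config/apply_config."""
    def __init__(self):
        self.hash_name = 'rbp'
        self.dim = 100
        self.projection_count = 10
        self.normals = numpy.random.randn(10, 100)
        self.tree_root = None
        self.minimum_result_size = 1

    def get_config(self):
        return {'hash_name': self.hash_name, 'dim': self.dim,
                'projection_count': self.projection_count, 'normals': self.normals,
                'tree_root': self.tree_root,
                'minimum_result_size': self.minimum_result_size}

    def apply_config(self, config):
        for key, value in config.items():
            setattr(self, key, value)

blob = pickle.dumps(DummyHash().get_config())   # what the storage backends persist
restored = DummyHash()
restored.apply_config(pickle.loads(blob))       # what load_hash_configuration restores
```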
mongo_key = self._format_mongo_key(hash_name, bucket_key)
val_dict = {}
val_dict['lsh'] = mongo_key
# Depending on type (sparse or not) fill value dict
if scipy.sparse.issparse(v):
# Make sure that we are using COO format (easy to handle)
if not scipy.sparse.isspmatrix_coo(v):
v = scipy.sparse.coo_matrix(v)
# Construct list of [index, value] items,
# one for each non-zero element of the sparse vector
encoded_values = []
for k in range(v.data.size):
row_index = v.row[k]
value = v.data[k]
encoded_values.append([int(row_index), value])
val_dict['sparse'] = 1
val_dict['nonzeros'] = encoded_values
val_dict['dim'] = v.shape[0]
else:
# Make sure it is a 1d vector
v = numpy.reshape(v, v.shape[0])
val_dict['vector'] = v.tostring()
val_dict['dtype'] = v.dtype.name
# Add data if set
if data is not None:
val_dict['data'] = data
# Push JSON representation of dict to end of bucket list
self.mongo_object.insert_one(val_dict)
|
def store_vector(self, hash_name, bucket_key, v, data)
|
Stores vector and JSON-serializable data in MongoDB with specified key.
| 3.609668 | 3.480994 | 1.036965 |
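For sparse input the method keeps only the non-zero entries as [index, value] pairs plus the dimension. A self-contained sketch of that COO encoding step (independent of MongoDB):

```python
import numpy
import scipy.sparse

# A sparse column vector with two non-zero entries.
v = scipy.sparse.coo_matrix(numpy.array([[0.0], [2.5], [0.0], [1.0]]))

encoded_values = []
for k in range(v.data.size):
    encoded_values.append([int(v.row[k]), v.data[k]])

# [[1, 2.5], [3, 1.0]] together with dim=4 is enough to rebuild the vector later.
print(encoded_values, v.shape[0])
```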
lsh_keys = [self._format_mongo_key(hash_name, key)
for key in bucket_keys]
self.mongo_object.delete_many({'lsh': {'$in': lsh_keys},
'data': data})
|
def delete_vector(self, hash_name, bucket_keys, data)
|
Deletes vector and JSON-serializable data in buckets with specified keys.
| 4.91901 | 5.015842 | 0.980695 |
self.mongo_object.insert_one(
{'hash_conf_name': lshash.hash_name+'_conf',
'hash_configuration': pickle.dumps(lshash.get_config())
}
)
|
def store_hash_configuration(self, lshash)
|
Stores hash configuration
| 5.99978 | 5.682214 | 1.055888 |
conf = self.mongo_object.find_one(
{'hash_conf_name': hash_name + '_conf'}
)
return pickle.loads(conf['hash_configuration']) if conf is not None\
else None
|
def load_hash_configuration(self, hash_name)
|
Loads and returns hash configuration
| 6.044988 | 5.66542 | 1.066997 |
# If vectors is not a numpy matrix, create one
if not isinstance(vectors, numpy.ndarray):
V = numpy.zeros((vectors[0].shape[0], len(vectors)))
for index in range(len(vectors)):
vector = vectors[index]
V[:, index] = vector
return V
return vectors
|
def numpy_array_from_list_or_numpy_array(vectors)
|
Returns numpy array representation of argument.
The argument may be a numpy array (in which case the input is returned
unchanged) or a list of numpy vectors.
| 3.190521 | 3.281312 | 0.972331 |
if scipy.sparse.issparse(vec):  # sparse path: normalize in CSR format
vec = vec.tocsr()
veclen = numpy.sqrt(numpy.sum(vec.data ** 2))
if veclen > 0.0:
return vec / veclen
else:
return vec
if isinstance(vec, numpy.ndarray):
vec = numpy.asarray(vec, dtype=float)
veclen = numpy.linalg.norm(vec)
if veclen > 0.0:
return vec / veclen
else:
return vec
|
def unitvec(vec)
|
Scale a vector to unit length. The only exception is the zero vector, which
is returned back unchanged.
| 2.284688 | 2.257913 | 1.011858 |
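A quick check of the behaviour described above, using a minimal dense-only variant (hypothetical helper, not part of the library):

```python
import numpy

def unitvec_dense(vec):
    # Dense-only version of the normalisation above.
    vec = numpy.asarray(vec, dtype=float)
    veclen = numpy.linalg.norm(vec)
    return vec / veclen if veclen > 0.0 else vec

print(unitvec_dense([3.0, 4.0]))   # [0.6 0.8]
print(unitvec_dense([0.0, 0.0]))   # the zero vector is returned unchanged
```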
# First subtract the mean
M = (A-numpy.mean(A.T, axis=1)).T
# Get eigenvectors and values of covariance matrix
return numpy.linalg.eig(numpy.cov(M))
|
def perform_pca(A)
|
Computes eigenvalues and eigenvectors of covariance matrix of A.
The rows of A correspond to observations, the columns to variables.
| 4.806037 | 4.409708 | 1.089876 |
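A short sketch of the same computation on toy data, making the shape conventions explicit (rows of A are observations, columns are variables):

```python
import numpy

A = numpy.random.randn(200, 5)           # 200 observations of 5 variables
M = (A - numpy.mean(A.T, axis=1)).T      # centre each variable; M is 5 x 200
eigenvalues, eigenvectors = numpy.linalg.eig(numpy.cov(M))
print(eigenvalues.shape, eigenvectors.shape)   # (5,) and (5, 5)
```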
c = ba.copy()
for i in range(len(self.mapping)):
ba[i] = c[self.mapping[i]]
return ba
|
def permute(self, ba)
|
Permute the bitarray ba inplace.
| 4.273559 | 3.732336 | 1.145009 |
pba = ba.copy()
self.permute(pba)
assert(beam_size % 2 == 0)
half_beam = beam_size / 2
# binary search (pba,ba) in bas
idx = bisect_left(bas, (pba, ba))
start = int(max(0, idx - half_beam))
end = int(min(len(bas), idx + half_beam))
res = bas[start:end]
# return the original(unpermuted) keys
res = [x[1] for x in res]
return res
|
def search_revert(self, bas, ba, beam_size)
|
ba: query bitarray
bas: a sorted list of tuples of (permuted bitarray, original bitarray)
return : query bitarray's beam-size neighbours (unpermuted bitarray)
| 4.468675 | 3.879549 | 1.151854 |
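A self-contained sketch of the permute-and-bisect idea above on tiny 4-bit keys; the permutation mapping and beam size are purely illustrative:

```python
from bisect import bisect_left
from bitarray import bitarray

mapping = [2, 0, 3, 1]                     # illustrative permutation of bit positions

def permute(ba):
    c = ba.copy()
    for i in range(len(mapping)):
        ba[i] = c[mapping[i]]
    return ba

keys = [bitarray(k) for k in ('0001', '0110', '1010', '1100')]
# Sorted list of (permuted key, original key) tuples, as search_revert expects.
bas = sorted((permute(k.copy()), k) for k in keys)

query = bitarray('0111')
idx = bisect_left(bas, (permute(query.copy()), query))
beam = [orig.to01() for _, orig in bas[max(0, idx - 1):idx + 1]]   # beam_size = 2
print(beam)   # the query's neighbours under this permutation, as unpermuted keys
```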
# We will store the normalized vector (used during retrieval)
nv = unitvec(v)
# Store vector in each bucket of all hashes
for lshash in self.lshashes:
for bucket_key in lshash.hash_vector(v):
# print('Storing one vector in bucket %s' % bucket_key)
self.storage.store_vector(lshash.hash_name, bucket_key,
nv, data)
|
def store_vector(self, v, data=None)
|
Hashes vector v and stores it in all matching buckets in the storage.
The data argument must be JSON-serializable. It is stored with the
vector and will be returned in search results.
| 9.47507 | 8.755855 | 1.082141 |
# We will store the normalized vector (used during retrieval)
nvs = [unitvec(i) for i in vs]
# Store vector in each bucket of all hashes
for lshash in self.lshashes:
bucket_keys = [lshash.hash_vector(i)[0] for i in vs]
self.storage.store_many_vectors(lshash.hash_name, bucket_keys,
nvs, data)
|
def store_many_vectors(self, vs, data=None)
|
Store a batch of vectors.
Hashes vector vs and stores them in all matching buckets in the storage.
The data argument must be either None or a list of JSON-serializable
object. It is stored with the vector and will be returned in search
results.
| 6.29759 | 5.939366 | 1.060313 |
# Delete data id in each hashes
for lshash in self.lshashes:
if v is None:
keys = self.storage.get_all_bucket_keys(lshash.hash_name)
else:
keys = lshash.hash_vector(v)
self.storage.delete_vector(lshash.hash_name, keys, data)
|
def delete_vector(self, data, v=None)
|
Deletes vector v and its id (data) from all matching buckets in the storage.
The data argument must be JSON-serializable.
| 5.154251 | 4.637418 | 1.111449 |
# Collect candidates from all buckets from all hashes
candidates = self._get_candidates(v)
# print 'Candidate count is %d' % len(candidates)
# Apply fetch vector filters if specified and return filtered list
if fetch_vector_filters:
candidates = self._apply_filter(fetch_vector_filters,
candidates)
# Apply distance implementation if specified
if not distance:
distance = self.distance
candidates = self._append_distances(v, distance, candidates)
# Apply vector filters if specified and return filtered list
if not vector_filters:
vector_filters = self.vector_filters
candidates = self._apply_filter(vector_filters, candidates)
# If there is no vector filter, just return list of candidates
return candidates
|
def neighbours(self, v,
distance=None,
fetch_vector_filters=None,
vector_filters=None)
|
Hashes vector v, collects all candidate vectors from the matching
buckets in storage, applies the (optional) distance function and
finally the (optional) filter function to construct the returned list
of either (vector, data, distance) tuples or (vector, data) tuples.
| 3.560892 | 3.38752 | 1.05118 |
candidates = []
for lshash in self.lshashes:
for bucket_key in lshash.hash_vector(v, querying=True):
bucket_content = self.storage.get_bucket(
lshash.hash_name,
bucket_key,
)
#print 'Bucket %s size %d' % (bucket_key, len(bucket_content))
candidates.extend(bucket_content)
return candidates
|
def _get_candidates(self, v)
|
Collect candidates from all buckets from all hashes
| 4.548556 | 4.065653 | 1.118776 |
if filters:
filter_input = candidates
for fetch_vector_filter in filters:
filter_input = fetch_vector_filter.filter_vectors(filter_input)
return filter_input
else:
return candidates
|
def _apply_filter(self, filters, candidates)
|
Apply vector filters if specified and return filtered list
| 5.15001 | 3.874289 | 1.329279 |
if distance:
# Normalize vector (stored vectors are normalized)
nv = unitvec(v)
candidates = [(x[0], x[1], distance.distance(x[0], nv)) for x
in candidates]
return candidates
|
def _append_distances(self, v, distance, candidates)
|
Apply distance implementation if specified
| 10.605128 | 9.059135 | 1.170656 |
if scipy.sparse.issparse(x):
x = x.toarray().ravel()
y = y.toarray().ravel()
return 1.0 - numpy.dot(x, y)
|
def distance(self, x, y)
|
Computes distance measure between vectors x and y. Returns float.
| 3.428064 | 3.135672 | 1.093247 |
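Since stored vectors are normalised before indexing (see store_vector and _append_distances above), 1 - dot(x, y) is the cosine distance. A one-line check on unit vectors:

```python
import numpy

x = numpy.array([1.0, 0.0])
y = numpy.array([0.6, 0.8])      # both unit length
print(1.0 - numpy.dot(x, y))     # cosine distance = 0.4
```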
# We will fill this array with measures for all the engines.
result = []
# For each engine, first index vectors and then retrieve neighbours
for engine_idx, engine in enumerate(engine_list):
print('Engine %d / %d' % (engine_idx, len(engine_list)))
# Clean storage
engine.clean_all_buckets()
# Use this to compute average recall
avg_recall = 0.0
# Use this to compute average precision
avg_precision = 0.0
# Use this to compute average search time
avg_search_time = 0.0
# Index all vectors and store them
for index, v in enumerate(self.vectors):
engine.store_vector(v, 'data_%d' % index)
# Look for N nearest neighbours for query vectors
for index in self.query_indices:
# Get indices of the real nearest as set
real_nearest = set(self.closest[index])
# We have to time the search
search_time_start = time.time()
# Get nearest N according to engine
nearest = engine.neighbours(self.vectors[index])
# Get search time
search_time = time.time() - search_time_start
# For comparison we need their indices (as set)
nearest = set([self.__index_of_vector(x[0]) for x in nearest])
# Remove query index from search result to make sure that
# recall and precision make sense in terms of "neighbours".
# If ONLY the query vector is retrieved, we want recall to be
# zero!
nearest.discard(index)
# If the result list is empty, recall and precision are 0.0
if len(nearest) == 0:
recall = 0.0
precision = 0.0
else:
# Get intersection count
inter_count = float(len(real_nearest & nearest))
# Normalize recall for this vector
recall = inter_count/float(len(real_nearest))
# Normalize precision for this vector
precision = inter_count/float(len(nearest))
# Add to accumulator
avg_recall += recall
# Add to accumulator
avg_precision += precision
# Add to accumulator
avg_search_time += search_time
# Normalize recall over query set
avg_recall /= float(len(self.query_indices))
# Normalize precision over query set
avg_precision /= float(len(self.query_indices))
# Normalize search time over query set
avg_search_time = avg_search_time / float(len(self.query_indices))
# Normalize search time with respect to exact search
avg_search_time /= self.exact_search_time_per_vector
print(' recall=%f, precision=%f, time=%f' % (avg_recall,
avg_precision,
avg_search_time))
result.append((avg_recall, avg_precision, avg_search_time))
# Return (recall, precision, search_time) tuple
return result
|
def perform_experiment(self, engine_list)
|
Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
Returns self.result, a list of (recall, precision, search_time) tuples,
one per engine. All values are averaged over all query vectors.
search_time is the average retrieval/search time compared to the
average exact search time.
| 3.637051 | 3.40652 | 1.067674 |
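A toy version of the per-query recall/precision computation inside the loop above, with made-up index sets:

```python
# Indices of the true nearest neighbours vs. indices returned by the engine
# (with the query index already removed).
real_nearest = {1, 2, 3, 4, 5}
nearest = {2, 3, 9}
inter_count = float(len(real_nearest & nearest))
recall = inter_count / float(len(real_nearest))   # 2 / 5 = 0.4
precision = inter_count / float(len(nearest))     # 2 / 3 ~ 0.667
print(recall, precision)
```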
return numpy.array_str(numpy.round(unitvec(vector), decimals=3))
|
def __vector_to_string(self, vector)
|
Returns string representation of vector.
| 11.811519 | 9.403504 | 1.256076 |
unique_dict = {}
for v in input_list:
unique_dict[v[1]] = v
return list(unique_dict.values())
|
def filter_vectors(self, input_list)
|
Returns subset of specified input list.
| 3.785749 | 3.659706 | 1.034441 |
self.dim = dim
# Reset all child hashes
for child_hash in self.child_hashes:
child_hash['hash'].reset(dim)
child_hash['bucket_keys'] = {}
|
def reset(self, dim)
|
Resets / Initializes the hash for the specified dimension.
| 6.033827 | 5.264774 | 1.146075 |
bucket_keys = []
if querying:
# If we are querying, use the permuted indexes to get bucket keys
for child_hash in self.child_hashes:
lshash = child_hash['hash']
# Make sure the permuted index for this hash is existing
if lshash.hash_name not in self.permutation.permutedIndexs:
raise AttributeError('No permuted index exists for hash with name %s' % lshash.hash_name)
# Get regular bucket keys from hash
for bucket_key in lshash.hash_vector(v, querying):
#print 'Regular bucket key %s' % bucket_key
# Get neighbour keys from permuted index
neighbour_keys = self.permutation.get_neighbour_keys(lshash.hash_name,bucket_key)
# Add them to result, but prefix with hash name
for n in neighbour_keys:
bucket_keys.append(lshash.hash_name+'_'+n)
else:
# If we are indexing (storing) just use child hashes without permuted index
for child_hash in self.child_hashes:
lshash = child_hash['hash']
# Get regular bucket keys from hash
for bucket_key in lshash.hash_vector(v, querying):
# Register bucket key in child hash dict
child_hash['bucket_keys'][bucket_key] = bucket_key
# Append bucket key to result prefixed with child hash name
bucket_keys.append(lshash.hash_name+'_'+bucket_key)
# Return all the bucket keys
return bucket_keys
|
def hash_vector(self, v, querying=False)
|
Hashes the vector and returns the bucket key as string.
| 3.841644 | 3.75306 | 1.023603 |
for child_hash in self.child_hashes:
# Get config values for child hash
config = child_hash['config']
num_permutation = config['num_permutation']
beam_size = config['beam_size']
num_neighbour = config['num_neighbour']
# Get used buckets keys for child hash
bucket_keys = child_hash['bucket_keys'].keys()
# Get actual child hash
lshash = child_hash['hash']
# Compute permuted index for this hash
self.permutation.build_permuted_index(lshash,bucket_keys,num_permutation,beam_size,num_neighbour)
|
def build_permuted_index(self)
|
Build a PermutedIndex for all binary hashes.
The PermutedIndex is used to find neighbouring bucket keys
in terms of Hamming distance. permute_configs is a nested dict
in the following format:
permuted_config = {"<hash_name>":
{ "num_permutation":50,
"beam_size":10,
"num_neighbour":100 }
}
| 4.514143 | 3.165601 | 1.425999 |
if scipy.sparse.issparse(x):
return numpy.sum(numpy.absolute((x-y).toarray().ravel()))
else:
return numpy.sum(numpy.absolute(x-y))
|
def distance(self, x, y)
|
Computes the Manhattan distance between vectors x and y. Returns float.
| 3.038918 | 2.722173 | 1.116357 |
if scipy.sparse.issparse(x):
return numpy.linalg.norm((x-y).toarray().ravel())
else:
return numpy.linalg.norm(x-y)
|
def distance(self, x, y)
|
Computes distance measure between vectors x and y. Returns float.
| 3.179564 | 2.906702 | 1.093874 |
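A quick dense sanity check of the two distance implementations above (Manhattan first, then Euclidean):

```python
import numpy

x = numpy.array([1.0, 2.0, 3.0])
y = numpy.array([4.0, 0.0, 3.0])
print(numpy.sum(numpy.absolute(x - y)))   # Manhattan: 3 + 2 + 0 = 5.0
print(numpy.linalg.norm(x - y))           # Euclidean: sqrt(9 + 4) ~ 3.61
```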
self._add_vector(hash_name, bucket_key, v, data, self.redis_object)
|
def store_vector(self, hash_name, bucket_key, v, data)
|
Stores vector and JSON-serializable data in bucket with specified key.
| 5.77138 | 5.912184 | 0.976184 |
with self.redis_object.pipeline() as pipeline:
if data is None:
data = [None] * len(vs)
for bucket_key, d, v in zip(bucket_keys, data, vs):
self._add_vector(hash_name, bucket_key, v, d, pipeline)
pipeline.execute()
|
def store_many_vectors(self, hash_name, bucket_keys, vs, data)
|
Store a batch of vectors in Redis.
Stores vector and JSON-serializable data in bucket with specified key.
| 2.77481 | 2.727688 | 1.017276 |
'''
Store vector and JSON-serializable data in bucket with specified key.
'''
redis_key = self._format_redis_key(hash_name, bucket_key)
val_dict = {}
# Depending on type (sparse or not) fill value dict
if scipy.sparse.issparse(v):
# Make sure that we are using COO format (easy to handle)
if not scipy.sparse.isspmatrix_coo(v):
v = scipy.sparse.coo_matrix(v)
# Construct list of [index, value] items,
# one for each non-zero element of the sparse vector
encoded_values = []
for k in range(v.data.size):
row_index = v.row[k]
value = v.data[k]
encoded_values.append([int(row_index), value])
val_dict['sparse'] = 1
val_dict['nonzeros'] = encoded_values
val_dict['dim'] = v.shape[0]
else:
# Make sure it is a 1d vector
v = numpy.reshape(v, v.shape[0])
val_dict['vector'] = v.tostring()
val_dict['dtype'] = v.dtype.name
# Add data if set
if data is not None:
val_dict['data'] = data
# Push JSON representation of dict to end of bucket list
self.redis_object.rpush(redis_key, pickle.dumps(val_dict, protocol=2))
|
def _add_vector(self, hash_name, bucket_key, v, data, redis_object)
|
Store vector and JSON-serializable data in bucket with specified key.
| 3.551115 | 3.187419 | 1.114103 |
with self.redis_object.pipeline() as pipeline:
for key in bucket_keys:
redis_key = self._format_redis_key(hash_name, key)
rows = [(row, pickle.loads(row).get('data'))
for row in self._get_bucket_rows(hash_name, key)]
for _, id_data in rows:
if id_data == data:
break
else:
# Deleted data is not present in this bucket
continue
pipeline.delete(redis_key)
pipeline.rpush(redis_key, *(row for row, id_data in rows
if id_data != data))
pipeline.execute()
|
def delete_vector(self, hash_name, bucket_keys, data)
|
Deletes vector and JSON-serializable data in buckets with specified keys.
| 3.643746 | 3.674469 | 0.991639 |
results = []
for row in self._get_bucket_rows(hash_name, bucket_key):
val_dict = pickle.loads(row)
# Depending on type (sparse or not) reconstruct vector
if 'sparse' in val_dict:
# Fill these for COO creation
row = []
col = []
data = []
# For each non-zero element, append values
for e in val_dict['nonzeros']:
row.append(e[0]) # Row index
data.append(e[1]) # Value
col.append(0) # Column index (always 0)
# Create numpy arrays for COO creation
coo_row = numpy.array(row, dtype=numpy.int32)
coo_col = numpy.array(col, dtype=numpy.int32)
coo_data = numpy.array(data)
# Create COO sparse vector
vector = scipy.sparse.coo_matrix((coo_data, (coo_row, coo_col)), shape=(val_dict['dim'], 1))
else:
vector = numpy.fromstring(val_dict['vector'],
dtype=val_dict['dtype'])
# Add data to result tuple, if present
results.append((vector, val_dict.get('data')))
return results
|
def get_bucket(self, hash_name, bucket_key)
|
Returns bucket content as list of tuples (vector, data).
| 3.167316 | 2.97832 | 1.063457 |
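This is the inverse of the sparse encoding used when storing: the saved [index, value] pairs plus the dimension are turned back into a COO column vector. A self-contained sketch with made-up values:

```python
import numpy
import scipy.sparse

nonzeros, dim = [[1, 2.5], [3, 1.0]], 4    # as produced by the store step above
rows = numpy.array([e[0] for e in nonzeros], dtype=numpy.int32)
cols = numpy.zeros(len(nonzeros), dtype=numpy.int32)
data = numpy.array([e[1] for e in nonzeros])
vector = scipy.sparse.coo_matrix((data, (rows, cols)), shape=(dim, 1))
print(vector.toarray().ravel())            # [0.  2.5  0.  1.]
```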
bucket_keys = self._iter_bucket_keys(hash_name)
self.redis_object.delete(*bucket_keys)
|
def clean_buckets(self, hash_name)
|
Removes all buckets and their content for specified hash.
| 5.657625 | 4.925003 | 1.148756 |
bucket_keys = self.redis_object.keys(pattern='nearpy_*')
if len(bucket_keys) > 0:
self.redis_object.delete(*bucket_keys)
|
def clean_all_buckets(self)
|
Removes all buckets from all hashes and their content.
| 5.311347 | 4.835534 | 1.098399 |
self.redis_object.set(lshash.hash_name+'_conf', pickle.dumps(lshash.get_config()))
|
def store_hash_configuration(self, lshash)
|
Stores hash configuration
| 6.086175 | 5.469382 | 1.112772 |
conf = self.redis_object.get(hash_name+'_conf')
return pickle.loads(conf) if conf is not None else None
|
def load_hash_configuration(self, hash_name)
|
Loads and returns hash configuration
| 6.038921 | 5.42087 | 1.114013 |
bucket_keys = []
if querying:
# If we are querying, use the bucket key map
for lshash in self.child_hashes:
# Get regular bucket keys from hash
for bucket_key in lshash.hash_vector(v, querying):
prefixed_key = lshash.hash_name+'_'+bucket_key
# Get entries from map (bucket keys with hamming distance of 1)
if prefixed_key in self.bucket_key_map:
bucket_keys.extend(self.bucket_key_map[prefixed_key].keys())
else:
# If we are indexing (storing) just use child hashes without permuted index
for lshash in self.child_hashes:
# Get regular bucket keys from hash
for bucket_key in lshash.hash_vector(v, querying):
# Get permuted keys
perm_keys = self.permuted_keys(bucket_key)
# Put exact hit key into list
perm_keys.append(bucket_key)
# Append key for storage (not the permutations)
bucket_keys.append(lshash.hash_name+'_'+bucket_key)
# For every permutation register all the variants
for perm_key in perm_keys:
prefixed_key = lshash.hash_name+'_'+perm_key
# Make sure dictionary exists
if not prefixed_key in self.bucket_key_map:
self.bucket_key_map[prefixed_key] = {}
for variant in perm_keys:
prefixed_variant = lshash.hash_name+'_'+variant
self.bucket_key_map[prefixed_key][prefixed_variant] = 1
# Return all the bucket keys
return bucket_keys
|
def hash_vector(self, v, querying=False)
|
Hashes the vector and returns the bucket key as string.
| 3.694949 | 3.624125 | 1.019542 |
self.hash_name = config['hash_name']
self.dim = config['dim']
self.bucket_key_map = config['bucket_key_map']
|
def apply_config(self, config)
|
Applies config
| 4.958089 | 5.133315 | 0.965865 |
# Hash must generate binary keys
if not isinstance(child_hash, (PCABinaryProjections, RandomBinaryProjections, RandomBinaryProjectionTree)):
raise ValueError('Child hashes must generate binary keys')
# Add both hash and config to array of child hashes. Also we are going to
# accumulate used bucket keys for every hash in order to build the permuted index
self.child_hashes.append(child_hash)
|
def add_child_hash(self, child_hash)
|
Adds specified child hash.
The hash must be one of the binary types.
| 12.130672 | 12.457427 | 0.97377 |
# convert query_key into bitarray
query_key = bitarray(bucket_key)
topk = set()
for i in range(len(self.permutes)):
p = self.permutes[i]
plist = self.permuted_lists[i]
candidates = p.search_revert(plist, query_key, self.beam_size)
topk = topk.union(set(candidates))
topk = list(topk)
# sort the topk neighbour keys according to the Hamming distance to the query key
topk = sorted(topk, key=lambda x: self.hamming_distance(x, query_key))
# return the top k items
topk_bin = [x.to01() for x in topk[:k]]
return topk_bin
|
def get_neighbour_keys(self, bucket_key, k)
|
The computing complexity is O( np*beam*log(np*beam) )
where,
np = number of permutations
beam = self.beam_size
Make sure np*beam is much less than the number of bucket keys,
otherwise we could use brute-force to get the neighbours
| 4.526962 | 4.003171 | 1.130844 |
try:
# Return filtered (vector, data, distance) tuple list. Will fail
# if input is a list of (vector, data) tuples.
return [x for x in input_list if x[2] < self.distance_threshold]
except IndexError:
# Otherwise just return input list
return input_list
|
def filter_vectors(self, input_list)
|
Returns subset of specified input list.
| 6.897334 | 6.469519 | 1.066128 |
# We will fill this array with measures for all the engines.
result = []
# For each engine, first index vectors and then retrieve neighbours
for engine_idx, engine in enumerate(engine_list):
print('Engine %d / %d' % (engine_idx, len(engine_list)))
# Clean storage
engine.clean_all_buckets()
# Use this to compute average distance_ratio
avg_distance_ratio = 0.0
# Use this to compute average result set size
avg_result_size = 0.0
# Use this to compute average search time
avg_search_time = 0.0
# Index all vectors and store them
for index in range(self.vectors.shape[1]):
engine.store_vector(self.vectors[:, index],
'data_%d' % index)
# Look for N nearest neighbours for query vectors
for index in self.query_indices:
# We have to time the search
search_time_start = time.time()
# Get nearest N according to engine
nearest = engine.neighbours(self.vectors[:, index])
# Get search time
search_time = time.time() - search_time_start
# Get average distance ratio (with respect to radius
# of real N closest neighbours)
distance_ratio = 0.0
for n in nearest:
# If the vector is outside the real neighbour radius
if n[2] > self.nearest_radius[index]:
# Compute distance to real neighbour radius
d = (n[2] - self.nearest_radius[index])
# And normalize it. 1.0 means: distance to
# real neighbour radius is identical to radius
d /= self.nearest_radius[index]
# If all neighbours are in the radius, the
# distance ratio is 0.0
distance_ratio += d
# Normalize distance ratio over all neighbours
distance_ratio /= len(nearest)
# Add to accumulator
avg_distance_ratio += distance_ratio
# Add to accumulator
avg_result_size += len(nearest)
# Add to accumulator
avg_search_time += search_time
# Normalize distance ratio over query set
avg_distance_ratio /= float(len(self.query_indices))
# Normalize avg result size
avg_result_size /= float(len(self.query_indices))
# Normalize search time over query set
avg_search_time = avg_search_time / float(len(self.query_indices))
# Normalize search time with respect to exact search
avg_search_time /= self.exact_search_time_per_vector
print(' distance_ratio=%f, result_size=%f, time=%f' % (avg_distance_ratio,
avg_result_size,
avg_search_time))
result.append((avg_distance_ratio, avg_result_size, avg_search_time))
return result
|
def perform_experiment(self, engine_list)
|
Performs nearest neighbour experiments with custom vector data
for all engines in the specified list.
Returns self.result, a list of (distance_ratio, result_size, search_time)
tuples, one per engine. All values are averaged over all request vectors.
search_time is the average retrieval/search time compared to the
average exact search time.
| 3.395542 | 3.206398 | 1.05899 |
if self.dim != dim:
self.dim = dim
self.normals = self.rand.randn(self.projection_count, dim)
|
def reset(self, dim)
|
Resets / Initializes the hash for the specified dimension.
| 6.743516 | 6.618704 | 1.018857 |
if scipy.sparse.issparse(v):
# If vector is sparse, make sure we have the CSR representation
# of the projection matrix
if self.normals_csr is None:
self.normals_csr = scipy.sparse.csr_matrix(self.normals)
# Make sure that we are using CSR format for multiplication
if not scipy.sparse.isspmatrix_csr(v):
v = scipy.sparse.csr_matrix(v)
# Project vector onto all hyperplane normals
projection = self.normals_csr.dot(v)
else:
# Project vector onto all hyperplane normals
projection = numpy.dot(self.normals, v)
# Return binary key
return [''.join(['1' if x > 0.0 else '0' for x in projection])]
|
def hash_vector(self, v, querying=False)
|
Hashes the vector and returns the binary bucket key as string.
| 3.632188 | 3.365052 | 1.079385 |
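A minimal dense sketch of the binary-key construction above: each random hyperplane normal contributes one bit, depending on the sign of the projection. The projection_count and dim values are illustrative:

```python
import numpy

rng = numpy.random.RandomState(0)
projection_count, dim = 4, 8
normals = rng.randn(projection_count, dim)   # one hyperplane normal per bit
v = rng.randn(dim)

projection = numpy.dot(normals, v)
bucket_key = ''.join(['1' if x > 0.0 else '0' for x in projection])
print(bucket_key)   # a 4-character key such as '0110'
```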
try:
# Return filtered (vector, data, distance) tuple list. Will fail
# if input is a list of (vector, data) tuples.
sorted_list = sorted(input_list, key=lambda x: x[2])
return sorted_list[:self.N]
except IndexError:
# Otherwise just return input list
return input_list
|
def filter_vectors(self, input_list)
|
Returns subset of specified input list.
| 6.309251 | 5.994796 | 1.052455 |
if scipy.sparse.issparse(v):
# If vector is sparse, make sure we have the CSR representation
# of the projection matrix
if self.components_csr is None:
self.components_csr = scipy.sparse.csr_matrix(self.components)
# Make sure that we are using CSR format for multiplication
if not scipy.sparse.isspmatrix_csr(v):
v = scipy.sparse.csr_matrix(v)
# Project vector onto all hyperplane normals
projection = (self.components_csr.dot(v) / self.bin_width).floor().toarray()
else:
# Project vector onto components
projection = numpy.dot(self.components, v)
projection = numpy.floor(projection / self.bin_width)
# Return key
return ['_'.join([str(int(x)) for x in projection])]
|
def hash_vector(self, v, querying=False)
|
Hashes the vector and returns the discretized bucket key as string.
| 4.194028 | 3.929011 | 1.067452 |
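The discretized variant divides each projection by bin_width and floors the result, so nearby vectors tend to land in the same bin along every component. A dense sketch with illustrative parameters:

```python
import numpy

rng = numpy.random.RandomState(0)
projection_count, dim, bin_width = 3, 8, 0.5
components = rng.randn(projection_count, dim)
v = rng.randn(dim)

projection = numpy.floor(numpy.dot(components, v) / bin_width)
bucket_key = '_'.join([str(int(x)) for x in projection])
print(bucket_key)   # e.g. '-3_1_0'
```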
# Fill this dict with config data
return {
'hash_name': self.hash_name,
'dim': self.dim,
'bin_width': self.bin_width,
'projection_count': self.projection_count,
'components': self.components
}
|
def get_config(self)
|
Returns pickle-serializable configuration struct for storage.
| 5.551315 | 4.925254 | 1.127113 |
self.hash_name = config['hash_name']
self.dim = config['dim']
self.bin_width = config['bin_width']
self.projection_count = config['projection_count']
self.components = config['components']
|
def apply_config(self, config)
|
Applies config
| 3.933044 | 3.965411 | 0.991838 |
# Fill this dict with config data
return {
'hash_name': self.hash_name,
'dim': self.dim,
'bin_width': self.bin_width,
'projection_count': self.projection_count,
'normals': self.normals
}
|
def get_config(self)
|
Returns pickle-serializable configuration struct for storage.
| 5.763402 | 5.125233 | 1.124515 |
self.hash_name = config['hash_name']
self.dim = config['dim']
self.bin_width = config['bin_width']
self.projection_count = config['projection_count']
self.normals = config['normals']
|
def apply_config(self, config)
|
Applies config
| 4.118427 | 4.154921 | 0.991217 |
# Init a PermutedIndex
pi = PermutedIndex(
lshash,
buckets,
num_permutation,
beam_size,
num_neighbour)
# get hash_name
hash_name = lshash.hash_name
self.permutedIndexs[hash_name] = pi
|
def build_permuted_index(
self,
lshash,
buckets,
num_permutation,
beam_size,
num_neighbour)
|
Build a permutedIndex and store it into the dict self.permutedIndexs.
lshash: the binary lshash object (nearpy.hashes.lshash).
buckets: the buckets object corresponding to lshash. It's a dict object
which can be obtained from nearpy.storage.buckets[lshash.hash_name].
num_permutation: the number of sorted randomly-permuted bucket key lists (SRPBKL).
beam_size: beam size; for details refer to __init__() in nearpy.hashes.permutation.PermutedIndex.
num_neighbour: the number of neighbour bucket keys needed to return in self.get_neighbour_keys().
| 3.268638 | 2.487446 | 1.314054 |
# get the permutedIndex given hash_name
permutedIndex = self.permutedIndexs[hash_name]
# return neighbour bucket keys of query bucket key
return permutedIndex.get_neighbour_keys(
bucket_key,
permutedIndex.num_neighbour)
|
def get_neighbour_keys(self, hash_name, bucket_key)
|
Return the neighbour buckets given hash_name and query bucket key.
| 7.742778 | 5.945529 | 1.302286 |
assert isinstance(editor, Editor)
assert isinstance(rc_file, six.string_types)
# Expand tildes.
rc_file = os.path.expanduser(rc_file)
# Check whether this file exists.
if not os.path.exists(rc_file):
print('Impossible to read %r' % rc_file)
_press_enter_to_continue()
return
# Run the rc file in an empty namespace.
try:
namespace = {}
with open(rc_file, 'r') as f:
code = compile(f.read(), rc_file, 'exec')
six.exec_(code, namespace, namespace)
# Now we should have a 'configure' method in this namespace. We call this
# method with editor as an argument.
if 'configure' in namespace:
namespace['configure'](editor)
except Exception as e:
# Handle possible exceptions in rc file.
traceback.print_exc()
_press_enter_to_continue()
|
def run_rc_file(editor, rc_file)
|
Run rc file.
| 3.013687 | 2.944743 | 1.023413 |
if character.encode(encoding, 'replace') == b'?':
return backup
else:
return character
|
def _try_char(character, backup, encoding=sys.stdout.encoding)
|
Return `character` if it can be encoded using sys.stdout, else return the
backup character.
| 4.857211 | 4.501878 | 1.07893 |
@Condition
def overlay_is_visible():
app = get_app()
text = editor.command_buffer.text.lstrip()
return app.layout.has_focus(editor.command_buffer) and (
any(text.startswith(p) for p in ['b ', 'b! ', 'buffer', 'buffer!']))
return overlay_is_visible
|
def _bufferlist_overlay_visible(editor)
|
True when the buffer list overlay should be displayed.
(This is when someone starts typing ':b' or ':buffer' in the command line.)
| 7.24599 | 6.01845 | 1.203963 |
# Start with an empty frames list every time, to avoid memory leaks.
existing_frames = self._frames
self._frames = {}
def create_layout_from_node(node):
if isinstance(node, window_arrangement.Window):
# Create frame for Window, or reuse it, if we had one already.
key = (node, node.editor_buffer)
frame = existing_frames.get(key)
if frame is None:
frame, pt_window = self._create_window_frame(node.editor_buffer)
# Link layout Window to arrangement.
node.pt_window = pt_window
self._frames[key] = frame
return frame
elif isinstance(node, window_arrangement.VSplit):
return VSplit(
[create_layout_from_node(n) for n in node],
padding=1,
padding_char=self.get_vertical_border_char(),
padding_style='class:frameborder')
if isinstance(node, window_arrangement.HSplit):
return HSplit([create_layout_from_node(n) for n in node])
layout = create_layout_from_node(self.window_arrangement.active_tab.root)
self._fc.content = layout
|
def update(self)
|
Update layout to match the layout as described in the
WindowArrangement.
| 5.147858 | 4.825688 | 1.066762 |
@Condition
def wrap_lines():
return self.editor.wrap_lines
window = Window(
self._create_buffer_control(editor_buffer),
allow_scroll_beyond_bottom=True,
scroll_offsets=ScrollOffsets(
left=0, right=0,
top=(lambda: self.editor.scroll_offset),
bottom=(lambda: self.editor.scroll_offset)),
wrap_lines=wrap_lines,
left_margins=[ConditionalMargin(
margin=NumberedMargin(
display_tildes=True,
relative=Condition(lambda: self.editor.relative_number)),
filter=Condition(lambda: self.editor.show_line_numbers))],
cursorline=Condition(lambda: self.editor.cursorline),
cursorcolumn=Condition(lambda: self.editor.cursorcolumn),
colorcolumns=(
lambda: [ColorColumn(pos) for pos in self.editor.colorcolumn]),
ignore_content_width=True,
ignore_content_height=True,
get_line_prefix=partial(self._get_line_prefix, editor_buffer.buffer))
return HSplit([
window,
VSplit([
WindowStatusBar(self.editor, editor_buffer),
WindowStatusBarRuler(self.editor, window, editor_buffer.buffer),
], width=Dimension()), # Ignore actual status bar width.
]), window
|
def _create_window_frame(self, editor_buffer)
|
Create a Window for the buffer, with a status bar underneath.
| 4.71348 | 4.754698 | 0.991331 |
@Condition
def preview_search():
return self.editor.incsearch
input_processors = [
# Processor for visualising spaces. (should come before the
# selection processor, otherwise, we won't see these spaces
# selected.)
ConditionalProcessor(
ShowTrailingWhiteSpaceProcessor(),
Condition(lambda: self.editor.display_unprintable_characters)),
# Replace tabs by spaces.
TabsProcessor(
tabstop=(lambda: self.editor.tabstop),
char1=(lambda: '|' if self.editor.display_unprintable_characters else ' '),
char2=(lambda: _try_char('\u2508', '.', get_app().output.encoding())
if self.editor.display_unprintable_characters else ' '),
),
# Reporting of errors, for Pyflakes.
ReportingProcessor(editor_buffer),
HighlightSelectionProcessor(),
ConditionalProcessor(
HighlightSearchProcessor(),
Condition(lambda: self.editor.highlight_search)),
ConditionalProcessor(
HighlightIncrementalSearchProcessor(),
Condition(lambda: self.editor.highlight_search) & preview_search),
HighlightMatchingBracketProcessor(),
DisplayMultipleCursors(),
]
return BufferControl(
lexer=DocumentLexer(editor_buffer),
include_default_input_processors=False,
input_processors=input_processors,
buffer=editor_buffer.buffer,
preview_search=preview_search,
search_buffer_control=self.search_control,
focus_on_click=True)
|
def _create_buffer_control(self, editor_buffer)
|
Create a new BufferControl for a given location.
| 6.037454 | 5.971081 | 1.011116 |
# Match with grammar and extract variables.
m = COMMAND_GRAMMAR.match(input_string)
if m is None:
return
variables = m.variables()
command = variables.get('command')
go_to_line = variables.get('go_to_line')
shell_command = variables.get('shell_command')
# Call command handler.
if go_to_line is not None:
# Handle go-to-line.
_go_to_line(editor, go_to_line)
elif shell_command is not None:
# Handle shell commands.
editor.application.run_system_command(shell_command)
elif has_command_handler(command):
# Handle other 'normal' commands.
call_command_handler(command, editor, variables)
else:
# For unknown commands, show error message.
editor.show_message('Not an editor command: %s' % input_string)
return
# After execution of commands, make sure to update the layout and focus
# stack.
editor.sync_with_prompt_toolkit()
|
def handle_command(editor, input_string)
|
Handle commands entered on the Vi command line.
| 3.991045 | 3.87886 | 1.028922 |
b = editor.application.current_buffer
b.cursor_position = b.document.translate_row_col_to_index(max(0, int(line) - 1), 0)
|
def _go_to_line(editor, line)
|
Move cursor to this line in the current buffer.
| 3.605351 | 3.530185 | 1.021292 |
assert isinstance(location, six.string_types)
if location.endswith('.py'):
return report_pyflakes(document)
else:
return []
|
def report(location, document)
|
Run the reporter on the document and return a list of `ReporterError`
instances. (Depending on the location, it may or may not run anything.)
| 5.557182 | 6.052848 | 0.91811 |
# Run pyflakes on input.
reporter = _FlakesReporter()
pyflakes.api.check(document.text, '', reporter=reporter)
def format_flake_message(message):
return [
('class:flakemessage.prefix', 'pyflakes:'),
('', ' '),
('class:flakemessage', message.message % message.message_args)
]
def message_to_reporter_error(message):
start_index = document.translate_row_col_to_index(message.lineno - 1, message.col)
end_index = start_index
while end_index < len(document.text) and document.text[end_index] in WORD_CHARACTERS:
end_index += 1
return ReporterError(lineno=message.lineno - 1,
start_column=message.col,
end_column=message.col + end_index - start_index,
formatted_text=format_flake_message(message))
# Construct list of ReporterError instances.
return [message_to_reporter_error(m) for m in reporter.messages]
|
def report_pyflakes(document)
|
Run pyflakes on document and return list of ReporterError instances.
| 3.532773 | 3.282362 | 1.07629 |
assert in_tab_pages + hsplit + vsplit <= 1 # Max one of these options.
# When no files were given, open at least one empty buffer.
locations2 = locations or [None]
# First file
self.window_arrangement.open_buffer(locations2[0])
for f in locations2[1:]:
if in_tab_pages:
self.window_arrangement.create_tab(f)
elif hsplit:
self.window_arrangement.hsplit(location=f)
elif vsplit:
self.window_arrangement.vsplit(location=f)
else:
self.window_arrangement.open_buffer(f)
self.window_arrangement.active_tab_index = 0
if locations and len(locations) > 1:
self.show_message('%i files loaded.' % len(locations))
|
def load_initial_files(self, locations, in_tab_pages=False, hsplit=False, vsplit=False)
|
Load a list of files.
| 3.458459 | 3.377812 | 1.023875 |
# Create Application.
application = Application(
input=self.input,
output=self.output,
editing_mode=EditingMode.VI,
layout=self.editor_layout.layout,
key_bindings=self.key_bindings,
# get_title=lambda: get_terminal_title(self),
style=DynamicStyle(lambda: self.current_style),
paste_mode=Condition(lambda: self.paste_mode),
# ignore_case=Condition(lambda: self.ignore_case), # TODO
include_default_pygments_style=False,
mouse_support=Condition(lambda: self.enable_mouse_support),
full_screen=True,
enable_page_navigation_bindings=True)
# Handle command line previews.
# (e.g. when typing ':colorscheme blue', it should already show the
# preview before pressing enter.)
def preview(_):
if self.application.layout.has_focus(self.command_buffer):
self.previewer.preview(self.command_buffer.text)
self.command_buffer.on_text_changed += preview
return application
|
def _create_application(self)
|
Create the prompt_toolkit Application instance.
| 4.983061 | 4.609399 | 1.081065 |
current_buffer = self.application.current_buffer
# Find/return the EditorBuffer with this name.
for b in self.window_arrangement.editor_buffers:
if b.buffer == current_buffer:
return b
|
def current_editor_buffer(self)
|
Return the `EditorBuffer` that is currently active.
| 6.660748 | 5.755131 | 1.157358 |
try:
self.current_style = get_editor_style_by_name(name)
except pygments.util.ClassNotFound:
pass
|
def use_colorscheme(self, name='default')
|
Apply new colorscheme. (By name.)
| 5.462061 | 5.788658 | 0.94358 |
# After executing a command, make sure that the layout of
# prompt-toolkit matches our WindowArrangement.
self.editor_layout.update()
# Make sure that the focus stack of prompt-toolkit has the current
# page.
window = self.window_arrangement.active_pt_window
if window:
self.application.layout.focus(window)
|
def sync_with_prompt_toolkit(self)
|
Update the prompt-toolkit Layout and FocusStack.
| 10.708269 | 8.058528 | 1.328812 |
# Make sure everything is in sync, before starting.
self.sync_with_prompt_toolkit()
def pre_run():
# Start in navigation mode.
self.application.vi_state.input_mode = InputMode.NAVIGATION
# Run eventloop of prompt_toolkit.
self.application.run(pre_run=pre_run)
|
def run(self)
|
Run the event loop for the interface.
This starts the interaction.
| 7.867511 | 7.109513 | 1.106617 |
self.application.layout.focus(self.command_buffer)
self.application.vi_state.input_mode = InputMode.INSERT
self.previewer.save()
|
def enter_command_mode(self)
|
Go into command mode.
| 9.96522 | 9.641544 | 1.033571 |
self.previewer.restore()
self.application.layout.focus_last()
self.application.vi_state.input_mode = InputMode.NAVIGATION
self.command_buffer.reset(append_to_history=append_to_history)
|
def leave_command_mode(self, append_to_history=False)
|
Leave command mode. Focus document window again.
| 6.189885 | 5.841394 | 1.059659 |
if name == 'vim':
vim_style = Style.from_dict(default_vim_style)
else:
vim_style = style_from_pygments_cls(get_style_by_name(name))
return merge_styles([
vim_style,
Style.from_dict(style_extensions),
])
|
def get_editor_style_by_name(name)
|
Get Style class.
This raises `pygments.util.ClassNotFound` when there is no style with this
name.
| 3.574379 | 4.014341 | 0.890403 |
location = self.editor_buffer.location
if location:
if self.editor_buffer.in_file_explorer_mode:
return PygmentsLexer(DirectoryListingLexer, sync_from_start=False).lex_document(document)
return PygmentsLexer.from_filename(location, sync_from_start=False).lex_document(document)
return SimpleLexer().lex_document(document)
|
def lex_document(self, document)
|
Call the lexer and return a get_tokens_for_line function.
| 6.660595 | 5.857017 | 1.137199 |
assert isinstance(data, six.binary_type)
for e in ENCODINGS:
try:
return data.decode(e), e
except UnicodeDecodeError:
pass
return data.decode('utf-8', 'ignore'), 'utf-8'
|
def _auto_decode(data)
|
Decode bytes. Return a (text, encoding) tuple.
| 3.10901 | 2.772999 | 1.121173 |
location = os.path.expanduser(location)
# Try to open this file, using different encodings.
for e in ENCODINGS:
try:
with codecs.open(location, 'r', e) as f:
return f.read(), e
except UnicodeDecodeError:
pass # Try next codec.
# Unable to open.
raise Exception('Unable to open file: %r' % location)
|
def read(self, location)
|
Read file from disk.
| 3.654578 | 3.431651 | 1.064962 |
location = os.path.expanduser(location)
with codecs.open(location, 'w', encoding) as f:
f.write(text)
|
def write(self, location, text, encoding)
|
Write file to disk.
| 2.498857 | 2.472361 | 1.010717 |
location = os.path.expanduser(location)
with gzip.open(location, 'wb') as f:
f.write(text.encode(encoding))
|
def write(self, location, text, encoding)
|
Write file to disk.
| 2.821256 | 2.746268 | 1.027306 |
for io in self.editor.io_backends:
if io.can_open_location(location):
# Found an I/O backend.
exists = io.exists(location)
self.isdir = io.isdir(location)
if exists in (True, NotImplemented):
# File could exist. Read it.
self.is_new = False
try:
text, self.encoding = io.read(location)
# Replace \r\n by \n.
text = text.replace('\r\n', '\n')
# Drop trailing newline while editing.
# (prompt-toolkit doesn't enforce the trailing newline.)
if text.endswith('\n'):
text = text[:-1]
except Exception as e:
self.editor.show_message('Cannot read %r: %r' % (location, e))
return ''
else:
return text
else:
# File doesn't exist.
self.is_new = True
return ''
self.editor.show_message('Cannot read: %r' % location)
return ''
|
def _read(self, location)
|
Read file from I/O backend.
| 3.973068 | 3.717978 | 1.06861 |
text = self._read(self.location)
cursor_position = min(self.buffer.cursor_position, len(text))
self.buffer.document = Document(text, cursor_position)
self._file_content = text
|
def reload(self)
|
Reload file again from storage.
| 5.758167 | 5.186309 | 1.110263 |
# Take location and expand tilde.
if location is not None:
self.location = location
assert self.location
# Find I/O backend that handles this location.
for io in self.editor.io_backends:
if io.can_open_location(self.location):
break
else:
self.editor.show_message('Unknown location: %r' % location)
# Write it.
try:
io.write(self.location, self.buffer.text + '\n', self.encoding)
self.is_new = False
except Exception as e:
# E.g. "No such file or directory."
self.editor.show_message('%s' % e)
else:
# When the save succeeds: update: _file_content.
self._file_content = self.buffer.text
|
def write(self, location=None)
|
Write file to I/O backend.
| 4.900016 | 4.475902 | 1.094755 |
if self.location is None:
return '[New file]'
elif short:
return os.path.basename(self.location)
else:
return self.location
|
def get_display_name(self, short=False)
|
Return name as displayed.
| 4.55729 | 4.103029 | 1.110714 |
" Buffer text changed. "
if not self._reporter_is_running:
self._reporter_is_running = True
text = self.buffer.text
self.report_errors = []
# Don't run reporter when we don't have a location. (We need to
# know the filetype, actually.)
if self.location is None:
return
# Better not to access the document in an executor.
document = self.buffer.document
def in_executor():
# Call reporter
report_errors = report(self.location, document)
def ready():
self._reporter_is_running = False
# If the text has not been changed yet in the meantime, set
# reporter errors. (We were running in another thread.)
if text == self.buffer.text:
self.report_errors = report_errors
get_app().invalidate()
else:
# Restart reporter when the text was changed.
self.run_reporter()
call_from_executor(ready)
run_in_executor(in_executor)
|
def run_reporter(self)
|
Buffer text changed.
| 6.195288 | 5.546898 | 1.116892 |
e = self.editor
self._style = e.current_style
self._show_line_numbers = e.show_line_numbers
self._highlight_search = e.highlight_search
self._show_ruler = e.show_ruler
self._relative_number = e.relative_number
self._cursorcolumn = e.cursorcolumn
self._cursorline = e.cursorline
self._colorcolumn = e.colorcolumn
|
def save(self)
|
Back up current editor state.
| 3.712824 | 3.294992 | 1.126808 |
e = self.editor
# Parse command.
m = COMMAND_GRAMMAR.match(input_string)
if m is None:
return
variables = m.variables()
command = variables.get('command')
set_option = variables.get('set_option')
# Preview colorschemes.
if command == 'colorscheme':
colorscheme = variables.get('colorscheme')
if colorscheme:
e.use_colorscheme(colorscheme)
# Preview some set commands.
if command == 'set':
if set_option in ('hlsearch', 'hls'):
e.highlight_search = True
elif set_option in ('nohlsearch', 'nohls'):
e.highlight_search = False
elif set_option in ('nu', 'number'):
e.show_line_numbers = True
elif set_option in ('nonu', 'nonumber'):
e.show_line_numbers = False
elif set_option in ('ruler', 'ru'):
e.show_ruler = True
elif set_option in ('noruler', 'noru'):
e.show_ruler = False
elif set_option in ('relativenumber', 'rnu'):
e.relative_number = True
elif set_option in ('norelativenumber', 'nornu'):
e.relative_number = False
elif set_option in ('cursorline', 'cul'):
e.cursorline = True
elif set_option in ('cursorcolumn', 'cuc'):
e.cursorcolumn = True
elif set_option in ('nocursorline', 'nocul'):
e.cursorline = False
elif set_option in ('nocursorcolumn', 'nocuc'):
e.cursorcolumn = False
elif set_option in ('colorcolumn', 'cc'):
value = variables.get('set_value', '')
if value:
e.colorcolumn = [
int(v) for v in value.split(',') if v.isdigit()]
|
def _apply(self, input_string)
|
Apply command.
| 2.554737 | 2.4914 | 1.025422 |
COMMANDS_TAKING_LOCATIONS.add(name)
def decorator(func):
@_cmd(name)
def command_wrapper(editor, variables):
location = variables.get('location')
force = bool(variables['force'])
if force and not accepts_force:
editor.show_message('No ! allowed')
elif accepts_force:
func(editor, location, force=force)
else:
func(editor, location)
return func
return decorator
|
def location_cmd(name, accepts_force=False)
|
Decorator that registers a command that takes a location as (optional)
parameter.
| 5.397895 | 5.280519 | 1.022228 |
def decorator(func):
@_cmd(name)
def command_wrapper(editor, variables):
force = bool(variables['force'])
if force and not accepts_force:
editor.show_message('No ! allowed')
elif accepts_force:
func(editor, force=force)
else:
func(editor)
return func
return decorator
|
def cmd(name, accepts_force=False)
|
Decorator that registers a command that doesn't take any parameters.
| 5.486564 | 5.572062 | 0.984656 |
def decorator(func):
SET_COMMANDS[name] = func
if accepts_value:
SET_COMMANDS_TAKING_VALUE.add(name)
return func
return decorator
|
def set_cmd(name, accepts_value=False)
|
Decorator that registers a ':set' command.
| 3.611934 | 3.55584 | 1.015775 |
option = variables.get('set_option')
value = variables.get('set_value')
if option in SET_COMMANDS:
# Call the correct handler.
if option in SET_COMMANDS_TAKING_VALUE:
SET_COMMANDS[option](editor, value)
else:
SET_COMMANDS[option](editor)
else:
editor.show_message('Unknown option: %s' % option)
|
def set_command_execute(editor, variables)
|
Execute a ':set' command.
| 3.302206 | 3.116886 | 1.059457 |
eb = editor.window_arrangement.active_editor_buffer
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.go_to_next_buffer()
|
def _bn(editor, force=False)
|
Go to next buffer.
| 6.376222 | 5.087568 | 1.253295 |
eb = editor.window_arrangement.active_editor_buffer
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.go_to_previous_buffer()
|
def _bp(editor, force=False)
|
Go to previous buffer.
| 6.527079 | 4.945643 | 1.319763 |
def handler():
wa = editor.window_arrangement
for info in wa.list_open_buffers():
char = '%' if info.is_active else ''
eb = info.editor_buffer
print(' %3i %-2s %-20s line %i' % (
info.index, char, eb.location, (eb.buffer.document.cursor_position_row + 1)))
six.moves.input('\nPress ENTER to continue...')
run_in_terminal(handler)
|
def buffer_list(editor)
|
List all buffers.
| 8.67805 | 8.562952 | 1.013441 |
eb = editor.window_arrangement.active_editor_buffer
force = bool(variables['force'])
buffer_name = variables.get('buffer_name')
if buffer_name:
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.go_to_buffer(buffer_name)
|
def _buffer(editor, variables, force=False)
|
Go to one of the open buffers.
| 5.095321 | 4.856764 | 1.049118 |
eb = editor.window_arrangement.active_editor_buffer
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.close_buffer()
|
def buffer_wipe(editor, force=False)
|
Wipe buffer.
| 6.286629 | 5.889089 | 1.067505 |
if location is None:
# Edit/open without a location will reload the current file, if there are
# no changes.
eb = editor.window_arrangement.active_editor_buffer
if eb.location is None:
editor.show_message(_NO_FILE_NAME)
elif not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
eb.reload()
else:
editor.file_explorer = ''
editor.window_arrangement.open_buffer(location, show_in_current_window=True)
|
def buffer_edit(editor, location, force=False)
|
Edit new buffer.
| 6.675186 | 6.445652 | 1.035611 |
ebs = editor.window_arrangement.editor_buffers
# When there are buffers that have unsaved changes, show balloon.
if not force and any(eb.has_unsaved_changes for eb in ebs):
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
# When there is more than one buffer open.
elif not all_ and len(ebs) > 1:
editor.show_message('%i more files to edit' % (len(ebs) - 1))
else:
editor.application.exit()
|
def quit(editor, all_=False, force=False)
|
Quit.
| 6.191875 | 6.091478 | 1.016482 |
quit(editor, all_=True, force=force)
|
def quit_all(editor, force=False)
|
Quit all.
| 9.433381 | 8.623781 | 1.09388 |
if location and not force and os.path.exists(location):
editor.show_message('File exists (add ! to overwrite)')
else:
eb = editor.window_arrangement.active_editor_buffer
if location is None and eb.location is None:
editor.show_message(_NO_FILE_NAME)
else:
eb.write(location)
|
def write(editor, location, force=False)
|
Write file.
| 7.254652 | 6.951474 | 1.043613 |
write(editor, location, force=force)
editor.application.exit()
|
def write_and_quit(editor, location, force=False)
|
Write file and quit.
| 6.757911 | 6.241162 | 1.082797 |
eb = editor.window_arrangement.active_editor_buffer
if eb.location is None:
editor.show_message(_NO_FILE_NAME)
else:
eb.write()
quit(editor, all_=True, force=False)
|
def write_and_quit_all(editor)
|
Write current buffer and quit all.
| 9.65898 | 8.477084 | 1.139422 |
" Change working directory. "
try:
os.chdir(location)
except OSError as e:
editor.show_message('{}'.format(e))
|
def pwd(editor, location)
|
Change working directory.
| 6.484644 | 6.259264 | 1.036007 |
colorscheme = variables.get('colorscheme')
if colorscheme:
editor.use_colorscheme(colorscheme)
|
def color_scheme(editor, variables)
|
Apply the given colorscheme.
| 3.533829 | 3.44703 | 1.025181 |