Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
keys = [self.key] keys.extend([other.key for other in others]) self.database.zunionstore(dest, keys, **kwargs) return self.database.ZSet(dest)
def unionstore(self, dest, *others, **kwargs)
Store the union of the current set and one or more others in a new key. :param dest: the name of the key to store union :param others: One or more :py:class:`ZSet` instances :returns: A :py:class:`ZSet` referencing ``dest``.
3.668188
4.400105
0.833659
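A minimal usage sketch of unionstore(), built on the ZSet.from_dict() helper documented further down in this table; it assumes a walrus Database instance, that ZSet is importable from the walrus package, and hypothetical key names.

    from walrus import Database, ZSet

    db = Database()
    a = ZSet.from_dict(db, 'zs-a', {'one': 1, 'two': 2}, clear=True)
    b = ZSet.from_dict(db, 'zs-b', {'two': 5, 'three': 3}, clear=True)

    # Store the union (scores of shared members are summed by default)
    # under a new key and get back a ZSet referencing it.
    union = a.unionstore('zs-union', b)
    print(union.as_items())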
res = self.database.bzpopmin(self.key, timeout) if res is not None: return (res[1], res[2])
def bpopmin(self, timeout=0)
Atomically remove the lowest-scoring item from the set, blocking until an item becomes available or timeout is reached (0 for no timeout, default). Returns a 2-tuple of (item, score).
5.178885
5.81546
0.890537
res = self.database.bzpopmax(self.key, timeout) if res is not None: return (res[1], res[2])
def bpopmax(self, timeout=0)
Atomically remove the highest-scoring item from the set, blocking until an item becomes available or timeout is reached (0 for no timeout, default). Returns a 2-tuple of (item, score).
5.099967
5.869108
0.868951
pipe = self.database.pipeline() r1, r2 = (pipe .zrange(self.key, 0, count - 1, withscores=True) .zremrangebyrank(self.key, 0, count - 1) .execute()) return r1
def popmin_compat(self, count=1)
Atomically remove the lowest-scoring item(s) in the set. Compatible with Redis versions < 5.0. :returns: a list of item, score tuples or ``None`` if the set is empty.
3.16976
3.493674
0.907286
items = self.database.zrange(self.key, 0, -1, withscores=True) if decode: items = [(_decode(k), score) for k, score in items] return items
def as_items(self, decode=False)
Return a list of 2-tuples consisting of key/score.
3.399841
2.707441
1.255739
zset = cls(database, key) if clear: zset.clear() zset.add(data) return zset
def from_dict(cls, database, key, data, clear=False)
Create and populate a ZSet object from a data dictionary.
3.329164
2.316194
1.437343
items = [self.key] items.extend([other.key for other in others]) self.database.pfmerge(dest, *items) return HyperLogLog(self.database, dest)
def merge(self, dest, *others)
Merge one or more :py:class:`HyperLogLog` instances. :param dest: Key to store merged result. :param others: One or more ``HyperLogLog`` instances.
5.802872
5.095122
1.138907
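A minimal sketch of merge(), assuming db.HyperLogLog() returns a HyperLogLog container that supports add() and len(); key names are hypothetical.

    from walrus import Database

    db = Database()
    visitors_a = db.HyperLogLog('hll-a')
    visitors_b = db.HyperLogLog('hll-b')
    visitors_a.add('alice', 'bob')
    visitors_b.add('bob', 'carol')

    # Merge both counters into a new key; the result approximates the
    # cardinality of the union (about 3 here).
    merged = visitors_a.merge('hll-merged', visitors_b)
    print(len(merged))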
self.database.run_script( 'array_append', keys=[self.key], args=[value])
def append(self, value)
Append a new value to the end of the array.
10.974706
9.185058
1.194843
self.database.run_script( 'array_extend', keys=[self.key], args=values)
def extend(self, values)
Extend the array, appending the given values.
14.466039
12.844639
1.126232
if idx is not None: return self.database.run_script( 'array_remove', keys=[self.key], args=[idx]) else: return self.database.run_script( 'array_pop', keys=[self.key], args=[])
def pop(self, idx=None)
Remove an item from the array. By default this will be the last item by index, but any index can be specified.
3.475972
3.183329
1.09193
return [_decode(i) for i in self] if decode else list(self)
def as_list(self, decode=False)
Return a list of items in the array.
7.806697
5.88662
1.326176
arr = cls(database, key) if clear: arr.clear() arr.extend(data) return arr
def from_list(cls, database, key, data, clear=False)
Create and populate an Array object from a list of data.
3.380315
3.213288
1.05198
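A minimal sketch of the Array container's append()/extend()/pop()/as_list() methods shown above, assuming db.Array() returns an Array instance; the key name is hypothetical.

    from walrus import Database

    db = Database()
    arr = db.Array('my-array')
    arr.append('a')
    arr.extend(['b', 'c'])

    print(arr.as_list(decode=True))  # ['a', 'b', 'c']
    arr.pop()                        # removes the last item ('c')
    arr.pop(0)                       # removes the item at index 0 ('a')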
return self.database.xadd(self.key, data, id, maxlen, approximate)
def add(self, data, id='*', maxlen=None, approximate=True)
Add data to a stream. :param dict data: data to add to stream :param id: identifier for message ('*' to automatically append) :param maxlen: maximum length for stream :param approximate: allow stream max length to be approximate :returns: the added message id.
5.150805
9.538892
0.539979
return self.database.xrange(self.key, start, stop, count)
def range(self, start='-', stop='+', count=None)
Read a range of values from a stream. :param start: start key of range (inclusive) or '-' for oldest message :param stop: stop key of range (inclusive) or '+' for newest message :param count: limit number of messages returned
7.179705
13.120528
0.547212
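A minimal sketch of writing to and reading from a stream with add() and range(), assuming db.Stream() returns a Stream container; the key name is hypothetical.

    from walrus import Database

    db = Database()
    events = db.Stream('event-log')

    # '*' lets Redis assign the message id; add() returns that id.
    msg_id = events.add({'action': 'login', 'user': 'alice'})

    # '-' and '+' select the oldest and newest messages, respectively.
    for message_id, data in events.range('-', '+', count=10):
        print(message_id, data)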
return self.database.xrevrange(self.key, start, stop, count)
def revrange(self, start='+', stop='-', count=None)
Read a range of values from a stream in reverse. :param start: start key of range (inclusive) or '+' for newest message :param stop: stop key of range (inclusive) or '-' for oldest message :param count: limit number of messages returned
5.011096
10.065559
0.497846
if last_id is None: last_id = '0-0' resp = self.database.xread({self.key: _decode(last_id)}, count, block) # resp is a 2-tuple of stream name -> message list. return resp[0][1] if resp else []
def read(self, count=None, block=None, last_id=None)
Monitor stream for new data. :param int count: limit number of messages returned :param int block: milliseconds to block, 0 for indefinitely :param last_id: Last id read (an exclusive lower-bound). If the '$' value is given, we will only read values added *after* our command started blocking. :returns: a list of (message id, data) 2-tuples.
8.311342
9.351092
0.88881
return self.database.xtrim(self.key, count, approximate)
def trim(self, count, approximate=True)
Trim the stream to the given "count" of messages, discarding the oldest messages first. :param count: maximum size of stream :param approximate: allow size to be approximate
10.969607
17.016792
0.644634
return self.database.xack(self.key, self.group, *id_list)
def ack(self, *id_list)
Acknowledge that the message(s) were processed by the consumer associated with the parent :py:class:`ConsumerGroup`. :param id_list: one or more message ids to acknowledge :returns: number of messages marked acknowledged
6.020226
11.016615
0.546468
min_idle_time = kwargs.pop('min_idle_time', None) or 0 if kwargs: raise ValueError('incorrect arguments for claim()') return self.database.xclaim(self.key, self.group, self._consumer, min_idle_time, id_list)
def claim(self, *id_list, **kwargs)
Claim pending - but unacknowledged - messages for this stream within the context of the parent :py:class:`ConsumerGroup`. :param id_list: one or more message ids to claim :param min_idle_time: minimum idle time in milliseconds (keyword-arg). :returns: list of (message id, data) 2-tuples of messages that were successfully claimed
6.063408
6.302401
0.962079
return self.database.xpending_range(self.key, self.group, start, stop, count, consumer)
def pending(self, start='-', stop='+', count=1000, consumer=None)
List pending messages within the consumer group for this stream. :param start: start id (or '-' for oldest pending) :param stop: stop id (or '+' for newest pending) :param count: limit number of messages returned :param consumer: restrict message list to the given consumer :returns: A list containing status for each pending message. Each pending message returns [id, consumer, idle time, deliveries].
7.113994
11.925116
0.596556
key = {self.key: '>' if last_id is None else last_id} resp = self.database.xreadgroup(self.group, self._consumer, key, count, block) return resp[0][1] if resp else []
def read(self, count=None, block=None, last_id=None)
Monitor the stream for new messages within the context of the parent :py:class:`ConsumerGroup`. :param int count: limit number of messages returned :param int block: milliseconds to block, 0 for indefinitely. :param str last_id: optional last ID, by default uses the special token ">", which reads the oldest unread message. :returns: a list of (message id, data) 2-tuples.
6.533313
8.404257
0.777381
return self.database.xgroup_setid(self.key, self.group, id)
def set_id(self, id='$')
Set the last-read message id for the stream within the context of the parent :py:class:`ConsumerGroup`. By default this will be the special "$" identifier, meaning all messages are marked as having been read. :param id: id of last-read message (or "$").
14.462604
19.436007
0.744114
if consumer is None: consumer = self._consumer return self.database.xgroup_delconsumer(self.key, self.group, consumer)
def delete_consumer(self, consumer=None)
Remove a specific consumer from a consumer group. :param consumer: name of consumer to delete. If not provided, will be the default consumer for this stream. :returns: number of pending messages that the consumer had before being deleted.
7.473904
8.627715
0.866267
return type(self)(self.database, self.name, self.keys, name)
def consumer(self, name)
Create a new consumer for the :py:class:`ConsumerGroup`. :param name: name of consumer :returns: a :py:class:`ConsumerGroup` using the given consumer name.
11.492962
21.778112
0.52773
if ensure_keys_exist: for key in self.keys: if not self.database.exists(key): msg_id = self.database.xadd(key, {'': ''}, id=b'0-1') self.database.xdel(key, msg_id) elif self.database.type(key) != b'stream': raise ValueError('Consumer group key "%s" exists and is ' 'not a stream. To prevent data-loss ' 'this key will not be deleted.' % key) resp = {} # Mapping of key -> last-read message ID. for key, value in self.keys.items(): try: resp[key] = self.database.xgroup_create(key, self.name, value, mkstream) except ResponseError as exc: if exception_message(exc).startswith('BUSYGROUP'): resp[key] = False else: raise return resp
def create(self, ensure_keys_exist=True, mkstream=False)
Create the consumer group and register it with the group's stream keys. :param ensure_keys_exist: Ensure that the streams exist before creating the consumer group. Streams that do not exist will be created. :param mkstream: Use the "MKSTREAM" option to ensure stream exists (may require unstable version of Redis).
4.745125
4.508671
1.052444
resp = {} for key in self.keys: resp[key] = self.database.xgroup_destroy(key, self.name) return resp
def destroy(self)
Destroy the consumer group.
7.349317
6.099319
1.204941
if consumer is None: consumer = self._consumer return self.database.xreadgroup(self.name, consumer, self._read_keys, count, block)
def read(self, count=None, block=None, consumer=None)
Read unseen messages from all streams in the consumer group. Wrapper for :py:class:`Database.xreadgroup` method. :param int count: limit number of messages returned :param int block: milliseconds to block, 0 for indefinitely. :param consumer: consumer name :returns: a list of (stream key, messages) tuples, where messages is a list of (message id, data) 2-tuples.
7.009909
6.456967
1.085635
accum = {} for key in self.keys: accum[key] = self.database.xgroup_setid(key, self.name, id) return accum
def set_id(self, id='$')
Set the last-read message id for each stream in the consumer group. By default, this will be the special "$" identifier, meaning all messages are marked as having been read. :param id: id of last-read message (or "$").
9.359042
9.716362
0.963225
accum = {} for key in self.keys: accum[key] = self.database.xinfo_stream(key) return accum
def stream_info(self)
Retrieve information for each stream managed by the consumer group. Calls :py:meth:`~Database.xinfo_stream` for each stream. :returns: a dictionary mapping stream key to a dictionary of metadata
7.473217
5.882152
1.27049
if overflow is not None and overflow != self._last_overflow: self._last_overflow = overflow self.operations.append(('OVERFLOW', overflow)) self.operations.append(('INCRBY', fmt, offset, increment)) return self
def incrby(self, fmt, offset, increment, overflow=None)
Increment a bitfield by a given amount. :param fmt: format-string for the bitfield being updated, e.g. u8 for an unsigned 8-bit integer. :param int offset: offset (in number of bits). :param int increment: value to increment the bitfield by. :param str overflow: overflow algorithm. Defaults to WRAP, but other acceptable values are SAT and FAIL. See the Redis docs for descriptions of these algorithms. :returns: a :py:class:`BitFieldOperation` instance.
3.22063
4.10442
0.784674
bfo = BitFieldOperation(self.database, self.key) return bfo.incrby(fmt, offset, increment, overflow)
def incrby(self, fmt, offset, increment, overflow=None)
Increment a bitfield by a given amount. :param fmt: format-string for the bitfield being updated, e.g. u8 for an unsigned 8-bit integer. :param int offset: offset (in number of bits). :param int increment: value to increment the bitfield by. :param str overflow: overflow algorithm. Defaults to WRAP, but other acceptable values are SAT and FAIL. See the Redis docs for descriptions of these algorithms. :returns: a :py:class:`BitFieldOperation` instance.
7.28795
6.674006
1.09199
bfo = BitFieldOperation(self.database, self.key) return bfo.get(fmt, offset)
def get(self, fmt, offset)
Get the value of a given bitfield. :param fmt: format-string for the bitfield being read, e.g. u8 for an unsigned 8-bit integer. :param int offset: offset (in number of bits). :returns: a :py:class:`BitFieldOperation` instance.
10.402707
10.716537
0.970715
bfo = BitFieldOperation(self.database, self.key) return bfo.set(fmt, offset, value)
def set(self, fmt, offset, value)
Set the value of a given bitfield. :param fmt: format-string for the bitfield being read, e.g. u8 for an unsigned 8-bit integer. :param int offset: offset (in number of bits). :param int value: value to set at the given position. :returns: a :py:class:`BitFieldOperation` instance.
8.817634
9.050794
0.974239
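A minimal sketch of chaining several bitfield sub-commands through a BitFieldOperation, the same object the BitField.set()/incrby()/get() wrappers above return; the import path and key name are assumptions.

    from walrus import Database
    from walrus.containers import BitFieldOperation

    db = Database()
    ops = BitFieldOperation(db, 'bf-demo')

    # Queue sub-commands, then run them as a single BITFIELD call.
    (ops
     .set('u8', 0, 200)
     .incrby('u8', 0, 100, overflow='SAT')  # saturates at 255 instead of wrapping
     .get('u8', 0))
    print(ops.execute())  # one result per queued sub-command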
return self.database.bitcount(self.key, start, end)
def bit_count(self, start=None, end=None)
Count the set bits in a string. Note that the `start` and `end` parameters are offsets in **bytes**.
8.530146
10.104048
0.844231
return self.database.setbit(self.key, offset, value)
def set_bit(self, offset, value)
Set the bit value at the given offset (in bits). :param int offset: bit offset :param int value: new value for bit, 1 or 0 :returns: previous value at bit offset, 1 or 0
7.685291
13.998349
0.549014
bfo = BitFieldOperation(self.database, self.key) for bit_index in self._get_seeds(data): bfo.set('u1', bit_index, 1) bfo.execute()
def add(self, data)
Add an item to the bloomfilter. :param bytes data: a bytestring representing the item to add.
9.832397
10.082339
0.97521
bfo = BitFieldOperation(self.database, self.key) for bit_index in self._get_seeds(data): bfo.get('u1', bit_index) return all(bfo.execute())
def contains(self, data)
Check if an item has been added to the bloomfilter. :param bytes data: a bytestring representing the item to check. :returns: a boolean indicating whether or not the item is present in the bloomfilter. False-positives are possible, but a negative return value is definitive.
11.51265
14.342352
0.802703
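A minimal sketch of the bloom filter's add()/contains() methods shown above, assuming the container is created via db.bloom_filter() (the factory name is an assumption); the key name is hypothetical.

    from walrus import Database

    db = Database()
    bf = db.bloom_filter('seen-urls')

    bf.add(b'https://example.com/a')
    print(bf.contains(b'https://example.com/a'))  # True
    print(bf.contains(b'https://example.com/b'))  # False: definitively absent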
def cons(self, i): if (self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u'): return 0 if self.b[i] == 'y': if i == self.k0: return 1 else: return (not self.cons(i - 1)) return 1
cons(i) is TRUE <=> b[i] is a consonant.
null
null
null
def m(self): n = 0 i = self.k0 while 1: if i > self.j: return n if not self.cons(i): break i = i + 1 i = i + 1 while 1: while 1: if i > self.j: return n if self.cons(i): break i = i + 1 i = i + 1 n = n + 1 while 1: if i > self.j: return n if not self.cons(i): break i = i + 1 i = i + 1
m() measures the number of consonant sequences between k0 and j. if c is a consonant sequence and v a vowel sequence, and <..> indicates arbitrary presence, <c><v> gives 0 <c>vc<v> gives 1 <c>vcvc<v> gives 2 <c>vcvcvc<v> gives 3 ....
null
null
null
def vowelinstem(self): for i in range(self.k0, self.j + 1): if not self.cons(i): return 1 return 0
vowelinstem() is TRUE <=> k0,...j contains a vowel
null
null
null
def step3(self): if self.b[self.k] == 'e': if self.ends("icate"): self.r("ic") elif self.ends("ative"): self.r("") elif self.ends("alize"): self.r("al") elif self.b[self.k] == 'i': if self.ends("iciti"): self.r("ic") elif self.b[self.k] == 'l': if self.ends("ical"): self.r("ic") elif self.ends("ful"): self.r("") elif self.b[self.k] == 's': if self.ends("ness"): self.r("")
step3() deals with -ic-, -ful, -ness, etc. Similar strategy to step2.
null
null
null
def step4(self): if self.b[self.k - 1] == 'a': if self.ends("al"): pass else: return elif self.b[self.k - 1] == 'c': if self.ends("ance"): pass elif self.ends("ence"): pass else: return elif self.b[self.k - 1] == 'e': if self.ends("er"): pass else: return elif self.b[self.k - 1] == 'i': if self.ends("ic"): pass else: return elif self.b[self.k - 1] == 'l': if self.ends("able"): pass elif self.ends("ible"): pass else: return elif self.b[self.k - 1] == 'n': if self.ends("ant"): pass elif self.ends("ement"): pass elif self.ends("ment"): pass elif self.ends("ent"): pass else: return elif self.b[self.k - 1] == 'o': if (self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't')): pass elif self.ends("ou"): pass # takes care of -ous else: return elif self.b[self.k - 1] == 's': if self.ends("ism"): pass else: return elif self.b[self.k - 1] == 't': if self.ends("ate"): pass elif self.ends("iti"): pass else: return elif self.b[self.k - 1] == 'u': if self.ends("ous"): pass else: return elif self.b[self.k - 1] == 'v': if self.ends("ive"): pass else: return elif self.b[self.k - 1] == 'z': if self.ends("ize"): pass else: return else: return if self.m() > 1: self.k = self.j
step4() takes off -ant, -ence etc., in context <c>vcvc<v>.
null
null
null
with self.walrus.atomic(): for item in items: self.store(*item)
def store_many(self, items)
Store multiple subject-predicate-object triples in the database. :param items: A list of (subj, pred, obj) 3-tuples.
9.608813
9.771141
0.983387
with self.walrus.atomic(): for key in self.keys_for_values(s, p, o): del self._z[key]
def delete(self, s, p, o)
Remove the given subj-pred-obj triple from the database.
10.241015
10.224916
1.001574
start, end = self.keys_for_query(s, p, o) if end is None: if start in self._z: yield {'s': s, 'p': p, 'o': o} else: return else: for key in self._z.range_by_lex('[' + start, '[' + end): keys, p1, p2, p3 = decode(key).split('::') yield dict(zip(keys, (p1, p2, p3)))
def query(self, s=None, p=None, o=None)
Return all triples that satisfy the given expression. You may specify all or none of the fields (s, p, and o). For instance, if I wanted to query for all the people who live in Kansas, I might write: .. code-block:: python for triple in graph.query(p='lives', o='Kansas'): print triple['s'], 'lives in Kansas!'
4.652463
5.186103
0.897102
results = {} for condition in conditions: if isinstance(condition, tuple): query = dict(zip('spo', condition)) else: query = condition.copy() materialized = {} targets = [] for part in ('s', 'p', 'o'): if isinstance(query[part], Variable): variable = query.pop(part) materialized[part] = set() targets.append((variable, part)) # Potentially rather than popping all the variables, we could use # the result values from a previous condition and do O(results) # loops looking for a single variable. for result in self.query(**query): ok = True for var, part in targets: if var in results and result[part] not in results[var]: ok = False break if ok: for var, part in targets: materialized[part].add(result[part]) for var, part in targets: if var in results: results[var] &= materialized[part] else: results[var] = materialized[part] return dict((var.name, vals) for (var, vals) in results.items())
def search(self, *conditions)
Given a set of conditions, return all values that satisfy the conditions for a given set of variables. For example, suppose I wanted to find all of my friends who live in Kansas: .. code-block:: python X = graph.v.X results = graph.search( {'s': 'charlie', 'p': 'friends', 'o': X}, {'s': X, 'p': 'lives', 'o': 'Kansas'}) The return value consists of a dictionary keyed by variable, whose values are ``set`` objects containing the values that satisfy the query clauses, e.g.: .. code-block:: python print results # Result has one key, for our "X" variable. The value is the set # of my friends that live in Kansas. # {'X': {'huey', 'nuggie'}} # We can assume the following triples exist: # ('charlie', 'friends', 'huey') # ('charlie', 'friends', 'nuggie') # ('huey', 'lives', 'Kansas') # ('nuggie', 'lives', 'Kansas')
3.860719
3.638235
1.061152
with self._transaction_lock: local = self._transaction_local if not local.pipes: raise ValueError('No transaction is currently active.') return local.commit()
def commit_transaction(self)
Commit the currently active transaction (Pipeline). If no transaction is active in the current thread, an exception will be raised. :returns: The return value of executing the Pipeline. :raises: ``ValueError`` if no transaction is active.
7.611694
6.385573
1.192014
with self._transaction_lock: local = self._transaction_local if not local.pipes: raise ValueError('No transaction is currently active.') local.abort()
def clear_transaction(self)
Clear the currently active transaction (if one exists). If the transaction stack is not empty, then a new pipeline will be initialized. :returns: No return value. :raises: ``ValueError`` if no transaction is active.
8.906896
7.772751
1.145913
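A minimal sketch of the transaction helpers above, assuming the Database exposes an atomic() context manager (as used by store_many() and delete() in the graph code earlier in this table) that wraps commit_transaction()/clear_transaction().

    from walrus import Database

    db = Database()

    # Commands issued inside the block are buffered in a pipeline and
    # committed when the block exits; an exception clears the transaction.
    with db.atomic():
        db.set('k1', 'v1')
        db.set('k2', 'v2')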
return self._scripts[script_name](keys, args)
def run_script(self, script_name, keys=None, args=None)
Execute a walrus script with the given arguments. :param script_name: The base name of the script to execute. :param list keys: Keys referenced by the script. :param list args: Arguments passed in to the script. :returns: Return value of script. .. note:: Redis scripts require two parameters, ``keys`` and ``args``, which are referenced in lua as ``KEYS`` and ``ARGV``.
8.119143
17.217409
0.471566
return self.__mapping.get(self.type(key), self.__getitem__)(key)
def get_key(self, key)
Return a rich object for the given key. For instance, if a hash key is requested, then a :py:class:`Hash` will be returned. :param str key: Key to retrieve. :returns: A hash, set, list, zset or array.
12.887856
20.626144
0.624831
return Cache(self, name=name, default_timeout=default_timeout)
def cache(self, name='cache', default_timeout=3600)
Create a :py:class:`Cache` instance. :param str name: The name used to prefix keys used to store cached data. :param int default_timeout: The default key expiry. :returns: A :py:class:`Cache` instance.
4.560459
5.946244
0.766948
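A minimal sketch of the Cache instance returned by Database.cache(); the get()/set() calls are assumed to exist with these shapes.

    from walrus import Database

    db = Database()
    cache = db.cache(name='app-cache', default_timeout=600)

    cache.set('greeting', 'hello', timeout=60)  # expire after 60 seconds
    print(cache.get('greeting'))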
return Graph(self, name, *args, **kwargs)
def graph(self, name, *args, **kwargs)
Creates a :py:class:`Graph` instance. :param str name: The namespace for the graph metadata. :returns: a :py:class:`Graph` instance.
5.125934
7.220407
0.709923
return Lock(self, name, ttl, lock_id)
def lock(self, name, ttl=None, lock_id=None)
Create a named :py:class:`Lock` instance. The lock implements an API similar to the standard library's ``threading.Lock``, and can also be used as a context manager or decorator. :param str name: The name of the lock. :param int ttl: The time-to-live for the lock in milliseconds (optional). If the ttl is ``None`` then the lock will not expire. :param str lock_id: Optional identifier for the lock instance.
5.37169
13.69986
0.392098
return RateLimit(self, name, limit, per, debug)
def rate_limit(self, name, limit=5, per=60, debug=False)
Rate limit implementation. Allows up to `limit` events every `per` seconds. See :ref:`rate-limit` for more information.
5.291183
6.235798
0.848517
return ConsumerGroup(self, group, keys, consumer=consumer)
def consumer_group(self, group, keys, consumer=None)
Create a named :py:class:`ConsumerGroup` instance for the given key(s). :param group: name of consumer group :param keys: stream identifier(s) to monitor. May be a single stream key, a list of stream keys, or a key-to-minimum id mapping. The minimum id for each stream should be considered an exclusive lower-bound. The '$' value can also be used to only read values added *after* our command started blocking. :param consumer: name for consumer within group :returns: a :py:class:`ConsumerGroup` instance
5.941664
10.258245
0.579209
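A minimal sketch tying consumer_group() to the create() and read() methods documented earlier; stream, group, and consumer names are hypothetical.

    from walrus import Database

    db = Database()
    cg = db.consumer_group('worker-group', ['event-log'], consumer='worker-1')
    cg.create()  # register the group with its stream key(s)

    # Read up to 10 unseen messages across all streams in the group.
    for stream_key, messages in cg.read(count=10):
        for message_id, data in messages:
            print(stream_key, message_id, data)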
return TimeSeries(self, group, keys, consumer=consumer)
def time_series(self, group, keys, consumer=None)
Create a named :py:class:`TimeSeries` consumer-group for the given key(s). TimeSeries objects are almost identical to :py:class:`ConsumerGroup` except they offer a higher level of abstraction and read/write message ids as datetimes. :param group: name of consumer group :param keys: stream identifier(s) to monitor. May be a single stream key, a list of stream keys, or a key-to-minimum id mapping. The minimum id for each stream should be considered an exclusive lower-bound. The '$' value can also be used to only read values added *after* our command started blocking. :param consumer: name for consumer within group :returns: a :py:class:`TimeSeries` instance
5.576793
11.306471
0.493239
return self.run_script('cas', keys=[key], args=[value, new_value])
def cas(self, key, value, new_value)
Perform an atomic compare-and-set on the value in "key", using a prefix match on the provided value.
6.863271
7.613097
0.901508
def decorator(fn): _channels = channels or [] _patterns = patterns or [] @wraps(fn) def inner(): pubsub = self.pubsub() def listen(): for channel in _channels: pubsub.subscribe(channel) for pattern in _patterns: pubsub.psubscribe(pattern) for data_dict in pubsub.listen(): try: ret = fn(**data_dict) except StopIteration: pubsub.close() break if is_async: worker = threading.Thread(target=listen) worker.start() return worker else: listen() return inner return decorator
def listener(self, channels=None, patterns=None, is_async=False)
Decorator for wrapping functions used to listen for Redis pub-sub messages. The listener will listen until the decorated function raises a ``StopIteration`` exception. :param list channels: Channels to listen on. :param list patterns: Patterns to match. :param bool is_async: Whether to start the listener in a separate thread.
2.553625
2.384698
1.070838
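A minimal sketch of the listener() decorator described above; the channel name is hypothetical. Per the wrapped code, the decorated function receives the pub-sub message dict as keyword arguments and stops the listener by raising StopIteration.

    from walrus import Database

    db = Database()

    @db.listener(channels=['events'], is_async=True)
    def handle_message(**message):
        print(message['channel'], message['data'])
        if message['data'] == b'quit':
            raise StopIteration

    worker = handle_message()  # returns the background listener thread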
conn = self.connection_pool.get_connection(connection_id, None) conn.send_command('monitor') while callback(conn.read_response()): pass
def stream_log(self, callback, connection_id='monitor')
Stream Redis activity one line at a time to the given callback. :param callback: A function that accepts a single argument, the Redis command.
5.164163
4.481431
1.152347
while True: acquired = self.database.run_script( 'lock_acquire', keys=[self.key], args=[self._lock_id, self.ttl]) if acquired == 1 or not block: return acquired == 1 # Perform a blocking pop on the event key. When a lock # is released, a value is pushed into the list, which # signals listeners that the lock is available. self.database.blpop(self.event, self.ttl)
def acquire(self, block=True)
Acquire the lock. The lock will be held until it is released by calling :py:meth:`Lock.release`. If the lock was initialized with a ``ttl``, then the lock will be released automatically after the given number of milliseconds. By default this method will block until the lock becomes free (either by being released or expiring). The blocking is accomplished by performing a blocking left-pop on a list, as opposed to a spin-loop. If you specify ``block=False``, then the method will return ``False`` if the lock could not be acquired. :param bool block: Whether to block while waiting to acquire the lock. :returns: Returns ``True`` if the lock was acquired.
7.695618
7.630171
1.008577
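A minimal sketch of acquire()/release() on a named lock created with Database.lock(); per the docstring above, the lock can also be used as a context manager, which is not shown here.

    from walrus import Database

    db = Database()
    lock = db.lock('task-lock', ttl=10000)  # auto-expires after 10 seconds

    if lock.acquire(block=False):
        try:
            pass  # ... do work while holding the lock ...
        finally:
            lock.release()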
unlocked = self.database.run_script( 'lock_release', keys=[self.key, self.event], args=[self._lock_id]) return unlocked != 0
def release(self)
Release the lock. :returns: Returns ``True`` if the lock was released.
11.280205
10.213012
1.104493
self.database.delete(self.key) self.database.delete(self.event)
def clear(self)
Clear the lock, allowing it to be acquired. Do not use this method except to recover from a deadlock. Otherwise you should use :py:meth:`Lock.release`.
8.024844
8.384972
0.957051
separator = getattr(self.model_class, 'index_separator', '.') parts = map(decode, parts) return '%s%s' % (self._base_key, separator.join(map(str, parts)))
def make_key(self, *parts)
Generate a namespaced key for the given path.
5.731831
5.037852
1.137753
model_hash = self.to_hash() # Remove the value from the index. for index in field.get_indexes(): index.remove(self) if isinstance(incr_by, int): new_val = model_hash.incr(field.name, incr_by) else: new_val = model_hash.incr_float(field.name, incr_by) setattr(self, field.name, new_val) # Re-index the new value. for index in field.get_indexes(): index.save(self) return new_val
def incr(self, field, incr_by=1)
Increment the value stored in the given field by the specified amount. Any indexes will be updated at the time ``incr()`` is called. :param Field field: A field instance. :param incr_by: An ``int`` or ``float``. Example: .. code-block:: python # Retrieve a page counter object for the given URL. page_count = PageCounter.get(PageCounter.url == url) # Update the hit count, persisting to the database and # updating secondary indexes in one go. page_count.incr(PageCounter.hits)
2.891429
3.159689
0.915099
for result in cls._query.all_index(): yield cls.load(result, convert_key=False)
def all(cls)
Return an iterator that successively yields saved model instances. Models are saved in an unordered :py:class:`Set`, so the iterator will return them in arbitrary order. Example:: for note in Note.all(): print note.content To return models in sorted order, see :py:meth:`Model.query`. Example returning all records, sorted newest to oldest:: for note in Note.query(order_by=Note.timestamp.desc()): print note.timestamp, note.content
14.464169
29.04217
0.49804
if expression is not None: executor = Executor(cls.__database__) result = executor.execute(expression) else: result = cls._query.all_index() if order_by is not None: desc = False if isinstance(order_by, Desc): desc = True order_by = order_by.node alpha = not isinstance(order_by, _ScalarField) result = cls.__database__.sort( result.key, by='*->%s' % order_by.name, alpha=alpha, desc=desc) elif isinstance(result, ZSet): result = result.iterator(reverse=True) for hash_id in result: yield cls.load(hash_id, convert_key=False)
def query(cls, expression=None, order_by=None)
Return model instances matching the given expression (if specified). Additionally, matching instances can be returned sorted by field value. Example:: # Get administrators sorted by username. admin_users = User.query( (User.admin == True), order_by=User.username) # List blog entries newest to oldest. entries = Entry.query(order_by=Entry.timestamp.desc()) # Perform a complex filter. values = StatData.query( (StatData.timestamp < datetime.date.today()) & ((StatData.type == 'pv') | (StatData.type == 'cv'))) :param expression: A boolean expression to filter by. :param order_by: A field whose value should be used to sort returned instances.
5.128137
5.512197
0.930326
if expression is not None: executor = Executor(cls.__database__) result = executor.execute(expression) else: result = cls._query.all_index() for hash_id in result: cls.load(hash_id, convert_key=False).delete()
def query_delete(cls, expression=None)
Delete model instances matching the given expression (if specified). If no expression is provided, then all model instances will be deleted. :param expression: A boolean expression to filter by.
6.972572
8.059607
0.865126
executor = Executor(cls.__database__) result = executor.execute(expression) if len(result) != 1: raise ValueError('Got %s results, expected 1.' % len(result)) return cls.load(result._first_or_any(), convert_key=False)
def get(cls, expression)
Retrieve the model instance matching the given expression. If the number of matching results is not equal to one, then a ``ValueError`` will be raised. :param expression: A boolean expression to filter by. :returns: The matching :py:class:`Model` instance. :raises: ``ValueError`` if result set size is not 1.
5.663513
5.925825
0.955734
if convert_key: primary_key = cls._query.get_primary_hash_key(primary_key) if not cls.__database__.hash_exists(primary_key): raise KeyError('Object not found.') raw_data = cls.__database__.hgetall(primary_key) if PY3: raw_data = decode_dict_keys(raw_data) data = {} for name, field in cls._fields.items(): if isinstance(field, _ContainerField): continue elif name in raw_data: data[name] = field.python_value(raw_data[name]) else: data[name] = None return cls(**data)
def load(cls, primary_key, convert_key=True)
Retrieve a model instance by primary key. :param primary_key: The primary key of the model instance. :returns: Corresponding :py:class:`Model` instance. :raises: ``KeyError`` if object with given primary key does not exist.
3.196403
3.392653
0.942154
hash_key = self.get_hash_id() try: original_instance = self.load(hash_key, convert_key=False) except KeyError: return # Remove from the `all` index. all_index = self._query.all_index() all_index.remove(hash_key) # Remove from the secondary indexes. for field in self._indexes: for index in field.get_indexes(): index.remove(original_instance) if not for_update: for field in self._fields.values(): if isinstance(field, _ContainerField): field._delete(self) # Remove the object itself. self.__database__.delete(hash_key)
def delete(self, for_update=False)
Delete the given model instance.
4.367851
4.224982
1.033815
pk_field = self._fields[self._primary_key] if not self._data.get(self._primary_key): setattr(self, self._primary_key, pk_field._generate_key()) require_delete = False else: require_delete = True if require_delete: self.delete(for_update=True) data = self._get_data_dict() hash_obj = self.to_hash() hash_obj.clear() hash_obj.update(data) all_index = self._query.all_index() all_index.add(self.get_hash_id()) for field in self._indexes: for index in field.get_indexes(): index.save(self)
def save(self)
Save the given model instance. If the model does not have a primary key value, Walrus will call the primary key field's ``generate_key()`` method to attempt to generate a suitable value.
4.186691
3.954817
1.058631
if self._debug: return False counter = self.database.List(self.name + ':' + key) n = len(counter) is_limited = False if n < self._limit: counter.prepend(str(time.time())) else: oldest = float(counter[-1]) if time.time() - oldest < self._per: is_limited = True else: counter.prepend(str(time.time())) del counter[:self._limit] counter.pexpire(int(self._per * 2000)) return is_limited
def limit(self, key)
Function to log an event with the given key. If the ``key`` has not exceeded its allotted events, then the function returns ``False`` to indicate that no limit is being imposed. If the ``key`` has exceeded the number of events, then the function returns ``True`` indicating rate-limiting should occur. :param str key: A key identifying the source of the event. :returns: Boolean indicating whether the event should be rate-limited or not.
4.776032
4.582753
1.042175
if key_function is None: def key_function(*args, **kwargs): data = pickle.dumps((args, sorted(kwargs.items()))) return hashlib.md5(data).hexdigest() def decorator(fn): @wraps(fn) def inner(*args, **kwargs): key = key_function(*args, **kwargs) if self.limit(key): raise RateLimitException( 'Call to %s exceeded %s events in %s seconds.' % ( fn.__name__, self._limit, self._per)) return fn(*args, **kwargs) return inner return decorator
def rate_limited(self, key_function=None)
Function or method decorator that will prevent calls to the decorated function when the number of events has been exceeded for the given time period. It is probably important that you take care to choose an appropriate key function. For instance, if rate-limiting a web-page you might use the requesting user's IP as the key. If the number of allowed events has been exceeded, a ``RateLimitException`` will be raised. :param key_function: Function that accepts the params of the decorated function and returns a string key. If not provided, a hash of the args and kwargs will be used. :returns: If the call is not rate-limited, then the return value will be that of the decorated function. :raises: ``RateLimitException``.
2.637225
2.48993
1.059156
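A minimal sketch of the rate_limited() decorator, keyed on a hypothetical ip argument; it assumes RateLimitException is importable from the walrus package.

    from walrus import Database, RateLimitException

    db = Database()
    limiter = db.rate_limit('login-attempts', limit=5, per=60)

    @limiter.rate_limited(key_function=lambda ip: ip)
    def attempt_login(ip):
        return 'ok'

    try:
        attempt_login('10.0.0.1')
    except RateLimitException:
        print('Too many attempts from this address.')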
resp = super(TimeSeries, self).read(count, block) return xread_to_messages(resp)
def read(self, count=None, block=None)
Read unseen messages from all streams in the consumer group. Wrapper for :py:class:`Database.xreadgroup` method. :param int count: limit number of messages returned :param int block: milliseconds to block, 0 for indefinitely. :returns: a list of :py:class:`Message` objects
12.372722
18.645844
0.663565
try: body_str = body.decode() except UnicodeDecodeError: self.logger.log(self.log_level, "(multipart/form)", logging_context) return parts = body_str.split(self.boundary) last = len(parts) - 1 for i, part in enumerate(parts): if 'Content-Type:' in part: match = BINARY_REGEX.search(part) if match and match.group(2) in BINARY_TYPES and not match.group(4) in ('', '\r\n'): part = match.expand(r'\1\2/\3\r\n\r\n(binary data)\r\n') if i != last: part = part + self.boundary self.logger.log(self.log_level, part, logging_context)
def _log_multipart(self, body, logging_context)
Split the multipart body into parts separated by "boundary", then match each part against BINARY_REGEX, which checks for a "Content-Type" header and captures the content type of that part. If the part is an image or an application, its content is replaced with the string "(binary data)". If the body cannot be decoded as UTF-8, "(multipart/form)" is logged instead.
3.324992
2.504033
1.327855
if columns is None: columns = df.columns try: assert not df[columns].isnull().any().any() except AssertionError as e: missing = df[columns].isnull() msg = generic.bad_locations(missing) e.args = msg raise return df
def none_missing(df, columns=None)
Asserts that there are no missing values (NaNs) in the DataFrame. Parameters ---------- df : DataFrame columns : list list of columns to restrict the check to Returns ------- df : DataFrame same as the original
4.286572
5.507538
0.77831
if items is None: items = {k: (increasing, strict) for k in df} for col, (increasing, strict) in items.items(): s = pd.Index(df[col]) if increasing: good = getattr(s, 'is_monotonic_increasing') elif increasing is None: good = getattr(s, 'is_monotonic') | getattr(s, 'is_monotonic_decreasing') else: good = getattr(s, 'is_monotonic_decreasing') if strict: if increasing: good = good & (s.to_series().diff().dropna() > 0).all() elif increasing is None: good = good & ((s.to_series().diff().dropna() > 0).all() | (s.to_series().diff().dropna() < 0).all()) else: good = good & (s.to_series().diff().dropna() < 0).all() if not good: raise AssertionError return df
def is_monotonic(df, items=None, increasing=None, strict=False)
Asserts that the DataFrame is monotonic. Parameters ========== df : Series or DataFrame items : dict mapping columns to conditions (increasing, strict) increasing : None or bool If None, either increasing or decreasing is accepted. strict : bool whether the comparison should be strict Returns ======= df : DataFrame
2.227591
2.190248
1.01705
try: check = np.all(np.equal(df.shape, shape) | (np.equal(shape, [-1, -1]) | np.equal(shape, [None, None]))) assert check except AssertionError as e: msg = ("Expected shape: {}\n" "\t\tActual shape: {}".format(shape, df.shape)) e.args = (msg,) raise return df
def is_shape(df, shape)
Asserts that the DataFrame is of a known shape. Parameters ========== df : DataFrame shape : tuple (n_rows, n_columns). Use None or -1 if you don't care about a dimension. Returns ======= df : DataFrame
3.904929
3.858092
1.01214
if columns is None: columns = df.columns for col in columns: if not df[col].is_unique: raise AssertionError("Column {!r} contains non-unique values".format(col)) return df
def unique(df, columns=None)
Asserts that columns in the DataFrame only have unique values. Parameters ---------- df : DataFrame columns : list list of columns to restrict the check to. If None, check all columns. Returns ------- df : DataFrame same as the original
2.711272
3.307976
0.819617
try: assert df.index.is_unique except AssertionError as e: e.args = df.index.get_duplicates() raise return df
def unique_index(df)
Assert that the index is unique Parameters ========== df : DataFrame Returns ======= df : DataFrame
4.028777
4.115049
0.979035
for k, v in items.items(): if not df[k].isin(v).all(): bad = df.loc[~df[k].isin(v), k] raise AssertionError('Not in set', bad) return df
def within_set(df, items=None)
Assert that df is a subset of items Parameters ========== df : DataFrame items : dict mapping of columns (k) to array-like of values (v) that ``df[k]`` is expected to be a subset of Returns ======= df : DataFrame
3.510352
3.454692
1.016112
for k, (lower, upper) in items.items(): if (lower > df[k]).any() or (upper < df[k]).any(): bad = (lower > df[k]) | (upper < df[k]) raise AssertionError("Outside range", bad) return df
def within_range(df, items=None)
Assert that a DataFrame is within a range. Parameters ========== df : DataFrame items : dict mapping of columns (k) to a (low, high) tuple (v) that ``df[k]`` is expected to be between. Returns ======= df : DataFrame
3.87809
4.155046
0.933345
means = df.mean() stds = df.std() inliers = (np.abs(df[means.index] - means) < n * stds) if not np.all(inliers): msg = generic.bad_locations(~inliers) raise AssertionError(msg) return df
def within_n_std(df, n=3)
Assert that every value is within ``n`` standard deviations of its column's mean. Parameters ========== df : DataFrame n : int number of standard deviations from the mean Returns ======= df : DataFrame
4.551261
5.04416
0.902283
dtypes = df.dtypes for k, v in items.items(): if not dtypes[k] == v: raise AssertionError("{} has the wrong dtype. Should be ({}), is ({})".format(k, v,dtypes[k])) return df
def has_dtypes(df, items)
Assert that a DataFrame has ``dtypes`` Parameters ========== df: DataFrame items: dict mapping of columns to dtype. Returns ======= df : DataFrame
4.002544
3.946872
1.014105
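A minimal sketch of chaining several of the checks above with DataFrame.pipe; the engarde.checks import path is an assumption, and the dtypes assume a 64-bit platform.

    import numpy as np
    import pandas as pd
    import engarde.checks as ck

    df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})

    checked = (df
               .pipe(ck.none_missing)
               .pipe(ck.unique_index)
               .pipe(ck.is_shape, (3, 2))
               .pipe(ck.has_dtypes, {'a': np.int64, 'b': np.float64}))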
subset = df[[manycol, unitcol]].drop_duplicates() for many in subset[manycol].unique(): if subset[subset[manycol] == many].shape[0] > 1: msg = "{} in {} has multiple values for {}".format(many, manycol, unitcol) raise AssertionError(msg) return df
def one_to_many(df, unitcol, manycol)
Assert that a many-to-one relationship is preserved between two columns. For example, a retail store will have distinct departments, each with several employees. If each employee may only work in a single department, then the relationship of the department to the employees is one to many. Parameters ========== df : DataFrame unitcol : str The column that encapsulates the groups in ``manycol``. manycol : str The column that must remain unique in the distinct pairs between ``manycol`` and ``unitcol`` Returns ======= df : DataFrame
3.319258
3.782592
0.877509
try: tm.assert_frame_equal(df, df_to_compare, **kwargs) except AssertionError as exc: six.raise_from(AssertionError("DataFrames are not equal"), exc) return df
def is_same_as(df, df_to_compare, **kwargs)
Assert that two pandas dataframes are equal Parameters ========== df : pandas DataFrame df_to_compare : pandas DataFrame **kwargs : dict keyword arguments passed through to pandas' ``assert_frame_equal`` Returns ======= df : DataFrame
2.910223
3.096654
0.939796
result = check(df, *args, **kwargs) try: assert result except AssertionError as e: msg = '{} is not true'.format(check.__name__) e.args = (msg, df) raise return df
def verify(df, check, *args, **kwargs)
Generic verify. Assert that ``check(df, *args, **kwargs)`` is true. Parameters ========== df : DataFrame check : function Should take DataFrame and **kwargs. Returns bool Returns ======= df : DataFrame same as the input.
3.274031
3.50613
0.933802
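A minimal sketch of the generic verify() hook with a custom check function; column and function names are hypothetical.

    import pandas as pd
    import engarde.checks as ck

    def has_positive_balance(df):
        return (df['balance'] > 0).all()

    df = pd.DataFrame({'balance': [10, 25, 3]})
    df = df.pipe(ck.verify, has_positive_balance)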
result = check(df, *args, **kwargs) try: assert np.all(result) except AssertionError as e: msg = "{} not true for all".format(check.__name__) e.args = (msg, df[~result]) raise return df
def verify_all(df, check, *args, **kwargs)
Verify that all the entries in ``check(df, *args, **kwargs)`` are true.
3.5342
3.383445
1.044557
result = check(df, *args, **kwargs) try: assert np.any(result) except AssertionError as e: msg = '{} not true for any'.format(check.__name__) e.args = (msg, df) raise return df
def verify_any(df, check, *args, **kwargs)
Verify that any of the entries in ``check(df, *args, **kwargs)`` is true
3.697556
3.815572
0.96907
TAB = ' ' * 4 if is_doctest and output is not None: found = output found = found.strip() submitted = data.strip() if self.directive is None: source = 'Unavailable' content = 'Unavailable' else: source = self.directive.state.document.current_source content = self.directive.content # Add tabs and join into a single string. content = '\n'.join([TAB + line for line in content]) # Make sure the output contains the output prompt. ind = found.find(output_prompt) if ind < 0: e = ('output does not contain output prompt\n\n' 'Document source: {0}\n\n' 'Raw content: \n{1}\n\n' 'Input line(s):\n{TAB}{2}\n\n' 'Output line(s):\n{TAB}{3}\n\n') e = e.format(source, content, '\n'.join(input_lines), repr(found), TAB=TAB) raise RuntimeError(e) found = found[len(output_prompt):].strip() # Handle the actual doctest comparison. if decorator.strip() == '@doctest': # Standard doctest if found != submitted: e = ('doctest failure\n\n' 'Document source: {0}\n\n' 'Raw content: \n{1}\n\n' 'On input line(s):\n{TAB}{2}\n\n' 'we found output:\n{TAB}{3}\n\n' 'instead of the expected:\n{TAB}{4}\n\n') e = e.format(source, content, '\n'.join(input_lines), repr(found), repr(submitted), TAB=TAB) raise RuntimeError(e) else: self.custom_doctest(decorator, input_lines, found, submitted)
def process_output(self, data, output_prompt, input_lines, output, is_doctest, decorator, image_file)
Process data block for OUTPUT token.
3.039189
3.105668
0.978594
ret = [] output = None input_lines = None lineno = self.IP.execution_count input_prompt = self.promptin % lineno output_prompt = self.promptout % lineno image_file = None image_directive = None for token, data in block: if token == COMMENT: out_data = self.process_comment(data) elif token == INPUT: (out_data, input_lines, output, is_doctest, decorator, image_file, image_directive) = \ self.process_input(data, input_prompt, lineno) elif token == OUTPUT: out_data = \ self.process_output(data, output_prompt, input_lines, output, is_doctest, decorator, image_file) if out_data: ret.extend(out_data) # save the image files if image_file is not None: self.save_image(image_file) return ret, image_directive
def process_block(self, block)
process block from the block_parser and return a list of processed lines
4.157688
4.06084
1.023849
# We are here if the @figure pseudo decorator was used. Thus, it's # possible that we could be here even if python_mplbackend were set to # `None`. That's also strange and perhaps worthy of raising an # exception, but for now, we just set the backend to 'agg'. if not self._pyplot_imported: if 'matplotlib.backends' not in sys.modules: # Then ipython_matplotlib was set to None but there was a # call to the @figure decorator (and ipython_execlines did # not set a backend). #raise Exception("No backend was set, but @figure was used!") import matplotlib matplotlib.use('agg') # Always import pyplot into embedded shell. self.process_input_line('import matplotlib.pyplot as plt', store_history=False) self._pyplot_imported = True
def ensure_pyplot(self)
Ensures that pyplot has been imported into the embedded IPython shell. Also, makes sure to set the backend appropriately if not set already.
10.325671
9.827502
1.050691
if not params: params = {} request_url, response = _fetch_remote(service_url, params, use_http_post) if six.PY3: str_response = response.read().decode('utf-8') return (request_url, json.loads(str_response, parse_float=Decimal)) return (request_url, json.load(response, parse_float=Decimal))
def _fetch_remote_json(service_url, params=None, use_http_post=False)
Retrieves a JSON object from a URL.
2.625453
2.528692
1.038265
if not params: params = {} request_url, response = _fetch_remote(service_url, params, use_http_post) dummy, params = cgi.parse_header( response.headers.get('Content-Disposition', '')) fn = params['filename'] return (response.headers.get('content-type'), fn, response.read(), response.geturl())
def _fetch_remote_file(service_url, params=None, use_http_post=False)
Retrieves a file from a URL. Returns a tuple (mimetype, filename, data, url)
4.070279
3.85273
1.056466
params = {'address': location, 'sensor': str(sensor).lower()} if api_key is not None: params['key'] = api_key url, geo_response = _fetch_remote_json( GooglePlaces.GEOCODE_API_URL, params) _validate_response(url, geo_response) if geo_response['status'] == GooglePlaces.RESPONSE_STATUS_ZERO_RESULTS: error_detail = ('Lat/Lng for location \'%s\' can\'t be determined.' % location) raise GooglePlacesError(error_detail) return geo_response['results'][0]['geometry']['location']
def geocode_location(location, sensor=False, api_key=None)
Converts a human-readable location to lat-lng. Returns a dict with lat and lng keys. keyword arguments: location -- A human-readable location, e.g. 'London, England' sensor -- Boolean flag denoting if the location came from a device using its location sensor (default False) api_key -- A valid Google Places API key. raises: GooglePlacesError -- if the geocoder fails to find a location.
3.502207
3.459509
1.012342
url, detail_response = _fetch_remote_json(GooglePlaces.DETAIL_API_URL, {'placeid': place_id, 'sensor': str(sensor).lower(), 'key': api_key, 'language': language}) _validate_response(url, detail_response) return detail_response['result']
def _get_place_details(place_id, api_key, sensor=False, language=lang.ENGLISH)
Gets a detailed place response. keyword arguments: place_id -- The unique identifier for the required place.
3.369961
4.336484
0.777118
params = {'photoreference': photoreference, 'sensor': str(sensor).lower(), 'key': api_key} if maxheight: params['maxheight'] = maxheight if maxwidth: params['maxwidth'] = maxwidth return _fetch_remote_file(GooglePlaces.PHOTO_API_URL, params)
def _get_place_photo(photoreference, api_key, maxheight=None, maxwidth=None, sensor=False)
Gets a place's photo by reference. See detailed documentation at https://developers.google.com/places/documentation/photos Arguments: photoreference -- The unique Google reference for the required photo. Keyword arguments: maxheight -- The maximum desired photo height in pixels maxwidth -- The maximum desired photo width in pixels You must specify one of these keyword arguments. The acceptable value is an integer between 1 and 1600.
2.514718
3.038176
0.827706
if response['status'] not in [GooglePlaces.RESPONSE_STATUS_OK, GooglePlaces.RESPONSE_STATUS_ZERO_RESULTS]: error_detail = ('Request to URL %s failed with response code: %s' % (url, response['status'])) raise GooglePlacesError(error_detail)
def _validate_response(url, response)
Validates that the response from Google was successful.
4.169454
3.731852
1.117261
if location is None and lat_lng is None and pagetoken is None: raise ValueError('One of location, lat_lng or pagetoken must be passed in.') if rankby == 'distance': # As per API docs rankby == distance: # One or more of keyword, name, or types is required. if keyword is None and types == [] and name is None: raise ValueError('When rankby = googleplaces.ranking.DISTANCE, ' + 'name, keyword or types kwargs ' + 'must be specified.') self._sensor = sensor radius = (radius if radius <= GooglePlaces.MAXIMUM_SEARCH_RADIUS else GooglePlaces.MAXIMUM_SEARCH_RADIUS) lat_lng_str = self._generate_lat_lng_string(lat_lng, location) self._request_params = {'location': lat_lng_str} if rankby == 'prominence': self._request_params['radius'] = radius else: self._request_params['rankby'] = rankby if type: self._request_params['type'] = type elif types: if len(types) == 1: self._request_params['type'] = types[0] elif len(types) > 1: self._request_params['types'] = '|'.join(types) if keyword is not None: self._request_params['keyword'] = keyword if name is not None: self._request_params['name'] = name if pagetoken is not None: self._request_params['pagetoken'] = pagetoken if language is not None: self._request_params['language'] = language self._add_required_param_keys() url, places_response = _fetch_remote_json( GooglePlaces.NEARBY_SEARCH_API_URL, self._request_params) _validate_response(url, places_response) return GooglePlacesSearchResult(self, places_response)
def nearby_search(self, language=lang.ENGLISH, keyword=None, location=None, lat_lng=None, name=None, radius=3200, rankby=ranking.PROMINENCE, sensor=False, type=None, types=[], pagetoken=None)
Perform a nearby search using the Google Places API. One of location, lat_lng or pagetoken is required; the rest of the keyword arguments are optional. keyword arguments: keyword -- A term to be matched against all available fields, including but not limited to name, type, and address (default None) location -- A human readable location, e.g. 'London, England' (default None) language -- The language code, indicating in which language the results should be returned, if possible. (default lang.ENGLISH) lat_lng -- A dict containing the following keys: lat, lng (default None) name -- A term to be matched against the names of the Places. Results will be restricted to those containing the passed name value. (default None) radius -- The radius (in meters) around the location/lat_lng to restrict the search to. The maximum is 50000 meters. (default 3200) rankby -- Specifies the order in which results are listed: ranking.PROMINENCE (default) or ranking.DISTANCE (implies no radius argument). sensor -- Indicates whether or not the Place request came from a device using a location sensor (default False). type -- Optional type param used to indicate place category. types -- An optional list of types, restricting the results to Places (default []). If there is only one item the request will be sent as the type param. pagetoken -- Optional parameter to force the search result to return the next 20 results from a previously run search. Setting this parameter will execute a search with the same parameters used previously. (default None)
2.693323
2.671571
1.008142
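A minimal sketch of a nearby search, following the parameters documented above; the API key is a placeholder and the result attributes (places, name, geo_location) follow the library's documented usage.

    from googleplaces import GooglePlaces, types

    google_places = GooglePlaces('YOUR_API_KEY')

    query_result = google_places.nearby_search(
        location='London, England',
        keyword='Fish and Chips',
        radius=500,
        types=[types.TYPE_FOOD])

    for place in query_result.places:
        print(place.name, place.geo_location)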
self._request_params = {'query': query} if lat_lng is not None or location is not None: lat_lng_str = self._generate_lat_lng_string(lat_lng, location) self._request_params['location'] = lat_lng_str self._request_params['radius'] = radius if type: self._request_params['type'] = type elif types: if len(types) == 1: self._request_params['type'] = types[0] elif len(types) > 1: self._request_params['types'] = '|'.join(types) if language is not None: self._request_params['language'] = language if pagetoken is not None: self._request_params['pagetoken'] = pagetoken self._add_required_param_keys() url, places_response = _fetch_remote_json( GooglePlaces.TEXT_SEARCH_API_URL, self._request_params) _validate_response(url, places_response) return GooglePlacesSearchResult(self, places_response)
def text_search(self, query=None, language=lang.ENGLISH, lat_lng=None, radius=3200, type=None, types=[], location=None, pagetoken=None)
Perform a text search using the Google Places API. Only one of the query or pagetoken kwargs is required; the rest of the keyword arguments are optional. keyword arguments: lat_lng -- A dict containing the following keys: lat, lng (default None) location -- A human readable location, e.g. 'London, England' (default None) pagetoken -- Optional parameter to force the search result to return the next 20 results from a previously run search. Setting this parameter will execute a search with the same parameters used previously. (default None) radius -- The radius (in meters) around the location/lat_lng to restrict the search to. The maximum is 50000 meters. (default 3200) query -- The text string on which to search, for example: "Restaurant in New York". type -- Optional type param used to indicate place category. types -- An optional list of types, restricting the results to Places (default []). If there is only one item the request will be sent as the type param.
2.253706
2.368831
0.9514