code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
if self.version:
endpoint_path = join(self.version, endpoint)
else:
endpoint_path = endpoint
url = urljoin(self.uri, endpoint_path)
if authenticate: # sign off kwargs and url before sending request
url, request_kwargs = self.sign(url, endpoint, endpoint_path,
method_verb, *args, **kwargs)
else:
request_kwargs = kwargs
log.debug("Making request to: %s, kwargs: %s", url, request_kwargs)
r = self.api_request(method_verb, url, timeout=self.timeout,
**request_kwargs)
log.debug("Made %s request made to %s, with headers %s and body %s. "
"Status code %s", r.request.method,
r.request.url, r.request.headers,
r.request.body, r.status_code)
return r | def query(self, method_verb, endpoint, authenticate=False,
*args, **kwargs) | Queries exchange using given data. Defaults to unauthenticated query.
:param method_verb: valid request type (PUT, GET, POST etc)
:param endpoint: endpoint path for the resource to query, sans the url &
API version (i.e. '/btcusd/ticker/').
:param authenticate: Bool to determine whether or not a signature is
required.
:param args: Optional args for requests.request()
:param kwargs: Optional Kwargs for self.sign() and requests.request()
:return: requests.Response object | 2.984413 | 3.006339 | 0.992707 |
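A minimal standalone sketch of the URL construction performed by `query()` above — here `join` is assumed to be a POSIX-style path join, and the base URI and endpoint are illustrative values, not taken from the source:

```python
from posixpath import join
from urllib.parse import urljoin

# Reproduce query()'s endpoint-path handling for a versioned API.
version, endpoint = 'v1', 'pubticker/btcusd'  # hypothetical values
endpoint_path = join(version, endpoint) if version else endpoint
url = urljoin('https://api.bitfinex.com/', endpoint_path)
print(url)  # https://api.bitfinex.com/v1/pubticker/btcusd
```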
if cmd == 'restart':
self.restart(soft=True)
elif cmd == 'stop':
self.stop() | def eval_command(self, cmd) | Thread func to allow restarting / stopping of threads, for example
when receiving a connection reset info message from the wss server.
:return: | 4.901457 | 4.534968 | 1.080814 |
for chan_id in self._heartbeats:
if ts - self._heartbeats[chan_id] >= 10:
if chan_id not in self._late_heartbeats:
try:
# This is newly late; escalate
log.warning("BitfinexWSS.heartbeats: Channel %s hasn't "
"sent a heartbeat in %s seconds!",
self.channel_labels[chan_id],
ts - self._heartbeats[chan_id])
self._late_heartbeats[chan_id] = ts
except KeyError:
# This channel ID is not known to us - log and raise
log.error("BitfinexWSS.heartbeats: Channel %s is not "
"registered in the connector's registry! "
"Restarting Connection to avoid errors..",
chan_id)
raise UnknownChannelError
else:
# We know of this already
continue
else:
# it's not late
try:
self._late_heartbeats.pop(chan_id)
except KeyError:
# wasn't late before, check next channel
continue
log.info("BitfinexWSS.heartbeats: Channel %s has sent a "
"heartbeat again!", self.channel_labels[chan_id])
self.ping() | def _check_heartbeats(self, ts, *args, **kwargs) | Checks if the heartbeats are on-time. If not, the channel id is escalated
to self._late_heartbeats and a warning is issued; once a hb is received
again from this channel, it'll be removed from this dict, and an Info
message logged.
:param ts: timestamp, declares when data was received by the client
:return: | 4.563959 | 4.290187 | 1.063813 |
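To make the escalation rule concrete, here is a self-contained sketch of the 10-second threshold check used by `_check_heartbeats()` (the dict shapes mirror `_heartbeats`/`_late_heartbeats`; channel ids and ages are invented):

```python
import time

ts = time.time()
heartbeats = {1: ts - 12, 2: ts - 3}  # channel 1 is 12s stale, channel 2 fresh
late_heartbeats = {}

for chan_id, last_seen in heartbeats.items():
    if ts - last_seen >= 10 and chan_id not in late_heartbeats:
        late_heartbeats[chan_id] = ts  # newly late: escalate

print(late_heartbeats)  # {1: ...} -- only channel 1 is flagged
```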
super(BitfinexWSS, self).start()
log.info("BitfinexWSS.start(): Initializing Websocket connection..")
while self.conn is None:
try:
self.conn = create_connection(self.addr, timeout=10)
except WebSocketTimeoutException:
self.conn = None
print("Couldn't create websocket connection - retrying!")
log.info("BitfinexWSS.start(): Initializing receiver thread..")
if not self.receiver_thread:
self.receiver_thread = Thread(target=self.receive, name='Receiver Thread')
self.receiver_thread.start()
else:
log.info("BitfinexWSS.start(): Thread not started! "
"self.receiver_thread is populated!")
log.info("BitfinexWSS.start(): Initializing processing thread..")
if not self.processing_thread:
self.processing_thread = Thread(target=self.process, name='Processing Thread')
self.processing_thread.start()
else:
log.info("BitfinexWSS.start(): Thread not started! "
"self.processing_thread is populated!")
self.setup_subscriptions() | def start(self) | Start the websocket client threads
:return: | 2.571036 | 2.579033 | 0.996899 |
super(BitfinexWSS, self).stop()
log.info("BitfinexWSS.stop(): Stopping client..")
log.info("BitfinexWSS.stop(): Joining receiver thread..")
try:
self.receiver_thread.join()
if self.receiver_thread.is_alive():
time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Receiver thread was not running!")
log.info("BitfinexWSS.stop(): Joining processing thread..")
try:
self.processing_thread.join()
if self.processing_thread.is_alive():
time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Processing thread was not running!")
log.info("BitfinexWSS.stop(): Closing websocket conection..")
try:
self.conn.close()
except WebSocketConnectionClosedException:
pass
except AttributeError:
# Connection is None
pass
self.conn = None
self.processing_thread = None
self.receiver_thread = None
log.info("BitfinexWSS.stop(): Done!") | def stop(self) | Stop all threads and modules of the client.
:return: | 2.415157 | 2.431209 | 0.993397 |
log.info("BitfinexWSS.restart(): Restarting client..")
super(BitfinexWSS, self).restart()
# cache channel labels temporarily if soft == True
channel_labels = [self.channel_labels[k] for k in self.channel_labels] if soft else None
# clear previous channel caches
self.channels = {}
self.channel_labels = {}
self.channel_states = {}
if channel_labels:
# re-subscribe to channels
for channel_name, kwargs in channel_labels:
self._subscribe(channel_name, **kwargs) | def restart(self, soft=False) | Restarts client. If soft is True, the client attempts to re-subscribe
to all channels which it was previously subscribed to.
:return: | 5.271916 | 4.62517 | 1.139832 |
while self.running:
if self._receiver_lock.acquire(blocking=False):
try:
raw = self.conn.recv()
except WebSocketTimeoutException:
self._receiver_lock.release()
continue
except WebSocketConnectionClosedException:
# this needs to restart the client, while keeping track
# of the currently subscribed channels!
self.conn = None
self._controller_q.put('restart')
self._receiver_lock.release()
continue
except AttributeError:
# self.conn is None, idle loop until shutdown of thread
self._receiver_lock.release()
continue
msg = time.time(), json.loads(raw)
log.debug("receiver Thread: Data Received: %s", msg)
self.receiver_q.put(msg)
self._receiver_lock.release()
else:
# The receiver_lock was locked, idling until available
time.sleep(0.5) | def receive(self) | Receives incoming websocket messages, and puts them on the Client queue
for processing.
:return: | 5.797147 | 5.667915 | 1.022801 |
while self.running:
if self._processor_lock.acquire(blocking=False):
if self.ping_timer:
try:
self._check_ping()
except TimeoutError:
log.exception("BitfinexWSS.ping(): TimedOut! (%ss)" %
self.ping_timer)
except (WebSocketConnectionClosedException,
ConnectionResetError):
log.exception("BitfinexWSS.ping(): Connection Error!")
self.conn = None
if not self.conn:
# The connection was killed - initiate restart
self._controller_q.put('restart')
skip_processing = False
try:
ts, data = self.receiver_q.get(timeout=0.1)
except queue.Empty:
skip_processing = True
ts = time.time()
data = None
if not skip_processing:
log.debug("Processing Data: %s", data)
if isinstance(data, list):
self.handle_data(ts, data)
else: # Not a list, hence it could be a response
try:
self.handle_response(ts, data)
except UnknownEventError:
# We don't know what event this is- Raise an
# error & log data!
log.exception("main() - UnknownEventError: %s",
data)
log.info("main() - Shutting Down due to "
"Unknown Error!")
self._controller_q.put('stop')
except ConnectionResetError:
log.info("processor Thread: Connection Was reset, "
"initiating restart")
self._controller_q.put('restart')
self._check_heartbeats(ts)
self._processor_lock.release()
else:
time.sleep(0.5) | def process(self) | Processes the Client queue, and passes the data to the respective
methods.
:return: | 4.792687 | 4.803019 | 0.997849 |
log.info("handle_response: Handling response %s", resp)
event = resp['event']
try:
self._event_handlers[event](ts, **resp)
# Handle Non-Critical Errors
except (InvalidChannelError, InvalidPairError, InvalidBookLengthError,
InvalidBookPrecisionError) as e:
log.exception(e)
print(e)
except (NotSubscribedError, AlreadySubscribedError) as e:
log.exception(e)
print(e)
except GenericSubscriptionError as e:
log.exception(e)
print(e)
# Handle Critical Errors
except InvalidEventError as e:
log.critical("handle_response(): %s; %s", e, resp)
log.exception(e)
raise SystemError(e)
except KeyError:
# unsupported event!
raise UnknownEventError("handle_response(): %s" % resp) | def handle_response(self, ts, resp) | Passes a response message to the corresponding event handler, and also
takes care of handling errors raised by the _raise_error handler.
:param ts: timestamp, declares when data was received by the client
:param resp: dict, containing info or error keys, among others
:return: | 4.354029 | 4.129108 | 1.054472 |
log.debug("_handle_subscribed: %s - %s - %s", chanId, channel, kwargs)
if chanId in self.channels:
raise AlreadyRegisteredError()
self._heartbeats[chanId] = time.time()
try:
channel_key = ('raw_'+channel
if kwargs['prec'].startswith('R') and channel == 'book'
else channel)
except KeyError:
channel_key = channel
try:
self.channels[chanId] = self._data_handlers[channel_key]
except KeyError:
raise UnknownChannelError()
# prep kwargs to be stored as the secondary value in the label tuple
try:
kwargs.pop('event')
except KeyError:
pass
try:
kwargs.pop('len')
except KeyError:
pass
try:
kwargs.pop('chanId')
except KeyError:
pass
self.channel_labels[chanId] = (channel_key, kwargs) | def _handle_subscribed(self, *args, chanId=None, channel=None, **kwargs) | Handles responses to subscribe() commands - registers a channel id with
the client and assigns a data handler to it.
:param chanId: int, represents the channel id as assigned by the server
:param channel: str, represents channel name | 4.31024 | 4.363985 | 0.987685 |
log.debug("_handle_unsubscribed: %s - %s", chanId, kwargs)
try:
self.channels.pop(chanId)
except KeyError:
raise NotRegisteredError()
try:
self._heartbeats.pop(chanId)
except KeyError:
pass
try:
self._late_heartbeats.pop(chanId)
except KeyError:
pass | def _handle_unsubscribed(self, *args, chanId=None, **kwargs) | Handles responses to unsubscribe() commands - removes a channel id from
the client.
:param chanId: int, represents the channel id as assigned by the server | 3.369247 | 3.499911 | 0.962667 |
log.debug("_raise_error(): %s" % kwargs)
try:
error_code = str(kwargs['code'])
except KeyError as e:
raise FaultyPayloadError('_raise_error(): %s' % kwargs)
try:
raise self._code_handlers[error_code]()
except KeyError:
raise UnknownWSSError() | def _raise_error(self, *args, **kwargs) | Raises the proper exception for passed error code. These must then be
handled by the layer calling _raise_error() | 6.073382 | 5.272314 | 1.151939 |
if 'version' in kwargs:
# set api version number and exit
self.api_version = kwargs['version']
print("Initialized API with version %s" % self.api_version)
return
try:
info_code = str(kwargs['code'])
except KeyError:
raise FaultyPayloadError("_handle_info: %s" % kwargs)
if not info_code.startswith('2'):
raise ValueError("Info Code must start with 2! %s" % kwargs)
output_msg = "_handle_info(): %s" % kwargs
log.info(output_msg)
try:
self._code_handlers[info_code]()
except KeyError:
raise UnknownWSSInfo(output_msg) | def _handle_info(self, *args, **kwargs) | Handles info messages and executes the corresponding code | 5.486063 | 5.091052 | 1.077589 |
log.info("BitfinexWSS.ping(): Ping received! (%ss)",
ts - self.ping_timer)
self.ping_timer = None | def _handle_pong(self, ts, *args, **kwargs) | Handles pong messages; resets the self.ping_timer variable and logs
info message.
:param ts: timestamp, declares when data was received by the client
:return: | 15.416952 | 12.011221 | 1.283546 |
try:
chan_id, *data = msg
except ValueError as e:
# Too many or too few values
raise FaultyPayloadError("handle_data(): %s - %s" % (msg, e))
self._heartbeats[chan_id] = ts
if data[0] == 'hb':
self._handle_hearbeat(ts, chan_id)
return
try:
self.channels[chan_id](ts, chan_id, data)
except KeyError:
raise NotRegisteredError("handle_data: %s not registered - "
"Payload: %s" % (chan_id, msg)) | def handle_data(self, ts, msg) | Passes msg to responding data handler, determined by its channel id,
which is expected at index 0.
:param ts: timestamp, declares when data was received by the client
:param msg: list or dict of websocket data
:return: | 4.538214 | 4.546062 | 0.998274 |
pair = self.channel_labels[chan_id][1]['pair']
entry = (*data, ts)
self.data_q.put(('ticker', pair, entry)) | def _handle_ticker(self, ts, chan_id, data) | Adds received ticker data to self.tickers dict, filed under its channel
id.
:param ts: timestamp, declares when data was received by the client
:param chan_id: int, channel id
:param data: tuple or list of data received via wss
:return: | 9.297436 | 10.430694 | 0.891354 |
pair = self.channel_labels[chan_id][1]['key'].split(':')[-1][1:]
entry = data, ts
self.data_q.put(('ohlc', pair, entry)) | def _handle_candles(self, ts, chan_id, data) | Stores OHLC data received via wss in self.candles[chan_id]
:param ts: timestamp, declares when data was received by the client
:param chan_id: int, channel id
:param data: list of data received via wss
:return: | 10.096549 | 11.079621 | 0.911272 |
flags = 0
if decimals_as_strings:
flags += 8
if ts_as_dates:
flags += 32
if sequencing:
flags += 65536
payload = {'event': 'conf', 'flags': flags}
payload.update(kwargs)
self.send(payload) | def config(self, decimals_as_strings=True, ts_as_dates=False,
sequencing=False, **kwargs) | Send configuration to websocket server
:param decimals_as_strings: bool, turn on/off decimals as strings
:param ts_as_dates: bool, decide to request timestamps as dates instead
:param sequencing: bool, turn on sequencing
:param kwargs:
:return: | 2.965723 | 2.767636 | 1.071572 |
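The flag values added up in `config()` correspond to bit fields in Bitfinex's v2 `conf` message (8 = decimals as strings, 32 = timestamps as dates, 65536 = sequencing, as used above); combining them by addition is equivalent to OR-ing disjoint bits:

```python
# Sketch of the payload config() would send with its defaults.
flags = 8           # decimals_as_strings=True
# ts_as_dates and sequencing default to False, so no further bits are set.
payload = {'event': 'conf', 'flags': flags}
print(payload)  # {'event': 'conf', 'flags': 8}

# All three options enabled:
print(8 + 32 + 65536)  # 65576 == 8 | 32 | 65536
```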
prec = 'R0' if prec is None else prec
self._subscribe('book', pair=pair, prec=prec, **kwargs) | def raw_order_book(self, pair, prec=None, **kwargs) | Subscribe to the passed pair's raw order book channel.
:param pair: str, Pair to request data for.
:param kwargs:
:return: | 5.347772 | 6.748256 | 0.792467 |
valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',
'7D', '14D', '1M']
if timeframe:
if timeframe not in valid_tfs:
raise ValueError("timeframe must be any of %s" % valid_tfs)
else:
timeframe = '1m'
pair = 't' + pair if not pair.startswith('t') else pair
key = 'trade:' + timeframe + ':' + pair
self._subscribe('candles', key=key, **kwargs) | def ohlc(self, pair, timeframe=None, **kwargs) | Subscribe to the passed pair's OHLC data channel.
:param pair: str, Pair to request data for.
:param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
1D, 7D, 14D, 1M}
:param kwargs:
:return: | 2.37431 | 2.165389 | 1.096482 |
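The subscription key built by `ohlc()` can be reproduced in isolation (pair and timeframe here are example inputs):

```python
pair, timeframe = 'BTCUSD', '5m'
pair = 't' + pair if not pair.startswith('t') else pair
key = 'trade:' + timeframe + ':' + pair
print(key)  # trade:5m:tBTCUSD
```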
global _is_sphinx
_is_sphinx = True
app.add_config_value('no_underscore_emphasis', False, 'env')
app.add_config_value('m2r_parse_relative_links', False, 'env')
app.add_config_value('m2r_anonymous_references', False, 'env')
app.add_config_value('m2r_disable_inline_math', False, 'env')
app.add_source_parser('.md', M2RParser)
app.add_directive('mdinclude', MdInclude)
metadata = dict(
version=__version__,
parallel_read_safe=True,
parallel_write_safe=True,
)
return metadata | def setup(app) | When used for sphinx extension. | 2.468541 | 2.360953 | 1.04557 |
return '\n{0}\n{1}\n'.format(text,
self.hmarks[level] * column_width(text)) | def header(self, text, level, raw=None) | Rendering header/heading tags like ``<h1>`` ``<h2>``.
:param text: rendered text content for the header.
:param level: a number for the header level, for example: 1.
:param raw: raw text content of the header. | 10.735586 | 15.476532 | 0.693669 |
if self.anonymous_references:
underscore = '__'
else:
underscore = '_'
if title:
return self._raw_html(
'<a href="{link}" title="{title}">{text}</a>'.format(
link=link, title=title, text=text
)
)
if not self.parse_relative_links:
return r'\ `{text} <{target}>`{underscore}\ '.format(
target=link,
text=text,
underscore=underscore
)
else:
url_info = urlparse(link)
if url_info.scheme:
return r'\ `{text} <{target}>`{underscore}\ '.format(
target=link,
text=text,
underscore=underscore
)
else:
link_type = 'doc'
anchor = url_info.fragment
if url_info.fragment:
if url_info.path:
# Can't link to anchors via doc directive.
anchor = ''
else:
# Example: [text](#anchor)
link_type = 'ref'
doc_link = '{doc_name}{anchor}'.format(
# splittext approach works whether or not path is set. It
# will return an empty string if unset, which leads to
# anchor only ref.
doc_name=os.path.splitext(url_info.path)[0],
anchor=anchor
)
return r'\ :{link_type}:`{text} <{doc_link}>`\ '.format(
link_type=link_type,
doc_link=doc_link,
text=text
) | def link(self, link, title, text) | Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description. | 4.087728 | 4.204866 | 0.972142 |
found_events = []
content = None
if url:
content = ICalDownload().data_from_url(url, apple_fix=fix_apple)
if not content and file:
content = ICalDownload().data_from_file(file, apple_fix=fix_apple)
if not content and string_content:
content = ICalDownload().data_from_string(string_content,
apple_fix=fix_apple)
found_events += parse_events(content, start=start, end=end)
return found_events | def events(url=None, file=None, string_content=None, start=None, end=None, fix_apple=False) | Get all events form the given iCal URL occurring in the given time range.
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date (see dateutils.date)
:param end: end date (see dateutils.date)
:param fix_apple: fix known Apple iCal issues
:return: events as list of dictionaries | 2.667983 | 2.716491 | 0.982143 |
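A hedged usage sketch of `events()` — the calendar URL is a placeholder, and the `Event` attributes printed are those populated by `create_event()` further below:

```python
from datetime import date

found = events(url='https://example.com/calendar.ics',
               start=date(2021, 1, 1), end=date(2021, 1, 8))
for e in found:
    print(e.start, e.summary)
```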
data = []
try:
data += events(url=url, file=file, string_content=string_content,
start=start, end=end, fix_apple=fix_apple)
finally:
update_events(key, data)
request_finished(key) | def request_data(key, url, file, string_content, start, end, fix_apple) | Request data, update local data cache and remove this Thread form queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues | 4.254364 | 3.947546 | 1.077724 |
t = Thread(target=request_data, args=(key, url, file, string_content, start, end, fix_apple))
with event_lock:
if key not in threads:
threads[key] = []
threads[key].append(t)
if not threads[key][0].is_alive():
threads[key][0].start() | def events_async(key, url=None, file=None, start=None, string_content=None,
end=None, fix_apple=False) | Trigger an asynchronous data request.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues | 2.714393 | 3.198634 | 0.84861 |
with event_lock:
threads[key] = threads[key][1:]
if threads[key]:
threads[key][0].run() | def request_finished(key) | Remove finished Thread from queue.
:param key: data source key | 8.463311 | 9.921423 | 0.853034 |
if apple_fix:
url = apple_url_fix(url)
_, content = self.http.request(url)
if not content:
raise ConnectionError('Could not get data from %s!' % url)
return self.decode(content, apple_fix=apple_fix) | def data_from_url(self, url, apple_fix=False) | Download iCal data from URL.
:param url: URL to download
:param apple_fix: fix Apple bugs (protocol type and tzdata in iCal)
:return: decoded (and fixed) iCal data | 3.957871 | 4.251952 | 0.930836 |
with open(file, mode='rb') as f:
content = f.read()
if not content:
raise IOError("File %s is not readable or is empty!" % file)
return self.decode(content, apple_fix=apple_fix) | def data_from_file(self, file, apple_fix=False) | Read iCal data from file.
:param file: file to read
:param apple_fix: fix wrong Apple tzdata in iCal
:return: decoded (and fixed) iCal data | 4.086866 | 4.258953 | 0.959594 |
content = content.decode(self.encoding)
content = content.replace('\r', '')
if apple_fix:
content = apple_data_fix(content)
return content | def decode(self, content, apple_fix=False) | Decode content using the set charset.
:param content: content do decode
:param apple_fix: fix Apple txdata bug
:return: decoded (and fixed) content | 3.574647 | 4.770026 | 0.749398 |
event = Event()
event.start = normalize(component.get('dtstart').dt, tz=tz)
if component.get('dtend'):
event.end = normalize(component.get('dtend').dt, tz=tz)
elif component.get('duration'): # compute implicit end as start + duration
event.end = event.start + component.get('duration').dt
else: # compute implicit end as start + 0
event.end = event.start
try:
event.summary = str(component.get('summary'))
except UnicodeEncodeError as e:
event.summary = str(component.get('summary').encode('utf-8'))
try:
event.description = str(component.get('description'))
except UnicodeEncodeError as e:
event.description = str(component.get('description').encode('utf-8'))
event.all_day = type(component.get('dtstart').dt) is date
if component.get('rrule'):
event.recurring = True
try:
event.location = str(component.get('location'))
except UnicodeEncodeError as e:
event.location = str(component.get('location').encode('utf-8'))
if component.get('attendee'):
event.attendee = component.get('attendee')
if type(event.attendee) is list:
temp = []
for a in event.attendee:
temp.append(a.encode('utf-8').decode('ascii'))
event.attendee = temp
else:
event.attendee = event.attendee.encode('utf-8').decode('ascii')
if component.get('uid'):
event.uid = component.get('uid').encode('utf-8').decode('ascii')
if component.get('organizer'):
event.organizer = component.get('organizer').encode('utf-8').decode('ascii')
return event | def create_event(component, tz=UTC) | Create an event from its iCal representation.
:param component: iCal component
:param tz: timezone for start and end times
:return: event | 1.812618 | 1.82454 | 0.993466 |
if type(dt) is date:
dt = dt + relativedelta(hour=0)
elif type(dt) is datetime:
pass
else:
raise ValueError("unknown type %s" % type(dt))
if dt.tzinfo:
dt = dt.astimezone(tz)
else:
dt = dt.replace(tzinfo=tz)
return dt | def normalize(dt, tz=UTC) | Convert date or datetime to datetime with timezone.
:param dt: date to normalize
:param tz: the normalized date's timezone
:return: date as datetime with timezone | 2.245072 | 2.494692 | 0.89994 |
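For example, `normalize()` turns both plain dates and naive datetimes into timezone-aware datetimes (UTC by default):

```python
from datetime import date, datetime

print(normalize(date(2021, 6, 1)))             # 2021-06-01 00:00:00+00:00
print(normalize(datetime(2021, 6, 1, 9, 30)))  # 2021-06-01 09:30:00+00:00
```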
if not start:
start = now()
if not end:
end = start + default_span
if not content:
raise ValueError('Content is invalid!')
calendar = Calendar.from_ical(content)
# Find the calendar's timezone info, or use UTC
for c in calendar.walk():
if c.name == 'VTIMEZONE':
cal_tz = gettz(str(c['TZID']))
break
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == "VEVENT":
e = create_event(component)
if e.recurring:
# Unfold recurring events according to their rrule
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend(e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))
elif e.end >= start and e.start <= end:
found.append(e)
return found | def parse_events(content, start=None, end=None, default_span=timedelta(days=7)) | Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list | 3.39033 | 3.422898 | 0.990485 |
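A minimal sketch feeding `parse_events()` an in-memory iCal string (the event data is invented for illustration; with no VTIMEZONE component the calendar falls back to UTC as shown above):

```python
from datetime import date

ics = (
    "BEGIN:VCALENDAR\n"
    "BEGIN:VEVENT\n"
    "DTSTART:20210601T090000Z\n"
    "DTEND:20210601T100000Z\n"
    "SUMMARY:Standup\n"
    "END:VEVENT\n"
    "END:VCALENDAR\n"
)
for e in parse_events(ics, start=date(2021, 5, 31), end=date(2021, 6, 7)):
    print(e.summary, e.start, e.end)
```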
if component.get('rrule'):
# component['rrule'] can be both a scalar and a list
rrules = component['rrule']
if not isinstance(rrules, list):
rrules = [rrules]
# Since DTSTART are always made timezone aware, UNTIL with no tzinfo
# must be converted to UTC.
for rule in rrules:
until = rule.get("until")
for idx, dt in enumerate(until or []):
if not hasattr(dt, 'tzinfo'):
until[idx] = normalize(normalize(dt, tz=tz), tz=UTC)
# Parse the rrules, might return a rruleset instance, instead of rrule
rule = rrulestr('\n'.join(x.to_ical().decode() for x in rrules), dtstart=normalize(component['dtstart'].dt, tz=tz))
if component.get('exdate'):
# Make sure, to work with a rruleset
if isinstance(rule, rrule):
rules = rruleset()
rules.rrule(rule)
rule = rules
# Add exdates to the rruleset
for exd in extract_exdates(component):
rule.exdate(exd)
# TODO: What about rdates and exrules?
# You really want an rrule for a component without rrule? Here you are.
else:
rule = rruleset()
rule.rdate(normalize(component['dtstart'].dt, tz=tz))
return rule | def parse_rrule(component, tz=UTC) | Extract a dateutil.rrule object from an icalendar component. Also includes
the component's dtstart and exdate properties. The rdate and exrule
properties are not yet supported.
:param component: icalendar component
:param tz: timezone for DST handling
:return: extracted rrule or rruleset | 4.520349 | 4.484992 | 1.007884 |
dates = []
exd_prop = component.get('exdate')
if exd_prop:
if isinstance(exd_prop, list): # In case there is more than one exdate property
for exd_list in exd_prop:
dates.extend(normalize(exd.dt) for exd in exd_list.dts)
elif isinstance(exd_prop, vDDDLists):
dates.extend(normalize(exd.dt) for exd in exd_prop.dts)
return dates | def extract_exdates(component) | Compile a list of all exception dates stored with a component.
:param component: icalendar iCal component
:return: list of exception dates | 3.334395 | 3.049067 | 1.093579 |
if not new_start:
new_start = self.start
if not uid:
uid = "%s_%d" % (self.uid, randint(0, 1000000))
ne = Event()
ne.summary = self.summary
ne.description = self.description
ne.start = new_start
if self.end:
duration = self.end - self.start
ne.end = (new_start + duration)
ne.all_day = self.all_day
ne.recurring = self.recurring
ne.location = self.location
ne.uid = uid
return ne | def copy_to(self, new_start=None, uid=None) | Create a new event equal to this with new start date.
:param new_start: new start date
:param uid: UID of new event
:return: new event | 2.410759 | 2.280795 | 1.056982 |
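A short usage sketch of `copy_to()` — `original_event` is a hypothetical `Event` instance; the clone keeps the duration and, since no `uid` is passed, gets a randomized suffix on the original UID:

```python
from datetime import timedelta

clone = original_event.copy_to(
    new_start=original_event.start + timedelta(days=1))
print(clone.start, clone.end, clone.uid)
```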
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key'] | def fetch_public_key(repo) | Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys | 3.14793 | 3.055668 | 1.030194 |
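Usage is a single call with an `owner/repo` slug (the slug below is an example; note the endpoint targets the legacy travis-ci.org API):

```python
key = fetch_public_key('pyupio/pyup')
print(key[:30], '...')
```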
# extra safeguard to make sure we are handling a bot branch here
assert new_branch.startswith(prefix)
comp = repo.compare(base_branch, new_branch)
logger.info("Got a total of {} commits in {}".format(comp.total_commits, new_branch))
return comp.total_commits == 0 | def is_empty_branch(self, repo, base_branch, new_branch, prefix) | Compares the top commits of two branches.
Please note: This function isn't checking if `base_branch` is a direct
parent of `new_branch`, see
http://stackoverflow.com/questions/3161204/find-the-parent-branch-of-a-git-branch
:param repo: github.Repository
:param base_branch: string name of the base branch
:param new_branch: string name of the new branch
:param prefix: string branch prefix, default 'pyup-'
:return: bool -- True if empty | 5.722629 | 5.78876 | 0.988576 |
# extra safeguard to make sure we are handling a bot branch here
assert branch.startswith(prefix)
ref = repo.get_git_ref("/".join(["heads", branch]))
ref.delete() | def delete_branch(self, repo, branch, prefix) | Deletes a branch.
:param repo: github.Repository
:param branch: string name of the branch to delete | 10.03028 | 11.346408 | 0.884005 |
self.configure(**kwargs)
self.get_all_requirements()
self.apply_updates(
initial=kwargs.get("initial", False),
scheduled=kwargs.get("scheduled", False)
)
return self.req_bundle | def update(self, **kwargs) | Main entrypoint to kick off an update run.
:param kwargs:
:return: RequirementsBundle | 7.301205 | 5.787397 | 1.26157 |
if not initial and self.config.is_valid_schedule():
# if the config has a valid schedule, return True if this is a scheduled run
return scheduled
return True | def can_pull(self, initial, scheduled) | Determines if pull requests should be created
:return: bool | 10.187984 | 9.538879 | 1.068048 |
closed = []
if self.bot_token and not pull_request.is_initial:
for pr in self.pull_requests:
close_pr = False
same_title = \
pr.canonical_title(self.config.pr_prefix) == \
pull_request.canonical_title(self.config.pr_prefix)
if scheduled and pull_request.is_scheduled:
# check that the PR is open and the title does not match
if pr.is_open and not same_title:
# we want to close the previous scheduled PR if it is not merged yet
# and we want to close all previous updates if the user choose to
# switch to a scheduled update
if pr.is_scheduled or pr.is_update:
close_pr = True
elif pull_request.is_update:
# check that, the pr is an update, is open, the titles are not equal and that
# the requirement matches
if pr.is_update and \
pr.is_open and \
not same_title and \
pr.get_requirement(self.config.pr_prefix) == update.requirement.key:
# there's a possible race condition where multiple updates with more than
# one target version conflict with each other (closing each others PRs).
# Check that's not the case here
if not self.has_conflicting_update(update):
close_pr = True
if close_pr and self.is_bot_the_only_committer(pr=pr):
logger.info("Closing stale PR {} for {}".format(pr.title, pull_request.title))
self.provider.close_pull_request(
bot_repo=self.bot_repo,
user_repo=self.user_repo,
pull_request=pr,
comment="Closing this in favor of #{}".format(
pull_request.number),
prefix=self.config.branch_prefix
)
pr.state = "closed"
closed.append(pr)
for closed_pr in closed:
self.pull_requests.remove(closed_pr) | def close_stale_prs(self, update, pull_request, scheduled) | Closes stale pull requests for the given update, links to the new pull request and deletes
the stale branch.
A stale PR is a PR that:
- Is not merged
- Is not closed
- Has no commits (except the bot commit)
:param update:
:param pull_request: | 4.597079 | 4.638033 | 0.99117 |
committer = self.provider.get_pull_request_committer(
self.user_repo,
pr)
# collect the unique committer logins
committer_set = {c.login for c in committer}
# it's impossible to get the bots login if this is an integration, just check that
# there's only one commit in the commit history.
if self.integration or getattr(self.provider, 'name', '') == 'gitlab':
return len(committer_set) == 1
# check that there's exactly one committer in this PRs commit history and
# that the committer is the bot
return len(committer_set) == 1 and self.provider.is_same_user(self.bot, committer[0]) | def is_bot_the_only_committer(self, pr) | Checks if the bot is the only committer for the given pull request.
:param update: Update to check
:return: bool - True if conflict found | 5.221005 | 5.205876 | 1.002906 |
# we explicitly want a flat list of updates here, that's why we call iter_updates
# with both `initial` and `scheduled` == False
for _, _, _, updates in self.iter_updates(initial=False, scheduled=False):
for _update in updates:
if (
update.requirement.key == _update.requirement.key and
(
update.commit_message != _update.commit_message or
update.requirement.latest_version_within_specs !=
_update.requirement.latest_version_within_specs
)
):
logger.info("{} conflicting with {}/{}".format(
update.requirement.key,
update.requirement.latest_version_within_specs,
_update.requirement.latest_version_within_specs)
)
return True
return False | def has_conflicting_update(self, update) | Checks if there are conflicting updates. Conflicting updates are updates that have the
same requirement but different target versions to update to.
:param update: Update to check
:return: bool - True if conflict found | 4.399598 | 4.237749 | 1.038192 |
logger.info("Preparing to create branch {} from {}".format(new_branch, self.config.branch))
try:
# create new branch
self.provider.create_branch(
base_branch=self.config.branch,
new_branch=new_branch,
repo=self.user_repo
)
logger.info("Created branch {} from {}".format(new_branch, self.config.branch))
return True
except BranchExistsError:
logger.info("Branch {} exists.".format(new_branch))
# if the branch exists, is empty and delete_empty is set, delete it and call
# this function again
if delete_empty:
if self.provider.is_empty_branch(self.user_repo, self.config.branch, new_branch,
self.config.branch_prefix):
self.provider.delete_branch(self.user_repo, new_branch,
self.config.branch_prefix)
logger.info("Branch {} was empty and has been deleted".format(new_branch))
return self.create_branch(new_branch, delete_empty=False)
logger.info("Branch {} is not empty".format(new_branch))
return False | def create_branch(self, new_branch, delete_empty=False) | Creates a new branch.
:param new_branch: string name of the new branch
:param delete_empty: bool -- delete the branch if it is empty
:return: bool -- True if successfull | 2.457505 | 2.453094 | 1.001798 |
# extra safeguard to make sure we are handling a bot branch here
assert new_branch.startswith(prefix)
comp = repo.repository_compare(base_branch, new_branch)
n = len(comp.commits)
logger.info("Got a total of {} commits in {}".format(n, new_branch))
return n == 0 | def is_empty_branch(self, repo, base_branch, new_branch, prefix) | Compares the top commits of two branches.
Please note: This function isn't checking if `base_branch` is a direct
parent of `new_branch`, see
http://stackoverflow.com/questions/3161204/find-the-parent-branch-of-a-git-branch
:param repo: github.Repository
:param base_branch: string name of the base branch
:param new_branch: string name of the new branch
:param prefix: string branch prefix, default 'pyup-'
:return: bool -- True if empty | 6.387463 | 6.758666 | 0.945077 |
# make sure that the name of the branch begins with pyup.
assert branch.startswith(prefix)
obj = repo.branches.get(branch)
obj.delete() | def delete_branch(self, repo, branch, prefix) | Deletes a branch.
:param repo: github.Repository
:param branch: string name of the branch to delete | 9.03313 | 9.40424 | 0.960538 |
for key, value in d.items():
if hasattr(self, key):
if key == "requirements":
items, value = value, []
for item in items:
if isinstance(item, basestring):
req = RequirementConfig(path=item)
elif isinstance(item, dict):
path, item = item.popitem()
req = RequirementConfig(
path=path,
pin=item.get("pin", None),
compile=item.get("compile", False),
update=item.get("update", Config.UPDATE_ALL)
)
value.append(req)
# add constraint requirement files to config
if req.compile:
for spec in req.compile.specs:
value.append(RequirementConfig(path=spec, pin=False))
elif key == "assignees":
# assignees can be a string or a list. If it's a string, convert it to a list
# to make things consistent
if isinstance(value, basestring):
value = [value, ]
elif key == 'gitlab':
value = GitlabConfig(**value)
elif key == 'pr_prefix':
# make sure that pr prefixes don't contain a PIPE
if "|" in value:
continue
# cast ints and floats to str
if isinstance(value, (int, float)) and not isinstance(value, bool):
value = str(value)
setattr(self, key, value) | def update_config(self, d) | Updates the config object.
:param d: dict | 3.812207 | 3.839148 | 0.992983 |
for req_file in self.requirements:
if path.strip("/") == req_file.path.strip("/"):
return getattr(req_file, attr)
return getattr(self, attr) | def _get_requirement_attr(self, attr, path) | Gets the attribute for a given requirement file in path
:param attr: string, attribute
:param path: string, path
:return: The attribute for the requirement, or the global default | 3.658159 | 4.374763 | 0.836196 |
return self._get_requirement_attr("update", path=path) in (Config.UPDATE_ALL,
Config.UPDATE_INSECURE,
Config.UPDATE_INSECURE_TYPO) | def can_update_insecure(self, path) | Checks if requirements in `path` can be updated if insecure.
:param path: string, path to requirement file
:return: bool | 9.272858 | 8.559816 | 1.083301 |
path = requirement_file.path
if self.config.can_update_all(path) or \
(self.config.can_update_insecure(path) and requirement.is_insecure):
# handle unpinned requirements only if pin is set
if not requirement.is_pinned:
return self.config.can_pin(path)
return True
return False | def should_update(self, requirement, requirement_file) | Determines if a requirement can be updated
:param requirement: Requirement
:param requirement_file: RequirementFile
:return: bool | 4.997765 | 4.807475 | 1.039582 |
log.critical('Kill reason: ' + warning)
if self.DEBUG:
return
try:
self.mail_this(warning)
except socket.gaierror:
current_time = time.localtime()
formatted_time = time.strftime('%Y-%m-%d %I:%M:%S%p', current_time)
with open(self.config['global']['killer_file'], 'a', encoding='utf-8') as killer_file:
killer_file.write('Time: {0}\nInternet is out.\n'
'Failure: {1}\n\n'.format(formatted_time, warning)) | def kill_the_system(self, warning: str) | Send an e-mail, and then
shut the system down quickly. | 4.72939 | 4.34865 | 1.087554 |
if POSIX:
log.debug('Platform: POSIX')
from killer.killer_posix import KillerPosix
return KillerPosix(config_path=args.config, debug=args.debug)
elif WINDOWS:
log.debug('Platform: Windows')
from killer.killer_windows import KillerWindows
return KillerWindows(config_path=args.config, debug=args.debug)
else:
# TODO: WSL
# TODO: OSX
# TODO: BSD
raise NotImplementedError("Your platform is not currently supported. "
"If you would like support to be added, or "
"if your platform is supported and this is "
"a bug, please open an issue on GitHub!") | def get_killer(args) | Returns a KillerBase instance subclassed based on the OS. | 3.603131 | 3.237969 | 1.112775 |
for device in BASE_PATH.iterdir():
with open(str(Path(device, 'type'))) as type_file:
if type_file.readline().strip() == device_type.value:
yield device.name | def get_devices(device_type: DeviceType) -> Iterator[str] | Gets names of power devices of the specified type.
:param str device_type: the type of the devices to retrieve
:return: the device names
:rtype: Iterator[str] | 3.928053 | 4.21771 | 0.931324 |
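A usage sketch, assuming `BASE_PATH` points at Linux's `/sys/class/power_supply` and that `DeviceType` has a `BATTERY` member (both are assumptions; only the `device_type.value` comparison is visible above):

```python
for name in get_devices(DeviceType.BATTERY):
    print(name)  # e.g. BAT0
```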
with open(str(Path(device_path, property_name))) as file:
return file.readline().strip() | def _get_property(device_path: Union[Path, str], property_name: str) -> str | Gets the given property for a device. | 5.979769 | 4.859853 | 1.230442 |
get_system_power_status = ctypes.windll.kernel32.GetSystemPowerStatus
get_system_power_status.argtypes = [ctypes.POINTER(SystemPowerStatus)]
get_system_power_status.restype = wintypes.BOOL
status = SystemPowerStatus()
if not get_system_power_status(ctypes.pointer(status)):
raise ctypes.WinError()
else:
return status | def get_power_status() -> SystemPowerStatus | Retrieves the power status of the system.
The status indicates whether the system is running on AC or DC power,
whether the battery is currently charging, how much battery life remains,
and if battery saver is on or off.
:raises OSError: if the call to GetSystemPowerStatus fails
:return: the power status
:rtype: SystemPowerStatus | 1.952022 | 2.271946 | 0.859185 |
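A hedged usage sketch — the field names below follow the Win32 `SYSTEM_POWER_STATUS` struct (`ACLineStatus`, `BatteryLifePercent`, ...), which the `SystemPowerStatus` ctypes structure is assumed to mirror:

```python
status = get_power_status()
print('On AC power' if status.ACLineStatus == 1 else 'On battery')
print('Battery at %d%%' % status.BatteryLifePercent)
```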
client = ClientMixin(api_key=api_key)
result = client.request('GET', endpoint='abilities',
add_headers=add_headers,)
return result['abilities'] | def abilities(api_key=None, add_headers=None) | Fetch a list of permission-like strings for this account. | 5.547652 | 5.439783 | 1.01983 |
client = ClientMixin(api_key=None)
try:
client.request('GET', endpoint='abilities/%s' % ability,
add_headers=add_headers)
return True
except Exception:
pass
return False | def can(ability, add_headers=None) | Test whether an ability is allowed. | 4.918044 | 4.765584 | 1.031992 |
with open(path, 'r+b') as f:
global api_key
api_key = f.read().strip()
return api_key | def set_api_key_from_file(path, set_global=True) | Set the global api_key from a file path. | 3.12755 | 2.994766 | 1.044339 |
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id,))
add_headers = {'from': from_email, }
data = {
'incident': {
'type': 'incident',
'status': 'resolved',
}
}
if resolution is not None:
data['resolution'] = resolution
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | def resolve(self, from_email, resolution=None) | Resolve an incident using a valid email address. | 3.405084 | 3.121981 | 1.09068 |
endpoint = '/'.join((self.endpoint, self.id,))
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
if user_ids is None or not isinstance(user_ids, list):
raise InvalidArguments(user_ids)
if not all([isinstance(i, six.string_types) for i in user_ids]):
raise InvalidArguments(user_ids)
assignees = [
{
'assignee': {
'id': user_id,
'type': 'user_reference',
}
}
for user_id in user_ids
]
add_headers = {'from': from_email, }
data = {
'incident': {
'type': 'incident',
'assignments': assignees,
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | def reassign(self, from_email, user_ids) | Reassign an incident to other users using a valid email address. | 2.677202 | 2.495812 | 1.072678 |
endpoint = '/'.join((self.endpoint, self.id, 'log_entries'))
query_params = {
'time_zone': time_zone,
'is_overview': json.dumps(is_overview),
}
if include:
query_params['include'] = include
result = self.logEntryFactory.find(
endpoint=endpoint,
api_key=self.api_key,
fetch_all=fetch_all,
**query_params
)
return result | def log_entries(self, time_zone='UTC', is_overview=False,
include=None, fetch_all=True) | Query for log entries on an incident instance. | 2.779596 | 2.827339 | 0.983114 |
endpoint = '/'.join((self.endpoint, self.id, 'notes'))
return self.noteFactory.find(
endpoint=endpoint,
api_key=self.api_key,
) | def notes(self) | Query for notes attached to this incident. | 6.452033 | 6.319837 | 1.020918 |
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id, 'notes'))
add_headers = {'from': from_email, }
return self.noteFactory.create(
endpoint=endpoint,
api_key=self.api_key,
add_headers=add_headers,
data={'content': content},
) | def create_note(self, from_email, content) | Create a note for this incident. | 3.784086 | 3.680076 | 1.028263 |
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id, 'snooze'))
add_headers = {'from': from_email, }
return self.__class__.create(
endpoint=endpoint,
api_key=self.api_key,
add_headers=add_headers,
data_key='duration',
data=duration,
) | def snooze(self, from_email, duration) | Snooze this incident for `duration` seconds. | 3.547358 | 3.503214 | 1.012601 |
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
add_headers = {'from': from_email, }
endpoint = '/'.join((self.endpoint, self.id, 'merge'))
incident_ids = [entity['id'] if isinstance(entity, Entity) else entity
for entity in source_incidents]
incident_references = [{'type': 'incident_reference', 'id': id_}
for id_ in incident_ids]
return self.__class__.create(
endpoint=endpoint,
api_key=self.api_key,
add_headers=add_headers,
data_key='source_incidents',
data=incident_references,
method='PUT',
) | def merge(self, from_email, source_incidents) | Merge other incidents into this incident. | 3.478366 | 3.298223 | 1.054618 |
endpoint = '/'.join((self.endpoint, self.id, 'alerts'))
return self.alertFactory.find(
endpoint=endpoint,
api_key=self.api_key,
) | def alerts(self) | Query for alerts attached to this incident. | 6.49601 | 6.002985 | 1.08213 |
seconds = 60 * 60 * 24 * 30 # seconds in 30 days
until = kwargs.pop('until', None)
since = kwargs.pop('since', None)
if until is None:
until = datetime.datetime.now()
if since is None:
since = until - datetime.timedelta(seconds=seconds)
dt = until - since
if dt > datetime.timedelta(seconds=seconds):
raise InvalidArguments(until, since)
kwargs['since'] = since.isoformat()
kwargs['until'] = until.isoformat()
return getattr(Entity, 'find').__func__(cls, *args, **kwargs) | def find(cls, *args, **kwargs) | Find notifications.
Optional kwargs are:
since:
datetime instance
until:
datetime instance
If not specified, until will default to now(), and since will default
to 30 days prior to until.
As per PD spec, date range must not exceed 1 month. | 2.759432 | 2.574035 | 1.072026 |
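A usage sketch, assuming this classmethod lives on a `Notification` entity; a window wider than 30 days would raise `InvalidArguments` as enforced above:

```python
import datetime

until = datetime.datetime.now()
since = until - datetime.timedelta(days=7)
notifications = Notification.find(since=since, until=until)
```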
ids = [ref['id'] for ref in self['services']]
return [Service.fetch(id) for id in ids] | def services(self) | Fetch all instances of services for this EP. | 8.118745 | 6.093814 | 1.332293 |
if incident is None and endpoint is None:
raise InvalidArguments(incident, endpoint)
if endpoint is None:
iid = incident['id'] if isinstance(incident, Entity) else incident
endpoint = 'incidents/{0}/alerts'.format(iid)
return getattr(Entity, 'fetch').__func__(cls, id, endpoint=endpoint,
*args, **kwargs) | def fetch(cls, id, incident=None, endpoint=None, *args, **kwargs) | Customize fetch because this is a nested resource. | 4.44319 | 4.240832 | 1.047717 |
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
parent_incident_id = self['incident']['id']
endpoint_format = 'incidents/{0}/alerts/{1}'
endpoint = endpoint_format.format(parent_incident_id, self['id'])
add_headers = {'from': from_email, }
data = {
'alert': {
'id': self['id'],
'type': 'alert',
'status': 'resolved',
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | def resolve(self, from_email) | Resolve an alert using a valid email address. | 3.261289 | 3.052168 | 1.068516 |
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
if new_parent_incident is None:
raise InvalidArguments(new_parent_incident)
parent_incident_id = self['incident']['id']
endpoint_format = 'incidents/{0}/alerts/{1}'
endpoint = endpoint_format.format(parent_incident_id, self['id'])
if isinstance(new_parent_incident, Entity):
new_parent_incident_id = new_parent_incident['id']
else:
new_parent_incident_id = new_parent_incident
add_headers = {'from': from_email, }
data = {
'alert': {
'id': self['id'],
'type': 'alert',
'incident': {
'type': 'incident',
'id': new_parent_incident_id,
}
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | def associate(self, from_email, new_parent_incident=None) | Associate an alert with an incident using a valid email address. | 2.408015 | 2.292055 | 1.050592 |
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
return getattr(Entity, 'fetch').__func__(cls, id, endpoint=endpoint,
*args, **kwargs) | def fetch(cls, id, service=None, endpoint=None, *args, **kwargs) | Customize fetch because it lives on a special endpoint. | 4.310519 | 4.138662 | 1.041525 |
cls.validate(data)
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
# otherwise endpoint should contain the service path too
return getattr(Entity, 'create').__func__(cls, endpoint=endpoint,
data=data, *args, **kwargs) | def create(cls, service=None, endpoint=None, data=None, *args, **kwargs) | Create an integration within the scope of an service.
Make sure that they should reasonably be able to query with an
service or endpoint that knows about an service. | 5.374928 | 5.010247 | 1.072787 |
endpoint = '/'.join((self.endpoint, self.id, 'users'))
return self.request('GET', endpoint=endpoint, query_params=kwargs) | def get_oncall(self, **kwargs) | Retrieve this schedule's "on call" users. | 5.716635 | 4.426125 | 1.291567 |
log('Doing HTTP [{3}] request: {0} - headers: {1} - payload: {2}'.format(
args[0], kwargs.get('headers'), kwargs.get('json'), method,),
level=logging.DEBUG,)
requests_method = getattr(requests, method)
return self._handle_response(requests_method(*args, **kwargs)) | def _do_request(self, method, *args, **kwargs) | Modularized because API was broken.
Need to be able to inject Mocked response objects here. | 4.659506 | 4.725911 | 0.985949 |
# if we need a plural endpoint (acessing lists)
if plural:
if endpoint.endswith('y'):
endpoint = endpoint[:-1] + 'ies'
elif not endpoint.endswith('s'):
endpoint += 's'
else:
# otherwise make sure it's singular form
if endpoint.endswith('ies'):
endpoint = endpoint[:-3] + 'y'
elif endpoint.endswith('s'):
endpoint = endpoint[:-1]
return endpoint | def sanitize_ep(endpoint, plural=False) | Sanitize an endpoint to a singular or plural form.
Used mostly for convenience in the `_parse` method to grab the raw
data from queried datasets.
XXX: this is el cheapo (not quite good) | 3.024533 | 3.08151 | 0.98151 |
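Round-trip examples of the pluralization rules above (shown as plain calls; in the library this may be a static or class method):

```python
print(sanitize_ep('policy', plural=True))    # policies
print(sanitize_ep('incident', plural=True))  # incidents
print(sanitize_ep('policies'))               # policy
print(sanitize_ep('incidents'))              # incident
```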
if cls.endpoint is not None:
return cls.endpoint
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls.__name__)
return cls.sanitize_ep(
re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower(),
plural=True
) | def get_endpoint(cls) | Accessor method to enable omition of endpoint name.
In general we want the class name to be translated to endpoint name,
this way unless otherwise specified will translate class name to
endpoint name. | 2.809424 | 2.675056 | 1.05023 |
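The CamelCase-to-endpoint derivation can be traced standalone (the class name is an example):

```python
import re

name = 'EscalationPolicy'
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
snake = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
print(snake)  # escalation_policy
# sanitize_ep(snake, plural=True) would then yield 'escalation_policies'
```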
output = []
qp = kwargs.copy()
limit = max(1, min(100, limit))
maximum = kwargs.get('maximum')
qp['limit'] = min(limit, maximum) if maximum is not None else limit
qp['offset'] = offset
more, total = None, None
while True:
entities, options = cls._fetch_page(
api_key=api_key, endpoint=endpoint, **qp
)
output += entities
more = options.get('more')
limit = options.get('limit')
offset = options.get('offset')
total = options.get('total')
if more is None:
if total is None or offset is None:
break
more = (limit + offset) < total
if not more or (maximum is not None and len(output) >= maximum):
break
qp['limit'] = limit
qp['offset'] = offset + limit
return output | def _fetch_all(cls, api_key, endpoint=None, offset=0, limit=25, **kwargs) | Call `self._fetch_page` for as many pages as exist.
TODO: should be extended to do async page fetches if API allows it via
exposing total value.
Returns a list of `cls` instances. | 2.695126 | 2.712184 | 0.993711 |
# if offset is provided have it overwrite the page_index provided
if offset is not None:
page_index = int(offset / limit)
# limit can be maximum MAX_LIMIT_VALUE for most PD queries
limit = max(1, min(cls.MAX_LIMIT_VALUE, limit))
# make an tmp instance to do query work
inst = cls(api_key=api_key)
kwargs['offset'] = int(page_index * limit)
maximum = kwargs.pop('maximum', None)
# if maximum is valid, make the limit <= maximum
kwargs['limit'] = min(limit, maximum) if maximum is not None else limit
ep = parse_key = cls.sanitize_ep(cls.get_endpoint(), plural=True)
# if an override to the endpoint is provided use that instead
# this is useful for nested value searches ie. for
# `incident_log_entries` but instead of /log_entries querying with
# context of /incident/INCIDENTID/log_entries.
# XXX: could be cleaner
if endpoint is not None:
ep = endpoint
response = inst.request('GET', endpoint=ep, query_params=kwargs)
# XXX: this is a little gross right now. Seems like the best way
# to do the parsing out of something and then return everything else
datas = cls._parse(response, key=parse_key)
response.pop(parse_key, None)
entities = [cls(api_key=api_key, _data=d) for d in datas]
# return a tuple
return entities, response | def _fetch_page(cls, api_key, endpoint=None, page_index=0, offset=None,
limit=25, **kwargs) | Fetch a single page of `limit` number of results.
Optionally provide `page_index` an integer (0-based) index for the
page to return. Calculated based on `limit` and `offset`.
Optionally provide `offset` which will override `page_index` if both
are passed, will be used to calculate the integer offset of items.
Optionally provide `limit` integer describing how many items pages
ought to have.
Returns a tuple containing a list of `cls` instances and response
options. | 7.789159 | 7.745026 | 1.005698 |
if endpoint is None:
endpoint = cls.get_endpoint()
inst = cls(api_key=api_key)
parse_key = cls.sanitize_ep(endpoint).split("/")[-1]
endpoint = '/'.join((endpoint, id))
data = cls._parse(inst.request('GET',
endpoint=endpoint,
add_headers=add_headers,
query_params=kwargs),
key=parse_key)
inst._set(data)
return inst | def fetch(cls, id, api_key=None, endpoint=None, add_headers=None,
**kwargs) | Fetch a single entity from the API endpoint.
Used when you know the exact ID that must be queried. | 4.32384 | 4.443339 | 0.973106 |
# if exclude is left blank (not a list) then the predicate will just
# be true
if excludes is None:
return False
# oh my...
def test_each_exclude(exclude_value):
# excluded_value is one of excludes = (...,)
def exclude_equals_value_test(exclude_filter):
# exclude_filter is one of EXCLUDE_FILTERS = (...,)
if callable(exclude_filter):
return exclude_filter(cls, item, exclude_value,)
return item.get(exclude_filter) == exclude_value
return any(map(exclude_equals_value_test, cls.EXCLUDE_FILTERS))
return any(map(test_each_exclude, excludes)) | def _find_exclude_filter(cls, excludes, item) | For each item returned by a `find()` maybe filter it out.
Called for each item returned to find a function or string to exclude
`item` from a filtered list. This method should return truthy values
where `True`-like values will allow `item` to be included into the
set, and `False` values will not allow them into the set (filter
predicate).
This is a dynamic filtering method such that users of the library
will be able to do something like:
Class.find(exclude=('[email protected]', 1,))
Where the coreesponding EXCLUDE_FILTERS = ('email', 'id',). Similar to
matching any value on any indexed field, where EXCLUDE_FILTERS are the
indexes.
XXX: Even explaining this was difficult. Probably an easier more
pragmatic way to do this. | 5.269067 | 5.337525 | 0.987174 |
values = []
output = kwargs.copy()
query = kwargs.pop('query', None)
# remove any of the TRANSLATE_QUERY_PARAMs in output
for param in (cls.TRANSLATE_QUERY_PARAM or []):
popped = output.pop(param, None)
if popped is not None:
values.append(popped)
# if query is provided, just use it
if query is not None:
output['query'] = query
return output
# if query is not provided, use the first parameter we removed from
# the kwargs
try:
output['query'] = next(iter(values))
except StopIteration:
pass
return output | def translate_query_params(cls, **kwargs) | Translate an arbirtary keyword argument to the expected query.
TODO: refactor this into something less insane.
XXX: Clean this up. It's *too* flexible.
In the v2 API, many endpoints expect a particular query argument to be
in the form of `query=xxx` where `xxx` would be the name of perhaps
the name, ID or otherwise. This function ought to take a more aptly
named parameter specified in `TRANSLATE_QUERY_PARAM`, and substitute it
into the `query` keyword argument. The purpose is so that some models
(optionally) have nicer named keyword arguments than `query` for easier
to read python.
If a query argument is given then the output should be that value. If a
substitute value is given as a keyword specified in
`TRANSLATE_QUERY_PARAM`(and query is not) then the `query` argument
will be that keyword argument.
Eg. No query param
TRANSLATE_QUERY_PARAM = ('name',)
kwargs = {'name': 'PagerDuty',}
...
output = {'query': 'PagerDuty'}
or, query param explicitly
TRANSLATE_QUERY_PARAM = ('name',)
kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
...
output = {'query': 'XXXXPlopperDuty'}
or, TRANSLATE_QUERY_PARAM is None
TRANSLATE_QUERY_PARAM = None
kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
...
output = {'output': 'XXXXPlopperDuty', 'name': 'PagerDuty'} | 3.804328 | 3.42329 | 1.111308 |
exclude = kwargs.pop('exclude', None)
# if the exclude param was passed as a string, list-ify it
if isinstance(exclude, six.string_types):
    exclude = [exclude, ]
query_params = cls.translate_query_params(**kwargs)
# unless otherwise specified, use the class variable for the endpoint
if endpoint is None:
    endpoint = cls.get_endpoint()
if fetch_all:
    result = cls._fetch_all(api_key=api_key, endpoint=endpoint,
                            maximum=maximum,
                            **query_params)
else:
    result = cls._fetch_page(api_key=api_key, endpoint=endpoint,
                             maximum=maximum,
                             **query_params)
# run each result through an exclusion filter
collection = [r for r in result
              if not cls._find_exclude_filter(exclude, r)]
return collection | def find(cls, api_key=None, fetch_all=True, endpoint=None, maximum=None,
**kwargs) | Find some entities from the API endpoint.
If no api_key is provided, the global api key will be used.
If fetch_all is True, page through all the data and find every record
that exists.
If add_headers is provided (as a dict), use it to add headers to the
HTTP request, e.g.
{'host': 'some.hidden.host'}
Capitalization of header keys does not matter.
Remaining keyword arguments will be passed as `query_params` to the
instance method `request` (ClientMixin). | 3.451822 | 3.708782 | 0.930716 |
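A hedged usage sketch, assuming pypd is installed and the entity classes are exposed as in the library's README; the API key is a placeholder, and `name=` assumes `Service` lists `'name'` in its `TRANSLATE_QUERY_PARAM`:

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

# page through everything, but stop after ~10 records
incidents = pypd.Incident.find(maximum=10)

# the keyword is translated into the v2 `query=` parameter under the hood
services = pypd.Service.find(name='My Web App')
```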
# ensure that maximum is supplied so that a big query is not happening
# behind the scenes
if 'maximum' not in kwargs:
    kwargs['maximum'] = 1
try:
    # call find and extract the first iterated value from the result
    iterable = iter(cls.find(*args, **kwargs))
    return next(iterable)
except StopIteration:
    # no result was found
    return None | def find_one(cls, *args, **kwargs) | Like `find()`, except it ensures that at most one result is returned. | 6.817843 | 6.585216 | 1.035326 |
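Usage sketch under the same assumptions as above; `email=` assumes `'email'` appears in `User.TRANSLATE_QUERY_PARAM`:

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

# find_one forces maximum=1, so only a single record is requested
user = pypd.User.find_one(email='[email protected]')
if user is None:
    print('no matching user')
```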
inst = cls(api_key=api_key)
if data_key is None:
    data_key = cls.sanitize_ep(cls.get_endpoint())
if response_data_key is None:
    response_data_key = cls.sanitize_ep(cls.get_endpoint())
body = {}
body[data_key] = data
if endpoint is None:
    endpoint = cls.get_endpoint()
inst._set(cls._parse(inst.request(method,
                                  endpoint=endpoint,
                                  data=body,
                                  query_params=kwargs,
                                  add_headers=add_headers,
                                  ),
                     key=response_data_key))
return inst | def create(cls, data=None, api_key=None, endpoint=None, add_headers=None,
data_key=None, response_data_key=None, method='POST', **kwargs) | Create an instance of the Entity model by calling the API endpoint.
This ensures that the server knows about the creation before returning
the class instance.
NOTE: The server must return a response with the schema containing
the entire entity value. A True or False response is no bueno. | 3.022403 | 3.080876 | 0.981021 |
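A sketch of creating a service, assuming pypd is configured as above; the payload fields follow PagerDuty's v2 API docs and all IDs are placeholders:

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

service = pypd.Service.create(data={
    'name': 'My Web App',
    'type': 'service',
    'escalation_policy': {
        'id': 'PXXXXXX',  # placeholder escalation policy id
        'type': 'escalation_policy_reference',
    },
})
print(service['id'])  # populated from the server's response
```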
inst = cls(api_key=api_key)
endpoint = '/'.join((cls.get_endpoint(), id))
inst.request('DELETE', endpoint=endpoint, query_params=kwargs)
inst._is_deleted = True
return True | def delete(cls, id, api_key=None, **kwargs) | Delete an entity from the server by ID. | 4.153623 | 3.8946 | 1.066508 |
inst = cls(api_key=api_key)
endpoint = '/'.join((cls.get_endpoint(), id))
return inst.request('PUT', endpoint=endpoint, query_params=kwargs) | def put(cls, id, api_key=None, **kwargs) | Update an entity on the server by ID. | 3.68934 | 3.281367 | 1.12433 |
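Both classmethods address `<endpoint>/<id>`; a sketch with placeholder IDs (the `status` query parameter is hypothetical, shown only to illustrate that extra kwargs become query params):

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

pypd.Service.delete('PXXXXXX')                  # DELETE /services/PXXXXXX
pypd.Service.put('PXXXXXX', status='disabled')  # PUT /services/PXXXXXX?status=disabled
```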
parse = cls.parse if cls.parse is not None else cls.get_endpoint()
if callable(parse):
    data = parse(data)
elif isinstance(parse, str):
    data = data[key]
else:
    raise Exception('"parse" should be a callable or a string, got {0}'
                    .format(parse))
return data | def _parse(cls, data, key=None) | Parse a set of data to extract entity-only data.
Use classmethod `parse` if available, otherwise use the `endpoint`
class variable to extract data from a data blob. | 5.03026 | 4.277373 | 1.176016 |
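A toy re-implementation of the parsing rule described above, for illustration only (pypd resolves `parse` from the class, but the branching is the same):

```python
def parse_blob(parse, data, key=None):
    if callable(parse):           # callable mode: delegate entirely
        return parse(data)
    elif isinstance(parse, str):  # string mode: treat as a data key
        return data[key]
    raise Exception('"parse" should be a callable or a string, got {0}'
                    .format(parse))

blob = {'services': [{'id': 'PXXXXXX'}]}
print(parse_blob('services', blob, key='services'))  # -> [{'id': 'PXXXXXX'}]
print(parse_blob(lambda d: d['services'][0], blob))  # -> {'id': 'PXXXXXX'}
```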
level = kwargs.pop('level', logging.INFO)
logger.log(level, *args, **kwargs) | def log(*args, **kwargs) | Log things with the global logger. | 2.841604 | 2.512788 | 1.130857 |
assert 'service_key' in event_info
assert isinstance(event_info['service_key'], six.string_types)
assert 'event_type' in event_info
assert event_info['event_type'] in cls.EVENT_TYPES
if event_info['event_type'] != cls.EVENT_TYPES[0]:
    assert 'incident_key' in event_info
    assert isinstance(event_info['incident_key'], six.string_types)
else:
    assert 'description' in event_info
if 'details' in event_info:
    assert isinstance(event_info['details'], dict)
if 'contexts' in event_info:
    assert isinstance(event_info['contexts'], (list, tuple,)) | def validate(cls, event_info) | Validate that provided event information is valid. | 2.122889 | 2.068359 | 1.026363 |
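Two `event_info` dicts that satisfy the assertions above, assuming `cls.EVENT_TYPES[0]` is `'trigger'` as in the v1 Events API; the key values are placeholders:

```python
trigger_event = {
    'service_key': 'YOUR_32_CHAR_SERVICE_INTEGRATION_KEY',  # placeholder
    'event_type': 'trigger',
    'description': 'Disk usage at 95% on web-01',  # required for triggers
    'details': {'host': 'web-01'},                 # optional, must be a dict
    'contexts': [],                                # optional, list or tuple
}

resolve_event = {
    'service_key': 'YOUR_32_CHAR_SERVICE_INTEGRATION_KEY',  # placeholder
    'event_type': 'resolve',
    'incident_key': 'some-incident-key',  # required for non-trigger events
}
```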
cls.validate(data)
inst = cls(api_key=api_key)
endpoint = ''
return inst.request('POST',
                    endpoint=endpoint,
                    data=data,
                    query_params=kwargs,
                    add_headers=add_headers,
                    ) | def create(cls, data=None, api_key=None, endpoint=None, add_headers=None,
**kwargs) | Create an event on your PagerDuty account. | 3.946698 | 3.755279 | 1.050973 |
assert 'routing_key' in event_info
assert isinstance(event_info['routing_key'], six.string_types)
assert 'event_action' in event_info
assert event_info['event_action'] in cls.EVENT_TYPES
assert 'payload' in event_info
payload = event_info['payload']
assert payload['summary']
assert payload['source']
assert payload['severity'] in cls.SEVERITY_TYPES | def validate(cls, event_info) | Validate that provided event information is valid. | 2.712725 | 2.617295 | 1.036461 |
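A v2 event that passes every assertion above; the routing key is a placeholder and `pypd.EventV2` is assumed to be exposed as in the library's README:

```python
import pypd

pypd.EventV2.create(data={
    'routing_key': 'YOUR_32_CHAR_INTEGRATION_KEY',  # placeholder
    'event_action': 'trigger',
    'payload': {
        'summary': 'Disk usage at 95% on web-01',
        'source': 'web-01',
        'severity': 'warning',  # must be one of cls.SEVERITY_TYPES
    },
})
```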
if isinstance(escalation_policy, Entity):
    escalation_policy = escalation_policy['id']
assert isinstance(escalation_policy, six.string_types)
endpoint = '{0}/{1}/escalation_policies/{2}'.format(
    self.endpoint,
    self['id'],
    escalation_policy,
)
return self.request('DELETE', endpoint=endpoint, query_params=kwargs) | def remove_escalation_policy(self, escalation_policy, **kwargs) | Remove an escalation policy from this team. | 2.647358 | 2.460618 | 1.075891 |
if isinstance(user, Entity):
    user = user['id']
assert isinstance(user, six.string_types)
endpoint = '{0}/{1}/users/{2}'.format(
    self.endpoint,
    self['id'],
    user,
)
return self.request('DELETE', endpoint=endpoint, query_params=kwargs) | def remove_user(self, user, **kwargs) | Remove a user from this team. | 3.450479 | 3.144856 | 1.097182 |
if isinstance(user, User):
    user = user['id']
assert isinstance(user, six.string_types)
endpoint = '{0}/{1}/users/{2}'.format(
    self.endpoint,
    self['id'],
    user,
)
result = self.request('PUT', endpoint=endpoint, query_params=kwargs)
return result | def add_user(self, user, **kwargs) | Add a user to this team. | 3.518825 | 3.077553 | 1.143384 |
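A membership-management sketch; the team and user lookups assume `'name'` and `'email'` are translatable query params for those models, and the values are placeholders:

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

team = pypd.Team.find_one(name='Ops')
user = pypd.User.find_one(email='[email protected]')

# both methods accept an Entity instance or a plain id string
team.add_user(user)
team.remove_user(user['id'])
```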
service_info = integration_info.get('service')
vendor_info = integration_info.get('vendor')
if service_info is not None:
    self.__class__.validate(service_info)
if vendor_info is not None:
    self.vendorFactory.validate(vendor_info)
endpoint = '{0}/{1}/integrations'.format(
    self.endpoint,
    self['id'],
)
return self.integrationFactory.create(
    endpoint=endpoint,
    api_key=self.api_key,
    data=integration_info,
    query_params=kwargs
) | def create_integration(self, integration_info, **kwargs) | Create an integration for this service.
See: https://v2.developer.pagerduty.com/v2/page/api-reference#!/
Services/post_services_id_integrations | 3.45776 | 3.186384 | 1.085167 |
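A sketch of attaching an integration to an existing service; the payload shape follows PagerDuty's v2 API docs, and the integration `type` and service name shown are illustrative:

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

service = pypd.Service.find_one(name='My Web App')
integration = service.create_integration({
    'type': 'generic_events_api_inbound_integration',
    'name': 'Monitoring events',
})
```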
ids = [ref['id'] for ref in self['integrations']]
return [Integration.fetch(id, service=self, query_params=kwargs)
        for id in ids] | def integrations(self, **kwargs) | Retrieve all of this service's integrations. | 7.795366 | 6.453489 | 1.20793 |
return Integration.fetch(id, service=self, query_params=kwargs) | def get_integration(self, id, **kwargs) | Retrieve a single integration by id. | 10.851089 | 10.260458 | 1.057564 |
endpoint = '{0}/{1}/contact_methods'.format(
    self.endpoint,
    self['id'],
)
result = self.request('GET', endpoint=endpoint, query_params=kwargs)
return result['contact_methods'] | def contact_methods(self, **kwargs) | Get all contact methods for this user. | 3.839162 | 3.445189 | 1.114355 |
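Usage sketch under the same assumptions as the earlier examples; note the return value is the raw `contact_methods` list of dicts, not Entity instances:

```python
import pypd

pypd.api_key = 'YOUR_API_KEY'  # placeholder

user = pypd.User.find_one(email='[email protected]')
for method in user.contact_methods():
    print(method.get('type'), method.get('address'))
```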