code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# If we're already waiting on an/some outstanding disconnects
# make sure we continue to wait for them...
log.debug("%r: close", self)
self._closing = True
# Close down any clients we have
brokerclients, self.clients = self.clients, None
self._close_brokerclients(brokerclients.values())
# clean up other outstanding operations
self.reset_all_metadata()
return self.close_dlist or defer.succeed(None)
def close(self)
Permanently dispose of the client

- Immediately mark the client as closed, causing current operations to
  fail with :exc:`~afkak.common.CancelledError` and future operations
  to fail with :exc:`~afkak.common.ClientError`.
- Clear cached metadata.
- Close any connections to Kafka brokers.

:returns: deferred that fires when all resources have been released
12.649477
12.638518
1.000867
topics = tuple(_coerce_topic(t) for t in topics)
log.debug("%r: load_metadata_for_topics(%s)", self,
          ', '.join(repr(t) for t in topics))
fetch_all_metadata = not topics

# create the request
requestId = self._next_id()
request = KafkaCodec.encode_metadata_request(self._clientIdBytes,
                                             requestId, topics)

# Callbacks for the request deferred...
def _handleMetadataResponse(response):
    # Decode the response
    brokers, topics = KafkaCodec.decode_metadata_response(response)
    log.debug("%r: got metadata brokers=%r topics=%r",
              self, brokers, topics)

    # If we fetched the metadata for all topics, then store away the
    # received metadata for diagnostics.
    if fetch_all_metadata:
        self._brokers = brokers
        self._topics = topics

    # Iff we were fetching for all topics, and we got at least one
    # broker back, then remove brokers when we update our brokers
    ok_to_remove = (fetch_all_metadata and len(brokers))
    # Take the metadata we got back, update our self.clients, and
    # if needed disconnect or connect from/to old/new brokers
    self._update_brokers(brokers.values(), remove=ok_to_remove)

    # Now loop through all the topics/partitions in the response
    # and setup our cache/data-structures
    for topic, topic_metadata in topics.items():
        _, topic_error, partitions = topic_metadata
        self.reset_topic_metadata(topic)
        self.topic_errors[topic] = topic_error
        if not partitions:
            log.warning('No partitions for %s, Err:%d',
                        topic, topic_error)
            continue

        self.topic_partitions[topic] = []
        for partition, meta in partitions.items():
            self.topic_partitions[topic].append(partition)
            topic_part = TopicAndPartition(topic, partition)
            self.partition_meta[topic_part] = meta
            if meta.leader == -1:
                log.warning('No leader for topic %s partition %s',
                            topic, partition)
                self.topics_to_brokers[topic_part] = None
            else:
                self.topics_to_brokers[
                    topic_part] = brokers[meta.leader]
        self.topic_partitions[topic] = sorted(
            self.topic_partitions[topic])
    return True

def _handleMetadataErr(err):
    # This should maybe do more cleanup?
    if err.check(t_CancelledError, CancelledError):
        # Eat the error
        # XXX Shouldn't this return False? The success branch
        # returns True.
        return None
    log.error("Failed to retrieve metadata:%s", err)
    raise KafkaUnavailableError(
        "Unable to load metadata from configured "
        "hosts: {!r}".format(err))

# Send the request, add the handlers
d = self._send_broker_unaware_request(requestId, request)
d.addCallbacks(_handleMetadataResponse, _handleMetadataErr)
return d
def load_metadata_for_topics(self, *topics)
Discover topic metadata and brokers

Afkak internally calls this method whenever metadata is required.

:param str topics:
    Topic names to look up. The resulting metadata includes the list
    of topic partitions, brokers owning those partitions, and which
    partitions are in sync.

    Fetching metadata for a topic may trigger auto-creation if that is
    enabled on the Kafka broker.

    When no topic name is given metadata for *all* topics is fetched.
    This is an expensive operation, but it does not trigger topic
    creation.

:returns:
    :class:`Deferred` for the completion of the metadata fetch. This
    will fire with ``True`` on success, ``None`` on cancellation, or
    fail with an exception on error.

    On success, topic metadata is available from the attributes of
    :class:`KafkaClient`: :data:`~KafkaClient.topic_partitions`,
    :data:`~KafkaClient.topics_to_brokers`, etc.
4.519485
4.418212
1.022922
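A minimal usage sketch of the load_metadata_for_topics entry above, driven from a Twisted inlineCallbacks coroutine. The client construction and the topic name 'my-topic' are illustrative assumptions, not part of the record.

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def show_partitions(client):
    # `client` is assumed to be an already-constructed afkak KafkaClient.
    ok = yield client.load_metadata_for_topics('my-topic')
    if ok:
        # Populated as a side effect of the successful metadata fetch.
        print(client.topic_partitions['my-topic'])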
group = _coerce_consumer_group(group)
log.debug("%r: load_consumer_metadata_for_group(%r)", self, group)

# If we are already loading the metadata for this group, then
# just return the outstanding deferred
if group in self.coordinator_fetches:
    d = defer.Deferred()
    self.coordinator_fetches[group][1].append(d)
    return d

# No outstanding request, create a new one
requestId = self._next_id()
request = KafkaCodec.encode_consumermetadata_request(
    self._clientIdBytes, requestId, group)

# Callbacks for the request deferred...
def _handleConsumerMetadataResponse(response_bytes):
    # Decode the response (returns ConsumerMetadataResponse)
    response = KafkaCodec.decode_consumermetadata_response(response_bytes)
    log.debug("%r: load_consumer_metadata_for_group(%r) -> %r",
              self, group, response)
    if response.error:
        raise BrokerResponseError.errnos.get(response.error,
                                             UnknownError)(response)

    bm = BrokerMetadata(response.node_id, response.host, response.port)
    self.consumer_group_to_brokers[group] = bm
    self._update_brokers([bm])
    return True

def _handleConsumerMetadataErr(err):
    log.error("Failed to retrieve consumer metadata for group %r",
              group, exc_info=(err.type, err.value,
                               err.getTracebackObject()))
    # Clear any stored value for the group's coordinator
    self.reset_consumer_group_metadata(group)
    # FIXME: This exception should chain from err.
    raise ConsumerCoordinatorNotAvailableError(
        "Coordinator for group {!r} not available".format(group),
    )

def _propagate(result):
    [_, ds] = self.coordinator_fetches.pop(group, None)
    for d in ds:
        d.callback(result)

# Send the request, add the handlers
request_d = self._send_broker_unaware_request(requestId, request)
d = defer.Deferred()
# Save the deferred under the fetches for this group
self.coordinator_fetches[group] = (request_d, [d])
request_d.addCallback(_handleConsumerMetadataResponse)
request_d.addErrback(_handleConsumerMetadataErr)
request_d.addBoth(_propagate)
return d
def load_consumer_metadata_for_group(self, group)
Determine broker for the consumer metadata for the specified group

Returns a deferred which callbacks with True if the group's
coordinator could be determined, or errbacks with
ConsumerCoordinatorNotAvailableError if not.

Parameters
----------
group:
    group name as `str`
3.982506
3.842242
1.036506
encoder = partial(
    KafkaCodec.encode_produce_request,
    acks=acks,
    timeout=timeout)

if acks == 0:
    decoder = None
else:
    decoder = KafkaCodec.decode_produce_response

resps = yield self._send_broker_aware_request(
    payloads, encoder, decoder)

returnValue(self._handle_responses(resps, fail_on_error, callback))
def send_produce_request(self, payloads=None, acks=1, timeout=DEFAULT_REPLICAS_ACK_MSECS, fail_on_error=True, callback=None)
Encode and send some ProduceRequests

ProduceRequests will be grouped by (topic, partition) and then sent to
a specific broker. Output is a list of responses in the same order as
the list of payloads specified.

Parameters
----------
payloads:
    list of ProduceRequest
acks:
    How many Kafka broker replicas need to write before the leader
    replies with a response
timeout:
    How long the server has to receive the acks from the replicas
    before returning an error.
fail_on_error:
    boolean, should we raise an Exception if we encounter an API error?
callback:
    function, instead of returning the ProduceResponse, first pass it
    through this function

Return
------
a deferred which callbacks with a list of ProduceResponse

Raises
------
FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
3.821197
4.54659
0.840453
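A hedged sketch of calling send_produce_request from user code. The ProduceRequest payload shape follows the docstring above; the import path for the create_message helper is an assumption about afkak's module layout, and the topic/partition values are illustrative.

from afkak.common import ProduceRequest
from afkak.kafkacodec import create_message  # assumed location of helper

def produce_hello(client):
    # `client` is an afkak KafkaClient; returns a Deferred that fires
    # with a list of ProduceResponse objects.
    payload = ProduceRequest('my-topic', 0, [create_message(b'hello')])
    return client.send_produce_request([payload], acks=1)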
if (max_wait_time / 1000) > (self.timeout - 0.1):
    # Note: interpolate with % so the message actually renders;
    # passing the args separately to ValueError leaves them unformatted.
    raise ValueError(
        "%r: max_wait_time: %d must be less than client.timeout by "
        "at least 100 milliseconds." % (self, max_wait_time))

encoder = partial(KafkaCodec.encode_fetch_request,
                  max_wait_time=max_wait_time,
                  min_bytes=min_bytes)

# resps is a list of FetchResponse() objects, each of which can hold
# 1-n messages.
resps = yield self._send_broker_aware_request(
    payloads, encoder,
    KafkaCodec.decode_fetch_response)

returnValue(self._handle_responses(resps, fail_on_error, callback))
def send_fetch_request(self, payloads=None, fail_on_error=True, callback=None, max_wait_time=DEFAULT_FETCH_SERVER_WAIT_MSECS, min_bytes=DEFAULT_FETCH_MIN_BYTES)
Encode and send a FetchRequest

Payloads are grouped by topic and partition so they can be pipelined
to the same brokers.

Raises
======
FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
4.641526
4.567288
1.016254
encoder = partial(KafkaCodec.encode_offset_fetch_request,
                  group=group)
decoder = KafkaCodec.decode_offset_fetch_response
resps = yield self._send_broker_aware_request(
    payloads, encoder, decoder, consumer_group=group)

returnValue(self._handle_responses(
    resps, fail_on_error, callback, group))
def send_offset_fetch_request(self, group, payloads=None, fail_on_error=True, callback=None)
Takes a group (string) and list of OffsetFetchRequest and returns a list of OffsetFetchResponse objects
4.75981
4.953605
0.960878
group = _coerce_consumer_group(group)
encoder = partial(KafkaCodec.encode_offset_commit_request,
                  group=group, group_generation_id=group_generation_id,
                  consumer_id=consumer_id)
decoder = KafkaCodec.decode_offset_commit_response
resps = yield self._send_broker_aware_request(
    payloads, encoder, decoder, consumer_group=group)

returnValue(self._handle_responses(
    resps, fail_on_error, callback, group))
def send_offset_commit_request(self, group, payloads=None, fail_on_error=True, callback=None, group_generation_id=-1, consumer_id='')
Send a list of OffsetCommitRequests to the Kafka broker for the given
consumer group.

Args:
    group (str): The consumer group to which to commit the offsets
    payloads ([OffsetCommitRequest]): List of topic, partition, offsets
        to commit.
    fail_on_error (bool): Whether to raise an exception if a response
        from the Kafka broker indicates an error
    callback (callable): a function to call with each of the responses
        before returning the returned value to the caller.
    group_generation_id (int): Must currently always be -1
    consumer_id (str): Must currently always be empty string

Returns:
    [OffsetCommitResponse]: List of OffsetCommitResponse objects.
    Will raise KafkaError for failed requests if fail_on_error is True
3.608326
4.057764
0.88924
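A hedged sketch of committing a single offset through the method above. The OffsetCommitRequest field order mirrors its use in _send_commit_request later in this listing; the import path for TIMESTAMP_INVALID and the topic/group names are assumptions.

from afkak.common import OffsetCommitRequest, TIMESTAMP_INVALID

def commit_offset(client, offset):
    # Commit `offset` for partition 0 of 'my-topic' under group
    # 'my-group'; returns a Deferred of [OffsetCommitResponse].
    req = OffsetCommitRequest('my-topic', 0, offset,
                              TIMESTAMP_INVALID, None)
    return client.send_offset_commit_request('my-group', [req])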
if self._closing:
    raise ClientError(
        "Cannot get broker client for node_id={}: {} has been closed".format(
            node_id, self))
if node_id not in self.clients:
    broker_metadata = self._brokers[node_id]
    log.debug("%r: creating client for %s", self, broker_metadata)
    self.clients[node_id] = _KafkaBrokerClient(
        self.reactor, self._endpoint_factory,
        broker_metadata, self.clientId, self._retry_policy,
    )
return self.clients[node_id]
def _get_brokerclient(self, node_id)
Get a broker client.

:param int node_id: Broker node ID
:raises KeyError: for an unknown node ID
:returns: :class:`_KafkaBrokerClient`
3.887404
3.640951
1.067689
def _log_close_failure(failure, brokerclient):
    log.debug(
        'BrokerClient: %s close result: %s: %s',
        brokerclient, failure.type.__name__, failure.getErrorMessage())

def _clean_close_dlist(result, close_dlist):
    # If there aren't any other outstanding closings going on, then
    # close_dlist == self.close_dlist, and we can reset it.
    if close_dlist == self.close_dlist:
        self.close_dlist = None

if not self.close_dlist:
    dList = []
else:
    log.debug("%r: _close_brokerclients has nested deferredlist: %r",
              self, self.close_dlist)
    dList = [self.close_dlist]
for brokerClient in clients:
    log.debug("Calling close on: %r", brokerClient)
    d = brokerClient.close().addErrback(_log_close_failure, brokerClient)
    dList.append(d)
self.close_dlist = DeferredList(dList)
self.close_dlist.addBoth(_clean_close_dlist, self.close_dlist)
def _close_brokerclients(self, clients)
Close the given broker clients.

:param clients: Iterable of `_KafkaBrokerClient`
3.221581
3.361144
0.958478
log.debug("%r: _update_brokers(%r, remove=%r)", self, brokers, remove) brokers_by_id = {bm.node_id: bm for bm in brokers} self._brokers.update(brokers_by_id) # Update the metadata of broker clients that already exist. for node_id, broker_meta in brokers_by_id.items(): if node_id not in self.clients: continue self.clients[node_id].updateMetadata(broker_meta) # Remove any clients for brokers which no longer exist. if remove: to_close = [ self.clients.pop(node_id) for node_id in set(self.clients) - set(brokers_by_id) ] if to_close: self._close_brokerclients(to_close)
def _update_brokers(self, brokers, remove=False)
Update `self._brokers` and `self.clients`

Take the received iterable of brokers and reconcile it with our
current set of broker clients (`self.clients`). If there is a new
broker, bring up a new connection to it; if *remove* is True and any
broker in our current set isn't in the metadata returned, disconnect
from it.

:param brokers: Iterable of `BrokerMetadata`. A client will be created
    for every broker given if it doesn't yet exist.
:param bool remove:
    Is this metadata for *all* brokers? If so, clients for brokers
    which are no longer found in the metadata will be closed.
2.842386
2.589721
1.097564
key = TopicAndPartition(topic, partition)

# reload metadata if the partition is not available
# or has no leader (broker is None)
if self.topics_to_brokers.get(key) is None:
    yield self.load_metadata_for_topics(topic)

if key not in self.topics_to_brokers:
    raise PartitionUnavailableError("%s not available" % str(key))

returnValue(self.topics_to_brokers[key])
def _get_leader_for_partition(self, topic, partition)
Returns the leader for a partition or None if the partition exists but has no leader. PartitionUnavailableError will be raised if the topic or partition is not part of the metadata.
5.435142
4.770844
1.139241
if self.consumer_group_to_brokers.get(consumer_group) is None:
    yield self.load_consumer_metadata_for_group(consumer_group)

returnValue(self.consumer_group_to_brokers.get(consumer_group))
def _get_coordinator_for_group(self, consumer_group)
Returns the coordinator (broker) for a consumer group

Returns the broker for a given consumer group, or raises
ConsumerCoordinatorNotAvailableError if the coordinator cannot be
determined.
3.899415
3.777193
1.032358
def _timeout_request(broker, requestId):
    try:
        # FIXME: This should be done by calling .cancel() on the
        # Deferred returned by the broker client.
        broker.cancelRequest(requestId, reason=RequestTimedOutError(
            'Request: {} cancelled due to timeout'.format(requestId)))
    except KeyError:  # pragma: no cover This should never happen...
        log.exception('ERROR: Failed to find key for timed-out '
                      'request. Broker: %r Req: %d',
                      broker, requestId)
        raise
    if self._disconnect_on_timeout:
        broker.disconnect()

def _alert_blocked_reactor(timeout, start):
    now = self.reactor.seconds()
    if now >= (start + timeout):
        log.warning('Reactor was starved for %r seconds',
                    now - start)

def _cancel_timeout(result, dc):
    if dc.active():
        dc.cancel()
    return result

# Make the request to the specified broker
log.debug('_mrtb: sending request: %d to broker: %r',
          requestId, broker)
d = broker.makeRequest(requestId, request, **kwArgs)
# Set a delayedCall to fire if we don't get a reply in time
dc = self.reactor.callLater(
    self.timeout, _timeout_request, broker, requestId)
# Set a delayedCall to complain if the reactor has been blocked
rc = self.reactor.callLater(
    (self.timeout * 0.9), _alert_blocked_reactor, self.timeout,
    self.reactor.seconds())
# Setup a callback on the request deferred to cancel both callLater
d.addBoth(_cancel_timeout, dc)
d.addBoth(_cancel_timeout, rc)
return d
def _make_request_to_broker(self, broker, requestId, request, **kwArgs)
Send a request to the specified broker.
4.953167
4.890821
1.012748
node_ids = list(self._brokers.keys())
# Randomly shuffle the brokers to distribute the load
random.shuffle(node_ids)

# Prioritize connected brokers
def connected(node_id):
    try:
        return self.clients[node_id].connected()
    except KeyError:
        return False

node_ids.sort(reverse=True, key=connected)

for node_id in node_ids:
    broker = self._get_brokerclient(node_id)
    try:
        log.debug('_sbur: sending request %d to broker %r',
                  requestId, broker)
        d = self._make_request_to_broker(broker, requestId, request)
        resp = yield d
        returnValue(resp)
    except KafkaError as e:
        log.warning((
            "Will try next server after request with correlationId=%d"
            " failed against server %s:%i. Error: %s"
        ), requestId, broker.host, broker.port, e)

# The request was not handled, likely because no broker metadata has
# loaded yet (or all broker connections have failed). Fall back to
# bootstrapping.
returnValue((yield self._send_bootstrap_request(request)))
def _send_broker_unaware_request(self, requestId, request)
Attempt to send a broker-agnostic request to one of the known brokers:

1. Try each connected broker (in random order)
2. Try each known but unconnected broker (in random order)
3. Try each of the bootstrap hosts (in random order)

:param bytes request:
    The bytes of a Kafka `RequestMessage`_ structure. It must have a
    unique (to this connection) correlation ID.

:returns: API response message for *request*
:rtype: Deferred[bytes]

:raises:
    `KafkaUnavailableError` when making the request of all known hosts
    has failed.
5.327105
5.042294
1.056484
hostports = list(self._bootstrap_hosts)
random.shuffle(hostports)
for host, port in hostports:
    ep = self._endpoint_factory(self.reactor, host, port)
    try:
        protocol = yield ep.connect(_bootstrapFactory)
    except Exception as e:
        log.debug("%s: bootstrap connect to %s:%s -> %s",
                  self, host, port, e)
        continue

    try:
        response = yield protocol.request(request).addTimeout(
            self.timeout, self.reactor)
    except Exception:
        log.debug("%s: bootstrap request to %s:%s failed",
                  self, host, port, exc_info=True)
    else:
        returnValue(response)
    finally:
        protocol.transport.loseConnection()

raise KafkaUnavailableError(
    "Failed to bootstrap from hosts {}".format(hostports))
def _send_bootstrap_request(self, request)
Make a request using an ephemeral broker connection

This routine is used to make broker-unaware requests to get the
initial cluster metadata. It cycles through the configured hosts,
trying to connect and send the request to each in turn. This temporary
connection is closed once a response is received.

Note that most Kafka APIs require requests be sent to a specific
broker. This method will only function for broker-agnostic requests
like:

* `Metadata <https://kafka.apache.org/protocol.html#The_Messages_Metadata>`_
* `FindCoordinator <https://kafka.apache.org/protocol.html#The_Messages_FindCoordinator>`_

:param bytes request:
    The bytes of a Kafka `RequestMessage`_ structure. It must have a
    unique (to this connection) correlation ID.

:returns: API response message for *request*
:rtype: Deferred[bytes]

:raises:
    - `KafkaUnavailableError` when making the request of all known
      hosts has failed.
    - `twisted.internet.defer.TimeoutError` when connecting or making
      a request exceeds the timeout.
3.35803
3.196571
1.05051
descriptor = ExpressionDescriptor(expression, options)
return descriptor.get_description(DescriptionTypeEnum.FULL)
def get_description(expression, options=None)
Generates a human readable string for the Cron Expression

Args:
    expression: The cron expression string
    options: Options to control the output description
Returns:
    The cron expression description
10.10568
14.089204
0.717264
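A hedged usage sketch of the module-level get_description above, assuming the cron_descriptor package's public API; the second expected output is indicative rather than verbatim.

from cron_descriptor import get_description

print(get_description("*/5 * * * *"))    # -> "Every 5 minutes"
print(get_description("30 6,14 * * *"))  # -> e.g. "At 06:30 AM and 02:30 PM"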
try:
    if self._parsed is False:
        parser = ExpressionParser(self._expression, self._options)
        self._expression_parts = parser.parse()
        self._parsed = True

    choices = {
        DescriptionTypeEnum.FULL: self.get_full_description,
        DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
        DescriptionTypeEnum.HOURS: self.get_hours_description,
        DescriptionTypeEnum.MINUTES: self.get_minutes_description,
        DescriptionTypeEnum.SECONDS: self.get_seconds_description,
        DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
        DescriptionTypeEnum.MONTH: self.get_month_description,
        DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
        DescriptionTypeEnum.YEAR: self.get_year_description,
    }

    description = choices.get(
        description_type, self.get_seconds_description)()
except Exception as ex:
    if self._options.throw_exception_on_parse_error:
        raise
    else:
        description = str(ex)
return description
def get_description(self, description_type=DescriptionTypeEnum.FULL)
Generates a human readable string for the Cron Expression

Args:
    description_type: Which part(s) of the expression to describe
Returns:
    The cron expression description
Raises:
    Exception: if throw_exception_on_parse_error is True
2.122979
1.832478
1.158529
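A hedged sketch of requesting the description of a single segment through the method above, assuming these names are exported by cron_descriptor.

from cron_descriptor import DescriptionTypeEnum, ExpressionDescriptor, Options

descriptor = ExpressionDescriptor("30 6,14 * * *", Options())
# Describe only the time-of-day portion of the expression.
print(descriptor.get_description(DescriptionTypeEnum.TIMEOFDAY))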
try:
    time_segment = self.get_time_of_day_description()
    day_of_month_desc = self.get_day_of_month_description()
    month_desc = self.get_month_description()
    day_of_week_desc = self.get_day_of_week_description()
    year_desc = self.get_year_description()
    description = "{0}{1}{2}{3}{4}".format(
        time_segment,
        day_of_month_desc,
        day_of_week_desc,
        month_desc,
        year_desc)
    description = self.transform_verbosity(
        description, self._options.verbose)
    description = self.transform_case(
        description, self._options.casing_type)
except Exception:
    description = _(
        "An error occurred when generating the expression description. "
        "Check the cron expression syntax.")
    if self._options.throw_exception_on_parse_error:
        raise FormatException(description)

return description
def get_full_description(self)
Generates the FULL description

Returns:
    The FULL description
Raises:
    FormatException: if formatting fails and
        throw_exception_on_parse_error is True
3.25047
2.81008
1.156718
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]

description = StringBuilder()

# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
        any(exp in hour_expression for exp in self._special_characters) is False and \
        any(exp in seconds_expression for exp in self._special_characters) is False:
    # specific time of day (i.e. 10 14)
    description.append(_("At "))
    description.append(
        self.format_time(
            hour_expression,
            minute_expression,
            seconds_expression))
elif "-" in minute_expression and \
        "," not in minute_expression and \
        any(exp in hour_expression for exp in self._special_characters) is False:
    # minute range in single hour (i.e. 0-10 11)
    minute_parts = minute_expression.split('-')
    description.append(_("Every minute between {0} and {1}").format(
        self.format_time(hour_expression, minute_parts[0]),
        self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
        any(exp in minute_expression for exp in self._special_characters) is False:
    # hours list with single minute (i.e. 30 6,14,16)
    hour_parts = hour_expression.split(',')
    description.append(_("At"))
    for i, hour_part in enumerate(hour_parts):
        description.append(" ")
        description.append(
            self.format_time(hour_part, minute_expression))

        if i < (len(hour_parts) - 2):
            description.append(",")

        if i == len(hour_parts) - 2:
            description.append(_(" and"))
else:
    # default time description
    seconds_description = self.get_seconds_description()
    minutes_description = self.get_minutes_description()
    hours_description = self.get_hours_description()

    description.append(seconds_description)
    if description:
        description.append(", ")
    description.append(minutes_description)

    if description:
        description.append(", ")
    description.append(hours_description)
return str(description)
def get_time_of_day_description(self)
Generates a description for only the TIMEOFDAY portion of the expression

Returns:
    The TIMEOFDAY description
2.447194
2.469211
0.991083
return self.get_segment_description(
    self._expression_parts[0],
    _("every second"),
    lambda s: s,
    lambda s: _("every {0} seconds").format(s),
    lambda s: _("seconds {0} through {1} past the minute"),
    lambda s: _("at {0} seconds past the minute")
)
def get_seconds_description(self)
Generates a description for only the SECONDS portion of the expression

Returns:
    The SECONDS description
6.297512
5.665463
1.111562
return self.get_segment_description(
    self._expression_parts[1],
    _("every minute"),
    lambda s: s,
    lambda s: _("every {0} minutes").format(s),
    lambda s: _("minutes {0} through {1} past the hour"),
    lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_minutes_description(self)
Generates a description for only the MINUTE portion of the expression

Returns:
    The MINUTE description
6.657679
5.897062
1.128982
expression = self._expression_parts[2]
return self.get_segment_description(
    expression,
    _("every hour"),
    lambda s: self.format_time(s, "0"),
    lambda s: _("every {0} hours").format(s),
    lambda s: _("between {0} and {1}"),
    lambda s: _("at {0}")
)
def get_hours_description(self)
Generates a description for only the HOUR portion of the expression

Returns:
    The HOUR description
4.995983
5.456813
0.91555
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
    # DOM is specified and DOW is *, so to prevent a contradiction
    # like "on day 1 of the month, every day" we will not specify a
    # DOW description.
    return ""

def get_day_name(s):
    exp = s
    if "#" in s:
        exp, useless = s.split("#", 2)
    elif "L" in s:
        exp = exp.replace("L", '')
    return self.number_to_day(int(exp))

def get_format(s):
    if "#" in s:
        day_of_week_of_month = s[s.find("#") + 1:]

        try:
            day_of_week_of_month_number = int(day_of_week_of_month)
            choices = {
                1: _("first"),
                2: _("second"),
                3: _("third"),
                4: _("fourth"),
                5: _("fifth"),
            }
            day_of_week_of_month_description = choices.get(
                day_of_week_of_month_number, '')
        except ValueError:
            day_of_week_of_month_description = ''

        formatted = "{}{}{}".format(
            _(", on the "),
            day_of_week_of_month_description,
            _(" {0} of the month"))
    elif "L" in s:
        formatted = _(", on the last {0} of the month")
    else:
        formatted = _(", only on {0}")

    return formatted

return self.get_segment_description(
    self._expression_parts[5],
    _(", every day"),
    lambda s: get_day_name(s),
    lambda s: _(", every {0} days of the week").format(s),
    lambda s: _(", {0} through {1}"),
    lambda s: get_format(s)
)
def get_day_of_week_description(self)
Generates a description for only the DAYOFWEEK portion of the expression

Returns:
    The DAYOFWEEK description
3.747551
3.696458
1.013822
return self.get_segment_description(
    self._expression_parts[4],
    '',
    lambda s: datetime.date(
        datetime.date.today().year, int(s), 1).strftime("%B"),
    lambda s: _(", every {0} months").format(s),
    lambda s: _(", {0} through {1}"),
    lambda s: _(", only in {0}")
)
def get_month_description(self)
Generates a description for only the MONTH portion of the expression

Returns:
    The MONTH description
6.048262
5.959369
1.014917
expression = self._expression_parts[3]
expression = expression.replace("?", "*")

if expression == "L":
    description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
    description = _(", on the last weekday of the month")
else:
    regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
    if regex.match(expression):
        m = regex.match(expression)
        day_number = int(m.group().replace("W", ""))

        day_string = _("first weekday") if day_number == 1 else \
            _("weekday nearest day {0}").format(day_number)
        description = _(", on the {0} of the month").format(day_string)
    else:
        description = self.get_segment_description(
            expression,
            _(", every day"),
            lambda s: s,
            lambda s: _(", every day") if s == "1" else _(", every {0} days"),
            lambda s: _(", between day {0} and {1} of the month"),
            lambda s: _(", on day {0} of the month")
        )

return description
def get_day_of_month_description(self)
Generates a description for only the DAYOFMONTH portion of the expression

Returns:
    The DAYOFMONTH description
3.281713
3.385772
0.969266
def format_year(s):
    regex = re.compile(r"^\d+$")
    if regex.match(s):
        year_int = int(s)
        if year_int < 1900:
            return year_int
        return datetime.date(year_int, 1, 1).strftime("%Y")
    else:
        return s

return self.get_segment_description(
    self._expression_parts[6],
    '',
    lambda s: format_year(s),
    lambda s: _(", every {0} years").format(s),
    lambda s: _(", {0} through {1}"),
    lambda s: _(", only in {0}")
)
def get_year_description(self)
Generates a description for only the YEAR portion of the expression

Returns:
    The YEAR description
4.000232
3.852698
1.038294
description = None
if expression is None or expression == '':
    description = ''
elif expression == "*":
    description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
    description = get_description_format(expression).format(
        get_single_item_description(expression))
elif "/" in expression:
    segments = expression.split('/')
    description = get_interval_description_format(
        segments[1]).format(get_single_item_description(segments[1]))

    # interval contains 'between' piece (i.e. 2-59/3 )
    if "-" in segments[0]:
        between_segment_description = \
            self.generate_between_segment_description(
                segments[0],
                get_between_description_format,
                get_single_item_description)
        if not between_segment_description.startswith(", "):
            description += ", "
        description += between_segment_description
    elif any(ext in segments[0] for ext in ['*', ',']) is False:
        range_item_description = get_description_format(segments[0]).format(
            get_single_item_description(segments[0])
        )
        range_item_description = range_item_description.replace(", ", "")
        description += _(", starting {0}").format(range_item_description)
elif "," in expression:
    segments = expression.split(',')
    description_content = ''
    for i, segment in enumerate(segments):
        if i > 0 and len(segments) > 2:
            description_content += ","
            if i < len(segments) - 1:
                description_content += " "
        if i > 0 and len(segments) > 1 and \
                (i == len(segments) - 1 or len(segments) == 2):
            description_content += _(" and ")
        if "-" in segment:
            between_description = \
                self.generate_between_segment_description(
                    segment,
                    lambda s: _(", {0} through {1}"),
                    get_single_item_description)
            between_description = between_description.replace(", ", "")
            description_content += between_description
        else:
            description_content += get_single_item_description(segment)

    description = get_description_format(expression).format(
        description_content)
elif "-" in expression:
    description = self.generate_between_segment_description(
        expression,
        get_between_description_format,
        get_single_item_description)

return description
def get_segment_description(self, expression, all_description, get_single_item_description, get_interval_description_format, get_between_description_format, get_description_format)
Returns segment description

Args:
    expression: Segment to describe
    all_description: *
    get_single_item_description: 1
    get_interval_description_format: 1/2
    get_between_description_format: 1-2
    get_description_format: format get_single_item_description
Returns:
    segment description
2.411245
2.412489
0.999484
description = "" between_segments = between_expression.split('-') between_segment_1_description = get_single_item_description(between_segments[0]) between_segment_2_description = get_single_item_description(between_segments[1]) between_segment_2_description = between_segment_2_description.replace( ":00", ":59") between_description_format = get_between_description_format(between_expression) description += between_description_format.format(between_segment_1_description, between_segment_2_description) return description
def generate_between_segment_description(self, between_expression, get_between_description_format, get_single_item_description)
Generates the between segment description

:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
2.059759
2.117849
0.972571
hour = int(hour_expression)

period = ''
if self._options.use_24hour_time_format is False:
    period = " PM" if (hour >= 12) else " AM"
    if hour > 12:
        hour -= 12

minute = str(int(minute_expression))  # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
    second = "{}{}".format(":", str(int(second_expression)).zfill(2))

return "{0}:{1}{2}{3}".format(
    str(hour).zfill(2), minute.zfill(2), second, period)
def format_time(self, hour_expression, minute_expression, second_expression='')
Given time parts, will construct a formatted time description

Args:
    hour_expression: Hours part
    minute_expression: Minutes part
    second_expression: Seconds part
Returns:
    Formatted time description
3.520932
3.702982
0.950837
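A worked example of format_time under both clock formats, reusing the `descriptor` instance from the ExpressionDescriptor sketch earlier; poking the private _options attribute directly is purely for illustration.

descriptor._options.use_24hour_time_format = False
print(descriptor.format_time("14", "5"))        # -> "02:05 PM"
descriptor._options.use_24hour_time_format = True
print(descriptor.format_time("14", "5", "9"))   # -> "14:05:09"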
if use_verbose_format is False:
    description = description.replace(_(", every minute"), '')
    description = description.replace(_(", every hour"), '')
    description = description.replace(_(", every day"), '')
return description
def transform_verbosity(self, description, use_verbose_format)
Transforms the verbosity of the expression description by stripping
verbosity from original description

Args:
    description: The description to transform
    use_verbose_format: If True, will leave description as is; if
        False, will strip verbose parts
Returns:
    The transformed description with proper verbosity
4.293999
4.824533
0.890034
if case_type == CasingTypeEnum.Sentence:
    description = "{}{}".format(
        description[0].upper(),
        description[1:])
elif case_type == CasingTypeEnum.Title:
    description = description.title()
else:
    description = description.lower()
return description
def transform_case(self, description, case_type)
Transforms the case of the expression description, based on options

Args:
    description: The description to transform
    case_type: The casing type that controls the output casing
Returns:
    The transformed description with proper casing
3.541681
3.030559
1.168656
return [
    calendar.day_name[6],  # CRON 0 -> Sunday
    calendar.day_name[0],  # CRON 1 -> Monday
    calendar.day_name[1],
    calendar.day_name[2],
    calendar.day_name[3],
    calendar.day_name[4],
    calendar.day_name[5]   # CRON 6 -> Saturday
][day_number]
def number_to_day(self, day_number)
Returns localized day name by its CRON number

Args:
    day_number: Number of a day
Returns:
    Day corresponding to day_number
Raises:
    IndexError: When day_number is not found
1.819823
2.083226
0.87356
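A small demonstration of the day-number rotation encoded by the lookup table above: CRON counts 0=Sunday..6=Saturday, while Python's calendar.day_name starts at Monday, so CRON day n maps to calendar.day_name[(n - 1) % 7].

import calendar

cron_days = [calendar.day_name[(n - 1) % 7] for n in range(7)]
print(cron_days)  # ['Sunday', 'Monday', ..., 'Saturday'] in the C locale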
# Have we been started already, and not stopped?
if self._start_d is not None:
    raise RestartError("Start called on already-started consumer")

# Keep track of state for debugging
self._state = '[started]'

# Create and return a deferred for alerting on errors/stoppage
start_d = self._start_d = Deferred()

# Start a new fetch request, possibly just for the starting offset
self._fetch_offset = start_offset
self._do_fetch()

# Set up the auto-commit timer, if needed
if self.consumer_group and self.auto_commit_every_s:
    self._commit_looper = LoopingCall(self._auto_commit)
    self._commit_looper.clock = self.client.reactor
    self._commit_looper_d = self._commit_looper.start(
        self.auto_commit_every_s, now=False)
    self._commit_looper_d.addCallbacks(self._commit_timer_stopped,
                                       self._commit_timer_failed)
return start_d
def start(self, start_offset)
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.

:param int start_offset:
    The offset within the partition from which to start fetching.
    Special values include: :const:`OFFSET_EARLIEST`,
    :const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
    supplied offset is :const:`OFFSET_EARLIEST` or
    :const:`OFFSET_LATEST` the :class:`Consumer` will use the
    OffsetRequest Kafka API to retrieve the actual offset used for
    fetching. In the case :const:`OFFSET_COMMITTED` is used,
    `commit_policy` MUST be set on the Consumer, and the Consumer will
    use the OffsetFetchRequest Kafka API to retrieve the actual offset
    used for fetching.

:returns:
    A :class:`~twisted.internet.defer.Deferred` which will resolve
    successfully when the consumer is cleanly stopped, or with a
    failure if the :class:`Consumer` encounters an error from which it
    is unable to recover.

:raises: :exc:`RestartError` if already running.
4.843915
4.422919
1.095185
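A hedged sketch of a complete Consumer start-up based on afkak's documented API; the host, topic, and partition values are illustrative, and a running Twisted reactor is assumed.

from afkak import KafkaClient, Consumer, OFFSET_EARLIEST

def processor(consumer, messages):
    # Called with each block of fetched messages.
    for m in messages:
        print(m.offset, m.message.value)

client = KafkaClient(hosts='localhost:9092')
consumer = Consumer(client, 'my-topic', 0, processor)
d = consumer.start(OFFSET_EARLIEST)  # fires only when the consumer stops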
def _handle_shutdown_commit_success(result):
    self._shutdown_d, d = None, self._shutdown_d
    self.stop()
    self._shuttingdown = False  # Shutdown complete
    d.callback((self._last_processed_offset,
                self._last_committed_offset))

def _handle_shutdown_commit_failure(failure):
    if failure.check(OperationInProgress):
        failure.value.deferred.addCallback(_commit_and_stop)
        return

    self._shutdown_d, d = None, self._shutdown_d
    self.stop()
    self._shuttingdown = False  # Shutdown complete
    d.errback(failure)

def _commit_and_stop(result):
    if not self.consumer_group:  # No consumer group, no committing
        return _handle_shutdown_commit_success(None)

    # Need to commit prior to stopping
    self.commit().addCallbacks(_handle_shutdown_commit_success,
                               _handle_shutdown_commit_failure)

# If we're not running, return a failure
if self._start_d is None:
    return fail(Failure(
        RestopError("Shutdown called on non-running consumer")))
# If we're called multiple times, return a failure
if self._shutdown_d:
    return fail(Failure(
        RestopError("Shutdown called more than once.")))
# Set our _shuttingdown flag, so our _process_message routine will stop
# feeding new messages to the processor, and fetches won't be retried
self._shuttingdown = True
# Keep track of state for debugging
self._state = '[shutting down]'

# Create a deferred to track the shutdown
self._shutdown_d = d = Deferred()

# Are we waiting for the processor to complete? If so, when it's done,
# commit our offsets and stop.
if self._processor_d:
    self._processor_d.addCallback(_commit_and_stop)
else:
    # No need to wait for the processor, we can commit and stop now
    _commit_and_stop(None)

# return the deferred
return d
def shutdown(self)
Gracefully shutdown the consumer

Consumer will complete any outstanding processing, commit its current
offsets (if so configured) and stop.

Returns deferred which callbacks with a tuple of:
(last processed offset, last committed offset) if it was able to
successfully commit, or errbacks with the commit failure, if any, or
fail(RestopError) if consumer is not running.
4.166119
3.672454
1.134424
if self._start_d is None:
    raise RestopError("Stop called on non-running consumer")
self._stopping = True
# Keep track of state for debugging
self._state = '[stopping]'
# Are we waiting for a request to come back?
if self._request_d:
    self._request_d.cancel()
# Are we working our way through a block of messages?
if self._msg_block_d:
    # Need to add a cancel handler...
    _msg_block_d, self._msg_block_d = self._msg_block_d, None
    _msg_block_d.addErrback(lambda fail: fail.trap(CancelledError))
    _msg_block_d.cancel()
# Are we waiting for the processor to complete?
if self._processor_d:
    self._processor_d.cancel()
# Are we waiting to retry a request?
if self._retry_call:
    self._retry_call.cancel()
# Are we waiting on a commit request?
if self._commit_ds:
    while self._commit_ds:
        d = self._commit_ds.pop()
        d.cancel()
if self._commit_req:
    self._commit_req.cancel()
# Are we waiting to retry a commit?
if self._commit_call:
    self._commit_call.cancel()
# Do we have an auto-commit looping call?
if self._commit_looper is not None:
    self._commit_looper.stop()
# Done stopping
self._stopping = False
# Keep track of state for debugging
self._state = '[stopped]'

# Clear and possibly callback our start() Deferred
self._start_d, d = None, self._start_d
if not d.called:
    d.callback((self._last_processed_offset,
                self._last_committed_offset))

# Return the offset of the message we last processed
return self._last_processed_offset
def stop(self)
Stop the consumer and return offset of last processed message.

This cancels all outstanding operations. Also, if the deferred
returned by `start` hasn't been called, it is called with a tuple
consisting of the last processed offset and the last committed offset.

:raises: :exc:`RestopError` if the :class:`Consumer` is not running.
3.602508
3.119198
1.154947
# Can't commit without a consumer_group
if not self.consumer_group:
    return fail(Failure(InvalidConsumerGroupError(
        "Bad Group_id:{0!r}".format(self.consumer_group))))
# short circuit if we are 'up to date', or haven't processed anything
if ((self._last_processed_offset is None) or
        (self._last_processed_offset == self._last_committed_offset)):
    return succeed(self._last_committed_offset)

# If we're currently processing a commit we return a failure
# with a deferred we'll fire when the in-progress one completes
if self._commit_ds:
    d = Deferred()
    self._commit_ds.append(d)
    return fail(OperationInProgress(d))

# Ok, we have processed messages since our last commit attempt, and
# we're not currently waiting on a commit request to complete:
# Start a new one
d = Deferred()
self._commit_ds.append(d)

# Send the request
self._send_commit_request()

# Reset the commit_looper here, rather than on success to give
# more stability to the commit interval.
if self._commit_looper is not None:
    self._commit_looper.reset()

# return the deferred
return d
def commit(self)
Commit the offset of the message we last processed if it is different
from what we believe is the last offset committed to Kafka.

.. note::

    It is possible to commit a smaller offset than Kafka has stored.
    This is by design, so we can reprocess a Kafka message stream if
    desired.

On error, will retry according to :attr:`request_retry_max_attempts`
(by default, forever).

If called while a commit operation is in progress, and new messages
have been processed since the last request was sent then the commit
will fail with :exc:`OperationInProgress`. The
:exc:`OperationInProgress` exception wraps a
:class:`~twisted.internet.defer.Deferred` which fires when the
outstanding commit operation completes.

:returns:
    A :class:`~twisted.internet.defer.Deferred` which resolves with
    the committed offset when the operation has completed. It will
    resolve immediately if the current offset and the last committed
    offset do not differ.
5.812636
5.061138
1.148484
# Check if we are even supposed to do any auto-committing
if (self._stopping or self._shuttingdown or (not self._start_d) or
        (self._last_processed_offset is None) or
        (not self.consumer_group) or
        (by_count and not self.auto_commit_every_n)):
    return

# If we're auto_committing because the timer expired, or by count and
# we don't have a record of our last_committed_offset, or we've
# processed enough messages since our last commit, then try to commit
if (not by_count or self._last_committed_offset is None or
        (self._last_processed_offset -
         self._last_committed_offset) >= self.auto_commit_every_n):

    if not self._commit_ds:
        commit_d = self.commit()
        commit_d.addErrback(self._handle_auto_commit_error)
    else:
        # We're waiting on the last commit to complete, so add a
        # callback to be called when the current request completes
        d = Deferred()
        d.addCallback(self._retry_auto_commit, by_count)
        self._commit_ds.append(d)
def _auto_commit(self, by_count=False)
Check if we should start a new commit operation and commit
4.209057
4.185456
1.005639
# Have we been told to stop or shutdown? Then don't actually retry.
if self._stopping or self._shuttingdown or self._start_d is None:
    # Stopping, or stopped already? No more fetching.
    return
if self._retry_call is None:
    if after is None:
        after = self.retry_delay
        self.retry_delay = min(self.retry_delay * REQUEST_RETRY_FACTOR,
                               self.retry_max_delay)

    self._fetch_attempt_count += 1
    self._retry_call = self.client.reactor.callLater(
        after, self._do_fetch)
def _retry_fetch(self, after=None)
Schedule a delayed :meth:`_do_fetch` call after a failure

:param float after:
    The delay in seconds after which to do the retried fetch. If
    `None`, our internal :attr:`retry_delay` is used, and adjusted by
    :const:`REQUEST_RETRY_FACTOR`.
5.378815
4.648486
1.157111
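A standalone sketch of the exponential back-off applied above, assuming REQUEST_RETRY_FACTOR = 1.20205 (Apery's constant, per the _handle_fetch_error docstring later in this listing) and illustrative initial/max delay bounds.

REQUEST_RETRY_FACTOR = 1.20205  # assumed value, per the docstring

def backoff_delays(initial=1.0, maximum=30.0, attempts=8):
    # Yield successive retry delays, capped at `maximum`.
    delay = initial
    for _ in range(attempts):
        yield delay
        delay = min(delay * REQUEST_RETRY_FACTOR, maximum)

print(['%.2f' % d for d in backoff_delays()])
# ['1.00', '1.20', '1.44', '1.74', '2.09', '2.51', '3.02', '3.63']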
# Got a response, clear our outstanding request deferred
self._request_d = None

# Successful request, reset our retry delay, count, etc
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1

response = response[0]
if hasattr(response, 'offsets'):
    # It's a response to an OffsetRequest
    self._fetch_offset = response.offsets[0]
else:
    # It's a response to an OffsetFetchRequest
    # Make sure we got a valid offset back. Kafka uses -1 to indicate
    # no committed offset was retrieved
    if response.offset == OFFSET_NOT_COMMITTED:
        self._fetch_offset = OFFSET_EARLIEST
    else:
        self._fetch_offset = response.offset + 1
        self._last_committed_offset = response.offset
self._do_fetch()
def _handle_offset_response(self, response)
Handle responses to both OffsetRequest and OffsetFetchRequest, since
they are similar enough.

:param response:
    A tuple of a single OffsetFetchResponse or OffsetResponse
5.559131
5.209544
1.067105
# outstanding request got errback'd, clear it
self._request_d = None

if self._stopping and failure.check(CancelledError):
    # Not really an error
    return
# Do we need to abort?
if (self.request_retry_max_attempts != 0 and
        self._fetch_attempt_count >= self.request_retry_max_attempts):
    log.debug(
        "%r: Exhausted attempts: %d fetching offset from kafka: %r",
        self, self.request_retry_max_attempts, failure)
    self._start_d.errback(failure)
    return
# Decide how to log this failure... If we have retried so many times
# we're at the retry_max_delay, then we log at warning every other
# time, debug otherwise
if (self.retry_delay < self.retry_max_delay or
        0 == (self._fetch_attempt_count % 2)):
    log.debug("%r: Failure fetching offset from kafka: %r",
              self, failure)
else:
    # We've retried until we hit the max delay, log at warn
    log.warning("%r: Still failing fetching offset from kafka: %r",
                self, failure)
self._retry_fetch()
def _handle_offset_error(self, failure)
Retry the offset fetch request if appropriate.

Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
log a warning. This should perhaps be extended to abort sooner on
certain errors.
5.496176
5.023037
1.094194
# If there's a _commit_call, and it's not active, clear it, it
# probably just called us...
if self._commit_call and not self._commit_call.active():
    self._commit_call = None

# Make sure we only have one outstanding commit request at a time
if self._commit_req is not None:
    raise OperationInProgress(self._commit_req)

# Handle defaults
if retry_delay is None:
    retry_delay = self.retry_init_delay
if attempt is None:
    attempt = 1

# Create new OffsetCommitRequest with the latest processed offset
commit_offset = self._last_processed_offset
commit_request = OffsetCommitRequest(
    self.topic, self.partition, commit_offset,
    TIMESTAMP_INVALID, self.commit_metadata)
log.debug("Committing off=%d grp=%s tpc=%s part=%s req=%r",
          self._last_processed_offset, self.consumer_group,
          self.topic, self.partition, commit_request)

# Send the request, add our callbacks
self._commit_req = d = self.client.send_offset_commit_request(
    self.consumer_group, [commit_request])
d.addBoth(self._clear_commit_req)
d.addCallbacks(
    self._update_committed_offset, self._handle_commit_error,
    callbackArgs=(commit_offset,),
    errbackArgs=(retry_delay, attempt))
def _send_commit_request(self, retry_delay=None, attempt=None)
Send a commit request with our last_processed_offset
3.740995
3.561855
1.050294
# Check if we are stopping and the request was cancelled
if self._stopping and failure.check(CancelledError):
    # Not really an error
    return self._deliver_commit_result(self._last_committed_offset)

# Check that the failure type is a Kafka error...this could maybe be
# a tighter check to determine whether a retry will succeed...
if not failure.check(KafkaError):
    log.error("Unhandleable failure during commit attempt: %r\n\t%r",
              failure, failure.getBriefTraceback())
    return self._deliver_commit_result(failure)

# Do we need to abort?
if (self.request_retry_max_attempts != 0 and
        attempt >= self.request_retry_max_attempts):
    log.debug("%r: Exhausted attempts: %d to commit offset: %r",
              self, self.request_retry_max_attempts, failure)
    return self._deliver_commit_result(failure)

# Check the retry_delay to see if we should log at the higher level
# Using attempts % 2 gets us 1-warn/minute with defaults timings
if retry_delay < self.retry_max_delay or 0 == (attempt % 2):
    log.debug("%r: Failure committing offset to kafka: %r", self,
              failure)
else:
    # We've retried until we hit the max delay, log alternately at warn
    log.warning("%r: Still failing committing offset to kafka: %r",
                self, failure)

# Schedule a delayed call to retry the commit
retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR,
                  self.retry_max_delay)
self._commit_call = self.client.reactor.callLater(
    retry_delay, self._send_commit_request, retry_delay, attempt + 1)
def _handle_commit_error(self, failure, retry_delay, attempt)
Retry the commit request, depending on failure type

Depending on the type of the failure, we retry the commit request with
the latest processed offset, or callback/errback self._commit_ds
5.260506
5.257211
1.000627
# Check if we're stopping/stopped and the errback of the processor
# deferred is just the cancelling we initiated. If so, we skip
# notifying via the _start_d deferred, as it will be 'callback'd at
# the end of stop()
if not (self._stopping and failure.check(CancelledError)):
    if self._start_d:  # Make sure we're not already stopped
        self._start_d.errback(failure)
def _handle_processor_error(self, failure)
Handle a failure in the processing of a block of messages

This method is called when the processor func fails while processing a
block of messages. Since we can't know how best to handle a processor
failure, we just :func:`errback` our :func:`start` method's deferred
to let our user know about the failure.
11.043701
10.882706
1.014794
# The _request_d deferred has fired, clear it.
self._request_d = None

if failure.check(OffsetOutOfRangeError):
    if self.auto_offset_reset is None:
        self._start_d.errback(failure)
        return
    self._fetch_offset = self.auto_offset_reset

if self._stopping and failure.check(CancelledError):
    # Not really an error
    return
# Do we need to abort?
if (self.request_retry_max_attempts != 0 and
        self._fetch_attempt_count >= self.request_retry_max_attempts):
    log.debug(
        "%r: Exhausted attempts: %d fetching messages from kafka: %r",
        self, self.request_retry_max_attempts, failure)
    self._start_d.errback(failure)
    return

# Decide how to log this failure... If we have retried so many times
# we're at the retry_max_delay, then we log at warning every other
# time, debug otherwise
if (self.retry_delay < self.retry_max_delay or
        0 == (self._fetch_attempt_count % 2)):
    log.debug("%r: Failure fetching messages from kafka: %r",
              self, failure)
else:
    # We've retried until we hit the max delay, log at warn
    log.warning("%r: Still failing fetching messages from kafka: %r",
                self, failure)
self._retry_fetch()
def _handle_fetch_error(self, failure)
A fetch request resulted in an error. Retry after our current delay.

When a fetch error occurs, we check to see if the Consumer is being
stopped, and if so just return, trapping the CancelledError. If not,
we check if the Consumer has a non-zero setting for
:attr:`request_retry_max_attempts`; if so and we have reached that
limit, we errback() the Consumer's start() deferred with the failure.
If not, we determine whether to log at debug or warning (we log at
warning every other retry after backing off to the max retry delay,
resulting in a warning message approximately once per minute with the
default timings). We then wait our current :attr:`retry_delay`, and
retry the fetch. We also increase our retry_delay by Apery's constant
(1.20205) and note the failed fetch by incrementing
:attr:`_fetch_attempt_count`.

NOTE: this may retry forever.
TODO: Possibly make this differentiate based on the failure
4.729671
3.955642
1.195677
# Successful fetch, reset our retry delay
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1

# Check to see if we are still processing the last block we fetched...
if self._msg_block_d:
    # We are still working through the last block of messages...
    # We have to wait until it's done, then process this response
    self._msg_block_d.addCallback(
        lambda _: self._handle_fetch_response(responses))
    return

# No ongoing processing, great, let's get some started.
# Request no longer outstanding, clear the deferred tracker so we
# can refetch
self._request_d = None
messages = []
try:
    for resp in responses:  # We should really only ever get one...
        if resp.partition != self.partition:
            log.warning(
                "%r: Got response with partition: %r not our own: %r",
                self, resp.partition, self.partition)
            continue
        # resp.messages is a KafkaCodec._decode_message_set_iter
        # Note that 'message' here is really an OffsetAndMessage
        for message in resp.messages:
            # Check for messages included which are from prior to our
            # desired offset: can happen due to compressed message sets
            if message.offset < self._fetch_offset:
                log.debug(
                    'Skipping message at offset: %d, because its '
                    'offset is less than our fetch offset: %d.',
                    message.offset, self._fetch_offset)
                continue
            # Create a 'SourcedMessage' and add it to the messages list
            messages.append(
                SourcedMessage(
                    message=message.message,
                    offset=message.offset, topic=self.topic,
                    partition=self.partition))
            # Update our notion of from where to fetch.
            self._fetch_offset = message.offset + 1
except ConsumerFetchSizeTooSmall:
    # A message was too large for us to receive, given our current
    # buffer size. Grow it until it works, or we hit our max
    # Grow by 16x up to 1MB (could result in 16MB buf), then by 2x
    factor = 2
    if self.buffer_size <= 2**20:
        factor = 16
    if self.max_buffer_size is None:
        # No limit, increase until we succeed or fail to alloc RAM
        self.buffer_size *= factor
    elif (self.max_buffer_size is not None and
            self.buffer_size < self.max_buffer_size):
        # Limited, but currently below it.
        self.buffer_size = min(
            self.buffer_size * factor, self.max_buffer_size)
    else:
        # We failed, and are already at our max. Nothing we can do but
        # create a Failure and errback() our start() deferred
        log.error("Max fetch size %d too small", self.max_buffer_size)
        failure = Failure(
            ConsumerFetchSizeTooSmall(
                "Max buffer size:%d too small for message",
                self.max_buffer_size))
        self._start_d.errback(failure)
        return

    log.debug(
        "Next message larger than fetch size, increasing "
        "to %d (~2x) and retrying", self.buffer_size)
finally:
    # If we were able to extract any messages, deliver them to the
    # processor now.
    if messages:
        self._msg_block_d = Deferred()
        self._process_messages(messages)

# start another fetch, if needed, but use callLater to avoid recursion
self._retry_fetch(0)
def _handle_fetch_response(self, responses)
The callback handling the successful response from the fetch request

Delivers the message list to the processor, handles per-message errors
(ConsumerFetchSizeTooSmall), and triggers another fetch request.

If the processor is still processing the last batch of messages, we
defer this processing until it's done. Otherwise, we start another
fetch request and submit the messages to the processor.
5.757731
5.641209
1.020656
# Have we been told to shutdown?
if self._shuttingdown:
    return
# Do we have any messages to process?
if not messages:
    # No, we're done with this block. If we had another fetch result
    # waiting, this callback will trigger the processing thereof.
    if self._msg_block_d:
        _msg_block_d, self._msg_block_d = self._msg_block_d, None
        _msg_block_d.callback(True)
    return
# Yes, we've got some messages to process.
# Default to processing the entire block...
proc_block_size = sys.maxsize
# Unless our auto commit_policy restricts us to process less
if self.auto_commit_every_n:
    proc_block_size = self.auto_commit_every_n

# Divide messages into two lists: one to process now, and remainder
msgs_to_proc = messages[:proc_block_size]
msgs_remainder = messages[proc_block_size:]
# Call our processor callable and handle the possibility it returned
# a deferred...
last_offset = msgs_to_proc[-1].offset
self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
log.debug('self.processor return: %r, last_offset: %r', d, last_offset)
# Once the processor completes, clear our _processor_d
d.addBoth(self._clear_processor_deferred)
# Record the offset of the last processed message and check autocommit
d.addCallback(self._update_processed_offset, last_offset)
# If we were stopped, cancel the processor deferred. Note, we have to
# do this here, in addition to in stop() because the processor func
# itself could have called stop(), and then when it returned, we
# re-set self._processor_d to the return of maybeDeferred().
if self._stopping or self._start_d is None:
    d.cancel()
else:
    # Setup to process the rest of our messages
    d.addCallback(lambda _: self._process_messages(msgs_remainder))
# Add an error handler
d.addErrback(self._handle_processor_error)
def _process_messages(self, messages)
Send messages to the `processor` callback to be processed

In the case we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
5.270091
4.93671
1.067531
# Check for outstanding request.
if self._request_d:
    log.debug("_do_fetch: Outstanding request: %r", self._request_d)
    return

# Cleanup our _retry_call, if we have one
if self._retry_call is not None:
    if self._retry_call.active():
        self._retry_call.cancel()
    self._retry_call = None

# Do we know our offset yet, or do we need to figure it out?
if (self._fetch_offset == OFFSET_EARLIEST or
        self._fetch_offset == OFFSET_LATEST):
    # We need to fetch the offset for our topic/partition
    offset_request = OffsetRequest(
        self.topic, self.partition, self._fetch_offset, 1)
    self._request_d = self.client.send_offset_request([offset_request])
    self._request_d.addCallbacks(
        self._handle_offset_response, self._handle_offset_error)
elif self._fetch_offset == OFFSET_COMMITTED:
    # We need to fetch the committed offset for our topic/partition
    # Note we use the same callbacks, as the responses are "close
    # enough" for our needs here
    if not self.consumer_group:
        # consumer_group must be set for OFFSET_COMMITTED
        failure = Failure(
            InvalidConsumerGroupError("Bad Group_id:{0!r}".format(
                self.consumer_group)))
        self._start_d.errback(failure)
    request = OffsetFetchRequest(self.topic, self.partition)
    self._request_d = self.client.send_offset_fetch_request(
        self.consumer_group, [request])
    self._request_d.addCallbacks(
        self._handle_offset_response, self._handle_offset_error)
else:
    # Create fetch request payload for our partition
    request = FetchRequest(
        self.topic, self.partition, self._fetch_offset,
        self.buffer_size)
    # Send request and add handlers for the response
    self._request_d = self.client.send_fetch_request(
        [request], max_wait_time=self.fetch_max_wait_time,
        min_bytes=self.fetch_min_bytes)
    # We need a temp for this because if the response is already
    # available, _handle_fetch_response() will clear self._request_d
    d = self._request_d
    d.addCallback(self._handle_fetch_response)
    d.addErrback(self._handle_fetch_error)
def _do_fetch(self)
Send a fetch request if there isn't a request outstanding

        Sends a fetch request to the Kafka cluster to get messages at the
        current offset. When the response comes back, if there are messages,
        it delivers them to the :attr:`processor` callback and initiates
        another fetch request. If there is a recoverable error, the fetch is
        retried after :attr:`retry_delay`.

        In the case of an unrecoverable error, :func:`errback` is called on
        the :class:`Deferred` returned by :meth:`start()`.
3.034919
2.8821
1.053024
log.warning(
            '_commit_timer_failed: uncaught error %r: %s in _auto_commit',
            fail, fail.getBriefTraceback())
        self._commit_looper_d = self._commit_looper.start(
            self.auto_commit_every_s, now=False)
def _commit_timer_failed(self, fail)
Handle an error in the commit() function

        Our commit() function, called by the LoopingCall, failed. Some error
        probably came back from Kafka and _check_error() raised the exception.
        For now, just log the failure and restart the loop.
8.353142
6.859516
1.217745
if self._commit_looper is not lCall:
            log.warning('_commit_timer_stopped with wrong timer:%s not:%s',
                        lCall, self._commit_looper)
        else:
            log.debug('_commit_timer_stopped: %s %s', lCall,
                      self._commit_looper)
            self._commit_looper = None
            self._commit_looper_d = None
def _commit_timer_stopped(self, lCall)
We're shutting down; clean up our looping call.
3.216471
3.051546
1.054046
# Ensure byte_array arg is a bytearray
    if not isinstance(byte_array, bytearray):
        raise TypeError("Type: %r of 'byte_array' arg must be 'bytearray'"
                        % type(byte_array))
    length = len(byte_array)
    # 'm' and 'r' are mixing constants generated offline.
    # They're not really 'magic', they just happen to work well.
    m = 0x5bd1e995
    r = 24
    mod32bits = 0xffffffff

    # Initialize the hash to a 'random' value derived from seed and length
    h = seed ^ length
    length4 = length // 4

    for i in range(length4):
        i4 = i * 4
        k = ((byte_array[i4 + 0] & 0xff) +
             ((byte_array[i4 + 1] & 0xff) << 8) +
             ((byte_array[i4 + 2] & 0xff) << 16) +
             ((byte_array[i4 + 3] & 0xff) << 24))
        k &= mod32bits
        k *= m
        k &= mod32bits
        k ^= (k % 0x100000000) >> r  # k ^= k >>> r
        k &= mod32bits
        k *= m
        k &= mod32bits

        h *= m
        h &= mod32bits
        h ^= k
        h &= mod32bits

    # Handle the last few bytes of the input array
    extra_bytes = length % 4
    if extra_bytes == 3:
        h ^= (byte_array[(length & ~3) + 2] & 0xff) << 16
        h &= mod32bits
    if extra_bytes >= 2:
        h ^= (byte_array[(length & ~3) + 1] & 0xff) << 8
        h &= mod32bits
    if extra_bytes >= 1:
        h ^= (byte_array[length & ~3] & 0xff)
        h &= mod32bits
        h *= m
        h &= mod32bits

    h ^= (h % 0x100000000) >> 13  # h >>> 13;
    h &= mod32bits
    h *= m
    h &= mod32bits
    h ^= (h % 0x100000000) >> 15  # h >>> 15;
    h &= mod32bits

    return h
def pure_murmur2(byte_array, seed=0x9747b28c)
Pure-python Murmur2 implementation.

    Based on java client, see org.apache.kafka.common.utils.Utils.murmur2
    https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244

    Args:
        byte_array: bytearray - Raises TypeError otherwise

    Returns:
        MurmurHash2 of byte_array bytearray

    Raises:
        TypeError if byte_array arg is not of type bytearray
1.670649
1.637852
1.020024
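A quick self-contained check of pure_murmur2; the key value is hypothetical and only illustrates the call shape.

key = bytearray(b"some-key")
h = pure_murmur2(key)          # uses the default Kafka seed 0x9747b28c
assert 0 <= h <= 0xffffffff    # result is always a 32-bit value
try:
    pure_murmur2(b"some-key")  # bytes, not bytearray
except TypeError:
    pass                       # only bytearray input is accepted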
return partitions[(self._hash(key) & 0x7FFFFFFF) % len(partitions)]
def partition(self, key, partitions)
Select a partition based on the hash of the key.

        :param key: Partition key
        :type key: text string or UTF-8 `bytes` or `bytearray`
        :param list partitions: An indexed sequence of partition identifiers.
        :returns:
            One of the given partition identifiers. The result will be the
            same each time the same key and partition list is passed.
5.204875
5.750541
0.90511
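A standalone sketch of the selection rule above, wiring in pure_murmur2 as the hasher (the real class uses whatever self._hash is configured with; this version exists only for illustration). Masking with 0x7FFFFFFF keeps the index non-negative.

def pick_partition(key, partitions):
    # Same modular mapping as partition() above, with an explicit hash.
    return partitions[(pure_murmur2(bytearray(key)) & 0x7FFFFFFF)
                      % len(partitions)]

parts = [0, 1, 2, 3]
# The mapping is deterministic for a fixed key and partition list.
assert pick_partition(b"user-42", parts) == pick_partition(b"user-42", parts)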
if not has_snappy():
        # FIXME This should be static, not checked every call.
        raise NotImplementedError("Snappy codec is not available")

    if xerial_compatible:
        def _chunker():
            for i in range(0, len(payload), xerial_blocksize):
                yield payload[i:i + xerial_blocksize]

        out = BytesIO()
        out.write(_XERIAL_HEADER)

        for chunk in _chunker():
            block = snappy.compress(chunk)
            out.write(struct.pack('!i', len(block)))
            out.write(block)

        out.seek(0)
        return out.read()
    else:
        return snappy.compress(payload)
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024)
Compress the given data with the Snappy algorithm.

    :param bytes payload: Data to compress.
    :param bool xerial_compatible:
        If set then the stream is broken into length-prefixed blocks in
        a fashion compatible with the xerial snappy library.

        The format winds up being::

            +-------------+------------+--------------+------------+--------------+
            |   Header    | Block1_len | Block1 data  | BlockN len | BlockN data  |
            +-------------+------------+--------------+------------+--------------+
            |  16 bytes   |  BE int32  | snappy bytes |  BE int32  | snappy bytes |
            +-------------+------------+--------------+------------+--------------+

    :param int xerial_blocksize:
        Number of bytes per chunk to independently Snappy encode. 32k is the
        default in the xerial library.

    :returns: Compressed bytes.
    :rtype: :class:`bytes`
2.89749
3.142877
0.921923
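For illustration, a decoder for the xerial framing produced above. It assumes the python-snappy package is installed and that _XERIAL_HEADER is the 16-byte magic header the docstring describes; the function name is hypothetical.

import struct
from io import BytesIO
import snappy

def decode_xerial_blocks(data, header_len=16):
    # Skip the 16-byte header, then read (BE int32 length, block) pairs.
    buf, out = BytesIO(data[header_len:]), b''
    while True:
        raw_len = buf.read(4)
        if not raw_len:
            return out
        (block_len,) = struct.unpack('!i', raw_len)
        out += snappy.decompress(buf.read(block_len))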
if not hasattr(self, '_info_cache'):
            encoding_backend = get_backend()
            try:
                path = os.path.abspath(self.path)
            except AttributeError:
                path = os.path.abspath(self.name)
            self._info_cache = encoding_backend.get_media_info(path)

        return self._info_cache
def _get_video_info(self)
Returns basic information about the video as a dictionary.
3.183213
2.948523
1.079596
total_time = self.get_media_info(source_path)['duration']

        cmds = [self.ffmpeg_path, '-i', source_path]
        cmds.extend(self.params)
        cmds.extend(params)
        cmds.extend([target_path])

        process = self._spawn(cmds)

        buf = output = ''
        # update progress
        while True:
            # any more data?
            out = process.stderr.read(10)
            if not out:
                break

            out = out.decode(console_encoding)
            output += out
            buf += out

            try:
                line, buf = buf.split('\r', 1)
            except ValueError:
                continue

            try:
                time_str = RE_TIMECODE.findall(line)[0]
            except IndexError:
                continue

            # convert progress to percent
            time = 0
            for part in time_str.split(':'):
                time = 60 * time + float(part)

            percent = time / total_time
            logger.debug('yield {}%'.format(percent))
            yield percent

        if os.path.getsize(target_path) == 0:
            raise exceptions.FFmpegError("File size of generated file is 0")

        # wait for process to exit
        self._check_returncode(process)

        logger.debug(output)

        if not output:
            raise exceptions.FFmpegError("No output from FFmpeg.")

        yield 100
def encode(self, source_path, target_path, params)
Encodes a video to a specified file. All encoder-specific options are passed in using `params`.
3.619004
3.726352
0.971192
cmds = [self.ffprobe_path, '-i', video_path]
        cmds.extend(['-print_format', 'json'])
        cmds.extend(['-show_format', '-show_streams'])

        process = self._spawn(cmds)
        stdout, __ = self._check_returncode(process)

        media_info = self._parse_media_info(stdout)

        return {
            'duration': float(media_info['format']['duration']),
            'width': int(media_info['video'][0]['width']),
            'height': int(media_info['video'][0]['height']),
        }
def get_media_info(self, video_path)
Returns information about the given video as a dict.
2.497351
2.477324
1.008084
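The equivalent ffprobe invocation built by the code above; running it by hand is a handy way to inspect the JSON that _parse_media_info consumes. The file path and binary name here are placeholders.

import subprocess

subprocess.run(['ffprobe', '-i', 'video.mp4',
                '-print_format', 'json',
                '-show_format', '-show_streams'])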
filename = os.path.basename(video_path)
        filename, __ = os.path.splitext(filename)
        _, image_path = tempfile.mkstemp(suffix='_{}.jpg'.format(filename))

        video_duration = self.get_media_info(video_path)['duration']
        if at_time > video_duration:
            raise exceptions.InvalidTimeError()
        thumbnail_time = at_time

        cmds = [self.ffmpeg_path, '-i', video_path, '-vframes', '1']
        cmds.extend(['-ss', str(thumbnail_time), '-y', image_path])

        process = self._spawn(cmds)
        self._check_returncode(process)

        if not os.path.getsize(image_path):
            # we somehow failed to generate thumbnail
            os.unlink(image_path)
            raise exceptions.InvalidTimeError()

        return image_path
def get_thumbnail(self, video_path, at_time=0.5)
Extracts an image of a video and returns its path.

        If the requested thumbnail is not within the duration of the video
        an `InvalidTimeError` is raised.
2.745295
2.626843
1.045093
# get instance
    Model = apps.get_model(app_label=app_label, model_name=model_name)
    instance = Model.objects.get(pk=object_pk)

    # search for `VideoField`s
    fields = instance._meta.fields
    for field in fields:
        if isinstance(field, VideoField):
            if not getattr(instance, field.name):
                # ignore empty fields
                continue

            # trigger conversion
            fieldfile = getattr(instance, field.name)
            convert_video(fieldfile)
def convert_all_videos(app_label, model_name, object_pk)
Automatically converts all videos of a given instance.
3.115634
3.040208
1.02481
instance = fieldfile.instance
    field = fieldfile.field

    filename = os.path.basename(fieldfile.path)
    source_path = fieldfile.path

    encoding_backend = get_backend()

    for options in settings.VIDEO_ENCODING_FORMATS[encoding_backend.name]:
        video_format, created = Format.objects.get_or_create(
            object_id=instance.pk,
            content_type=ContentType.objects.get_for_model(instance),
            field_name=field.name, format=options['name'])

        # do not reencode if not requested
        if video_format.file and not force:
            continue
        else:
            # set progress to 0
            video_format.reset_progress()

        # TODO do not upscale videos
        _, target_path = tempfile.mkstemp(
            suffix='_{name}.{extension}'.format(**options))

        try:
            encoding = encoding_backend.encode(
                source_path, target_path, options['params'])
            while encoding:
                try:
                    progress = next(encoding)
                except StopIteration:
                    break
                video_format.update_progress(progress)
        except VideoEncodingError:
            # TODO handle with more care
            video_format.delete()
            os.remove(target_path)
            continue

        # save encoded file
        video_format.file.save(
            '{filename}_{name}.{extension}'.format(filename=filename,
                                                   **options),
            File(open(target_path, mode='rb')))

        # now we are ready
        video_format.update_progress(100)

        # remove temporary file
        os.remove(target_path)
def convert_video(fieldfile, force=False)
Converts a given video file into all defined formats.
3.570161
3.603588
0.990724
results = []
    cache = {}

    #scale variables in x because the strategy works with real-valued positions
    #to visit different configurations
    tuning_options["scaling"] = True

    #using this instead of get_bounds because scaling is used
    bounds, _, _ = get_bounds_x0_eps(tuning_options)

    args = (kernel_options, tuning_options, runner, results, cache)

    num_particles = 20
    maxiter = 100

    #parameters needed by the Firefly Algorithm
    B0 = 1.0
    gamma = 1.0
    alpha = 0.20

    best_time_global = 1e20
    best_position_global = []

    #init firefly swarm
    swarm = []
    for i in range(0, num_particles):
        swarm.append(Firefly(bounds, args))

    #compute initial intensities
    for j in range(num_particles):
        swarm[j].compute_intensity(_cost_func)

    for c in range(maxiter):
        if tuning_options.verbose:
            print("start iteration ", c, "best time global", best_time_global)

        #compare all to all and compute attractiveness
        for i in range(num_particles):
            for j in range(num_particles):
                if swarm[i].intensity < swarm[j].intensity:
                    dist = swarm[i].distance_to(swarm[j])
                    beta = B0 * np.exp(-gamma * dist * dist)

                    swarm[i].move_towards(swarm[j], beta, alpha)
                    swarm[i].compute_intensity(_cost_func)

            #update global best if needed, actually only used for printing
            if swarm[i].time <= best_time_global:
                best_position_global = swarm[i].position
                best_time_global = swarm[i].time

        swarm.sort(key=lambda x: x.time)

    if tuning_options.verbose:
        print('Final result:')
        print(best_position_global)
        print(best_time_global)

    return results, runner.dev.get_environment()
def tune(runner, kernel_options, device_options, tuning_options)
Find the best performing kernel configuration in the parameter space

    :param runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: dict

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: dict

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: dict

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
4.580303
4.503734
1.017001
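A minimal numpy sketch of the attractiveness rule used above, in isolation: firefly i moves toward a brighter firefly j with strength B0*exp(-gamma*d^2), plus a small random perturbation scaled by alpha. The positions are hypothetical; no tuner is needed to run it.

import numpy as np

B0, gamma, alpha = 1.0, 1.0, 0.20
xi, xj = np.array([0.0, 0.0]), np.array([1.0, 1.0])

# distance and attractiveness, matching distance_to()/the beta formula above
dist = np.linalg.norm(xi - xj)
beta = B0 * np.exp(-gamma * dist * dist)

# the move step, matching move_towards() below
xi = xi + beta * (xj - xi) + alpha * np.random.uniform(-0.5, 0.5, xi.size)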
return np.linalg.norm(self.position-other.position)
def distance_to(self, other)
Return Euclidean distance between self and other Firefly
4.445734
3.354374
1.325354
self.evaluate(_cost_func)
        self.intensity = 1 / self.time
def compute_intensity(self, _cost_func)
Evaluate cost function and compute intensity at this position
8.875723
7.301169
1.215658
self.position += beta * (other.position - self.position)
        self.position += alpha * (np.random.uniform(-0.5, 0.5, len(self.position)))
        self.position = np.minimum(self.position, [b[1] for b in self.bounds])
        self.position = np.maximum(self.position, [b[0] for b in self.bounds])
def move_towards(self, other, beta, alpha)
Move this firefly towards another, using the given beta and alpha values
2.293157
2.062778
1.111684
#first check if the length is the same
    if len(instance.arguments) != len(answer):
        raise TypeError("The length of argument list and provided results do not match.")
    #for each element in the argument list, check if the types match
    for i, arg in enumerate(instance.arguments):
        if answer[i] is not None:  #skip None elements in the answer list
            if isinstance(answer[i], numpy.ndarray) and isinstance(arg, numpy.ndarray):
                if answer[i].dtype != arg.dtype:
                    raise TypeError("Element " + str(i)
                                    + " of the expected results list is not of the same dtype as the kernel output: "
                                    + str(answer[i].dtype) + " != " + str(arg.dtype) + ".")
                if answer[i].size != arg.size:
                    raise TypeError("Element " + str(i)
                                    + " of the expected results list has a size different from "
                                    + "the kernel argument: "
                                    + str(answer[i].size) + " != " + str(arg.size) + ".")
            elif isinstance(answer[i], numpy.number) and isinstance(arg, numpy.number):
                if answer[i].dtype != arg.dtype:
                    raise TypeError("Element " + str(i)
                                    + " of the expected results list is not the same as the kernel output: "
                                    + str(answer[i].dtype) + " != " + str(arg.dtype) + ".")
            else:
                #either answer[i] and argument have different types or answer[i] is not a numpy type
                #note: 'and' is required here, no value can be both an ndarray and a number
                if not isinstance(answer[i], numpy.ndarray) and not isinstance(answer[i], numpy.number):
                    raise TypeError("Element " + str(i)
                                    + " of expected results list is not a numpy array or numpy scalar.")
                else:
                    raise TypeError("Element " + str(i)
                                    + " of expected results list and kernel arguments have different types.")

    def _ravel(a):
        if hasattr(a, 'ravel') and len(a.shape) > 1:
            return a.ravel()
        return a

    def _flatten(a):
        if hasattr(a, 'flatten'):
            return a.flatten()
        return a

    correct = True
    for i, arg in enumerate(instance.arguments):
        expected = answer[i]
        if expected is not None:
            result = _ravel(result_host[i])
            expected = _flatten(expected)
            output_test = numpy.allclose(expected, result, atol=atol)

            if not output_test and verbose:
                print("Error: " + util.get_config_string(instance.params) + " detected during correctness check")
                print("this error occurred when checking value of the %d-th kernel argument" % (i,))
                print("Printing kernel output and expected result, set verbose=False to suppress this debug print")
                numpy.set_printoptions(edgeitems=50)
                print("Kernel output:")
                print(result)
                print("Expected:")
                print(expected)

            correct = correct and output_test

    if not correct:
        logging.debug('correctness check has found a correctness issue')
        raise Exception("Error: " + util.get_config_string(instance.params) + " failed correctness check")

    return correct
def _default_verify_function(instance, answer, result_host, atol, verbose)
default verify function based on numpy.allclose
2.809583
2.775992
1.0121
logging.debug('benchmark ' + instance.name)
        logging.debug('thread block dimensions x,y,z=%d,%d,%d', *instance.threads)
        logging.debug('grid dimensions x,y,z=%d,%d,%d', *instance.grid)

        time = None
        try:
            time = self.dev.benchmark(func, gpu_args, instance.threads, instance.grid, times)
        except Exception as e:
            #some launches may fail because too many registers are required
            #to run the kernel given the current thread block size
            #the desired behavior is to simply skip over this configuration
            #and proceed to try the next one
            skippable_exceptions = ["too many resources requested for launch", "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"]
            if any([skip_str in str(e) for skip_str in skippable_exceptions]):
                logging.debug('benchmark fails due to runtime failure too many resources required')
                if verbose:
                    print("skipping config", instance.name, "reason: too many resources requested for launch")
            else:
                logging.debug('benchmark encountered runtime failure: ' + str(e))
                print("Error while benchmarking:", instance.name)
                raise e
        return time
def benchmark(self, func, gpu_args, instance, times, verbose)
benchmark the kernel instance
5.339433
5.213839
1.024089
logging.debug('check_kernel_output')

        #if not using custom verify function, check if the length is the same
        if not verify and len(instance.arguments) != len(answer):
            raise TypeError("The length of argument list and provided results do not match.")

        #zero GPU memory for output arguments
        for i, arg in enumerate(instance.arguments):
            if verify or answer[i] is not None:
                if isinstance(arg, numpy.ndarray):
                    self.dev.memcpy_htod(gpu_args[i], arg)

        #run the kernel
        check = self.run_kernel(func, gpu_args, instance)
        if not check:
            #runtime failure occurred that should be ignored, skip correctness check
            return True

        #retrieve gpu results to host memory
        result_host = []
        for i, arg in enumerate(instance.arguments):
            if verify or answer[i] is not None:
                if isinstance(arg, numpy.ndarray):
                    result_host.append(numpy.zeros_like(arg))
                    self.dev.memcpy_dtoh(result_host[-1], gpu_args[i])
            else:
                result_host.append(None)

        #if the user has specified a custom verify function, then call it, else use default based on numpy allclose
        if verify:
            try:
                return verify(answer, result_host, atol=atol)
            except TypeError:
                return verify(answer, result_host)
        else:
            return _default_verify_function(instance, answer, result_host, atol, verbose)
def check_kernel_output(self, func, gpu_args, instance, answer, atol, verify, verbose)
runs the kernel once and checks the result against answer
3.698081
3.678184
1.00541
instance_string = util.get_instance_string(params)

        logging.debug('compile_and_benchmark ' + instance_string)
        mem_usage = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0, 1)
        logging.debug('Memory usage : %2.2f MB', mem_usage)

        verbose = tuning_options.verbose

        instance = self.create_kernel_instance(kernel_options, params, verbose)
        if instance is None:
            return None

        try:
            #compile the kernel
            func = self.compile_kernel(instance, verbose)
            if func is None:
                return None

            #add constant memory arguments to compiled module
            if kernel_options.cmem_args is not None:
                self.dev.copy_constant_memory_args(kernel_options.cmem_args)
            #add texture memory arguments to compiled module
            if kernel_options.texmem_args is not None:
                self.dev.copy_texture_memory_args(kernel_options.texmem_args)

            #test kernel for correctness and benchmark
            if tuning_options.answer is not None:
                self.check_kernel_output(func, gpu_args, instance, tuning_options.answer, tuning_options.atol, tuning_options.verify, verbose)

            #benchmark
            time = self.benchmark(func, gpu_args, instance, tuning_options.times, verbose)

        except Exception as e:
            #dump kernel_string to temp file
            temp_filename = util.get_temp_filename(suffix=".c")
            util.write_file(temp_filename, instance.kernel_string)
            print("Error while compiling or benchmarking, see source files: " + temp_filename + " ".join(instance.temp_files.values()))
            raise e

        #clean up any temporary files, if no error occurred
        for v in instance.temp_files.values():
            util.delete_temp_file(v)

        return time
def compile_and_benchmark(self, gpu_args, params, kernel_options, tuning_options)
Compile and benchmark a kernel instance based on kernel strings and parameters
3.504588
3.438639
1.019179
logging.debug('compile_kernel ' + instance.name)

        #compile kernel_string into device func
        func = None
        try:
            func = self.dev.compile(instance.name, instance.kernel_string)
        except Exception as e:
            #compiles may fail because certain kernel configurations use too
            #much shared memory for example, the desired behavior is to simply
            #skip over this configuration and try the next one
            if "uses too much shared data" in str(e):
                logging.debug('compile_kernel failed due to kernel using too much shared memory')
                if verbose:
                    print("skipping config", instance.name, "reason: too much shared memory used")
            else:
                logging.debug('compile_kernel failed due to error: ' + str(e))
                print("Error while compiling:", instance.name)
                raise e

        return func
def compile_kernel(self, instance, verbose)
compile the kernel for this specific instance
5.941555
5.941059
1.000083
if self.lang == "CUDA":
            self.dev.copy_constant_memory_args(cmem_args)
        else:
            raise Exception("Error cannot copy constant memory arguments when language is not CUDA")
def copy_constant_memory_args(self, cmem_args)
adds constant memory arguments to the most recently compiled module, if using CUDA
4.772903
3.904177
1.222512
if self.lang == "CUDA":
            self.dev.copy_texture_memory_args(texmem_args)
        else:
            raise Exception("Error cannot copy texture memory arguments when language is not CUDA")
def copy_texture_memory_args(self, texmem_args)
adds texture memory arguments to the most recently compiled module, if using CUDA
4.884994
3.916668
1.247232
instance_string = util.get_instance_string(params)

        grid_div = (kernel_options.grid_div_x, kernel_options.grid_div_y, kernel_options.grid_div_z)

        #insert default block_size_names if needed
        if not kernel_options.block_size_names:
            kernel_options.block_size_names = util.default_block_size_names

        #setup thread block and grid dimensions
        threads, grid = util.setup_block_and_grid(kernel_options.problem_size, grid_div, params, kernel_options.block_size_names)
        if numpy.prod(threads) > self.dev.max_threads:
            if verbose:
                print("skipping config", instance_string, "reason: too many threads per block")
            return None

        #obtain the kernel_string and prepare additional files, if any
        temp_files = dict()
        kernel_source = kernel_options.kernel_string
        if not isinstance(kernel_source, list):
            kernel_source = [kernel_source]
        name, kernel_string, temp_files = util.prepare_list_of_files(kernel_options.kernel_name, kernel_source, params, grid, threads, kernel_options.block_size_names)

        #collect everything we know about this instance and return it
        return KernelInstance(name, kernel_string, temp_files, threads, grid, params, kernel_options.arguments)
def create_kernel_instance(self, kernel_options, params, verbose)
create kernel instance from kernel source, parameters, problem size, grid divisors, and so on
4.014205
3.824275
1.049664
logging.debug('run_kernel %s', instance.name)
        logging.debug('thread block dims (%d, %d, %d)', *instance.threads)
        logging.debug('grid dims (%d, %d, %d)', *instance.grid)

        try:
            self.dev.run_kernel(func, gpu_args, instance.threads, instance.grid)
        except Exception as e:
            if "too many resources requested for launch" in str(e) or "OUT_OF_RESOURCES" in str(e):
                logging.debug('ignoring runtime failure due to too many resources required')
                return False
            else:
                logging.debug('encountered unexpected runtime failure: ' + str(e))
                raise e
        return True
def run_kernel(self, func, gpu_args, instance)
Run a compiled kernel instance on a device
3.532151
3.488238
1.012589
for i in range(0, len(l), n):
        yield l[i:i + n]
def _chunk_list(l, n)
Yield successive n-sized chunks from l.
2.003627
1.847176
1.084697
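A quick usage example of the chunking above, with hypothetical input.

list(_chunk_list(list(range(7)), 3))   # [[0, 1, 2], [3, 4, 5], [6]]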
workflow = self._parameter_sweep(parameter_space, kernel_options, self.device_options, tuning_options)

        if tuning_options.verbose:
            with NCDisplay(_error_filter) as display:
                answer = run_parallel_with_display(workflow, self.max_threads, display)
        else:
            answer = run_parallel(workflow, self.max_threads)

        if answer is None:
            print("Tuning did not return any results, did an error occur?")
            return None

        # Filter out None times
        result = []
        for chunk in answer:
            result += [d for d in chunk if d['time']]

        return result, {}
def run(self, parameter_space, kernel_options, tuning_options)
Tune all instances in parameter_space using multiple threads

        :param parameter_space: The parameter space as an iterable.
        :type parameter_space: iterable

        :param kernel_options: A dictionary with all options for the kernel.
        :type kernel_options: kernel_tuner.interface.Options

        :param tuning_options: A dictionary with all options regarding the tuning
            process.
        :type tuning_options: kernel_tuner.interface.Options

        :returns: A list of dictionaries for executed kernel configurations and their
            execution times. And a dictionary that contains information
            about the hardware/software environment on which the tuning took place.
        :rtype: list(dict()), dict()
6.757653
7.053584
0.958045
results = []

        #randomize parameter space to do pseudo load balancing
        parameter_space = list(parameter_space)
        random.shuffle(parameter_space)

        #split parameter space into chunks
        work_per_thread = int(numpy.ceil(len(parameter_space) / float(self.max_threads)))
        chunks = _chunk_list(parameter_space, work_per_thread)

        for chunk in chunks:
            chunked_result = self._run_chunk(chunk, kernel_options, device_options, tuning_options)
            results.append(lift(chunked_result))

        return gather(*results)
def _parameter_sweep(self, parameter_space, kernel_options, device_options, tuning_options)
Build a Noodles workflow by sweeping the parameter space
4.297132
4.054715
1.059787
#detect language and create high-level device interface
        self.dev = DeviceInterface(kernel_options.kernel_string, iterations=tuning_options.iterations, **device_options)

        #move data to the GPU
        gpu_args = self.dev.ready_argument_list(kernel_options.arguments)

        results = []

        for element in chunk:
            params = dict(OrderedDict(zip(tuning_options.tune_params.keys(), element)))

            try:
                time = self.dev.compile_and_benchmark(gpu_args, params, kernel_options, tuning_options)
                params['time'] = time
                results.append(params)
            except Exception:
                params['time'] = None
                results.append(params)

        return results
def _run_chunk(self, chunk, kernel_options, device_options, tuning_options)
Benchmark a chunk of kernel instances from the parameter space
5.653329
5.453789
1.036588
tune_params = tuning_options.tune_params

    #compute cartesian product of all tunable parameters
    parameter_space = itertools.product(*tune_params.values())

    #check for search space restrictions
    if tuning_options.restrictions is not None:
        parameter_space = filter(lambda p: util.check_restrictions(tuning_options.restrictions, p, tune_params.keys(), tuning_options.verbose), parameter_space)

    #reduce parameter space to a random sample using sample_fraction
    parameter_space = numpy.array(list(parameter_space))
    size = len(parameter_space)
    fraction = int(numpy.ceil(size * float(tuning_options.sample_fraction)))
    sample_indices = numpy.random.choice(range(size), size=fraction, replace=False)
    parameter_space = parameter_space[sample_indices]

    #call the runner
    results, env = runner.run(parameter_space, kernel_options, tuning_options)

    return results, env
def tune(runner, kernel_options, device_options, tuning_options)
Tune a random sample of the parameter space, of size sample_fraction
    times the size of the full space

    :param runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device on
        which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
3.278509
3.143378
1.042989
types_map = {"uint8": ["uchar", "unsigned char", "uint8_t"],
                 "int8": ["char", "int8_t"],
                 "uint16": ["ushort", "unsigned short", "uint16_t"],
                 "int16": ["short", "int16_t"],
                 "uint32": ["uint", "unsigned int", "uint32_t"],
                 "int32": ["int", "int32_t"],
                 #discrepancy between OpenCL and C here, long may be 32bits in C
                 "uint64": ["ulong", "unsigned long", "uint64_t"],
                 "int64": ["long", "int64_t"],
                 "float16": ["half"],
                 "float32": ["float"],
                 "float64": ["double"]}
    if dtype in types_map:
        return any([substr in kernel_argument for substr in types_map[dtype]])
    else:
        return False
def check_argument_type(dtype, kernel_argument, i)
check if the numpy.dtype matches the type used in the code
2.14612
2.135399
1.005021
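An example of the substring matching performed above; note that the `i` argument is not used inside the function. The kernel argument strings are hypothetical.

import numpy

check_argument_type(str(numpy.float32(1.0).dtype), "float *data", 0)  # True
check_argument_type(str(numpy.int32(1).dtype), "float *data", 0)      # False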
kernel_arguments = list()
    collected_errors = list()
    for iterator in re.finditer(kernel_name + r"[ \n\t]*" + r"\(", kernel_string):
        kernel_start = iterator.end()
        kernel_end = kernel_string.find(")", kernel_start)
        if kernel_start != 0:
            kernel_arguments.append(kernel_string[kernel_start:kernel_end].split(","))
    for arguments_set, arguments in enumerate(kernel_arguments):
        collected_errors.append(list())
        if len(arguments) != len(args):
            collected_errors[arguments_set].append("Kernel and host argument lists do not match in size.")
            continue
        for (i, arg) in enumerate(args):
            kernel_argument = arguments[i]

            if not isinstance(arg, (numpy.ndarray, numpy.generic)):
                raise TypeError("Argument at position " + str(i) + " of type: " + str(type(arg)) + " should be of type numpy.ndarray or numpy scalar")

            correct = True
            if isinstance(arg, numpy.ndarray) and not "*" in kernel_argument:
                correct = False  #array is passed to non-pointer kernel argument

            if correct and check_argument_type(str(arg.dtype), kernel_argument, i):
                continue

            collected_errors[arguments_set].append("Argument at position " + str(i) + " of dtype: " + str(arg.dtype) +
                                                   " does not match " + kernel_argument + ".")
        if not collected_errors[arguments_set]:
            #We assume that if there is a possible list of arguments that matches with the provided one
            #it is the right one
            return
    for errors in collected_errors:
        warnings.warn(errors[0], UserWarning)
def check_argument_list(kernel_name, kernel_string, args)
raise an exception if the kernel arguments do not match the host arguments
3.620862
3.498674
1.034924
forbidden_names = ("grid_size_x", "grid_size_y", "grid_size_z")
    forbidden_name_substr = ("time", "times")
    for name, param in tune_params.items():
        if name in forbidden_names:
            raise ValueError("Tune parameter " + name + " with value " + str(param) + " has a forbidden name!")
        for forbidden_substr in forbidden_name_substr:
            if forbidden_substr in name:
                raise ValueError("Tune parameter " + name + " with value " + str(param) +
                                 " has a forbidden name: not allowed to use " + forbidden_substr +
                                 " in tune parameter names!")
def check_tune_params_list(tune_params)
raise an exception if a tune parameter has a forbidden name
2.85054
2.532871
1.125418
params = OrderedDict(zip(keys, element))
    for restrict in restrictions:
        if not eval(replace_param_occurrences(restrict, params)):
            if verbose:
                print("skipping config", get_instance_string(params), "reason: config fails restriction")
            return False
    return True
def check_restrictions(restrictions, element, keys, verbose)
check whether a specific instance meets the search space restrictions
7.684898
6.61769
1.161266
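An example restriction check: each restriction is a Python expression over the tunable parameter names, evaluated after textual substitution of the current values. The parameter names and values are hypothetical.

from collections import OrderedDict

restrictions = ["block_size_x == 2*block_size_y"]
keys = ["block_size_x", "block_size_y"]

check_restrictions(restrictions, (64, 32), keys, False)   # True:  64 == 2*32
check_restrictions(restrictions, (64, 16), keys, False)   # False: 64 != 2*16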
if lang is None:
        if callable(kernel_source):
            raise TypeError("Please specify language when using a code generator function")
        kernel_string = get_kernel_string(kernel_source)
        if "__global__" in kernel_string:
            lang = "CUDA"
        elif "__kernel" in kernel_string:
            lang = "OpenCL"
        else:
            lang = "C"
    return lang
def detect_language(lang, kernel_source)
attempt to detect language from the kernel_string if not specified
3.71569
3.302067
1.125262
compact_str_items = []
    #first make a list of compact strings for each parameter
    for k, v in params.items():
        unit = ""
        #checking for None alone is not enough, units could be mocked which causes errors
        if isinstance(units, dict):
            unit = units.get(k, "")
        compact_str_items.append(k + "=" + str(v) + unit)
    #and finally join them
    compact_str = ", ".join(compact_str_items)
    return compact_str
def get_config_string(params, units=None)
return a compact string representation of a dictionary
5.590233
5.289203
1.056914
def get_dimension_divisor(divisor_list, default, params):
        if divisor_list is None:
            if default in params:
                divisor_list = [default]
            else:
                return 1
        return numpy.prod([int(eval(replace_param_occurrences(s, params))) for s in divisor_list])

    divisors = [get_dimension_divisor(d, block_size_names[i], params) for i, d in enumerate(grid_div)]
    return tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d))) for i, d in enumerate(divisors))
def get_grid_dimensions(current_problem_size, params, grid_div, block_size_names)
compute grid dims based on problem sizes and listed grid divisors
3.119802
2.979399
1.047125
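A worked example of the grid computation above: when no explicit grid divisor is given, the block size name itself is used as the divisor, so a 1000-wide problem with a 128-wide block yields ceil(1000/128) = 8 blocks in x. The values are hypothetical.

problem = [1000, 1, 1]
grid = get_grid_dimensions(problem, {"block_size_x": 128},
                           (None, None, None),
                           ["block_size_x", "block_size_y", "block_size_z"])
# grid == (8, 1, 1)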
#logging.debug('get_kernel_string called with %s', str(kernel_source))
    logging.debug('get_kernel_string called')

    kernel_string = None
    if callable(kernel_source):
        kernel_string = kernel_source(params)
    elif isinstance(kernel_source, str):
        if looks_like_a_filename(kernel_source):
            kernel_string = read_file(kernel_source) or kernel_source
        else:
            kernel_string = kernel_source
    else:
        raise TypeError("Error kernel_source is not a string nor a callable function")
    return kernel_string
def get_kernel_string(kernel_source, params=None)
retrieve the kernel source and return as a string

    This function processes the passed kernel_source argument, which could be
    a function, a string with a filename, or just a string with code already.

    If kernel_source is a function, the function is called with instance
    parameters in 'params' as the only argument.

    If kernel_source looks like a filename, the file is read in, but if the
    file does not exist, it is assumed that the string is not a filename
    after all.

    :param kernel_source: One of the sources for the kernel, could be a
        function that generates the kernel code, a string containing a
        filename that points to the kernel source, or just a string that
        contains the code.
    :type kernel_source: string or callable

    :param params: Dictionary containing the tunable parameters for this
        specific kernel instance, only needed when kernel_source is a
        generator.
    :type params: dict

    :returns: A string containing the kernel code.
    :rtype: string
2.856332
2.751482
1.038107
if isinstance(problem_size, (str, int, numpy.integer)):
        problem_size = (problem_size, )
    current_problem_size = [1, 1, 1]
    for i, s in enumerate(problem_size):
        if isinstance(s, str):
            current_problem_size[i] = int(eval(replace_param_occurrences(s, params)))
        elif isinstance(s, (int, numpy.integer)):
            current_problem_size[i] = s
        else:
            raise TypeError("Error: problem_size should only contain strings or integers")
    return current_problem_size
def get_problem_size(problem_size, params)
compute current problem size
2.645934
2.508815
1.054655
#suffix or "" for Python 2 compatibility
    file = tempfile.mkstemp(suffix=suffix or "", prefix="temp_", dir=os.getcwd())
    os.close(file[0])
    return file[1]
def get_temp_filename(suffix=None)
return the path of a unique temporary file of the form temp_X in the current directory
4.897072
4.757533
1.02933
if not block_size_names:
        block_size_names = default_block_size_names

    block_size_x = params.get(block_size_names[0], 256)
    block_size_y = params.get(block_size_names[1], 1)
    block_size_z = params.get(block_size_names[2], 1)
    return (int(block_size_x), int(block_size_y), int(block_size_z))
def get_thread_block_dimensions(params, block_size_names=None)
get the thread block dimensions from the tuning params, using the block size name convention
1.688576
1.660776
1.016739
logging.debug('looks_like_a_filename called')
    result = False
    if isinstance(kernel_source, str):
        result = True
        #test that it is not too long
        if len(kernel_source) > 250:
            result = False
        #test that it contains no special characters
        for c in "();{}\\":
            if c in kernel_source:
                result = False
        #just a safeguard for stuff that looks like code
        for s in ["__global__ ", "__kernel ", "void ", "float "]:
            if s in kernel_source:
                result = False
        #string must contain substring ".c", ".opencl", or ".F"
        result = result and any([s in kernel_source for s in (".c", ".opencl", ".F")])
    logging.debug('kernel_source is a filename: %s' % str(result))
    return result
def looks_like_a_filename(kernel_source)
attempt to detect whether source code or a filename was passed
4.222158
4.13382
1.02137
logging.debug('prepare_kernel_string called for %s', kernel_name)

    grid_dim_names = ["grid_size_x", "grid_size_y", "grid_size_z"]
    for i, g in enumerate(grid):
        kernel_string = "#define " + grid_dim_names[i] + " " + str(g) + "\n" + kernel_string
    for i, g in enumerate(threads):
        kernel_string = "#define " + block_size_names[i] + " " + str(g) + "\n" + kernel_string
    for k, v in params.items():
        if k not in block_size_names:
            kernel_string = "#define " + k + " " + str(v) + "\n" + kernel_string

    name = kernel_name
    #name = kernel_name + "_" + get_instance_string(params)
    #kernel_string = kernel_string.replace(kernel_name, name)

    return name, kernel_string
def prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)
prepare kernel string for compilation

    Prepends the kernel with a series of C preprocessor defines specific
    to this kernel instance:

     * the thread block dimensions
     * the grid dimensions
     * tunable parameters

    Additionally, the kernel name can be replaced with an instance-specific
    name (the renaming is commented out in the code above). This is done to
    prevent the kernel compilation being skipped by PyCUDA and/or PyOpenCL,
    which may use caching to save compilation time. That caching could lead
    to strange bugs if the name of the kernel is also used for other things
    in the source code.

    :param kernel_name: Name of the kernel.
    :type kernel_name: string

    :param kernel_string: One of the source files of the kernel as a string
        containing code.
    :type kernel_string: string

    :param params: A dictionary containing the tunable parameters specific
        to this instance.
    :type params: dict

    :param grid: A tuple with the grid dimensions for this specific instance.
    :type grid: tuple(x,y,z)

    :param threads: A tuple with the thread block dimensions for this
        specific instance.
    :type threads: tuple(x,y,z)

    :param block_size_names: A tuple with the names of the thread block
        dimensions used in the code. By default this is ["block_size_x", ...],
        but the user may supply different names if they prefer.
    :type block_size_names: tuple(string)

    :returns: A string containing the source code made specific to this
        kernel instance.
    :rtype: string
2.140274
2.085354
1.026336
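A worked example of what the prepending above produces; the one-line kernel source is hypothetical, real inputs are full source files.

src = "__global__ void vector_add(float *c) { }"
name, out = prepare_kernel_string(
    "vector_add", src, {"block_size_x": 128}, (8, 1, 1), (128, 1, 1),
    ["block_size_x", "block_size_y", "block_size_z"])
# `out` now begins with #define lines such as:
#   #define grid_size_x 8
#   #define block_size_x 128
# followed by the original source.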
temp_files = dict()

    kernel_string = get_kernel_string(kernel_file_list[0], params)
    name, kernel_string = prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)

    if len(kernel_file_list) > 1:
        for f in kernel_file_list[1:]:
            #generate temp filename with the same extension
            temp_file = get_temp_filename(suffix="." + f.split(".")[-1])
            temp_files[f] = temp_file
            #add preprocessor statements to the additional file
            _, temp_file_string = prepare_kernel_string(kernel_name, get_kernel_string(f, params), params, grid, threads, block_size_names)
            write_file(temp_file, temp_file_string)
            #replace occurrences of the additional file's name in the first kernel_string with the name of the temp file
            kernel_string = kernel_string.replace(f, temp_file)

    return name, kernel_string, temp_files
def prepare_list_of_files(kernel_name, kernel_file_list, params, grid, threads, block_size_names)
prepare the kernel string along with any additional files

    The first file in the list is allowed to include or read in the others.
    The files beyond the first are considered additional files that may also
    contain tunable parameters.

    For each file beyond the first this function creates a temporary file
    with preprocessor statements inserted. Occurrences of the original
    filenames in the first file are replaced with their temporary
    counterparts.

    :param kernel_file_list: A list of filenames. The first file in the list
        is allowed to read or include the other files in the list. All files
        will have access to the tunable parameters.
    :type kernel_file_list: list(string)

    :param params: A dictionary with the tunable parameters for this
        particular instance.
    :type params: dict()

    :param grid: The grid dimensions for this instance. The grid dimensions
        are also inserted into the code as if they are tunable parameters
        for convenience.
    :type grid: tuple()
2.778987
2.630539
1.056432
if os.path.isfile(filename):
        with open(filename, 'r') as f:
            return f.read()
def read_file(filename)
return the contents of the file named filename or None if file not found
2.394426
2.059848
1.162428
for k, v in params.items():
        string = string.replace(k, str(v))
    return string
def replace_param_occurrences(string, params)
replace occurrences of the tuning params with their current value
2.501508
2.256708
1.108477
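An example of the plain textual substitution above. Note that it is substring based: a parameter named "n" would also rewrite the "n" inside an identifier like "niter", so parameter names should not be prefixes of other identifiers used in expressions.

replace_param_occurrences("block_size_x * 2", {"block_size_x": 128})
# -> "128 * 2"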
threads = get_thread_block_dimensions(params, block_size_names)
    current_problem_size = get_problem_size(problem_size, params)
    grid = get_grid_dimensions(current_problem_size, params, grid_div, block_size_names)
    return threads, grid
def setup_block_and_grid(problem_size, grid_div, params, block_size_names=None)
compute problem size, thread block and grid dimensions for this kernel
3.293582
2.874445
1.145815
import sys  #ugly fix, hopefully we can find a better one
    if sys.version_info[0] >= 3:
        with open(filename, 'w', encoding="utf-8") as f:
            f.write(string)
    else:
        with open(filename, 'w') as f:
            f.write(string.encode("utf-8"))
def write_file(filename, string)
dump the contents of string to a file called filename
2.721849
2.680082
1.015584
gpu_args = []
        for arg in arguments:
            #if arg is a numpy array, copy it to the device
            if isinstance(arg, numpy.ndarray):
                gpu_args.append(cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf=arg))
            else:
                #if not an array, just pass the argument along
                gpu_args.append(arg)
        return gpu_args
def ready_argument_list(self, arguments)
ready argument list to be passed to the kernel, allocates gpu mem

        :param arguments: List of arguments to be passed to the kernel.
            The order should match the argument list on the OpenCL kernel.
            Allowed values are numpy.ndarray, and/or numpy.int32,
            numpy.float32, and so on.
        :type arguments: list(numpy objects)

        :returns: A list of arguments that can be passed to an OpenCL kernel.
        :rtype: list( pyopencl.Buffer, numpy.int32, ... )
3.539785
3.317511
1.067
prg = cl.Program(self.ctx, kernel_string).build(options=self.compiler_options)
        func = getattr(prg, kernel_name)
        return func
def compile(self, kernel_name, kernel_string)
call the OpenCL compiler to compile the kernel, return the device function

        :param kernel_name: The name of the kernel to be compiled, used to
            lookup the function after compilation.
        :type kernel_name: string

        :param kernel_string: The OpenCL kernel code that contains the
            function `kernel_name`
        :type kernel_string: string

        :returns: An OpenCL kernel that can be called directly.
        :rtype: pyopencl.Kernel
2.864553
3.720062
0.770028
global_size = (grid[0] * threads[0], grid[1] * threads[1], grid[2] * threads[2])
        local_size = threads
        time = []
        for _ in range(self.iterations):
            event = func(self.queue, global_size, local_size, *gpu_args)
            event.wait()
            time.append((event.profile.end - event.profile.start) * 1e-6)
        time = sorted(time)
        if times:
            return time
        else:
            if self.iterations > 4:
                return numpy.mean(time[1:-1])
            else:
                return numpy.mean(time)
def benchmark(self, func, gpu_args, threads, grid, times)
runs the kernel and measures time repeatedly, returns average time

        Runs the kernel and measures kernel execution time repeatedly, the
        number of iterations is set during the creation of OpenCLFunctions.
        Benchmark returns a robust average: from all measurements the fastest
        and slowest runs are discarded and the rest is included in the
        returned average. The reason for this is to be robust against
        initialization artifacts and other exceptional cases.

        :param func: A PyOpenCL kernel compiled for this specific kernel
            configuration
        :type func: pyopencl.Kernel

        :param gpu_args: A list of arguments to the kernel, order should
            match the order in the code. Allowed values are either variables
            in global memory or single values passed by value.
        :type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)

        :param threads: A tuple listing the number of work items in each
            dimension of the work group.
        :type threads: tuple(int, int, int)

        :param grid: A tuple listing the number of work groups in each
            dimension of the NDRange.
        :type grid: tuple(int, int)

        :param times: Return the execution time of all iterations.
        :type times: bool

        :returns: All execution times, if times=True, or a robust average
            for the kernel execution time.
        :rtype: float
2.851524
2.476219
1.151564
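The trimming performed above, in isolation: with more than 4 iterations the fastest and slowest runs are dropped before averaging. The timing values are hypothetical.

import numpy

measured = sorted([1.9, 2.0, 2.1, 2.0, 9.5])   # ms, one outlier
robust_avg = numpy.mean(measured[1:-1])        # 2.033..., outlier excluded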
global_size = (grid[0] * threads[0], grid[1] * threads[1], grid[2] * threads[2])
        local_size = threads
        event = func(self.queue, global_size, local_size, *gpu_args)
        event.wait()
def run_kernel(self, func, gpu_args, threads, grid)
runs the OpenCL kernel passed as 'func'

        :param func: An OpenCL Kernel
        :type func: pyopencl.Kernel

        :param gpu_args: A list of arguments to the kernel, order should
            match the order in the code. Allowed values are either variables
            in global memory or single values passed by value.
        :type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)

        :param threads: A tuple listing the number of work items in each
            dimension of the work group.
        :type threads: tuple(int, int, int)

        :param grid: A tuple listing the number of work groups in each
            dimension of the NDRange.
        :type grid: tuple(int, int)
2.799035
2.957211
0.946512
if isinstance(buffer, cl.Buffer):
            try:
                cl.enqueue_fill_buffer(self.queue, buffer, numpy.uint32(value), 0, size)
            except AttributeError:
                src = numpy.zeros(size, dtype='uint8') + numpy.uint8(value)
                cl.enqueue_copy(self.queue, buffer, src)
def memset(self, buffer, value, size)
set the memory in the given buffer to the given value

        :param buffer: An OpenCL Buffer to fill
        :type buffer: pyopencl.Buffer

        :param value: The value to set the memory to
        :type value: a single 32-bit int

        :param size: The size of the allocation unit in bytes
        :type size: int
3.042678
3.532301
0.861387
if isinstance(src, cl.Buffer):
            cl.enqueue_copy(self.queue, dest, src)
def memcpy_dtoh(self, dest, src)
perform a device to host memory copy

        :param dest: A numpy array in host memory to store the data
        :type dest: numpy.ndarray

        :param src: An OpenCL Buffer to copy data from
        :type src: pyopencl.Buffer
3.720823
4.606407
0.807749