Dataset columns:

repository_name: string, length 5-67
func_path_in_repository: string, length 4-234
func_name: string, length 0-314
whole_func_string: string, length 52-3.87M
language: string, 6 classes
func_code_string: string, length 52-3.87M
func_code_tokens: list, length 15-672k
func_documentation_string: string, length 1-47.2k
func_documentation_tokens: list, length 1-3.92k
split_name: string, 1 class
func_code_url: string, length 85-339
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.encode_fetch_request
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                         max_wait_time=100, min_bytes=4096):
    """
    Encodes some FetchRequest structs

    :param bytes client_id:
    :param int correlation_id:
    :param list payloads: list of :class:`FetchRequest`
    :param int max_wait_time: how long to block waiting on min_bytes of data
    :param int min_bytes:
        the minimum number of bytes to accumulate before returning the
        response
    """
    payloads = [] if payloads is None else payloads
    grouped_payloads = group_by_topic_and_partition(payloads)

    message = cls._encode_message_header(client_id, correlation_id,
                                         KafkaCodec.FETCH_KEY)

    assert isinstance(max_wait_time, int)

    # -1 is the replica id
    message += struct.pack('>iiii', -1, max_wait_time, min_bytes,
                           len(grouped_payloads))

    for topic, topic_payloads in grouped_payloads.items():
        message += write_short_ascii(topic)
        message += struct.pack('>i', len(topic_payloads))
        for partition, payload in topic_payloads.items():
            message += struct.pack('>iqi', partition, payload.offset,
                                   payload.max_bytes)
    return message
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L268-L300
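A minimal usage sketch for this encoder follows. It assumes FetchRequest is the payload tuple implied by the code above (fields topic, partition, offset, max_bytes, since the encoder groups by topic and partition and reads payload.offset and payload.max_bytes) and that it is importable from afkak.common; treat the field names and import path as illustrative rather than authoritative.

# Hypothetical usage; FetchRequest field names and import path are assumed.
from afkak.common import FetchRequest
from afkak.kafkacodec import KafkaCodec

payloads = [FetchRequest(topic=u'my_topic', partition=0, offset=1234,
                         max_bytes=65536)]
request_bytes = KafkaCodec.encode_fetch_request(
    b'my-client', correlation_id=1, payloads=payloads,
    max_wait_time=100, min_bytes=4096)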
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.decode_fetch_response
def decode_fetch_response(cls, data):
    """
    Decode bytes to a FetchResponse

    :param bytes data: bytes to decode
    """
    ((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)

    for _i in range(num_topics):
        (topic, cur) = read_short_ascii(data, cur)
        ((num_partitions,), cur) = relative_unpack('>i', data, cur)

        for _i in range(num_partitions):
            ((partition, error, highwater_mark_offset), cur) = \
                relative_unpack('>ihq', data, cur)
            (message_set, cur) = read_int_string(data, cur)
            yield FetchResponse(
                topic, partition, error,
                highwater_mark_offset,
                KafkaCodec._decode_message_set_iter(message_set))
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L303-L324
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.decode_offset_response
def decode_offset_response(cls, data):
    """
    Decode bytes to an :class:`OffsetResponse`

    :param bytes data: bytes to decode
    """
    ((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)

    for _i in range(num_topics):
        (topic, cur) = read_short_ascii(data, cur)
        ((num_partitions,), cur) = relative_unpack('>i', data, cur)

        for _i in range(num_partitions):
            ((partition, error, num_offsets), cur) = \
                relative_unpack('>ihi', data, cur)

            offsets = []
            for _i in range(num_offsets):
                ((offset,), cur) = relative_unpack('>q', data, cur)
                offsets.append(offset)

            yield OffsetResponse(topic, partition, error, tuple(offsets))
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L348-L369
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.encode_metadata_request
def encode_metadata_request(cls, client_id, correlation_id, topics=None):
    """
    Encode a MetadataRequest

    :param bytes client_id: string
    :param int correlation_id: int
    :param list topics: list of text
    """
    topics = [] if topics is None else topics
    message = [
        cls._encode_message_header(client_id, correlation_id,
                                   KafkaCodec.METADATA_KEY),
        struct.pack('>i', len(topics)),
    ]
    for topic in topics:
        message.append(write_short_ascii(topic))
    return b''.join(message)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L372-L388
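The metadata request encoder is the simplest of the group; a rough sketch of a call (values made up) looks like this. Per the Kafka protocol, an empty or omitted topic list asks the broker for metadata on all topics.

# Hypothetical usage; values are illustrative only.
from afkak.kafkacodec import KafkaCodec

request_bytes = KafkaCodec.encode_metadata_request(
    b'my-client', correlation_id=2, topics=[u'topic_a', u'topic_b'])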
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.decode_metadata_response
def decode_metadata_response(cls, data):
    """
    Decode bytes to a MetadataResponse

    :param bytes data: bytes to decode
    """
    ((correlation_id, numbrokers), cur) = relative_unpack('>ii', data, 0)

    # In testing, I saw this routine swap my machine to death when
    # passed bad data. So, some checks are in order...
    if numbrokers > MAX_BROKERS:
        raise InvalidMessageError(
            "Brokers:{} exceeds max:{}".format(numbrokers, MAX_BROKERS))

    # Broker info
    brokers = {}
    for _i in range(numbrokers):
        ((nodeId, ), cur) = relative_unpack('>i', data, cur)
        (host, cur) = read_short_ascii(data, cur)
        ((port,), cur) = relative_unpack('>i', data, cur)
        brokers[nodeId] = BrokerMetadata(nodeId, nativeString(host), port)

    # Topic info
    ((num_topics,), cur) = relative_unpack('>i', data, cur)
    topic_metadata = {}

    for _i in range(num_topics):
        ((topic_error,), cur) = relative_unpack('>h', data, cur)
        (topic_name, cur) = read_short_ascii(data, cur)
        ((num_partitions,), cur) = relative_unpack('>i', data, cur)
        partition_metadata = {}

        for _j in range(num_partitions):
            ((partition_error_code, partition, leader, numReplicas),
             cur) = relative_unpack('>hiii', data, cur)

            (replicas, cur) = relative_unpack(
                '>%di' % numReplicas, data, cur)

            ((num_isr,), cur) = relative_unpack('>i', data, cur)
            (isr, cur) = relative_unpack('>%di' % num_isr, data, cur)

            partition_metadata[partition] = \
                PartitionMetadata(
                    topic_name, partition, partition_error_code, leader,
                    replicas, isr)

        topic_metadata[topic_name] = TopicMetadata(
            topic_name, topic_error, partition_metadata)

    return brokers, topic_metadata
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L391-L441
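Because the decoder consumes only what its embedded counters announce, a synthetic response with zero brokers and zero topics is enough to exercise it end to end; a sketch:

import struct
from afkak.kafkacodec import KafkaCodec

# correlation_id=42, numbrokers=0, num_topics=0
empty_response = struct.pack('>iii', 42, 0, 0)
brokers, topic_metadata = KafkaCodec.decode_metadata_response(empty_response)
assert brokers == {} and topic_metadata == {}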
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.encode_consumermetadata_request
def encode_consumermetadata_request(cls, client_id, correlation_id,
                                    consumer_group):
    """
    Encode a ConsumerMetadataRequest

    :param bytes client_id: string
    :param int correlation_id: int
    :param str consumer_group: string
    """
    message = cls._encode_message_header(client_id, correlation_id,
                                         KafkaCodec.CONSUMER_METADATA_KEY)
    message += write_short_ascii(consumer_group)
    return message
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L444-L456
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.decode_consumermetadata_response
def decode_consumermetadata_response(cls, data):
    """
    Decode bytes to a ConsumerMetadataResponse

    :param bytes data: bytes to decode
    """
    (correlation_id, error_code, node_id), cur = \
        relative_unpack('>ihi', data, 0)
    host, cur = read_short_ascii(data, cur)
    (port,), cur = relative_unpack('>i', data, cur)
    return ConsumerMetadataResponse(
        error_code, node_id, nativeString(host), port)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L459-L471
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.encode_offset_commit_request
def encode_offset_commit_request(cls, client_id, correlation_id,
                                 group, group_generation_id, consumer_id,
                                 payloads):
    """
    Encode some OffsetCommitRequest structs (v1)

    :param bytes client_id: string
    :param int correlation_id: int
    :param str group: the consumer group to which you are committing offsets
    :param int group_generation_id: int32, generation ID of the group
    :param str consumer_id: string, Identifier for the consumer
    :param list payloads: list of :class:`OffsetCommitRequest`
    """
    grouped_payloads = group_by_topic_and_partition(payloads)

    message = cls._encode_message_header(
        client_id, correlation_id, KafkaCodec.OFFSET_COMMIT_KEY,
        api_version=1,
    )
    message += write_short_ascii(group)
    message += struct.pack('>i', group_generation_id)
    message += write_short_ascii(consumer_id)
    message += struct.pack('>i', len(grouped_payloads))

    for topic, topic_payloads in grouped_payloads.items():
        message += write_short_ascii(topic)
        message += struct.pack('>i', len(topic_payloads))

        for partition, payload in topic_payloads.items():
            message += struct.pack('>iqq', partition, payload.offset,
                                   payload.timestamp)
            message += write_short_bytes(payload.metadata)

    return message
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L474-L508
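A hedged sketch of committing one offset. The encoder reads payload.offset, payload.timestamp and payload.metadata and groups by topic and partition, so OffsetCommitRequest is assumed to carry exactly those five fields; the field names, the import path, and the placeholder values for group_generation_id and consumer_id are illustrative only.

# Hypothetical usage; OffsetCommitRequest field names are assumed.
from afkak.common import OffsetCommitRequest
from afkak.kafkacodec import KafkaCodec

commits = [OffsetCommitRequest(topic=u'my_topic', partition=0, offset=1234,
                               timestamp=-1, metadata=b'')]
request_bytes = KafkaCodec.encode_offset_commit_request(
    b'my-client', correlation_id=3, group=u'my-group',
    group_generation_id=-1, consumer_id=u'', payloads=commits)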
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.decode_offset_commit_response
def decode_offset_commit_response(cls, data):
    """
    Decode bytes to an OffsetCommitResponse

    :param bytes data: bytes to decode
    """
    ((correlation_id,), cur) = relative_unpack('>i', data, 0)
    ((num_topics,), cur) = relative_unpack('>i', data, cur)

    for _i in range(num_topics):
        (topic, cur) = read_short_ascii(data, cur)
        ((num_partitions,), cur) = relative_unpack('>i', data, cur)

        for _i in range(num_partitions):
            ((partition, error), cur) = relative_unpack('>ih', data, cur)
            yield OffsetCommitResponse(topic, partition, error)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L511-L526
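As with the metadata decoder, the topic count drives everything, so an empty commit response decodes cleanly; a sketch:

import struct
from afkak.kafkacodec import KafkaCodec

# correlation_id=7, num_topics=0
empty_response = struct.pack('>ii', 7, 0)
assert list(KafkaCodec.decode_offset_commit_response(empty_response)) == []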
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.encode_offset_fetch_request
def encode_offset_fetch_request(cls, client_id, correlation_id,
                                group, payloads):
    """
    Encode some OffsetFetchRequest structs

    :param bytes client_id: string
    :param int correlation_id: int
    :param bytes group: string, the consumer group you are fetching offsets for
    :param list payloads: list of :class:`OffsetFetchRequest`
    """
    grouped_payloads = group_by_topic_and_partition(payloads)
    message = cls._encode_message_header(
        client_id, correlation_id, KafkaCodec.OFFSET_FETCH_KEY,
        api_version=1)

    message += write_short_ascii(group)
    message += struct.pack('>i', len(grouped_payloads))

    for topic, topic_payloads in grouped_payloads.items():
        message += write_short_ascii(topic)
        message += struct.pack('>i', len(topic_payloads))

        for partition in topic_payloads:
            message += struct.pack('>i', partition)

    return message
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L529-L554
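A usage sketch in the same spirit; OffsetFetchRequest is assumed to be a (topic, partition) pair, since the encoder only needs those two values, and the import path is assumed.

# Hypothetical usage; OffsetFetchRequest field names are assumed.
from afkak.common import OffsetFetchRequest
from afkak.kafkacodec import KafkaCodec

fetches = [OffsetFetchRequest(topic=u'my_topic', partition=0)]
request_bytes = KafkaCodec.encode_offset_fetch_request(
    b'my-client', correlation_id=4, group=u'my-group', payloads=fetches)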
ciena/afkak
afkak/kafkacodec.py
KafkaCodec.decode_offset_fetch_response
def decode_offset_fetch_response(cls, data):
    """
    Decode bytes to an OffsetFetchResponse

    :param bytes data: bytes to decode
    """
    ((correlation_id,), cur) = relative_unpack('>i', data, 0)
    ((num_topics,), cur) = relative_unpack('>i', data, cur)

    for _i in range(num_topics):
        (topic, cur) = read_short_ascii(data, cur)
        ((num_partitions,), cur) = relative_unpack('>i', data, cur)

        for _i in range(num_partitions):
            ((partition, offset), cur) = relative_unpack('>iq', data, cur)
            (metadata, cur) = read_short_bytes(data, cur)
            ((error,), cur) = relative_unpack('>h', data, cur)
            yield OffsetFetchResponse(topic, partition, offset,
                                      metadata, error)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L557-L577
ciena/afkak
afkak/_protocol.py
KafkaBootstrapProtocol.stringReceived
def stringReceived(self, response):
    """
    Handle a response from the broker.
    """
    correlation_id = response[0:4]
    try:
        d = self._pending.pop(correlation_id)
    except KeyError:
        self._log.warn((
            "Response has unknown correlation ID {correlation_id!r}."
            " Dropping connection to {peer}."
        ), correlation_id=correlation_id, peer=self.transport.getPeer())
        self.transport.loseConnection()
    else:
        d.callback(response)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/_protocol.py#L78-L92
ciena/afkak
afkak/_protocol.py
KafkaBootstrapProtocol.connectionLost
def connectionLost(self, reason=connectionDone):
    """
    Mark the protocol as failed and fail all pending operations.
    """
    self._failed = reason
    pending, self._pending = self._pending, None
    for d in pending.values():
        d.errback(reason)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/_protocol.py#L94-L101
ciena/afkak
afkak/_protocol.py
KafkaBootstrapProtocol.request
def request(self, request):
    """
    Send a request to the Kafka broker.

    :param bytes request:
        The bytes of a Kafka `RequestMessage`_ structure. It must have
        a unique (to this connection) correlation ID.

    :returns: `Deferred` which will:

        - Succeed with the bytes of a Kafka `ResponseMessage`_
        - Fail when the connection terminates

    .. _RequestMessage:: https://kafka.apache.org/protocol.html#protocol_messages
    """
    if self._failed is not None:
        return fail(self._failed)
    correlation_id = request[4:8]
    assert correlation_id not in self._pending
    d = Deferred()
    self.sendString(request)
    self._pending[correlation_id] = d
    return d
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/_protocol.py#L110-L134
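The pairing rule between request() and stringReceived() is plain byte slicing: the caller-chosen correlation ID sits at bytes 4:8 of the request and the broker echoes it at bytes 0:4 of the response, which is what keys the _pending dict. A standalone illustration of that invariant, assuming the standard Kafka request header layout (int16 api key, int16 api version, int32 correlation id):

import struct

api_key, api_version, correlation_id = 3, 0, 7
request_header = struct.pack('>hhi', api_key, api_version, correlation_id)
response = struct.pack('>i', correlation_id) + b'...body...'
assert request_header[4:8] == response[0:4]  # the key used for _pending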
ciena/afkak
afkak/producer.py
Producer.send_messages
def send_messages(self, topic, key=None, msgs=()):
    """
    Given a topic, and optional key (for partitioning) and a list of
    messages, send them to Kafka, either immediately, or when a batch is
    ready, depending on the Producer's batch settings.

    :param str topic: Kafka topic to send the messages to

    :param str key:
        Message key used to determine the topic partition to which the
        messages will be written. Either `bytes` or `None`.

        `None` means that there is no key, but note that that:

        - Kafka does not permit producing unkeyed messages to a compacted
          topic.
        - The *partitioner_class* may require a non-`None` key
          (`HashedPartitioner` does so).

    :param list msgs:
        A non-empty sequence of message bytestrings to send. `None`
        indicates a ``null`` message (i.e. a tombstone on a compacted
        topic).

    :returns:
        A :class:`~twisted.internet.defer.Deferred` that fires when the
        messages have been received by the Kafka cluster.

        It will fail with `TypeError` when:

        - *topic* is not text (`str` on Python 3, `str` or `unicode` on
          Python 2)
        - *key* is not `bytes` or `None`
        - *msgs* is not a sequence of `bytes` or `None`

        It will fail with `ValueError` when *msgs* is empty.
    """
    try:
        topic = _coerce_topic(topic)
        if key is not None and not isinstance(key, bytes):
            raise TypeError('key={!r} must be bytes or None'.format(key))
        if not msgs:
            raise ValueError("msgs must be a non-empty sequence")

        msg_cnt = len(msgs)
        byte_cnt = 0
        for index, m in enumerate(msgs):
            if m is None:
                continue

            if not isinstance(m, bytes):
                raise TypeError('Message {} to topic {} ({!r:.100}) has type {}, but must have type {}'.format(
                    index, topic, m, type(m).__name__, type(bytes).__name__))

            byte_cnt += len(m)
    except Exception:
        return fail()

    d = Deferred(self._cancel_send_messages)
    self._batch_reqs.append(SendRequest(topic, key, msgs, d))
    self._waitingMsgCount += msg_cnt
    self._waitingByteCount += byte_cnt

    # Add request to list of outstanding reqs' callback to remove
    self._outstanding.append(d)
    d.addBoth(self._remove_from_outstanding, d)
    # See if we have enough messages in the batch to do a send.
    self._check_send_batch()
    return d
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L166-L233
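A hedged usage sketch; `producer` is assumed to be an already-configured afkak Producer instance, and the topic, key, and message values are made up.

# `producer` is assumed to exist; Producer construction is not shown here.
d = producer.send_messages(u'my_topic', key=b'user-1',
                           msgs=[b'payload-1', b'payload-2'])

def _on_error(failure):
    # Validation errors (TypeError/ValueError) fail the Deferred at once;
    # delivery failures fail it later.
    print(failure)

d.addErrback(_on_error)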
ciena/afkak
afkak/producer.py
Producer.stop
def stop(self):
    """
    Terminate any outstanding requests.

    :returns: :class:``Deferred` which fires when fully stopped.
    """
    self.stopping = True
    # Cancel any outstanding request to our client
    if self._batch_send_d:
        self._batch_send_d.cancel()
    # Do we have to worry about our looping call?
    if self.batch_every_t is not None:
        # Stop our looping call, and wait for the deferred to be called
        if self._sendLooper is not None:
            self._sendLooper.stop()
    # Make sure requests that wasn't cancelled above are now
    self._cancel_outstanding()
    return self._sendLooperD or succeed(None)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L235-L252
ciena/afkak
afkak/producer.py
Producer._send_timer_failed
def _send_timer_failed(self, fail):
    """
    Our _send_batch() function called by the LoopingCall failed. Some
    error probably came back from Kafka and _check_error() raised the
    exception
    For now, just log the failure and restart the loop
    """
    log.warning('_send_timer_failed:%r: %s', fail,
                fail.getBriefTraceback())
    self._sendLooperD = self._sendLooper.start(
        self.batch_every_t, now=False)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L256-L266
ciena/afkak
afkak/producer.py
Producer._send_timer_stopped
def _send_timer_stopped(self, lCall):
    """
    We're shutting down, clean up our looping call...
    """
    if self._sendLooper is not lCall:
        log.warning('commitTimerStopped with wrong timer:%s not:%s',
                    lCall, self._sendLooper)
    else:
        self._sendLooper = None
        self._sendLooperD = None
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L268-L277
ciena/afkak
afkak/producer.py
Producer._next_partition
def _next_partition(self, topic, key=None):
    """get the next partition to which to publish

    Check with our client for the latest partitions for the topic, then
    ask our partitioner for the next partition to which we should publish
    for the give key. If needed, create a new partitioner for the topic.
    """
    # check if the client has metadata for the topic
    while self.client.metadata_error_for_topic(topic):
        # client doesn't have good metadata for topic. ask to fetch...
        # check if we have request attempts left
        if self._req_attempts >= self._max_attempts:
            # No, no attempts left, so raise the error
            _check_error(self.client.metadata_error_for_topic(topic))
        yield self.client.load_metadata_for_topics(topic)
        if not self.client.metadata_error_for_topic(topic):
            break
        self._req_attempts += 1
        d = Deferred()
        self.client.reactor.callLater(
            self._retry_interval, d.callback, True)
        self._retry_interval *= self.RETRY_INTERVAL_FACTOR
        yield d

    # Ok, should be safe to get the partitions now...
    partitions = self.client.topic_partitions[topic]
    # Do we have a partitioner for this topic already?
    if topic not in self.partitioners:
        # No, create a new paritioner for topic, partitions
        self.partitioners[topic] = \
            self.partitioner_class(topic, partitions)
    # Lookup the next partition
    partition = self.partitioners[topic].partition(key, partitions)
    returnValue(partition)
python
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L280-L313
ciena/afkak
afkak/producer.py
Producer._send_requests
def _send_requests(self, parts_results, requests):
    """Send the requests

    We've determined the partition for each message group in the batch, or
    got errors for them.
    """
    # We use these dictionaries to be able to combine all the messages
    # destined to the same topic/partition into one request
    # the messages & deferreds, both by topic+partition
    reqsByTopicPart = defaultdict(list)
    payloadsByTopicPart = defaultdict(list)
    deferredsByTopicPart = defaultdict(list)

    # We now have a list of (succeeded/failed, partition/None) tuples
    # for the partition lookups we did on each message group, zipped with
    # the requests
    for (success, part_or_failure), req in zip(parts_results, requests):
        if req.deferred.called:
            # Submitter cancelled the request while we were waiting for
            # the topic/partition, skip it
            continue
        if not success:
            # We failed to get a partition for this request, errback to the
            # caller with the failure. Maybe this should retry? However,
            # since this failure is likely to affect an entire Topic, there
            # should be no issues with ordering of messages within a
            # partition of a topic getting out of order. Let the caller
            # retry the particular request if they like, or they could
            # cancel all their outstanding requests in
            req.deferred.errback(part_or_failure)
            continue
        # Ok, we now have a partition for this request, we can add the
        # request for this topic/partition to reqsByTopicPart, and the
        # caller's deferred to deferredsByTopicPart
        topicPart = TopicAndPartition(req.topic, part_or_failure)
        reqsByTopicPart[topicPart].append(req)
        deferredsByTopicPart[topicPart].append(req.deferred)

    # Build list of payloads grouped by topic/partition
    # That is, we bundle all the messages destined for a given
    # topic/partition, even if they were submitted by different
    # requests into a single 'payload', and then we submit all the
    # payloads as a list to the client for sending to the various
    # brokers. The finest granularity of success/failure is at the
    # payload (topic/partition) level.
    payloads = []
    for (topic, partition), reqs in reqsByTopicPart.items():
        msgSet = create_message_set(reqs, self.codec)
        req = ProduceRequest(topic, partition, msgSet)
        topicPart = TopicAndPartition(topic, partition)
        payloads.append(req)
        payloadsByTopicPart[topicPart] = req

    # Make sure we have some payloads to send
    if not payloads:
        return

    # send the request
    d = self.client.send_produce_request(
        payloads, acks=self.req_acks, timeout=self.ack_timeout,
        fail_on_error=False)
    self._req_attempts += 1
    # add our handlers
    d.addBoth(self._handle_send_response, payloadsByTopicPart,
              deferredsByTopicPart)
    return d
python
def _send_requests(self, parts_results, requests): """Send the requests We've determined the partition for each message group in the batch, or got errors for them. """ # We use these dictionaries to be able to combine all the messages # destined to the same topic/partition into one request # the messages & deferreds, both by topic+partition reqsByTopicPart = defaultdict(list) payloadsByTopicPart = defaultdict(list) deferredsByTopicPart = defaultdict(list) # We now have a list of (succeeded/failed, partition/None) tuples # for the partition lookups we did on each message group, zipped with # the requests for (success, part_or_failure), req in zip(parts_results, requests): if req.deferred.called: # Submitter cancelled the request while we were waiting for # the topic/partition, skip it continue if not success: # We failed to get a partition for this request, errback to the # caller with the failure. Maybe this should retry? However, # since this failure is likely to affect an entire Topic, there # should be no issues with ordering of messages within a # partition of a topic getting out of order. Let the caller # retry the particular request if they like, or they could # cancel all their outstanding requests in req.deferred.errback(part_or_failure) continue # Ok, we now have a partition for this request, we can add the # request for this topic/partition to reqsByTopicPart, and the # caller's deferred to deferredsByTopicPart topicPart = TopicAndPartition(req.topic, part_or_failure) reqsByTopicPart[topicPart].append(req) deferredsByTopicPart[topicPart].append(req.deferred) # Build list of payloads grouped by topic/partition # That is, we bundle all the messages destined for a given # topic/partition, even if they were submitted by different # requests into a single 'payload', and then we submit all the # payloads as a list to the client for sending to the various # brokers. The finest granularity of success/failure is at the # payload (topic/partition) level. payloads = [] for (topic, partition), reqs in reqsByTopicPart.items(): msgSet = create_message_set(reqs, self.codec) req = ProduceRequest(topic, partition, msgSet) topicPart = TopicAndPartition(topic, partition) payloads.append(req) payloadsByTopicPart[topicPart] = req # Make sure we have some payloads to send if not payloads: return # send the request d = self.client.send_produce_request( payloads, acks=self.req_acks, timeout=self.ack_timeout, fail_on_error=False) self._req_attempts += 1 # add our handlers d.addBoth(self._handle_send_response, payloadsByTopicPart, deferredsByTopicPart) return d
[ "def", "_send_requests", "(", "self", ",", "parts_results", ",", "requests", ")", ":", "# We use these dictionaries to be able to combine all the messages", "# destined to the same topic/partition into one request", "# the messages & deferreds, both by topic+partition", "reqsByTopicPart", "=", "defaultdict", "(", "list", ")", "payloadsByTopicPart", "=", "defaultdict", "(", "list", ")", "deferredsByTopicPart", "=", "defaultdict", "(", "list", ")", "# We now have a list of (succeeded/failed, partition/None) tuples", "# for the partition lookups we did on each message group, zipped with", "# the requests", "for", "(", "success", ",", "part_or_failure", ")", ",", "req", "in", "zip", "(", "parts_results", ",", "requests", ")", ":", "if", "req", ".", "deferred", ".", "called", ":", "# Submitter cancelled the request while we were waiting for", "# the topic/partition, skip it", "continue", "if", "not", "success", ":", "# We failed to get a partition for this request, errback to the", "# caller with the failure. Maybe this should retry? However,", "# since this failure is likely to affect an entire Topic, there", "# should be no issues with ordering of messages within a", "# partition of a topic getting out of order. Let the caller", "# retry the particular request if they like, or they could", "# cancel all their outstanding requests in", "req", ".", "deferred", ".", "errback", "(", "part_or_failure", ")", "continue", "# Ok, we now have a partition for this request, we can add the", "# request for this topic/partition to reqsByTopicPart, and the", "# caller's deferred to deferredsByTopicPart", "topicPart", "=", "TopicAndPartition", "(", "req", ".", "topic", ",", "part_or_failure", ")", "reqsByTopicPart", "[", "topicPart", "]", ".", "append", "(", "req", ")", "deferredsByTopicPart", "[", "topicPart", "]", ".", "append", "(", "req", ".", "deferred", ")", "# Build list of payloads grouped by topic/partition", "# That is, we bundle all the messages destined for a given", "# topic/partition, even if they were submitted by different", "# requests into a single 'payload', and then we submit all the", "# payloads as a list to the client for sending to the various", "# brokers. The finest granularity of success/failure is at the", "# payload (topic/partition) level.", "payloads", "=", "[", "]", "for", "(", "topic", ",", "partition", ")", ",", "reqs", "in", "reqsByTopicPart", ".", "items", "(", ")", ":", "msgSet", "=", "create_message_set", "(", "reqs", ",", "self", ".", "codec", ")", "req", "=", "ProduceRequest", "(", "topic", ",", "partition", ",", "msgSet", ")", "topicPart", "=", "TopicAndPartition", "(", "topic", ",", "partition", ")", "payloads", ".", "append", "(", "req", ")", "payloadsByTopicPart", "[", "topicPart", "]", "=", "req", "# Make sure we have some payloads to send", "if", "not", "payloads", ":", "return", "# send the request", "d", "=", "self", ".", "client", ".", "send_produce_request", "(", "payloads", ",", "acks", "=", "self", ".", "req_acks", ",", "timeout", "=", "self", ".", "ack_timeout", ",", "fail_on_error", "=", "False", ")", "self", ".", "_req_attempts", "+=", "1", "# add our handlers", "d", ".", "addBoth", "(", "self", ".", "_handle_send_response", ",", "payloadsByTopicPart", ",", "deferredsByTopicPart", ")", "return", "d" ]
Send the requests We've determined the partition for each message group in the batch, or got errors for them.
[ "Send", "the", "requests" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L315-L378
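The core of _send_requests above is regrouping caller requests so that every message bound for the same topic/partition travels in a single produce payload. A standalone sketch of just that regrouping step, using plain tuples in place of afkak's TopicAndPartition and ProduceRequest objects:

from collections import defaultdict

def group_by_topic_partition(requests):
    # requests: iterable of (topic, partition, message) triples.
    # Returns {(topic, partition): [message, ...]} so each destination
    # receives exactly one combined payload.
    grouped = defaultdict(list)
    for topic, partition, message in requests:
        grouped[(topic, partition)].append(message)
    return dict(grouped)

reqs = [("t1", 0, b"a"), ("t1", 0, b"b"), ("t1", 1, b"c")]
print(group_by_topic_partition(reqs))
# {('t1', 0): [b'a', b'b'], ('t1', 1): [b'c']}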
ciena/afkak
afkak/producer.py
Producer._complete_batch_send
def _complete_batch_send(self, resp): """Complete the processing of our batch send operation Clear the deferred tracking our current batch processing and reset our retry count and retry interval Return none to eat any errors coming from up the deferred chain """ self._batch_send_d = None self._req_attempts = 0 self._retry_interval = self._init_retry_interval if isinstance(resp, Failure) and not resp.check(tid_CancelledError, CancelledError): log.error("Failure detected in _complete_batch_send: %r\n%r", resp, resp.getTraceback()) return
python
def _complete_batch_send(self, resp): """Complete the processing of our batch send operation Clear the deferred tracking our current batch processing and reset our retry count and retry interval Return none to eat any errors coming from up the deferred chain """ self._batch_send_d = None self._req_attempts = 0 self._retry_interval = self._init_retry_interval if isinstance(resp, Failure) and not resp.check(tid_CancelledError, CancelledError): log.error("Failure detected in _complete_batch_send: %r\n%r", resp, resp.getTraceback()) return
[ "def", "_complete_batch_send", "(", "self", ",", "resp", ")", ":", "self", ".", "_batch_send_d", "=", "None", "self", ".", "_req_attempts", "=", "0", "self", ".", "_retry_interval", "=", "self", ".", "_init_retry_interval", "if", "isinstance", "(", "resp", ",", "Failure", ")", "and", "not", "resp", ".", "check", "(", "tid_CancelledError", ",", "CancelledError", ")", ":", "log", ".", "error", "(", "\"Failure detected in _complete_batch_send: %r\\n%r\"", ",", "resp", ",", "resp", ".", "getTraceback", "(", ")", ")", "return" ]
Complete the processing of our batch send operation Clear the deferred tracking our current batch processing and reset our retry count and retry interval Return none to eat any errors coming from up the deferred chain
[ "Complete", "the", "processing", "of", "our", "batch", "send", "operation" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L380-L394
ciena/afkak
afkak/producer.py
Producer._check_send_batch
def _check_send_batch(self, result=None): """Check if we have enough messages/bytes to send Since this can be called from the callback chain, we pass through our first (non-self) arg """ if ( (self.batch_every_n and self.batch_every_n <= self._waitingMsgCount) or (self.batch_every_b and self.batch_every_b <= self._waitingByteCount) ): self._send_batch() return result
python
def _check_send_batch(self, result=None): """Check if we have enough messages/bytes to send Since this can be called from the callback chain, we pass through our first (non-self) arg """ if ( (self.batch_every_n and self.batch_every_n <= self._waitingMsgCount) or (self.batch_every_b and self.batch_every_b <= self._waitingByteCount) ): self._send_batch() return result
[ "def", "_check_send_batch", "(", "self", ",", "result", "=", "None", ")", ":", "if", "(", "(", "self", ".", "batch_every_n", "and", "self", ".", "batch_every_n", "<=", "self", ".", "_waitingMsgCount", ")", "or", "(", "self", ".", "batch_every_b", "and", "self", ".", "batch_every_b", "<=", "self", ".", "_waitingByteCount", ")", ")", ":", "self", ".", "_send_batch", "(", ")", "return", "result" ]
Check if we have enough messages/bytes to send Since this can be called from the callback chain, we pass through our first (non-self) arg
[ "Check", "if", "we", "have", "enough", "messages", "/", "bytes", "to", "send", "Since", "this", "can", "be", "called", "from", "the", "callback", "chain", "we", "pass", "through", "our", "first", "(", "non", "-", "self", ")", "arg" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L396-L406
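_check_send_batch above flushes the batch when either the message-count or the byte-count threshold has been reached, and a falsy threshold disables that trigger. The same predicate in isolation (parameter names are illustrative, not the producer's attribute names):

def should_flush(batch_every_n, batch_every_b, waiting_msgs, waiting_bytes):
    # A falsy threshold means "disabled"; otherwise flush once the
    # corresponding counter reaches the threshold.
    return bool(
        (batch_every_n and batch_every_n <= waiting_msgs)
        or (batch_every_b and batch_every_b <= waiting_bytes)
    )

assert should_flush(10, 0, 10, 0) is True       # count threshold reached
assert should_flush(10, 0, 9, 10**6) is False   # byte trigger disabled
assert should_flush(0, 4096, 3, 5000) is True   # byte threshold reached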
ciena/afkak
afkak/producer.py
Producer._send_batch
def _send_batch(self): """ Send the waiting messages, if there are any, and we can... This is called by our LoopingCall every send_every_t interval, and from send_messages everytime we have enough messages to send. This is also called from py:method:`send_messages` via py:method:`_check_send_batch` if there are enough messages/bytes to require a send. Note, the send will be delayed (triggered by completion or failure of previous) if we are currently trying to complete the last batch send. """ # We can be triggered by the LoopingCall, and have nothing to send... # Or, we've got SendRequest(s) to send, but are still processing the # previous batch... if (not self._batch_reqs) or self._batch_send_d: return # Save a local copy, and clear the global list & metrics requests, self._batch_reqs = self._batch_reqs, [] self._waitingByteCount = 0 self._waitingMsgCount = 0 # Iterate over them, fetching the partition for each message batch d_list = [] for req in requests: # For each request, we get the topic & key and use that to lookup # the next partition on which we should produce d_list.append(self._next_partition(req.topic, req.key)) d = self._batch_send_d = Deferred() # Since DeferredList doesn't propagate cancel() calls to deferreds it # might be waiting on for a result, we need to use this structure, # rather than just using the DeferredList directly d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True)) d.addCallback(self._send_requests, requests) # Once we finish fully processing the current batch, clear the # _batch_send_d and check if any more requests piled up when we # were busy. d.addBoth(self._complete_batch_send) d.addBoth(self._check_send_batch) # Fire off the callback to start processing... d.callback(None)
python
def _send_batch(self): """ Send the waiting messages, if there are any, and we can... This is called by our LoopingCall every send_every_t interval, and from send_messages everytime we have enough messages to send. This is also called from py:method:`send_messages` via py:method:`_check_send_batch` if there are enough messages/bytes to require a send. Note, the send will be delayed (triggered by completion or failure of previous) if we are currently trying to complete the last batch send. """ # We can be triggered by the LoopingCall, and have nothing to send... # Or, we've got SendRequest(s) to send, but are still processing the # previous batch... if (not self._batch_reqs) or self._batch_send_d: return # Save a local copy, and clear the global list & metrics requests, self._batch_reqs = self._batch_reqs, [] self._waitingByteCount = 0 self._waitingMsgCount = 0 # Iterate over them, fetching the partition for each message batch d_list = [] for req in requests: # For each request, we get the topic & key and use that to lookup # the next partition on which we should produce d_list.append(self._next_partition(req.topic, req.key)) d = self._batch_send_d = Deferred() # Since DeferredList doesn't propagate cancel() calls to deferreds it # might be waiting on for a result, we need to use this structure, # rather than just using the DeferredList directly d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True)) d.addCallback(self._send_requests, requests) # Once we finish fully processing the current batch, clear the # _batch_send_d and check if any more requests piled up when we # were busy. d.addBoth(self._complete_batch_send) d.addBoth(self._check_send_batch) # Fire off the callback to start processing... d.callback(None)
[ "def", "_send_batch", "(", "self", ")", ":", "# We can be triggered by the LoopingCall, and have nothing to send...", "# Or, we've got SendRequest(s) to send, but are still processing the", "# previous batch...", "if", "(", "not", "self", ".", "_batch_reqs", ")", "or", "self", ".", "_batch_send_d", ":", "return", "# Save a local copy, and clear the global list & metrics", "requests", ",", "self", ".", "_batch_reqs", "=", "self", ".", "_batch_reqs", ",", "[", "]", "self", ".", "_waitingByteCount", "=", "0", "self", ".", "_waitingMsgCount", "=", "0", "# Iterate over them, fetching the partition for each message batch", "d_list", "=", "[", "]", "for", "req", "in", "requests", ":", "# For each request, we get the topic & key and use that to lookup", "# the next partition on which we should produce", "d_list", ".", "append", "(", "self", ".", "_next_partition", "(", "req", ".", "topic", ",", "req", ".", "key", ")", ")", "d", "=", "self", ".", "_batch_send_d", "=", "Deferred", "(", ")", "# Since DeferredList doesn't propagate cancel() calls to deferreds it", "# might be waiting on for a result, we need to use this structure,", "# rather than just using the DeferredList directly", "d", ".", "addCallback", "(", "lambda", "r", ":", "DeferredList", "(", "d_list", ",", "consumeErrors", "=", "True", ")", ")", "d", ".", "addCallback", "(", "self", ".", "_send_requests", ",", "requests", ")", "# Once we finish fully processing the current batch, clear the", "# _batch_send_d and check if any more requests piled up when we", "# were busy.", "d", ".", "addBoth", "(", "self", ".", "_complete_batch_send", ")", "d", ".", "addBoth", "(", "self", ".", "_check_send_batch", ")", "# Fire off the callback to start processing...", "d", ".", "callback", "(", "None", ")" ]
Send the waiting messages, if there are any, and we can... This is called by our LoopingCall every send_every_t interval, and from send_messages every time we have enough messages to send. This is also called from py:method:`send_messages` via py:method:`_check_send_batch` if there are enough messages/bytes to require a send. Note, the send will be delayed (triggered by completion or failure of previous) if we are currently trying to complete the last batch send.
[ "Send", "the", "waiting", "messages", "if", "there", "are", "any", "and", "we", "can", "..." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L408-L449
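_send_batch above wraps its DeferredList inside an outer Deferred because, as its inline comment notes, DeferredList does not propagate cancel() to the deferreds it waits on; the outer deferred gives the producer a single handle to cancel or chain on. A minimal sketch of that wrapping pattern using only stock Twisted primitives (the request bookkeeping of the real method is omitted):

from twisted.internet.defer import Deferred, DeferredList

def gated_deferred_list(d_list):
    # Return an outer Deferred that, once fired, waits on d_list.
    # Cancelling the outer Deferred stops the chain without relying on
    # DeferredList to forward the cancellation.
    outer = Deferred()
    outer.addCallback(lambda _ignored: DeferredList(d_list, consumeErrors=True))
    return outer

inner = [Deferred(), Deferred()]
outer = gated_deferred_list(inner)
outer.addCallback(print)
outer.callback(None)       # start processing, as _send_batch does
for d in inner:
    d.callback("done")     # prints [(True, 'done'), (True, 'done')]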
ciena/afkak
afkak/producer.py
Producer._cancel_send_messages
def _cancel_send_messages(self, d): """Cancel a `send_messages` request First check if the request is in a waiting batch, of so, great, remove it from the batch. If it's not found, we errback() the deferred and the downstream processing steps take care of aborting further processing. We check if there's a current _batch_send_d to determine where in the chain we were (getting partitions, or already sent request to Kafka) and errback differently. """ # Is the request in question in an unsent batch? for req in self._batch_reqs: if req.deferred == d: # Found the request, remove it and return. msgs = req.messages self._waitingMsgCount -= len(msgs) for m in (_m for _m in msgs if _m is not None): self._waitingByteCount -= len(m) # This _should_ be safe as we abort the iteration upon removal self._batch_reqs.remove(req) d.errback(CancelledError(request_sent=False)) return # If it wasn't found in the unsent batch. We just rely on the # downstream processing of the request to check if the deferred # has been called and skip further processing for this request # Errback the deferred with whether or not we sent the request # to Kafka already d.errback( CancelledError(request_sent=(self._batch_send_d is not None))) return
python
def _cancel_send_messages(self, d): """Cancel a `send_messages` request First check if the request is in a waiting batch, of so, great, remove it from the batch. If it's not found, we errback() the deferred and the downstream processing steps take care of aborting further processing. We check if there's a current _batch_send_d to determine where in the chain we were (getting partitions, or already sent request to Kafka) and errback differently. """ # Is the request in question in an unsent batch? for req in self._batch_reqs: if req.deferred == d: # Found the request, remove it and return. msgs = req.messages self._waitingMsgCount -= len(msgs) for m in (_m for _m in msgs if _m is not None): self._waitingByteCount -= len(m) # This _should_ be safe as we abort the iteration upon removal self._batch_reqs.remove(req) d.errback(CancelledError(request_sent=False)) return # If it wasn't found in the unsent batch. We just rely on the # downstream processing of the request to check if the deferred # has been called and skip further processing for this request # Errback the deferred with whether or not we sent the request # to Kafka already d.errback( CancelledError(request_sent=(self._batch_send_d is not None))) return
[ "def", "_cancel_send_messages", "(", "self", ",", "d", ")", ":", "# Is the request in question in an unsent batch?", "for", "req", "in", "self", ".", "_batch_reqs", ":", "if", "req", ".", "deferred", "==", "d", ":", "# Found the request, remove it and return.", "msgs", "=", "req", ".", "messages", "self", ".", "_waitingMsgCount", "-=", "len", "(", "msgs", ")", "for", "m", "in", "(", "_m", "for", "_m", "in", "msgs", "if", "_m", "is", "not", "None", ")", ":", "self", ".", "_waitingByteCount", "-=", "len", "(", "m", ")", "# This _should_ be safe as we abort the iteration upon removal", "self", ".", "_batch_reqs", ".", "remove", "(", "req", ")", "d", ".", "errback", "(", "CancelledError", "(", "request_sent", "=", "False", ")", ")", "return", "# If it wasn't found in the unsent batch. We just rely on the", "# downstream processing of the request to check if the deferred", "# has been called and skip further processing for this request", "# Errback the deferred with whether or not we sent the request", "# to Kafka already", "d", ".", "errback", "(", "CancelledError", "(", "request_sent", "=", "(", "self", ".", "_batch_send_d", "is", "not", "None", ")", ")", ")", "return" ]
Cancel a `send_messages` request First check if the request is in a waiting batch; if so, remove it from the batch. If it's not found, we errback() the deferred and the downstream processing steps take care of aborting further processing. We check if there's a current _batch_send_d to determine where in the chain we were (getting partitions, or already sent request to Kafka) and errback differently.
[ "Cancel", "a", "send_messages", "request", "First", "check", "if", "the", "request", "is", "in", "a", "waiting", "batch", "of", "so", "great", "remove", "it", "from", "the", "batch", ".", "If", "it", "s", "not", "found", "we", "errback", "()", "the", "deferred", "and", "the", "downstream", "processing", "steps", "take", "care", "of", "aborting", "further", "processing", ".", "We", "check", "if", "there", "s", "a", "current", "_batch_send_d", "to", "determine", "where", "in", "the", "chain", "we", "were", "(", "getting", "partitions", "or", "already", "sent", "request", "to", "Kafka", ")", "and", "errback", "differently", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L451-L481
ciena/afkak
afkak/producer.py
Producer._handle_send_response
def _handle_send_response(self, result, payloadsByTopicPart, deferredsByTopicPart): """Handle the response from our client to our send_produce_request This is a bit complex. Failures can happen in a few ways: 1. The client sent an empty list, False, None or some similar thing as the result, but we were expecting real responses. 2. The client had a failure before it even tried sending any requests to any brokers. a. Kafka error: See if we can retry the whole request b. Non-kafka: Figure it's a programming error, fail all deferreds 3. The client sent all the requests (it's all or none) to the brokers but one or more request failed (timed out before receiving a response, or the brokerclient threw some sort of exception on send In this case, the client throws FailedPayloadsError, and attaches the responses (NOTE: some can have errors!), and the payloads where the send itself failed to the exception. 4. The client sent all the requests, all responses were received, but the Kafka broker indicated an error with servicing the request on some of the responses. """ def _deliver_result(d_list, result=None): """Possibly callback each deferred in a list with single result""" for d in d_list: if not isinstance(d, Deferred): # nested list... _deliver_result(d, result) else: # We check d.called since the request could have been # cancelled while we waited for the response if not d.called: d.callback(result) def _do_retry(payloads): # We use 'fail_on_error=False' because we want our client to # process every response that comes back from the brokers so # we can determine which requests were successful, and which # failed for retry d = self.client.send_produce_request( payloads, acks=self.req_acks, timeout=self.ack_timeout, fail_on_error=False) self._req_attempts += 1 # add our handlers d.addBoth(self._handle_send_response, payloadsByTopicPart, deferredsByTopicPart) return d def _cancel_retry(failure, dc): # Cancel the retry callLater and pass-thru the failure dc.cancel() # cancel all the top-level deferreds associated with the request _deliver_result(deferredsByTopicPart.values(), failure) return failure def _check_retry_payloads(failed_payloads_with_errs): """Check our retry count and retry after a delay or errback If we have more retries to try, create a deferred that will fire with the result of delayed retry. If not, errback the remaining deferreds with failure Params: failed_payloads - list of (payload, failure) tuples """ # Do we have retries left? if self._req_attempts >= self._max_attempts: # No, no retries left, fail each failed_payload with its # associated failure for p, f in failed_payloads_with_errs: t_and_p = TopicAndPartition(p.topic, p.partition) _deliver_result(deferredsByTopicPart[t_and_p], f) return # Retries remain! Schedule one... d = Deferred() dc = self.client.reactor.callLater( self._retry_interval, d.callback, [p for p, f in failed_payloads]) self._retry_interval *= self.RETRY_INTERVAL_FACTOR # Cancel the callLater when request is cancelled before it fires d.addErrback(_cancel_retry, dc) # Reset the topic metadata for all topics which had failed_requests # where the failures were of the kind UnknownTopicOrPartitionError # or NotLeaderForPartitionError, since those indicate our client's # metadata is out of date. 
reset_topics = set() for payload, e in failed_payloads: if (isinstance(e, NotLeaderForPartitionError) or isinstance(e, UnknownTopicOrPartitionError)): reset_topics.add(payload.topic) if reset_topics: self.client.reset_topic_metadata(*reset_topics) d.addCallback(_do_retry) return d # The payloads we need to retry, if we still can.. failed_payloads = [] # In the case we are sending requests without requiring acks, the # brokerclient will immediately callback() the deferred upon send with # None. In that case, we just iterate over all the deferreds in # deferredsByTopicPart and callback them with None # If we are expecting responses/acks, and we get an empty result, we # callback with a Failure of NoResponseError if not result: # Success, but no results, is that what we're expecting? if self.req_acks == PRODUCER_ACK_NOT_REQUIRED: result = None else: # We got no result, but we were expecting one? Fail everything! result = Failure(NoResponseError()) _deliver_result(deferredsByTopicPart.values(), result) return elif isinstance(result, Failure): # Failure! Was it total, or partial? if not result.check(FailedPayloadsError): # Total failure of some sort! # The client was unable to send the request at all. If it's # a KafkaError (probably Leader/Partition unavailable), retry if result.check(KafkaError): # Yep, a kafak error. Set failed_payloads, and we'll retry # them all below. Set failure for errback to callers if we # are all out of retries failure, result = result, [] # no succesful results, retry failed_payloads = [(p, failure) for p in payloadsByTopicPart.values()] else: # Was the request cancelled? if not result.check(tid_CancelledError): # Uh Oh, programming error? Log it! log.error("Unexpected failure: %r in " "_handle_send_response", result) # Cancelled, or programming error, we fail the requests _deliver_result(deferredsByTopicPart.values(), result) return else: # FailedPayloadsError: This means that some/all of the # requests to a/some brokerclients failed to send. # Pull the successful responses and the failed_payloads off # the exception and handle them below. Preserve the # FailedPayloadsError as 'failure' failure = result result = failure.value.args[0] failed_payloads = failure.value.args[1] # Do we have results? Iterate over them and if the response indicates # success, then callback the associated deferred. If the response # indicates an error, then setup that request for retry. # NOTE: In this case, each failed_payload get it's own error... for res in result: t_and_p = TopicAndPartition(res.topic, res.partition) t_and_p_err = _check_error(res, raiseException=False) if not t_and_p_err: # Success for this topic/partition d_list = deferredsByTopicPart[t_and_p] _deliver_result(d_list, res) else: p = payloadsByTopicPart[t_and_p] failed_payloads.append((p, t_and_p_err)) # Were there any failed requests to possibly retry? if failed_payloads: return _check_retry_payloads(failed_payloads) return
python
def _handle_send_response(self, result, payloadsByTopicPart, deferredsByTopicPart): """Handle the response from our client to our send_produce_request This is a bit complex. Failures can happen in a few ways: 1. The client sent an empty list, False, None or some similar thing as the result, but we were expecting real responses. 2. The client had a failure before it even tried sending any requests to any brokers. a. Kafka error: See if we can retry the whole request b. Non-kafka: Figure it's a programming error, fail all deferreds 3. The client sent all the requests (it's all or none) to the brokers but one or more request failed (timed out before receiving a response, or the brokerclient threw some sort of exception on send In this case, the client throws FailedPayloadsError, and attaches the responses (NOTE: some can have errors!), and the payloads where the send itself failed to the exception. 4. The client sent all the requests, all responses were received, but the Kafka broker indicated an error with servicing the request on some of the responses. """ def _deliver_result(d_list, result=None): """Possibly callback each deferred in a list with single result""" for d in d_list: if not isinstance(d, Deferred): # nested list... _deliver_result(d, result) else: # We check d.called since the request could have been # cancelled while we waited for the response if not d.called: d.callback(result) def _do_retry(payloads): # We use 'fail_on_error=False' because we want our client to # process every response that comes back from the brokers so # we can determine which requests were successful, and which # failed for retry d = self.client.send_produce_request( payloads, acks=self.req_acks, timeout=self.ack_timeout, fail_on_error=False) self._req_attempts += 1 # add our handlers d.addBoth(self._handle_send_response, payloadsByTopicPart, deferredsByTopicPart) return d def _cancel_retry(failure, dc): # Cancel the retry callLater and pass-thru the failure dc.cancel() # cancel all the top-level deferreds associated with the request _deliver_result(deferredsByTopicPart.values(), failure) return failure def _check_retry_payloads(failed_payloads_with_errs): """Check our retry count and retry after a delay or errback If we have more retries to try, create a deferred that will fire with the result of delayed retry. If not, errback the remaining deferreds with failure Params: failed_payloads - list of (payload, failure) tuples """ # Do we have retries left? if self._req_attempts >= self._max_attempts: # No, no retries left, fail each failed_payload with its # associated failure for p, f in failed_payloads_with_errs: t_and_p = TopicAndPartition(p.topic, p.partition) _deliver_result(deferredsByTopicPart[t_and_p], f) return # Retries remain! Schedule one... d = Deferred() dc = self.client.reactor.callLater( self._retry_interval, d.callback, [p for p, f in failed_payloads]) self._retry_interval *= self.RETRY_INTERVAL_FACTOR # Cancel the callLater when request is cancelled before it fires d.addErrback(_cancel_retry, dc) # Reset the topic metadata for all topics which had failed_requests # where the failures were of the kind UnknownTopicOrPartitionError # or NotLeaderForPartitionError, since those indicate our client's # metadata is out of date. 
reset_topics = set() for payload, e in failed_payloads: if (isinstance(e, NotLeaderForPartitionError) or isinstance(e, UnknownTopicOrPartitionError)): reset_topics.add(payload.topic) if reset_topics: self.client.reset_topic_metadata(*reset_topics) d.addCallback(_do_retry) return d # The payloads we need to retry, if we still can.. failed_payloads = [] # In the case we are sending requests without requiring acks, the # brokerclient will immediately callback() the deferred upon send with # None. In that case, we just iterate over all the deferreds in # deferredsByTopicPart and callback them with None # If we are expecting responses/acks, and we get an empty result, we # callback with a Failure of NoResponseError if not result: # Success, but no results, is that what we're expecting? if self.req_acks == PRODUCER_ACK_NOT_REQUIRED: result = None else: # We got no result, but we were expecting one? Fail everything! result = Failure(NoResponseError()) _deliver_result(deferredsByTopicPart.values(), result) return elif isinstance(result, Failure): # Failure! Was it total, or partial? if not result.check(FailedPayloadsError): # Total failure of some sort! # The client was unable to send the request at all. If it's # a KafkaError (probably Leader/Partition unavailable), retry if result.check(KafkaError): # Yep, a kafak error. Set failed_payloads, and we'll retry # them all below. Set failure for errback to callers if we # are all out of retries failure, result = result, [] # no succesful results, retry failed_payloads = [(p, failure) for p in payloadsByTopicPart.values()] else: # Was the request cancelled? if not result.check(tid_CancelledError): # Uh Oh, programming error? Log it! log.error("Unexpected failure: %r in " "_handle_send_response", result) # Cancelled, or programming error, we fail the requests _deliver_result(deferredsByTopicPart.values(), result) return else: # FailedPayloadsError: This means that some/all of the # requests to a/some brokerclients failed to send. # Pull the successful responses and the failed_payloads off # the exception and handle them below. Preserve the # FailedPayloadsError as 'failure' failure = result result = failure.value.args[0] failed_payloads = failure.value.args[1] # Do we have results? Iterate over them and if the response indicates # success, then callback the associated deferred. If the response # indicates an error, then setup that request for retry. # NOTE: In this case, each failed_payload get it's own error... for res in result: t_and_p = TopicAndPartition(res.topic, res.partition) t_and_p_err = _check_error(res, raiseException=False) if not t_and_p_err: # Success for this topic/partition d_list = deferredsByTopicPart[t_and_p] _deliver_result(d_list, res) else: p = payloadsByTopicPart[t_and_p] failed_payloads.append((p, t_and_p_err)) # Were there any failed requests to possibly retry? if failed_payloads: return _check_retry_payloads(failed_payloads) return
[ "def", "_handle_send_response", "(", "self", ",", "result", ",", "payloadsByTopicPart", ",", "deferredsByTopicPart", ")", ":", "def", "_deliver_result", "(", "d_list", ",", "result", "=", "None", ")", ":", "\"\"\"Possibly callback each deferred in a list with single result\"\"\"", "for", "d", "in", "d_list", ":", "if", "not", "isinstance", "(", "d", ",", "Deferred", ")", ":", "# nested list...", "_deliver_result", "(", "d", ",", "result", ")", "else", ":", "# We check d.called since the request could have been", "# cancelled while we waited for the response", "if", "not", "d", ".", "called", ":", "d", ".", "callback", "(", "result", ")", "def", "_do_retry", "(", "payloads", ")", ":", "# We use 'fail_on_error=False' because we want our client to", "# process every response that comes back from the brokers so", "# we can determine which requests were successful, and which", "# failed for retry", "d", "=", "self", ".", "client", ".", "send_produce_request", "(", "payloads", ",", "acks", "=", "self", ".", "req_acks", ",", "timeout", "=", "self", ".", "ack_timeout", ",", "fail_on_error", "=", "False", ")", "self", ".", "_req_attempts", "+=", "1", "# add our handlers", "d", ".", "addBoth", "(", "self", ".", "_handle_send_response", ",", "payloadsByTopicPart", ",", "deferredsByTopicPart", ")", "return", "d", "def", "_cancel_retry", "(", "failure", ",", "dc", ")", ":", "# Cancel the retry callLater and pass-thru the failure", "dc", ".", "cancel", "(", ")", "# cancel all the top-level deferreds associated with the request", "_deliver_result", "(", "deferredsByTopicPart", ".", "values", "(", ")", ",", "failure", ")", "return", "failure", "def", "_check_retry_payloads", "(", "failed_payloads_with_errs", ")", ":", "\"\"\"Check our retry count and retry after a delay or errback\n\n If we have more retries to try, create a deferred that will fire\n with the result of delayed retry. If not, errback the remaining\n deferreds with failure\n\n Params:\n failed_payloads - list of (payload, failure) tuples\n \"\"\"", "# Do we have retries left?", "if", "self", ".", "_req_attempts", ">=", "self", ".", "_max_attempts", ":", "# No, no retries left, fail each failed_payload with its", "# associated failure", "for", "p", ",", "f", "in", "failed_payloads_with_errs", ":", "t_and_p", "=", "TopicAndPartition", "(", "p", ".", "topic", ",", "p", ".", "partition", ")", "_deliver_result", "(", "deferredsByTopicPart", "[", "t_and_p", "]", ",", "f", ")", "return", "# Retries remain! 
Schedule one...", "d", "=", "Deferred", "(", ")", "dc", "=", "self", ".", "client", ".", "reactor", ".", "callLater", "(", "self", ".", "_retry_interval", ",", "d", ".", "callback", ",", "[", "p", "for", "p", ",", "f", "in", "failed_payloads", "]", ")", "self", ".", "_retry_interval", "*=", "self", ".", "RETRY_INTERVAL_FACTOR", "# Cancel the callLater when request is cancelled before it fires", "d", ".", "addErrback", "(", "_cancel_retry", ",", "dc", ")", "# Reset the topic metadata for all topics which had failed_requests", "# where the failures were of the kind UnknownTopicOrPartitionError", "# or NotLeaderForPartitionError, since those indicate our client's", "# metadata is out of date.", "reset_topics", "=", "set", "(", ")", "for", "payload", ",", "e", "in", "failed_payloads", ":", "if", "(", "isinstance", "(", "e", ",", "NotLeaderForPartitionError", ")", "or", "isinstance", "(", "e", ",", "UnknownTopicOrPartitionError", ")", ")", ":", "reset_topics", ".", "add", "(", "payload", ".", "topic", ")", "if", "reset_topics", ":", "self", ".", "client", ".", "reset_topic_metadata", "(", "*", "reset_topics", ")", "d", ".", "addCallback", "(", "_do_retry", ")", "return", "d", "# The payloads we need to retry, if we still can..", "failed_payloads", "=", "[", "]", "# In the case we are sending requests without requiring acks, the", "# brokerclient will immediately callback() the deferred upon send with", "# None. In that case, we just iterate over all the deferreds in", "# deferredsByTopicPart and callback them with None", "# If we are expecting responses/acks, and we get an empty result, we", "# callback with a Failure of NoResponseError", "if", "not", "result", ":", "# Success, but no results, is that what we're expecting?", "if", "self", ".", "req_acks", "==", "PRODUCER_ACK_NOT_REQUIRED", ":", "result", "=", "None", "else", ":", "# We got no result, but we were expecting one? Fail everything!", "result", "=", "Failure", "(", "NoResponseError", "(", ")", ")", "_deliver_result", "(", "deferredsByTopicPart", ".", "values", "(", ")", ",", "result", ")", "return", "elif", "isinstance", "(", "result", ",", "Failure", ")", ":", "# Failure! Was it total, or partial?", "if", "not", "result", ".", "check", "(", "FailedPayloadsError", ")", ":", "# Total failure of some sort!", "# The client was unable to send the request at all. If it's", "# a KafkaError (probably Leader/Partition unavailable), retry", "if", "result", ".", "check", "(", "KafkaError", ")", ":", "# Yep, a kafak error. Set failed_payloads, and we'll retry", "# them all below. Set failure for errback to callers if we", "# are all out of retries", "failure", ",", "result", "=", "result", ",", "[", "]", "# no succesful results, retry", "failed_payloads", "=", "[", "(", "p", ",", "failure", ")", "for", "p", "in", "payloadsByTopicPart", ".", "values", "(", ")", "]", "else", ":", "# Was the request cancelled?", "if", "not", "result", ".", "check", "(", "tid_CancelledError", ")", ":", "# Uh Oh, programming error? Log it!", "log", ".", "error", "(", "\"Unexpected failure: %r in \"", "\"_handle_send_response\"", ",", "result", ")", "# Cancelled, or programming error, we fail the requests", "_deliver_result", "(", "deferredsByTopicPart", ".", "values", "(", ")", ",", "result", ")", "return", "else", ":", "# FailedPayloadsError: This means that some/all of the", "# requests to a/some brokerclients failed to send.", "# Pull the successful responses and the failed_payloads off", "# the exception and handle them below. 
Preserve the", "# FailedPayloadsError as 'failure'", "failure", "=", "result", "result", "=", "failure", ".", "value", ".", "args", "[", "0", "]", "failed_payloads", "=", "failure", ".", "value", ".", "args", "[", "1", "]", "# Do we have results? Iterate over them and if the response indicates", "# success, then callback the associated deferred. If the response", "# indicates an error, then setup that request for retry.", "# NOTE: In this case, each failed_payload get it's own error...", "for", "res", "in", "result", ":", "t_and_p", "=", "TopicAndPartition", "(", "res", ".", "topic", ",", "res", ".", "partition", ")", "t_and_p_err", "=", "_check_error", "(", "res", ",", "raiseException", "=", "False", ")", "if", "not", "t_and_p_err", ":", "# Success for this topic/partition", "d_list", "=", "deferredsByTopicPart", "[", "t_and_p", "]", "_deliver_result", "(", "d_list", ",", "res", ")", "else", ":", "p", "=", "payloadsByTopicPart", "[", "t_and_p", "]", "failed_payloads", ".", "append", "(", "(", "p", ",", "t_and_p_err", ")", ")", "# Were there any failed requests to possibly retry?", "if", "failed_payloads", ":", "return", "_check_retry_payloads", "(", "failed_payloads", ")", "return" ]
Handle the response from our client to our send_produce_request This is a bit complex. Failures can happen in a few ways: 1. The client sent an empty list, False, None or some similar thing as the result, but we were expecting real responses. 2. The client had a failure before it even tried sending any requests to any brokers. a. Kafka error: See if we can retry the whole request b. Non-kafka: Figure it's a programming error, fail all deferreds 3. The client sent all the requests (it's all or none) to the brokers but one or more requests failed (timed out before receiving a response, or the brokerclient threw some sort of exception on send). In this case, the client throws FailedPayloadsError, and attaches the responses (NOTE: some can have errors!), and the payloads where the send itself failed to the exception. 4. The client sent all the requests, all responses were received, but the Kafka broker indicated an error with servicing the request on some of the responses.
[ "Handle", "the", "response", "from", "our", "client", "to", "our", "send_produce_request" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L483-L650
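Most of _handle_send_response above is bookkeeping around one classification step: walk the per-partition responses, deliver the successes, and queue the errored partitions for retry. A stripped-down sketch of just that classification, using plain dicts in place of afkak's response and payload objects and a simple 'error' key where the real code calls _check_error:

def classify_responses(responses):
    # responses: iterable of dicts with 'topic', 'partition' and 'error'
    # keys, where error is None on success. Returns (successes, failed);
    # failed pairs each errored (topic, partition) with its error.
    successes, failed = [], []
    for res in responses:
        if res["error"] is None:
            successes.append((res["topic"], res["partition"]))
        else:
            failed.append(((res["topic"], res["partition"]), res["error"]))
    return successes, failed

resps = [
    {"topic": "t1", "partition": 0, "error": None},
    {"topic": "t1", "partition": 1, "error": "NotLeaderForPartition"},
]
print(classify_responses(resps))
# ([('t1', 0)], [(('t1', 1), 'NotLeaderForPartition')])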
ciena/afkak
afkak/producer.py
Producer._cancel_outstanding
def _cancel_outstanding(self): """Cancel all of our outstanding requests""" for d in list(self._outstanding): d.addErrback(lambda _: None) # Eat any uncaught errors d.cancel()
python
def _cancel_outstanding(self): """Cancel all of our outstanding requests""" for d in list(self._outstanding): d.addErrback(lambda _: None) # Eat any uncaught errors d.cancel()
[ "def", "_cancel_outstanding", "(", "self", ")", ":", "for", "d", "in", "list", "(", "self", ".", "_outstanding", ")", ":", "d", ".", "addErrback", "(", "lambda", "_", ":", "None", ")", "# Eat any uncaught errors", "d", ".", "cancel", "(", ")" ]
Cancel all of our outstanding requests
[ "Cancel", "all", "of", "our", "outstanding", "requests" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L657-L661
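_cancel_outstanding above adds a no-op errback before cancelling each deferred so the resulting CancelledError is consumed instead of being logged as an unhandled error. The same Twisted idiom in isolation:

from twisted.internet.defer import Deferred

d = Deferred()
d.addErrback(lambda _failure: None)  # swallow the CancelledError
d.cancel()                           # no "Unhandled error in Deferred" log at garbage collection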
ciena/afkak
afkak/client.py
_normalize_hosts
def _normalize_hosts(hosts): """ Canonicalize the *hosts* parameter. >>> _normalize_hosts("host,127.0.0.2:2909") [('127.0.0.2', 2909), ('host', 9092)] :param hosts: A list or comma-separated string of hostnames which may also include port numbers. All of the following are valid:: b'host' u'host' b'host:1234' u'host:1234,host:2345' b'host:1234 , host:2345 ' [u'host1', b'host2'] [b'host:1234', b'host:2345'] Hostnames must be ASCII (IDN is not supported). The default Kafka port of 9092 is implied when no port is given. :returns: A list of unique (host, port) tuples. :rtype: :class:`list` of (:class:`str`, :class:`int`) tuples """ if isinstance(hosts, bytes): hosts = hosts.split(b',') elif isinstance(hosts, _unicode): hosts = hosts.split(u',') result = set() for host_port in hosts: # FIXME This won't handle IPv6 addresses res = nativeString(host_port).split(':') host = res[0].strip() port = int(res[1].strip()) if len(res) > 1 else DefaultKafkaPort result.add((host, port)) return sorted(result)
python
def _normalize_hosts(hosts): """ Canonicalize the *hosts* parameter. >>> _normalize_hosts("host,127.0.0.2:2909") [('127.0.0.2', 2909), ('host', 9092)] :param hosts: A list or comma-separated string of hostnames which may also include port numbers. All of the following are valid:: b'host' u'host' b'host:1234' u'host:1234,host:2345' b'host:1234 , host:2345 ' [u'host1', b'host2'] [b'host:1234', b'host:2345'] Hostnames must be ASCII (IDN is not supported). The default Kafka port of 9092 is implied when no port is given. :returns: A list of unique (host, port) tuples. :rtype: :class:`list` of (:class:`str`, :class:`int`) tuples """ if isinstance(hosts, bytes): hosts = hosts.split(b',') elif isinstance(hosts, _unicode): hosts = hosts.split(u',') result = set() for host_port in hosts: # FIXME This won't handle IPv6 addresses res = nativeString(host_port).split(':') host = res[0].strip() port = int(res[1].strip()) if len(res) > 1 else DefaultKafkaPort result.add((host, port)) return sorted(result)
[ "def", "_normalize_hosts", "(", "hosts", ")", ":", "if", "isinstance", "(", "hosts", ",", "bytes", ")", ":", "hosts", "=", "hosts", ".", "split", "(", "b','", ")", "elif", "isinstance", "(", "hosts", ",", "_unicode", ")", ":", "hosts", "=", "hosts", ".", "split", "(", "u','", ")", "result", "=", "set", "(", ")", "for", "host_port", "in", "hosts", ":", "# FIXME This won't handle IPv6 addresses", "res", "=", "nativeString", "(", "host_port", ")", ".", "split", "(", "':'", ")", "host", "=", "res", "[", "0", "]", ".", "strip", "(", ")", "port", "=", "int", "(", "res", "[", "1", "]", ".", "strip", "(", ")", ")", "if", "len", "(", "res", ")", ">", "1", "else", "DefaultKafkaPort", "result", ".", "add", "(", "(", "host", ",", "port", ")", ")", "return", "sorted", "(", "result", ")" ]
Canonicalize the *hosts* parameter. >>> _normalize_hosts("host,127.0.0.2:2909") [('127.0.0.2', 2909), ('host', 9092)] :param hosts: A list or comma-separated string of hostnames which may also include port numbers. All of the following are valid:: b'host' u'host' b'host:1234' u'host:1234,host:2345' b'host:1234 , host:2345 ' [u'host1', b'host2'] [b'host:1234', b'host:2345'] Hostnames must be ASCII (IDN is not supported). The default Kafka port of 9092 is implied when no port is given. :returns: A list of unique (host, port) tuples. :rtype: :class:`list` of (:class:`str`, :class:`int`) tuples
[ "Canonicalize", "the", "*", "hosts", "*", "parameter", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L1083-L1120
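Because _normalize_hosts is a pure function, its behaviour is easy to mirror outside afkak. The sketch below is a simplified, str-only re-implementation that skips the bytes/unicode handling shown above; DEFAULT_KAFKA_PORT stands in for the module's DefaultKafkaPort constant.

DEFAULT_KAFKA_PORT = 9092

def normalize_hosts(hosts):
    # Accept a comma-separated string or a list, default the port, and
    # return a sorted list of unique (host, port) tuples.
    if isinstance(hosts, str):
        hosts = hosts.split(",")
    result = set()
    for host_port in hosts:
        parts = host_port.split(":")
        host = parts[0].strip()
        port = int(parts[1].strip()) if len(parts) > 1 else DEFAULT_KAFKA_PORT
        result.add((host, port))
    return sorted(result)

print(normalize_hosts("host,127.0.0.2:2909"))
# [('127.0.0.2', 2909), ('host', 9092)] -- matches the doctest above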
ciena/afkak
afkak/client.py
KafkaClient.reset_consumer_group_metadata
def reset_consumer_group_metadata(self, *groups): """Reset cache of what broker manages the offset for specified groups Remove the cache of what Kafka broker should be contacted when fetching or updating the committed offsets for a given consumer group or groups. NOTE: Does not cancel any outstanding requests for updates to the consumer group metadata for the specified groups. """ groups = tuple(_coerce_consumer_group(g) for g in groups) for group in groups: if group in self.consumer_group_to_brokers: del self.consumer_group_to_brokers[group]
python
def reset_consumer_group_metadata(self, *groups): """Reset cache of what broker manages the offset for specified groups Remove the cache of what Kafka broker should be contacted when fetching or updating the committed offsets for a given consumer group or groups. NOTE: Does not cancel any outstanding requests for updates to the consumer group metadata for the specified groups. """ groups = tuple(_coerce_consumer_group(g) for g in groups) for group in groups: if group in self.consumer_group_to_brokers: del self.consumer_group_to_brokers[group]
[ "def", "reset_consumer_group_metadata", "(", "self", ",", "*", "groups", ")", ":", "groups", "=", "tuple", "(", "_coerce_consumer_group", "(", "g", ")", "for", "g", "in", "groups", ")", "for", "group", "in", "groups", ":", "if", "group", "in", "self", ".", "consumer_group_to_brokers", ":", "del", "self", ".", "consumer_group_to_brokers", "[", "group", "]" ]
Reset cache of what broker manages the offset for specified groups Remove the cache of what Kafka broker should be contacted when fetching or updating the committed offsets for a given consumer group or groups. NOTE: Does not cancel any outstanding requests for updates to the consumer group metadata for the specified groups.
[ "Reset", "cache", "of", "what", "broker", "manages", "the", "offset", "for", "specified", "groups" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L252-L265
ciena/afkak
afkak/client.py
KafkaClient.reset_all_metadata
def reset_all_metadata(self): """Clear all cached metadata Metadata will be re-fetched as required to satisfy requests. """ self.topics_to_brokers.clear() self.topic_partitions.clear() self.topic_errors.clear() self.consumer_group_to_brokers.clear()
python
def reset_all_metadata(self): """Clear all cached metadata Metadata will be re-fetched as required to satisfy requests. """ self.topics_to_brokers.clear() self.topic_partitions.clear() self.topic_errors.clear() self.consumer_group_to_brokers.clear()
[ "def", "reset_all_metadata", "(", "self", ")", ":", "self", ".", "topics_to_brokers", ".", "clear", "(", ")", "self", ".", "topic_partitions", ".", "clear", "(", ")", "self", ".", "topic_errors", ".", "clear", "(", ")", "self", ".", "consumer_group_to_brokers", ".", "clear", "(", ")" ]
Clear all cached metadata Metadata will be re-fetched as required to satisfy requests.
[ "Clear", "all", "cached", "metadata" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L267-L275
ciena/afkak
afkak/client.py
KafkaClient.topic_fully_replicated
def topic_fully_replicated(self, topic): """ Determine if the given topic is fully replicated according to the currently known cluster metadata. .. note:: This relies on cached cluster metadata. You may call :meth:`load_metadata_for_topics()` first to refresh this cache. :param str topic: Topic name :returns: A boolean indicating that: 1. The number of partitions in the topic is non-zero. 2. For each partition, all replicas are in the in-sync replica (ISR) set. :rtype: :class:`bool` """ topic = _coerce_topic(topic) if topic not in self.topic_partitions: return False if not self.topic_partitions[topic]: # Don't consider an empty partition list 'fully replicated' return False return all( self.partition_fully_replicated(TopicAndPartition(topic, p)) for p in self.topic_partitions[topic] )
python
def topic_fully_replicated(self, topic): """ Determine if the given topic is fully replicated according to the currently known cluster metadata. .. note:: This relies on cached cluster metadata. You may call :meth:`load_metadata_for_topics()` first to refresh this cache. :param str topic: Topic name :returns: A boolean indicating that: 1. The number of partitions in the topic is non-zero. 2. For each partition, all replicas are in the in-sync replica (ISR) set. :rtype: :class:`bool` """ topic = _coerce_topic(topic) if topic not in self.topic_partitions: return False if not self.topic_partitions[topic]: # Don't consider an empty partition list 'fully replicated' return False return all( self.partition_fully_replicated(TopicAndPartition(topic, p)) for p in self.topic_partitions[topic] )
[ "def", "topic_fully_replicated", "(", "self", ",", "topic", ")", ":", "topic", "=", "_coerce_topic", "(", "topic", ")", "if", "topic", "not", "in", "self", ".", "topic_partitions", ":", "return", "False", "if", "not", "self", ".", "topic_partitions", "[", "topic", "]", ":", "# Don't consider an empty partition list 'fully replicated'", "return", "False", "return", "all", "(", "self", ".", "partition_fully_replicated", "(", "TopicAndPartition", "(", "topic", ",", "p", ")", ")", "for", "p", "in", "self", ".", "topic_partitions", "[", "topic", "]", ")" ]
Determine if the given topic is fully replicated according to the currently known cluster metadata. .. note:: This relies on cached cluster metadata. You may call :meth:`load_metadata_for_topics()` first to refresh this cache. :param str topic: Topic name :returns: A boolean indicating that: 1. The number of partitions in the topic is non-zero. 2. For each partition, all replicas are in the in-sync replica (ISR) set. :rtype: :class:`bool`
[ "Determine", "if", "the", "given", "topic", "is", "fully", "replicated", "according", "to", "the", "currently", "known", "cluster", "metadata", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L290-L319
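Because topic_fully_replicated only consults cached metadata, a caller would normally refresh the cache first via load_metadata_for_topics. A hedged end-to-end sketch follows: it assumes afkak is installed, a broker is listening on localhost:9092, and a topic named my_topic already exists; the hosts= keyword argument is an assumption based on the _normalize_hosts record above rather than a documented signature.

from afkak.client import KafkaClient
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import react

@inlineCallbacks
def main(reactor):
    client = KafkaClient(hosts="localhost:9092")
    # Refresh cached metadata for the topic, then check the ISR state.
    yield client.load_metadata_for_topics("my_topic")
    print("fully replicated:", client.topic_fully_replicated("my_topic"))
    yield client.close()

react(main)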
ciena/afkak
afkak/client.py
KafkaClient.close
def close(self): """Permanently dispose of the client - Immediately mark the client as closed, causing current operations to fail with :exc:`~afkak.common.CancelledError` and future operations to fail with :exc:`~afkak.common.ClientError`. - Clear cached metadata. - Close any connections to Kafka brokers. :returns: deferred that fires when all resources have been released """ # If we're already waiting on an/some outstanding disconnects # make sure we continue to wait for them... log.debug("%r: close", self) self._closing = True # Close down any clients we have brokerclients, self.clients = self.clients, None self._close_brokerclients(brokerclients.values()) # clean up other outstanding operations self.reset_all_metadata() return self.close_dlist or defer.succeed(None)
python
def close(self): """Permanently dispose of the client - Immediately mark the client as closed, causing current operations to fail with :exc:`~afkak.common.CancelledError` and future operations to fail with :exc:`~afkak.common.ClientError`. - Clear cached metadata. - Close any connections to Kafka brokers. :returns: deferred that fires when all resources have been released """ # If we're already waiting on an/some outstanding disconnects # make sure we continue to wait for them... log.debug("%r: close", self) self._closing = True # Close down any clients we have brokerclients, self.clients = self.clients, None self._close_brokerclients(brokerclients.values()) # clean up other outstanding operations self.reset_all_metadata() return self.close_dlist or defer.succeed(None)
[ "def", "close", "(", "self", ")", ":", "# If we're already waiting on an/some outstanding disconnects", "# make sure we continue to wait for them...", "log", ".", "debug", "(", "\"%r: close\"", ",", "self", ")", "self", ".", "_closing", "=", "True", "# Close down any clients we have", "brokerclients", ",", "self", ".", "clients", "=", "self", ".", "clients", ",", "None", "self", ".", "_close_brokerclients", "(", "brokerclients", ".", "values", "(", ")", ")", "# clean up other outstanding operations", "self", ".", "reset_all_metadata", "(", ")", "return", "self", ".", "close_dlist", "or", "defer", ".", "succeed", "(", "None", ")" ]
Permanently dispose of the client - Immediately mark the client as closed, causing current operations to fail with :exc:`~afkak.common.CancelledError` and future operations to fail with :exc:`~afkak.common.ClientError`. - Clear cached metadata. - Close any connections to Kafka brokers. :returns: deferred that fires when all resources have been released
[ "Permanently", "dispose", "of", "the", "client" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L321-L342
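Since close() returns a deferred that fires only after all broker connections are released, it slots naturally into Twisted's shutdown hooks. A minimal sketch (the helper name is hypothetical; client is assumed to be an already-constructed KafkaClient):

def register_clean_shutdown(reactor, client):
    # Run client.close() before the reactor stops; because it returns a
    # deferred, shutdown is delayed until the connections are torn down.
    reactor.addSystemEventTrigger("before", "shutdown", client.close)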
ciena/afkak
afkak/client.py
KafkaClient.load_metadata_for_topics
def load_metadata_for_topics(self, *topics): """Discover topic metadata and brokers Afkak internally calls this method whenever metadata is required. :param str topics: Topic names to look up. The resulting metadata includes the list of topic partitions, brokers owning those partitions, and which partitions are in sync. Fetching metadata for a topic may trigger auto-creation if that is enabled on the Kafka broker. When no topic name is given metadata for *all* topics is fetched. This is an expensive operation, but it does not trigger topic creation. :returns: :class:`Deferred` for the completion of the metadata fetch. This will fire with ``True`` on success, ``None`` on cancellation, or fail with an exception on error. On success, topic metadata is available from the attributes of :class:`KafkaClient`: :data:`~KafkaClient.topic_partitions`, :data:`~KafkaClient.topics_to_brokers`, etc. """ topics = tuple(_coerce_topic(t) for t in topics) log.debug("%r: load_metadata_for_topics(%s)", self, ', '.join(repr(t) for t in topics)) fetch_all_metadata = not topics # create the request requestId = self._next_id() request = KafkaCodec.encode_metadata_request(self._clientIdBytes, requestId, topics) # Callbacks for the request deferred... def _handleMetadataResponse(response): # Decode the response brokers, topics = KafkaCodec.decode_metadata_response(response) log.debug("%r: got metadata brokers=%r topics=%r", self, brokers, topics) # If we fetched the metadata for all topics, then store away the # received metadata for diagnostics. if fetch_all_metadata: self._brokers = brokers self._topics = topics # Iff we were fetching for all topics, and we got at least one # broker back, then remove brokers when we update our brokers ok_to_remove = (fetch_all_metadata and len(brokers)) # Take the metadata we got back, update our self.clients, and # if needed disconnect or connect from/to old/new brokers self._update_brokers(brokers.values(), remove=ok_to_remove) # Now loop through all the topics/partitions in the response # and setup our cache/data-structures for topic, topic_metadata in topics.items(): _, topic_error, partitions = topic_metadata self.reset_topic_metadata(topic) self.topic_errors[topic] = topic_error if not partitions: log.warning('No partitions for %s, Err:%d', topic, topic_error) continue self.topic_partitions[topic] = [] for partition, meta in partitions.items(): self.topic_partitions[topic].append(partition) topic_part = TopicAndPartition(topic, partition) self.partition_meta[topic_part] = meta if meta.leader == -1: log.warning('No leader for topic %s partition %s', topic, partition) self.topics_to_brokers[topic_part] = None else: self.topics_to_brokers[ topic_part] = brokers[meta.leader] self.topic_partitions[topic] = sorted( self.topic_partitions[topic]) return True def _handleMetadataErr(err): # This should maybe do more cleanup? if err.check(t_CancelledError, CancelledError): # Eat the error # XXX Shouldn't this return False? The success branch # returns True. return None log.error("Failed to retrieve metadata:%s", err) raise KafkaUnavailableError( "Unable to load metadata from configured " "hosts: {!r}".format(err)) # Send the request, add the handlers d = self._send_broker_unaware_request(requestId, request) d.addCallbacks(_handleMetadataResponse, _handleMetadataErr) return d
python
def load_metadata_for_topics(self, *topics): """Discover topic metadata and brokers Afkak internally calls this method whenever metadata is required. :param str topics: Topic names to look up. The resulting metadata includes the list of topic partitions, brokers owning those partitions, and which partitions are in sync. Fetching metadata for a topic may trigger auto-creation if that is enabled on the Kafka broker. When no topic name is given metadata for *all* topics is fetched. This is an expensive operation, but it does not trigger topic creation. :returns: :class:`Deferred` for the completion of the metadata fetch. This will fire with ``True`` on success, ``None`` on cancellation, or fail with an exception on error. On success, topic metadata is available from the attributes of :class:`KafkaClient`: :data:`~KafkaClient.topic_partitions`, :data:`~KafkaClient.topics_to_brokers`, etc. """ topics = tuple(_coerce_topic(t) for t in topics) log.debug("%r: load_metadata_for_topics(%s)", self, ', '.join(repr(t) for t in topics)) fetch_all_metadata = not topics # create the request requestId = self._next_id() request = KafkaCodec.encode_metadata_request(self._clientIdBytes, requestId, topics) # Callbacks for the request deferred... def _handleMetadataResponse(response): # Decode the response brokers, topics = KafkaCodec.decode_metadata_response(response) log.debug("%r: got metadata brokers=%r topics=%r", self, brokers, topics) # If we fetched the metadata for all topics, then store away the # received metadata for diagnostics. if fetch_all_metadata: self._brokers = brokers self._topics = topics # Iff we were fetching for all topics, and we got at least one # broker back, then remove brokers when we update our brokers ok_to_remove = (fetch_all_metadata and len(brokers)) # Take the metadata we got back, update our self.clients, and # if needed disconnect or connect from/to old/new brokers self._update_brokers(brokers.values(), remove=ok_to_remove) # Now loop through all the topics/partitions in the response # and setup our cache/data-structures for topic, topic_metadata in topics.items(): _, topic_error, partitions = topic_metadata self.reset_topic_metadata(topic) self.topic_errors[topic] = topic_error if not partitions: log.warning('No partitions for %s, Err:%d', topic, topic_error) continue self.topic_partitions[topic] = [] for partition, meta in partitions.items(): self.topic_partitions[topic].append(partition) topic_part = TopicAndPartition(topic, partition) self.partition_meta[topic_part] = meta if meta.leader == -1: log.warning('No leader for topic %s partition %s', topic, partition) self.topics_to_brokers[topic_part] = None else: self.topics_to_brokers[ topic_part] = brokers[meta.leader] self.topic_partitions[topic] = sorted( self.topic_partitions[topic]) return True def _handleMetadataErr(err): # This should maybe do more cleanup? if err.check(t_CancelledError, CancelledError): # Eat the error # XXX Shouldn't this return False? The success branch # returns True. return None log.error("Failed to retrieve metadata:%s", err) raise KafkaUnavailableError( "Unable to load metadata from configured " "hosts: {!r}".format(err)) # Send the request, add the handlers d = self._send_broker_unaware_request(requestId, request) d.addCallbacks(_handleMetadataResponse, _handleMetadataErr) return d
[ "def", "load_metadata_for_topics", "(", "self", ",", "*", "topics", ")", ":", "topics", "=", "tuple", "(", "_coerce_topic", "(", "t", ")", "for", "t", "in", "topics", ")", "log", ".", "debug", "(", "\"%r: load_metadata_for_topics(%s)\"", ",", "self", ",", "', '", ".", "join", "(", "repr", "(", "t", ")", "for", "t", "in", "topics", ")", ")", "fetch_all_metadata", "=", "not", "topics", "# create the request", "requestId", "=", "self", ".", "_next_id", "(", ")", "request", "=", "KafkaCodec", ".", "encode_metadata_request", "(", "self", ".", "_clientIdBytes", ",", "requestId", ",", "topics", ")", "# Callbacks for the request deferred...", "def", "_handleMetadataResponse", "(", "response", ")", ":", "# Decode the response", "brokers", ",", "topics", "=", "KafkaCodec", ".", "decode_metadata_response", "(", "response", ")", "log", ".", "debug", "(", "\"%r: got metadata brokers=%r topics=%r\"", ",", "self", ",", "brokers", ",", "topics", ")", "# If we fetched the metadata for all topics, then store away the", "# received metadata for diagnostics.", "if", "fetch_all_metadata", ":", "self", ".", "_brokers", "=", "brokers", "self", ".", "_topics", "=", "topics", "# Iff we were fetching for all topics, and we got at least one", "# broker back, then remove brokers when we update our brokers", "ok_to_remove", "=", "(", "fetch_all_metadata", "and", "len", "(", "brokers", ")", ")", "# Take the metadata we got back, update our self.clients, and", "# if needed disconnect or connect from/to old/new brokers", "self", ".", "_update_brokers", "(", "brokers", ".", "values", "(", ")", ",", "remove", "=", "ok_to_remove", ")", "# Now loop through all the topics/partitions in the response", "# and setup our cache/data-structures", "for", "topic", ",", "topic_metadata", "in", "topics", ".", "items", "(", ")", ":", "_", ",", "topic_error", ",", "partitions", "=", "topic_metadata", "self", ".", "reset_topic_metadata", "(", "topic", ")", "self", ".", "topic_errors", "[", "topic", "]", "=", "topic_error", "if", "not", "partitions", ":", "log", ".", "warning", "(", "'No partitions for %s, Err:%d'", ",", "topic", ",", "topic_error", ")", "continue", "self", ".", "topic_partitions", "[", "topic", "]", "=", "[", "]", "for", "partition", ",", "meta", "in", "partitions", ".", "items", "(", ")", ":", "self", ".", "topic_partitions", "[", "topic", "]", ".", "append", "(", "partition", ")", "topic_part", "=", "TopicAndPartition", "(", "topic", ",", "partition", ")", "self", ".", "partition_meta", "[", "topic_part", "]", "=", "meta", "if", "meta", ".", "leader", "==", "-", "1", ":", "log", ".", "warning", "(", "'No leader for topic %s partition %s'", ",", "topic", ",", "partition", ")", "self", ".", "topics_to_brokers", "[", "topic_part", "]", "=", "None", "else", ":", "self", ".", "topics_to_brokers", "[", "topic_part", "]", "=", "brokers", "[", "meta", ".", "leader", "]", "self", ".", "topic_partitions", "[", "topic", "]", "=", "sorted", "(", "self", ".", "topic_partitions", "[", "topic", "]", ")", "return", "True", "def", "_handleMetadataErr", "(", "err", ")", ":", "# This should maybe do more cleanup?", "if", "err", ".", "check", "(", "t_CancelledError", ",", "CancelledError", ")", ":", "# Eat the error", "# XXX Shouldn't this return False? 
The success branch", "# returns True.", "return", "None", "log", ".", "error", "(", "\"Failed to retrieve metadata:%s\"", ",", "err", ")", "raise", "KafkaUnavailableError", "(", "\"Unable to load metadata from configured \"", "\"hosts: {!r}\"", ".", "format", "(", "err", ")", ")", "# Send the request, add the handlers", "d", "=", "self", ".", "_send_broker_unaware_request", "(", "requestId", ",", "request", ")", "d", ".", "addCallbacks", "(", "_handleMetadataResponse", ",", "_handleMetadataErr", ")", "return", "d" ]
Discover topic metadata and brokers Afkak internally calls this method whenever metadata is required. :param str topics: Topic names to look up. The resulting metadata includes the list of topic partitions, brokers owning those partitions, and which partitions are in sync. Fetching metadata for a topic may trigger auto-creation if that is enabled on the Kafka broker. When no topic name is given metadata for *all* topics is fetched. This is an expensive operation, but it does not trigger topic creation. :returns: :class:`Deferred` for the completion of the metadata fetch. This will fire with ``True`` on success, ``None`` on cancellation, or fail with an exception on error. On success, topic metadata is available from the attributes of :class:`KafkaClient`: :data:`~KafkaClient.topic_partitions`, :data:`~KafkaClient.topics_to_brokers`, etc.
[ "Discover", "topic", "metadata", "and", "brokers" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L344-L440
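Editor's note: a rough usage sketch only, not part of the afkak source in the record above. It drives load_metadata_for_topics from an inlineCallbacks function and reads the caches the docstring says are filled on success. The bootstrap host string and the exact KafkaClient constructor arguments are assumptions.

from twisted.internet import defer

from afkak import KafkaClient

@defer.inlineCallbacks
def show_topic_layout(reactor):
    # Hypothetical bootstrap address; adjust for a real cluster.
    client = KafkaClient(hosts='localhost:9092', reactor=reactor)
    yield client.load_metadata_for_topics('my-topic')   # fires True on success
    print(client.topic_partitions.get('my-topic'))      # e.g. a sorted partition list
    print(client.topics_to_brokers)                     # TopicAndPartition -> broker mapping
    yield client.close()

# twisted.internet.task.react(show_topic_layout) would run the sketch from a script.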
ciena/afkak
afkak/client.py
KafkaClient.load_consumer_metadata_for_group
def load_consumer_metadata_for_group(self, group): """ Determine broker for the consumer metadata for the specified group Returns a deferred which callbacks with True if the group's coordinator could be determined, or errbacks with ConsumerCoordinatorNotAvailableError if not. Parameters ---------- group: group name as `str` """ group = _coerce_consumer_group(group) log.debug("%r: load_consumer_metadata_for_group(%r)", self, group) # If we are already loading the metadata for this group, then # just return the outstanding deferred if group in self.coordinator_fetches: d = defer.Deferred() self.coordinator_fetches[group][1].append(d) return d # No outstanding request, create a new one requestId = self._next_id() request = KafkaCodec.encode_consumermetadata_request( self._clientIdBytes, requestId, group) # Callbacks for the request deferred... def _handleConsumerMetadataResponse(response_bytes): # Decode the response (returns ConsumerMetadataResponse) response = KafkaCodec.decode_consumermetadata_response(response_bytes) log.debug("%r: load_consumer_metadata_for_group(%r) -> %r", self, group, response) if response.error: raise BrokerResponseError.errnos.get(response.error, UnknownError)(response) bm = BrokerMetadata(response.node_id, response.host, response.port) self.consumer_group_to_brokers[group] = bm self._update_brokers([bm]) return True def _handleConsumerMetadataErr(err): log.error("Failed to retrieve consumer metadata for group %r", group, exc_info=(err.type, err.value, err.getTracebackObject())) # Clear any stored value for the group's coordinator self.reset_consumer_group_metadata(group) # FIXME: This exception should chain from err. raise ConsumerCoordinatorNotAvailableError( "Coordinator for group {!r} not available".format(group), ) def _propagate(result): [_, ds] = self.coordinator_fetches.pop(group, None) for d in ds: d.callback(result) # Send the request, add the handlers request_d = self._send_broker_unaware_request(requestId, request) d = defer.Deferred() # Save the deferred under the fetches for this group self.coordinator_fetches[group] = (request_d, [d]) request_d.addCallback(_handleConsumerMetadataResponse) request_d.addErrback(_handleConsumerMetadataErr) request_d.addBoth(_propagate) return d
python
def load_consumer_metadata_for_group(self, group): """ Determine broker for the consumer metadata for the specified group Returns a deferred which callbacks with True if the group's coordinator could be determined, or errbacks with ConsumerCoordinatorNotAvailableError if not. Parameters ---------- group: group name as `str` """ group = _coerce_consumer_group(group) log.debug("%r: load_consumer_metadata_for_group(%r)", self, group) # If we are already loading the metadata for this group, then # just return the outstanding deferred if group in self.coordinator_fetches: d = defer.Deferred() self.coordinator_fetches[group][1].append(d) return d # No outstanding request, create a new one requestId = self._next_id() request = KafkaCodec.encode_consumermetadata_request( self._clientIdBytes, requestId, group) # Callbacks for the request deferred... def _handleConsumerMetadataResponse(response_bytes): # Decode the response (returns ConsumerMetadataResponse) response = KafkaCodec.decode_consumermetadata_response(response_bytes) log.debug("%r: load_consumer_metadata_for_group(%r) -> %r", self, group, response) if response.error: raise BrokerResponseError.errnos.get(response.error, UnknownError)(response) bm = BrokerMetadata(response.node_id, response.host, response.port) self.consumer_group_to_brokers[group] = bm self._update_brokers([bm]) return True def _handleConsumerMetadataErr(err): log.error("Failed to retrieve consumer metadata for group %r", group, exc_info=(err.type, err.value, err.getTracebackObject())) # Clear any stored value for the group's coordinator self.reset_consumer_group_metadata(group) # FIXME: This exception should chain from err. raise ConsumerCoordinatorNotAvailableError( "Coordinator for group {!r} not available".format(group), ) def _propagate(result): [_, ds] = self.coordinator_fetches.pop(group, None) for d in ds: d.callback(result) # Send the request, add the handlers request_d = self._send_broker_unaware_request(requestId, request) d = defer.Deferred() # Save the deferred under the fetches for this group self.coordinator_fetches[group] = (request_d, [d]) request_d.addCallback(_handleConsumerMetadataResponse) request_d.addErrback(_handleConsumerMetadataErr) request_d.addBoth(_propagate) return d
[ "def", "load_consumer_metadata_for_group", "(", "self", ",", "group", ")", ":", "group", "=", "_coerce_consumer_group", "(", "group", ")", "log", ".", "debug", "(", "\"%r: load_consumer_metadata_for_group(%r)\"", ",", "self", ",", "group", ")", "# If we are already loading the metadata for this group, then", "# just return the outstanding deferred", "if", "group", "in", "self", ".", "coordinator_fetches", ":", "d", "=", "defer", ".", "Deferred", "(", ")", "self", ".", "coordinator_fetches", "[", "group", "]", "[", "1", "]", ".", "append", "(", "d", ")", "return", "d", "# No outstanding request, create a new one", "requestId", "=", "self", ".", "_next_id", "(", ")", "request", "=", "KafkaCodec", ".", "encode_consumermetadata_request", "(", "self", ".", "_clientIdBytes", ",", "requestId", ",", "group", ")", "# Callbacks for the request deferred...", "def", "_handleConsumerMetadataResponse", "(", "response_bytes", ")", ":", "# Decode the response (returns ConsumerMetadataResponse)", "response", "=", "KafkaCodec", ".", "decode_consumermetadata_response", "(", "response_bytes", ")", "log", ".", "debug", "(", "\"%r: load_consumer_metadata_for_group(%r) -> %r\"", ",", "self", ",", "group", ",", "response", ")", "if", "response", ".", "error", ":", "raise", "BrokerResponseError", ".", "errnos", ".", "get", "(", "response", ".", "error", ",", "UnknownError", ")", "(", "response", ")", "bm", "=", "BrokerMetadata", "(", "response", ".", "node_id", ",", "response", ".", "host", ",", "response", ".", "port", ")", "self", ".", "consumer_group_to_brokers", "[", "group", "]", "=", "bm", "self", ".", "_update_brokers", "(", "[", "bm", "]", ")", "return", "True", "def", "_handleConsumerMetadataErr", "(", "err", ")", ":", "log", ".", "error", "(", "\"Failed to retrieve consumer metadata for group %r\"", ",", "group", ",", "exc_info", "=", "(", "err", ".", "type", ",", "err", ".", "value", ",", "err", ".", "getTracebackObject", "(", ")", ")", ")", "# Clear any stored value for the group's coordinator", "self", ".", "reset_consumer_group_metadata", "(", "group", ")", "# FIXME: This exception should chain from err.", "raise", "ConsumerCoordinatorNotAvailableError", "(", "\"Coordinator for group {!r} not available\"", ".", "format", "(", "group", ")", ",", ")", "def", "_propagate", "(", "result", ")", ":", "[", "_", ",", "ds", "]", "=", "self", ".", "coordinator_fetches", ".", "pop", "(", "group", ",", "None", ")", "for", "d", "in", "ds", ":", "d", ".", "callback", "(", "result", ")", "# Send the request, add the handlers", "request_d", "=", "self", ".", "_send_broker_unaware_request", "(", "requestId", ",", "request", ")", "d", "=", "defer", ".", "Deferred", "(", ")", "# Save the deferred under the fetches for this group", "self", ".", "coordinator_fetches", "[", "group", "]", "=", "(", "request_d", ",", "[", "d", "]", ")", "request_d", ".", "addCallback", "(", "_handleConsumerMetadataResponse", ")", "request_d", ".", "addErrback", "(", "_handleConsumerMetadataErr", ")", "request_d", ".", "addBoth", "(", "_propagate", ")", "return", "d" ]
Determine broker for the consumer metadata for the specified group Returns a deferred which callbacks with True if the group's coordinator could be determined, or errbacks with ConsumerCoordinatorNotAvailableError if not. Parameters ---------- group: group name as `str`
[ "Determine", "broker", "for", "the", "consumer", "metadata", "for", "the", "specified", "group" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L442-L506
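Editor's note: a hedged sketch of how a caller might use the method above; the exception's import path is an assumption, and client is an already-constructed KafkaClient.

from twisted.internet import defer

from afkak.common import ConsumerCoordinatorNotAvailableError  # assumed import path

@defer.inlineCallbacks
def coordinator_for(client, group):
    try:
        yield client.load_consumer_metadata_for_group(group)
    except ConsumerCoordinatorNotAvailableError:
        # No coordinator elected for this group yet; callers usually retry after a delay.
        defer.returnValue(None)
    # Populated by the callback in the method above.
    defer.returnValue(client.consumer_group_to_brokers.get(group))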
ciena/afkak
afkak/client.py
KafkaClient.send_produce_request
def send_produce_request(self, payloads=None, acks=1, timeout=DEFAULT_REPLICAS_ACK_MSECS, fail_on_error=True, callback=None): """ Encode and send some ProduceRequests ProduceRequests will be grouped by (topic, partition) and then sent to a specific broker. Output is a list of responses in the same order as the list of payloads specified Parameters ---------- payloads: list of ProduceRequest acks: How many Kafka broker replicas need to write before the leader replies with a response timeout: How long the server has to receive the acks from the replicas before returning an error. fail_on_error: boolean, should we raise an Exception if we encounter an API error? callback: function, instead of returning the ProduceResponse, first pass it through this function Return ------ a deferred which callbacks with a list of ProduceResponse Raises ------ FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError """ encoder = partial( KafkaCodec.encode_produce_request, acks=acks, timeout=timeout) if acks == 0: decoder = None else: decoder = KafkaCodec.decode_produce_response resps = yield self._send_broker_aware_request( payloads, encoder, decoder) returnValue(self._handle_responses(resps, fail_on_error, callback))
python
def send_produce_request(self, payloads=None, acks=1, timeout=DEFAULT_REPLICAS_ACK_MSECS, fail_on_error=True, callback=None): """ Encode and send some ProduceRequests ProduceRequests will be grouped by (topic, partition) and then sent to a specific broker. Output is a list of responses in the same order as the list of payloads specified Parameters ---------- payloads: list of ProduceRequest acks: How many Kafka broker replicas need to write before the leader replies with a response timeout: How long the server has to receive the acks from the replicas before returning an error. fail_on_error: boolean, should we raise an Exception if we encounter an API error? callback: function, instead of returning the ProduceResponse, first pass it through this function Return ------ a deferred which callbacks with a list of ProduceResponse Raises ------ FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError """ encoder = partial( KafkaCodec.encode_produce_request, acks=acks, timeout=timeout) if acks == 0: decoder = None else: decoder = KafkaCodec.decode_produce_response resps = yield self._send_broker_aware_request( payloads, encoder, decoder) returnValue(self._handle_responses(resps, fail_on_error, callback))
[ "def", "send_produce_request", "(", "self", ",", "payloads", "=", "None", ",", "acks", "=", "1", ",", "timeout", "=", "DEFAULT_REPLICAS_ACK_MSECS", ",", "fail_on_error", "=", "True", ",", "callback", "=", "None", ")", ":", "encoder", "=", "partial", "(", "KafkaCodec", ".", "encode_produce_request", ",", "acks", "=", "acks", ",", "timeout", "=", "timeout", ")", "if", "acks", "==", "0", ":", "decoder", "=", "None", "else", ":", "decoder", "=", "KafkaCodec", ".", "decode_produce_response", "resps", "=", "yield", "self", ".", "_send_broker_aware_request", "(", "payloads", ",", "encoder", ",", "decoder", ")", "returnValue", "(", "self", ".", "_handle_responses", "(", "resps", ",", "fail_on_error", ",", "callback", ")", ")" ]
Encode and send some ProduceRequests ProduceRequests will be grouped by (topic, partition) and then sent to a specific broker. Output is a list of responses in the same order as the list of payloads specified Parameters ---------- payloads: list of ProduceRequest acks: How many Kafka broker replicas need to write before the leader replies with a response timeout: How long the server has to receive the acks from the replicas before returning an error. fail_on_error: boolean, should we raise an Exception if we encounter an API error? callback: function, instead of returning the ProduceResponse, first pass it through this function Return ------ a deferred which callbacks with a list of ProduceResponse Raises ------ FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
[ "Encode", "and", "send", "some", "ProduceRequests" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L509-L557
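Editor's note: a minimal, hedged produce sketch. Only the send_produce_request signature comes from the record above; the ProduceRequest field order and the create_message helper are assumptions.

from twisted.internet import defer

from afkak.common import ProduceRequest       # assumed: namedtuple(topic, partition, messages)
from afkak.kafkacodec import create_message   # assumed helper for wrapping a payload

@defer.inlineCallbacks
def produce_one(client):
    payload = ProduceRequest('my-topic', 0, [create_message(b'hello')])
    # acks=1 waits only for the partition leader; with acks=0 no response is decoded.
    responses = yield client.send_produce_request([payload], acks=1)
    defer.returnValue(responses)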
ciena/afkak
afkak/client.py
KafkaClient.send_fetch_request
def send_fetch_request(self, payloads=None, fail_on_error=True, callback=None, max_wait_time=DEFAULT_FETCH_SERVER_WAIT_MSECS, min_bytes=DEFAULT_FETCH_MIN_BYTES): """ Encode and send a FetchRequest Payloads are grouped by topic and partition so they can be pipelined to the same brokers. Raises ====== FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError """ if (max_wait_time / 1000) > (self.timeout - 0.1): raise ValueError( "%r: max_wait_time: %d must be less than client.timeout by " "at least 100 milliseconds.", self, max_wait_time) encoder = partial(KafkaCodec.encode_fetch_request, max_wait_time=max_wait_time, min_bytes=min_bytes) # resps is a list of FetchResponse() objects, each of which can hold # 1-n messages. resps = yield self._send_broker_aware_request( payloads, encoder, KafkaCodec.decode_fetch_response) returnValue(self._handle_responses(resps, fail_on_error, callback))
python
def send_fetch_request(self, payloads=None, fail_on_error=True, callback=None, max_wait_time=DEFAULT_FETCH_SERVER_WAIT_MSECS, min_bytes=DEFAULT_FETCH_MIN_BYTES): """ Encode and send a FetchRequest Payloads are grouped by topic and partition so they can be pipelined to the same brokers. Raises ====== FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError """ if (max_wait_time / 1000) > (self.timeout - 0.1): raise ValueError( "%r: max_wait_time: %d must be less than client.timeout by " "at least 100 milliseconds.", self, max_wait_time) encoder = partial(KafkaCodec.encode_fetch_request, max_wait_time=max_wait_time, min_bytes=min_bytes) # resps is a list of FetchResponse() objects, each of which can hold # 1-n messages. resps = yield self._send_broker_aware_request( payloads, encoder, KafkaCodec.decode_fetch_response) returnValue(self._handle_responses(resps, fail_on_error, callback))
[ "def", "send_fetch_request", "(", "self", ",", "payloads", "=", "None", ",", "fail_on_error", "=", "True", ",", "callback", "=", "None", ",", "max_wait_time", "=", "DEFAULT_FETCH_SERVER_WAIT_MSECS", ",", "min_bytes", "=", "DEFAULT_FETCH_MIN_BYTES", ")", ":", "if", "(", "max_wait_time", "/", "1000", ")", ">", "(", "self", ".", "timeout", "-", "0.1", ")", ":", "raise", "ValueError", "(", "\"%r: max_wait_time: %d must be less than client.timeout by \"", "\"at least 100 milliseconds.\"", ",", "self", ",", "max_wait_time", ")", "encoder", "=", "partial", "(", "KafkaCodec", ".", "encode_fetch_request", ",", "max_wait_time", "=", "max_wait_time", ",", "min_bytes", "=", "min_bytes", ")", "# resps is a list of FetchResponse() objects, each of which can hold", "# 1-n messages.", "resps", "=", "yield", "self", ".", "_send_broker_aware_request", "(", "payloads", ",", "encoder", ",", "KafkaCodec", ".", "decode_fetch_response", ")", "returnValue", "(", "self", ".", "_handle_responses", "(", "resps", ",", "fail_on_error", ",", "callback", ")", ")" ]
Encode and send a FetchRequest Payloads are grouped by topic and partition so they can be pipelined to the same brokers. Raises ====== FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
[ "Encode", "and", "send", "a", "FetchRequest" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L560-L589
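Editor's note: a hedged fetch sketch. The guard in the method above means max_wait_time (milliseconds) must be at least 100 ms shorter than the client timeout (seconds); the FetchRequest field order is an assumption.

from twisted.internet import defer

from afkak.common import FetchRequest   # assumed: namedtuple(topic, partition, offset, max_bytes)

@defer.inlineCallbacks
def fetch_from_start(client):
    payload = FetchRequest('my-topic', 0, 0, 64 * 1024)
    # 500 ms server-side wait; must satisfy max_wait_time / 1000 <= client.timeout - 0.1
    responses = yield client.send_fetch_request([payload], max_wait_time=500)
    defer.returnValue(responses)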
ciena/afkak
afkak/client.py
KafkaClient.send_offset_fetch_request
def send_offset_fetch_request(self, group, payloads=None, fail_on_error=True, callback=None): """ Takes a group (string) and list of OffsetFetchRequest and returns a list of OffsetFetchResponse objects """ encoder = partial(KafkaCodec.encode_offset_fetch_request, group=group) decoder = KafkaCodec.decode_offset_fetch_response resps = yield self._send_broker_aware_request( payloads, encoder, decoder, consumer_group=group) returnValue(self._handle_responses( resps, fail_on_error, callback, group))
python
def send_offset_fetch_request(self, group, payloads=None, fail_on_error=True, callback=None): """ Takes a group (string) and list of OffsetFetchRequest and returns a list of OffsetFetchResponse objects """ encoder = partial(KafkaCodec.encode_offset_fetch_request, group=group) decoder = KafkaCodec.decode_offset_fetch_response resps = yield self._send_broker_aware_request( payloads, encoder, decoder, consumer_group=group) returnValue(self._handle_responses( resps, fail_on_error, callback, group))
[ "def", "send_offset_fetch_request", "(", "self", ",", "group", ",", "payloads", "=", "None", ",", "fail_on_error", "=", "True", ",", "callback", "=", "None", ")", ":", "encoder", "=", "partial", "(", "KafkaCodec", ".", "encode_offset_fetch_request", ",", "group", "=", "group", ")", "decoder", "=", "KafkaCodec", ".", "decode_offset_fetch_response", "resps", "=", "yield", "self", ".", "_send_broker_aware_request", "(", "payloads", ",", "encoder", ",", "decoder", ",", "consumer_group", "=", "group", ")", "returnValue", "(", "self", ".", "_handle_responses", "(", "resps", ",", "fail_on_error", ",", "callback", ",", "group", ")", ")" ]
Takes a group (string) and list of OffsetFetchRequest and returns a list of OffsetFetchResponse objects
[ "Takes", "a", "group", "(", "string", ")", "and", "list", "of", "OffsetFetchRequest", "and", "returns", "a", "list", "of", "OffsetFetchResponse", "objects" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L602-L615
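Editor's note: a hedged sketch of reading a group's committed offsets with the method above; the OffsetFetchRequest field order is an assumption.

from twisted.internet import defer

from afkak.common import OffsetFetchRequest   # assumed: namedtuple(topic, partition)

@defer.inlineCallbacks
def committed_offsets(client, group):
    requests = [OffsetFetchRequest('my-topic', p) for p in (0, 1)]
    responses = yield client.send_offset_fetch_request(group, requests)
    defer.returnValue(responses)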
ciena/afkak
afkak/client.py
KafkaClient.send_offset_commit_request
def send_offset_commit_request(self, group, payloads=None, fail_on_error=True, callback=None, group_generation_id=-1, consumer_id=''): """Send a list of OffsetCommitRequests to the Kafka broker for the given consumer group. Args: group (str): The consumer group to which to commit the offsets payloads ([OffsetCommitRequest]): List of topic, partition, offsets to commit. fail_on_error (bool): Whether to raise an exception if a response from the Kafka broker indicates an error callback (callable): a function to call with each of the responses before returning the returned value to the caller. group_generation_id (int): Must currently always be -1 consumer_id (str): Must currently always be empty string Returns: [OffsetCommitResponse]: List of OffsetCommitResponse objects. Will raise KafkaError for failed requests if fail_on_error is True """ group = _coerce_consumer_group(group) encoder = partial(KafkaCodec.encode_offset_commit_request, group=group, group_generation_id=group_generation_id, consumer_id=consumer_id) decoder = KafkaCodec.decode_offset_commit_response resps = yield self._send_broker_aware_request( payloads, encoder, decoder, consumer_group=group) returnValue(self._handle_responses( resps, fail_on_error, callback, group))
python
def send_offset_commit_request(self, group, payloads=None, fail_on_error=True, callback=None, group_generation_id=-1, consumer_id=''): """Send a list of OffsetCommitRequests to the Kafka broker for the given consumer group. Args: group (str): The consumer group to which to commit the offsets payloads ([OffsetCommitRequest]): List of topic, partition, offsets to commit. fail_on_error (bool): Whether to raise an exception if a response from the Kafka broker indicates an error callback (callable): a function to call with each of the responses before returning the returned value to the caller. group_generation_id (int): Must currently always be -1 consumer_id (str): Must currently always be empty string Returns: [OffsetCommitResponse]: List of OffsetCommitResponse objects. Will raise KafkaError for failed requests if fail_on_error is True """ group = _coerce_consumer_group(group) encoder = partial(KafkaCodec.encode_offset_commit_request, group=group, group_generation_id=group_generation_id, consumer_id=consumer_id) decoder = KafkaCodec.decode_offset_commit_response resps = yield self._send_broker_aware_request( payloads, encoder, decoder, consumer_group=group) returnValue(self._handle_responses( resps, fail_on_error, callback, group))
[ "def", "send_offset_commit_request", "(", "self", ",", "group", ",", "payloads", "=", "None", ",", "fail_on_error", "=", "True", ",", "callback", "=", "None", ",", "group_generation_id", "=", "-", "1", ",", "consumer_id", "=", "''", ")", ":", "group", "=", "_coerce_consumer_group", "(", "group", ")", "encoder", "=", "partial", "(", "KafkaCodec", ".", "encode_offset_commit_request", ",", "group", "=", "group", ",", "group_generation_id", "=", "group_generation_id", ",", "consumer_id", "=", "consumer_id", ")", "decoder", "=", "KafkaCodec", ".", "decode_offset_commit_response", "resps", "=", "yield", "self", ".", "_send_broker_aware_request", "(", "payloads", ",", "encoder", ",", "decoder", ",", "consumer_group", "=", "group", ")", "returnValue", "(", "self", ".", "_handle_responses", "(", "resps", ",", "fail_on_error", ",", "callback", ",", "group", ")", ")" ]
Send a list of OffsetCommitRequests to the Kafka broker for the given consumer group. Args: group (str): The consumer group to which to commit the offsets payloads ([OffsetCommitRequest]): List of topic, partition, offsets to commit. fail_on_error (bool): Whether to raise an exception if a response from the Kafka broker indicates an error callback (callable): a function to call with each of the responses before returning the returned value to the caller. group_generation_id (int): Must currently always be -1 consumer_id (str): Must currently always be empty string Returns: [OffsetCommitResponse]: List of OffsetCommitResponse objects. Will raise KafkaError for failed requests if fail_on_error is True
[ "Send", "a", "list", "of", "OffsetCommitRequests", "to", "the", "Kafka", "broker", "for", "the", "given", "consumer", "group", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L618-L648
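Editor's note: a hedged commit sketch. The OffsetCommitRequest field order and the -1 timestamp convention are assumptions; group_generation_id and consumer_id are left at their defaults because the docstring above says -1 and the empty string are currently the only supported values.

from twisted.internet import defer

from afkak.common import OffsetCommitRequest   # assumed: namedtuple(topic, partition, offset, timestamp, metadata)

@defer.inlineCallbacks
def commit_offset(client, group, offset):
    request = OffsetCommitRequest('my-topic', 0, offset, -1, b'')   # -1 timestamp: assumed "broker assigns"
    responses = yield client.send_offset_commit_request(group, [request])
    defer.returnValue(responses)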
ciena/afkak
afkak/client.py
KafkaClient._get_brokerclient
def _get_brokerclient(self, node_id): """ Get a broker client. :param int node_id: Broker node ID :raises KeyError: for an unknown node ID :returns: :class:`_KafkaBrokerClient` """ if self._closing: raise ClientError("Cannot get broker client for node_id={}: {} has been closed".format(node_id, self)) if node_id not in self.clients: broker_metadata = self._brokers[node_id] log.debug("%r: creating client for %s", self, broker_metadata) self.clients[node_id] = _KafkaBrokerClient( self.reactor, self._endpoint_factory, broker_metadata, self.clientId, self._retry_policy, ) return self.clients[node_id]
python
def _get_brokerclient(self, node_id): """ Get a broker client. :param int node_id: Broker node ID :raises KeyError: for an unknown node ID :returns: :class:`_KafkaBrokerClient` """ if self._closing: raise ClientError("Cannot get broker client for node_id={}: {} has been closed".format(node_id, self)) if node_id not in self.clients: broker_metadata = self._brokers[node_id] log.debug("%r: creating client for %s", self, broker_metadata) self.clients[node_id] = _KafkaBrokerClient( self.reactor, self._endpoint_factory, broker_metadata, self.clientId, self._retry_policy, ) return self.clients[node_id]
[ "def", "_get_brokerclient", "(", "self", ",", "node_id", ")", ":", "if", "self", ".", "_closing", ":", "raise", "ClientError", "(", "\"Cannot get broker client for node_id={}: {} has been closed\"", ".", "format", "(", "node_id", ",", "self", ")", ")", "if", "node_id", "not", "in", "self", ".", "clients", ":", "broker_metadata", "=", "self", ".", "_brokers", "[", "node_id", "]", "log", ".", "debug", "(", "\"%r: creating client for %s\"", ",", "self", ",", "broker_metadata", ")", "self", ".", "clients", "[", "node_id", "]", "=", "_KafkaBrokerClient", "(", "self", ".", "reactor", ",", "self", ".", "_endpoint_factory", ",", "broker_metadata", ",", "self", ".", "clientId", ",", "self", ".", "_retry_policy", ",", ")", "return", "self", ".", "clients", "[", "node_id", "]" ]
Get a broker client. :param int node_id: Broker node ID :raises KeyError: for an unknown node ID :returns: :class:`_KafkaBrokerClient`
[ "Get", "a", "broker", "client", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L678-L695
ciena/afkak
afkak/client.py
KafkaClient._close_brokerclients
def _close_brokerclients(self, clients): """ Close the given broker clients. :param clients: Iterable of `_KafkaBrokerClient` """ def _log_close_failure(failure, brokerclient): log.debug( 'BrokerClient: %s close result: %s: %s', brokerclient, failure.type.__name__, failure.getErrorMessage()) def _clean_close_dlist(result, close_dlist): # If there aren't any other outstanding closings going on, then # close_dlist == self.close_dlist, and we can reset it. if close_dlist == self.close_dlist: self.close_dlist = None if not self.close_dlist: dList = [] else: log.debug("%r: _close_brokerclients has nested deferredlist: %r", self, self.close_dlist) dList = [self.close_dlist] for brokerClient in clients: log.debug("Calling close on: %r", brokerClient) d = brokerClient.close().addErrback(_log_close_failure, brokerClient) dList.append(d) self.close_dlist = DeferredList(dList) self.close_dlist.addBoth(_clean_close_dlist, self.close_dlist)
python
def _close_brokerclients(self, clients): """ Close the given broker clients. :param clients: Iterable of `_KafkaBrokerClient` """ def _log_close_failure(failure, brokerclient): log.debug( 'BrokerClient: %s close result: %s: %s', brokerclient, failure.type.__name__, failure.getErrorMessage()) def _clean_close_dlist(result, close_dlist): # If there aren't any other outstanding closings going on, then # close_dlist == self.close_dlist, and we can reset it. if close_dlist == self.close_dlist: self.close_dlist = None if not self.close_dlist: dList = [] else: log.debug("%r: _close_brokerclients has nested deferredlist: %r", self, self.close_dlist) dList = [self.close_dlist] for brokerClient in clients: log.debug("Calling close on: %r", brokerClient) d = brokerClient.close().addErrback(_log_close_failure, brokerClient) dList.append(d) self.close_dlist = DeferredList(dList) self.close_dlist.addBoth(_clean_close_dlist, self.close_dlist)
[ "def", "_close_brokerclients", "(", "self", ",", "clients", ")", ":", "def", "_log_close_failure", "(", "failure", ",", "brokerclient", ")", ":", "log", ".", "debug", "(", "'BrokerClient: %s close result: %s: %s'", ",", "brokerclient", ",", "failure", ".", "type", ".", "__name__", ",", "failure", ".", "getErrorMessage", "(", ")", ")", "def", "_clean_close_dlist", "(", "result", ",", "close_dlist", ")", ":", "# If there aren't any other outstanding closings going on, then", "# close_dlist == self.close_dlist, and we can reset it.", "if", "close_dlist", "==", "self", ".", "close_dlist", ":", "self", ".", "close_dlist", "=", "None", "if", "not", "self", ".", "close_dlist", ":", "dList", "=", "[", "]", "else", ":", "log", ".", "debug", "(", "\"%r: _close_brokerclients has nested deferredlist: %r\"", ",", "self", ",", "self", ".", "close_dlist", ")", "dList", "=", "[", "self", ".", "close_dlist", "]", "for", "brokerClient", "in", "clients", ":", "log", ".", "debug", "(", "\"Calling close on: %r\"", ",", "brokerClient", ")", "d", "=", "brokerClient", ".", "close", "(", ")", ".", "addErrback", "(", "_log_close_failure", ",", "brokerClient", ")", "dList", ".", "append", "(", "d", ")", "self", ".", "close_dlist", "=", "DeferredList", "(", "dList", ")", "self", ".", "close_dlist", ".", "addBoth", "(", "_clean_close_dlist", ",", "self", ".", "close_dlist", ")" ]
Close the given broker clients. :param clients: Iterable of `_KafkaBrokerClient`
[ "Close", "the", "given", "broker", "clients", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L697-L725
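Editor's note: outside of afkak, the gather-and-log idiom above reduces to giving every Deferred its own errback so one failure cannot abort the rest, then collecting them in a DeferredList. A self-contained toy version:

from twisted.internet import defer

def close_all(closables):
    ds = []
    for c in closables:
        # Per-item errback: log and swallow, so the DeferredList still fires for the others.
        d = c.close().addErrback(lambda f, c=c: print('close failed:', c, f.getErrorMessage()))
        ds.append(d)
    return defer.DeferredList(ds)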
ciena/afkak
afkak/client.py
KafkaClient._update_brokers
def _update_brokers(self, brokers, remove=False): """ Update `self._brokers` and `self.clients` Update our self.clients based on brokers in received metadata Take the received dict of brokers and reconcile it with our current list of brokers (self.clients). If there is a new one, bring up a new connection to it, and if remove is True, and any in our current list aren't in the metadata returned, disconnect from it. :param brokers: Iterable of `BrokerMetadata`. A client will be created for every broker given if it doesn't yet exist. :param bool remove: Is this metadata for *all* brokers? If so, clients for brokers which are no longer found in the metadata will be closed. """ log.debug("%r: _update_brokers(%r, remove=%r)", self, brokers, remove) brokers_by_id = {bm.node_id: bm for bm in brokers} self._brokers.update(brokers_by_id) # Update the metadata of broker clients that already exist. for node_id, broker_meta in brokers_by_id.items(): if node_id not in self.clients: continue self.clients[node_id].updateMetadata(broker_meta) # Remove any clients for brokers which no longer exist. if remove: to_close = [ self.clients.pop(node_id) for node_id in set(self.clients) - set(brokers_by_id) ] if to_close: self._close_brokerclients(to_close)
python
def _update_brokers(self, brokers, remove=False): """ Update `self._brokers` and `self.clients` Update our self.clients based on brokers in received metadata Take the received dict of brokers and reconcile it with our current list of brokers (self.clients). If there is a new one, bring up a new connection to it, and if remove is True, and any in our current list aren't in the metadata returned, disconnect from it. :param brokers: Iterable of `BrokerMetadata`. A client will be created for every broker given if it doesn't yet exist. :param bool remove: Is this metadata for *all* brokers? If so, clients for brokers which are no longer found in the metadata will be closed. """ log.debug("%r: _update_brokers(%r, remove=%r)", self, brokers, remove) brokers_by_id = {bm.node_id: bm for bm in brokers} self._brokers.update(brokers_by_id) # Update the metadata of broker clients that already exist. for node_id, broker_meta in brokers_by_id.items(): if node_id not in self.clients: continue self.clients[node_id].updateMetadata(broker_meta) # Remove any clients for brokers which no longer exist. if remove: to_close = [ self.clients.pop(node_id) for node_id in set(self.clients) - set(brokers_by_id) ] if to_close: self._close_brokerclients(to_close)
[ "def", "_update_brokers", "(", "self", ",", "brokers", ",", "remove", "=", "False", ")", ":", "log", ".", "debug", "(", "\"%r: _update_brokers(%r, remove=%r)\"", ",", "self", ",", "brokers", ",", "remove", ")", "brokers_by_id", "=", "{", "bm", ".", "node_id", ":", "bm", "for", "bm", "in", "brokers", "}", "self", ".", "_brokers", ".", "update", "(", "brokers_by_id", ")", "# Update the metadata of broker clients that already exist.", "for", "node_id", ",", "broker_meta", "in", "brokers_by_id", ".", "items", "(", ")", ":", "if", "node_id", "not", "in", "self", ".", "clients", ":", "continue", "self", ".", "clients", "[", "node_id", "]", ".", "updateMetadata", "(", "broker_meta", ")", "# Remove any clients for brokers which no longer exist.", "if", "remove", ":", "to_close", "=", "[", "self", ".", "clients", ".", "pop", "(", "node_id", ")", "for", "node_id", "in", "set", "(", "self", ".", "clients", ")", "-", "set", "(", "brokers_by_id", ")", "]", "if", "to_close", ":", "self", ".", "_close_brokerclients", "(", "to_close", ")" ]
Update `self._brokers` and `self.clients` Update our self.clients based on brokers in received metadata Take the received dict of brokers and reconcile it with our current list of brokers (self.clients). If there is a new one, bring up a new connection to it, and if remove is True, and any in our current list aren't in the metadata returned, disconnect from it. :param brokers: Iterable of `BrokerMetadata`. A client will be created for every broker given if it doesn't yet exist. :param bool remove: Is this metadata for *all* brokers? If so, clients for brokers which are no longer found in the metadata will be closed.
[ "Update", "self", ".", "_brokers", "and", "self", ".", "clients" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L727-L762
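Editor's note: stripped of afkak's types, the reconciliation above is a dict update plus a set difference. A toy illustration (not afkak code):

def reconcile(known, received, remove=False):
    """Merge `received` into `known`; when remove=True, drop and return the
    entries whose keys no longer appear in `received`."""
    known.update(received)
    dropped = []
    if remove:
        for node_id in set(known) - set(received):
            dropped.append(known.pop(node_id))
    return dropped

known = {1: 'broker-1', 3: 'broker-3'}
print(reconcile(known, {1: 'broker-1', 2: 'broker-2'}, remove=True))   # ['broker-3']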
ciena/afkak
afkak/client.py
KafkaClient._get_leader_for_partition
def _get_leader_for_partition(self, topic, partition): """ Returns the leader for a partition or None if the partition exists but has no leader. PartitionUnavailableError will be raised if the topic or partition is not part of the metadata. """ key = TopicAndPartition(topic, partition) # reload metadata whether the partition is not available # or has no leader (broker is None) if self.topics_to_brokers.get(key) is None: yield self.load_metadata_for_topics(topic) if key not in self.topics_to_brokers: raise PartitionUnavailableError("%s not available" % str(key)) returnValue(self.topics_to_brokers[key])
python
def _get_leader_for_partition(self, topic, partition): """ Returns the leader for a partition or None if the partition exists but has no leader. PartitionUnavailableError will be raised if the topic or partition is not part of the metadata. """ key = TopicAndPartition(topic, partition) # reload metadata whether the partition is not available # or has no leader (broker is None) if self.topics_to_brokers.get(key) is None: yield self.load_metadata_for_topics(topic) if key not in self.topics_to_brokers: raise PartitionUnavailableError("%s not available" % str(key)) returnValue(self.topics_to_brokers[key])
[ "def", "_get_leader_for_partition", "(", "self", ",", "topic", ",", "partition", ")", ":", "key", "=", "TopicAndPartition", "(", "topic", ",", "partition", ")", "# reload metadata whether the partition is not available", "# or has no leader (broker is None)", "if", "self", ".", "topics_to_brokers", ".", "get", "(", "key", ")", "is", "None", ":", "yield", "self", ".", "load_metadata_for_topics", "(", "topic", ")", "if", "key", "not", "in", "self", ".", "topics_to_brokers", ":", "raise", "PartitionUnavailableError", "(", "\"%s not available\"", "%", "str", "(", "key", ")", ")", "returnValue", "(", "self", ".", "topics_to_brokers", "[", "key", "]", ")" ]
Returns the leader for a partition or None if the partition exists but has no leader. PartitionUnavailableError will be raised if the topic or partition is not part of the metadata.
[ "Returns", "the", "leader", "for", "a", "partition", "or", "None", "if", "the", "partition", "exists", "but", "has", "no", "leader", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L765-L783
ciena/afkak
afkak/client.py
KafkaClient._get_coordinator_for_group
def _get_coordinator_for_group(self, consumer_group): """Returns the coordinator (broker) for a consumer group Returns the broker for a given consumer group or Raises ConsumerCoordinatorNotAvailableError """ if self.consumer_group_to_brokers.get(consumer_group) is None: yield self.load_consumer_metadata_for_group(consumer_group) returnValue(self.consumer_group_to_brokers.get(consumer_group))
python
def _get_coordinator_for_group(self, consumer_group): """Returns the coordinator (broker) for a consumer group Returns the broker for a given consumer group or Raises ConsumerCoordinatorNotAvailableError """ if self.consumer_group_to_brokers.get(consumer_group) is None: yield self.load_consumer_metadata_for_group(consumer_group) returnValue(self.consumer_group_to_brokers.get(consumer_group))
[ "def", "_get_coordinator_for_group", "(", "self", ",", "consumer_group", ")", ":", "if", "self", ".", "consumer_group_to_brokers", ".", "get", "(", "consumer_group", ")", "is", "None", ":", "yield", "self", ".", "load_consumer_metadata_for_group", "(", "consumer_group", ")", "returnValue", "(", "self", ".", "consumer_group_to_brokers", ".", "get", "(", "consumer_group", ")", ")" ]
Returns the coordinator (broker) for a consumer group Returns the broker for a given consumer group or Raises ConsumerCoordinatorNotAvailableError
[ "Returns", "the", "coordinator", "(", "broker", ")", "for", "a", "consumer", "group" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L786-L795
ciena/afkak
afkak/client.py
KafkaClient._make_request_to_broker
def _make_request_to_broker(self, broker, requestId, request, **kwArgs): """Send a request to the specified broker.""" def _timeout_request(broker, requestId): """The time we allotted for the request expired, cancel it.""" try: # FIXME: This should be done by calling .cancel() on the Deferred # returned by the broker client. broker.cancelRequest(requestId, reason=RequestTimedOutError( 'Request: {} cancelled due to timeout'.format(requestId))) except KeyError: # pragma: no cover This should never happen... log.exception('ERROR: Failed to find key for timed-out ' 'request. Broker: %r Req: %d', broker, requestId) raise if self._disconnect_on_timeout: broker.disconnect() def _alert_blocked_reactor(timeout, start): """Complain if this timer didn't fire before the timeout elapsed""" now = self.reactor.seconds() if now >= (start + timeout): log.warning('Reactor was starved for %r seconds', now - start) def _cancel_timeout(result, dc): """Request completed/cancelled, cancel the timeout delayedCall.""" if dc.active(): dc.cancel() return result # Make the request to the specified broker log.debug('_mrtb: sending request: %d to broker: %r', requestId, broker) d = broker.makeRequest(requestId, request, **kwArgs) # Set a delayedCall to fire if we don't get a reply in time dc = self.reactor.callLater( self.timeout, _timeout_request, broker, requestId) # Set a delayedCall to complain if the reactor has been blocked rc = self.reactor.callLater( (self.timeout * 0.9), _alert_blocked_reactor, self.timeout, self.reactor.seconds()) # Setup a callback on the request deferred to cancel both callLater d.addBoth(_cancel_timeout, dc) d.addBoth(_cancel_timeout, rc) return d
python
def _make_request_to_broker(self, broker, requestId, request, **kwArgs): """Send a request to the specified broker.""" def _timeout_request(broker, requestId): """The time we allotted for the request expired, cancel it.""" try: # FIXME: This should be done by calling .cancel() on the Deferred # returned by the broker client. broker.cancelRequest(requestId, reason=RequestTimedOutError( 'Request: {} cancelled due to timeout'.format(requestId))) except KeyError: # pragma: no cover This should never happen... log.exception('ERROR: Failed to find key for timed-out ' 'request. Broker: %r Req: %d', broker, requestId) raise if self._disconnect_on_timeout: broker.disconnect() def _alert_blocked_reactor(timeout, start): """Complain if this timer didn't fire before the timeout elapsed""" now = self.reactor.seconds() if now >= (start + timeout): log.warning('Reactor was starved for %r seconds', now - start) def _cancel_timeout(result, dc): """Request completed/cancelled, cancel the timeout delayedCall.""" if dc.active(): dc.cancel() return result # Make the request to the specified broker log.debug('_mrtb: sending request: %d to broker: %r', requestId, broker) d = broker.makeRequest(requestId, request, **kwArgs) # Set a delayedCall to fire if we don't get a reply in time dc = self.reactor.callLater( self.timeout, _timeout_request, broker, requestId) # Set a delayedCall to complain if the reactor has been blocked rc = self.reactor.callLater( (self.timeout * 0.9), _alert_blocked_reactor, self.timeout, self.reactor.seconds()) # Setup a callback on the request deferred to cancel both callLater d.addBoth(_cancel_timeout, dc) d.addBoth(_cancel_timeout, rc) return d
[ "def", "_make_request_to_broker", "(", "self", ",", "broker", ",", "requestId", ",", "request", ",", "*", "*", "kwArgs", ")", ":", "def", "_timeout_request", "(", "broker", ",", "requestId", ")", ":", "\"\"\"The time we allotted for the request expired, cancel it.\"\"\"", "try", ":", "# FIXME: This should be done by calling .cancel() on the Deferred", "# returned by the broker client.", "broker", ".", "cancelRequest", "(", "requestId", ",", "reason", "=", "RequestTimedOutError", "(", "'Request: {} cancelled due to timeout'", ".", "format", "(", "requestId", ")", ")", ")", "except", "KeyError", ":", "# pragma: no cover This should never happen...", "log", ".", "exception", "(", "'ERROR: Failed to find key for timed-out '", "'request. Broker: %r Req: %d'", ",", "broker", ",", "requestId", ")", "raise", "if", "self", ".", "_disconnect_on_timeout", ":", "broker", ".", "disconnect", "(", ")", "def", "_alert_blocked_reactor", "(", "timeout", ",", "start", ")", ":", "\"\"\"Complain if this timer didn't fire before the timeout elapsed\"\"\"", "now", "=", "self", ".", "reactor", ".", "seconds", "(", ")", "if", "now", ">=", "(", "start", "+", "timeout", ")", ":", "log", ".", "warning", "(", "'Reactor was starved for %r seconds'", ",", "now", "-", "start", ")", "def", "_cancel_timeout", "(", "result", ",", "dc", ")", ":", "\"\"\"Request completed/cancelled, cancel the timeout delayedCall.\"\"\"", "if", "dc", ".", "active", "(", ")", ":", "dc", ".", "cancel", "(", ")", "return", "result", "# Make the request to the specified broker", "log", ".", "debug", "(", "'_mrtb: sending request: %d to broker: %r'", ",", "requestId", ",", "broker", ")", "d", "=", "broker", ".", "makeRequest", "(", "requestId", ",", "request", ",", "*", "*", "kwArgs", ")", "# Set a delayedCall to fire if we don't get a reply in time", "dc", "=", "self", ".", "reactor", ".", "callLater", "(", "self", ".", "timeout", ",", "_timeout_request", ",", "broker", ",", "requestId", ")", "# Set a delayedCall to complain if the reactor has been blocked", "rc", "=", "self", ".", "reactor", ".", "callLater", "(", "(", "self", ".", "timeout", "*", "0.9", ")", ",", "_alert_blocked_reactor", ",", "self", ".", "timeout", ",", "self", ".", "reactor", ".", "seconds", "(", ")", ")", "# Setup a callback on the request deferred to cancel both callLater", "d", ".", "addBoth", "(", "_cancel_timeout", ",", "dc", ")", "d", ".", "addBoth", "(", "_cancel_timeout", ",", "rc", ")", "return", "d" ]
Send a request to the specified broker.
[ "Send", "a", "request", "to", "the", "specified", "broker", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L803-L846
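Editor's note: a minimal standalone sketch (not afkak's implementation) of the timeout idiom used above: schedule a delayed call that acts on timeout, and disarm it as soon as the request's Deferred fires either way.

from twisted.internet import reactor

def add_timeout(d, seconds, on_timeout):
    delayed = reactor.callLater(seconds, on_timeout)   # fires only if d is too slow

    def _disarm(result):
        if delayed.active():
            delayed.cancel()
        return result                                  # pass the result or failure through

    return d.addBoth(_disarm)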
ciena/afkak
afkak/client.py
KafkaClient._send_broker_unaware_request
def _send_broker_unaware_request(self, requestId, request): """ Attempt to send a broker-agnostic request to one of the known brokers: 1. Try each connected broker (in random order) 2. Try each known but unconnected broker (in random order) 3. Try each of the bootstrap hosts (in random order) :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: `KafkaUnavailableError` when making the request of all known hosts has failed. """ node_ids = list(self._brokers.keys()) # Randomly shuffle the brokers to distribute the load random.shuffle(node_ids) # Prioritize connected brokers def connected(node_id): try: return self.clients[node_id].connected() except KeyError: return False node_ids.sort(reverse=True, key=connected) for node_id in node_ids: broker = self._get_brokerclient(node_id) try: log.debug('_sbur: sending request %d to broker %r', requestId, broker) d = self._make_request_to_broker(broker, requestId, request) resp = yield d returnValue(resp) except KafkaError as e: log.warning(( "Will try next server after request with correlationId=%d" " failed against server %s:%i. Error: %s" ), requestId, broker.host, broker.port, e) # The request was not handled, likely because no broker metadata has # loaded yet (or all broker connections have failed). Fall back to # boostrapping. returnValue((yield self._send_bootstrap_request(request)))
python
def _send_broker_unaware_request(self, requestId, request): """ Attempt to send a broker-agnostic request to one of the known brokers: 1. Try each connected broker (in random order) 2. Try each known but unconnected broker (in random order) 3. Try each of the bootstrap hosts (in random order) :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: `KafkaUnavailableError` when making the request of all known hosts has failed. """ node_ids = list(self._brokers.keys()) # Randomly shuffle the brokers to distribute the load random.shuffle(node_ids) # Prioritize connected brokers def connected(node_id): try: return self.clients[node_id].connected() except KeyError: return False node_ids.sort(reverse=True, key=connected) for node_id in node_ids: broker = self._get_brokerclient(node_id) try: log.debug('_sbur: sending request %d to broker %r', requestId, broker) d = self._make_request_to_broker(broker, requestId, request) resp = yield d returnValue(resp) except KafkaError as e: log.warning(( "Will try next server after request with correlationId=%d" " failed against server %s:%i. Error: %s" ), requestId, broker.host, broker.port, e) # The request was not handled, likely because no broker metadata has # loaded yet (or all broker connections have failed). Fall back to # boostrapping. returnValue((yield self._send_bootstrap_request(request)))
[ "def", "_send_broker_unaware_request", "(", "self", ",", "requestId", ",", "request", ")", ":", "node_ids", "=", "list", "(", "self", ".", "_brokers", ".", "keys", "(", ")", ")", "# Randomly shuffle the brokers to distribute the load", "random", ".", "shuffle", "(", "node_ids", ")", "# Prioritize connected brokers", "def", "connected", "(", "node_id", ")", ":", "try", ":", "return", "self", ".", "clients", "[", "node_id", "]", ".", "connected", "(", ")", "except", "KeyError", ":", "return", "False", "node_ids", ".", "sort", "(", "reverse", "=", "True", ",", "key", "=", "connected", ")", "for", "node_id", "in", "node_ids", ":", "broker", "=", "self", ".", "_get_brokerclient", "(", "node_id", ")", "try", ":", "log", ".", "debug", "(", "'_sbur: sending request %d to broker %r'", ",", "requestId", ",", "broker", ")", "d", "=", "self", ".", "_make_request_to_broker", "(", "broker", ",", "requestId", ",", "request", ")", "resp", "=", "yield", "d", "returnValue", "(", "resp", ")", "except", "KafkaError", "as", "e", ":", "log", ".", "warning", "(", "(", "\"Will try next server after request with correlationId=%d\"", "\" failed against server %s:%i. Error: %s\"", ")", ",", "requestId", ",", "broker", ".", "host", ",", "broker", ".", "port", ",", "e", ")", "# The request was not handled, likely because no broker metadata has", "# loaded yet (or all broker connections have failed). Fall back to", "# boostrapping.", "returnValue", "(", "(", "yield", "self", ".", "_send_bootstrap_request", "(", "request", ")", ")", ")" ]
Attempt to send a broker-agnostic request to one of the known brokers: 1. Try each connected broker (in random order) 2. Try each known but unconnected broker (in random order) 3. Try each of the bootstrap hosts (in random order) :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: `KafkaUnavailableError` when making the request of all known hosts has failed.
[ "Attempt", "to", "send", "a", "broker", "-", "agnostic", "request", "to", "one", "of", "the", "known", "brokers", ":" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L849-L897
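Editor's note: the connected-first ordering above leans on two Python facts: list.sort() is stable, and False sorts before True, so reverse=True with a boolean key floats the connected brokers to the front while keeping the shuffled order within each group. A tiny illustration with made-up node ids:

import random

node_ids = [101, 102, 103, 104]
connected_nodes = {102, 104}   # hypothetical: brokers with live connections

random.shuffle(node_ids)                                         # distribute load
node_ids.sort(reverse=True, key=lambda n: n in connected_nodes)  # connected first
print(node_ids)   # 102/104 first (shuffled between themselves), then 101/103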
ciena/afkak
afkak/client.py
KafkaClient._send_bootstrap_request
def _send_bootstrap_request(self, request): """Make a request using an ephemeral broker connection This routine is used to make broker-unaware requests to get the initial cluster metadata. It cycles through the configured hosts, trying to connect and send the request to each in turn. This temporary connection is closed once a response is received. Note that most Kafka APIs require requests be sent to a specific broker. This method will only function for broker-agnostic requests like: * `Metadata <https://kafka.apache.org/protocol.html#The_Messages_Metadata>`_ * `FindCoordinator <https://kafka.apache.org/protocol.html#The_Messages_FindCoordinator>`_ :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: - `KafkaUnavailableError` when making the request of all known hosts has failed. - `twisted.internet.defer.TimeoutError` when connecting or making a request exceeds the timeout. """ hostports = list(self._bootstrap_hosts) random.shuffle(hostports) for host, port in hostports: ep = self._endpoint_factory(self.reactor, host, port) try: protocol = yield ep.connect(_bootstrapFactory) except Exception as e: log.debug("%s: bootstrap connect to %s:%s -> %s", self, host, port, e) continue try: response = yield protocol.request(request).addTimeout(self.timeout, self.reactor) except Exception: log.debug("%s: bootstrap request to %s:%s failed", self, host, port, exc_info=True) else: returnValue(response) finally: protocol.transport.loseConnection() raise KafkaUnavailableError("Failed to bootstrap from hosts {}".format(hostports))
python
def _send_bootstrap_request(self, request): """Make a request using an ephemeral broker connection This routine is used to make broker-unaware requests to get the initial cluster metadata. It cycles through the configured hosts, trying to connect and send the request to each in turn. This temporary connection is closed once a response is received. Note that most Kafka APIs require requests be sent to a specific broker. This method will only function for broker-agnostic requests like: * `Metadata <https://kafka.apache.org/protocol.html#The_Messages_Metadata>`_ * `FindCoordinator <https://kafka.apache.org/protocol.html#The_Messages_FindCoordinator>`_ :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: - `KafkaUnavailableError` when making the request of all known hosts has failed. - `twisted.internet.defer.TimeoutError` when connecting or making a request exceeds the timeout. """ hostports = list(self._bootstrap_hosts) random.shuffle(hostports) for host, port in hostports: ep = self._endpoint_factory(self.reactor, host, port) try: protocol = yield ep.connect(_bootstrapFactory) except Exception as e: log.debug("%s: bootstrap connect to %s:%s -> %s", self, host, port, e) continue try: response = yield protocol.request(request).addTimeout(self.timeout, self.reactor) except Exception: log.debug("%s: bootstrap request to %s:%s failed", self, host, port, exc_info=True) else: returnValue(response) finally: protocol.transport.loseConnection() raise KafkaUnavailableError("Failed to bootstrap from hosts {}".format(hostports))
[ "def", "_send_bootstrap_request", "(", "self", ",", "request", ")", ":", "hostports", "=", "list", "(", "self", ".", "_bootstrap_hosts", ")", "random", ".", "shuffle", "(", "hostports", ")", "for", "host", ",", "port", "in", "hostports", ":", "ep", "=", "self", ".", "_endpoint_factory", "(", "self", ".", "reactor", ",", "host", ",", "port", ")", "try", ":", "protocol", "=", "yield", "ep", ".", "connect", "(", "_bootstrapFactory", ")", "except", "Exception", "as", "e", ":", "log", ".", "debug", "(", "\"%s: bootstrap connect to %s:%s -> %s\"", ",", "self", ",", "host", ",", "port", ",", "e", ")", "continue", "try", ":", "response", "=", "yield", "protocol", ".", "request", "(", "request", ")", ".", "addTimeout", "(", "self", ".", "timeout", ",", "self", ".", "reactor", ")", "except", "Exception", ":", "log", ".", "debug", "(", "\"%s: bootstrap request to %s:%s failed\"", ",", "self", ",", "host", ",", "port", ",", "exc_info", "=", "True", ")", "else", ":", "returnValue", "(", "response", ")", "finally", ":", "protocol", ".", "transport", ".", "loseConnection", "(", ")", "raise", "KafkaUnavailableError", "(", "\"Failed to bootstrap from hosts {}\"", ".", "format", "(", "hostports", ")", ")" ]
Make a request using an ephemeral broker connection This routine is used to make broker-unaware requests to get the initial cluster metadata. It cycles through the configured hosts, trying to connect and send the request to each in turn. This temporary connection is closed once a response is received. Note that most Kafka APIs require requests be sent to a specific broker. This method will only function for broker-agnostic requests like: * `Metadata <https://kafka.apache.org/protocol.html#The_Messages_Metadata>`_ * `FindCoordinator <https://kafka.apache.org/protocol.html#The_Messages_FindCoordinator>`_ :param bytes request: The bytes of a Kafka `RequestMessage`_ structure. It must have a unique (to this connection) correlation ID. :returns: API response message for *request* :rtype: Deferred[bytes] :raises: - `KafkaUnavailableError` when making the request of all known hosts has failed. - `twisted.internet.defer.TimeoutError` when connecting or making a request exceeds the timeout.
[ "Make", "a", "request", "using", "an", "ephemeral", "broker", "connection" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L900-L947
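As a rough synchronous analogue of the bootstrap loop documented above, the sketch below cycles through (host, port) pairs, opens a short-lived connection for each attempt, and gives up with an error once every host has been tried. It uses plain sockets rather than Twisted endpoints, and payload stands in for an already-encoded Kafka request; real Kafka framing (length prefixes, correlation IDs) is glossed over, and none of this is afkak code.

    import random
    import socket

    def bootstrap_request(hostports, payload, timeout=5.0):
        hostports = list(hostports)
        random.shuffle(hostports)                      # spread attempts across hosts
        for host, port in hostports:
            try:
                with socket.create_connection((host, port), timeout=timeout) as sock:
                    sock.sendall(payload)              # one ephemeral connection per attempt
                    return sock.recv(65536)            # first successful response wins
            except OSError:
                continue                               # connect or I/O failed: next host
        raise RuntimeError("Failed to bootstrap from hosts %r" % (hostports,))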
ciena/afkak
afkak/client.py
KafkaClient._send_broker_aware_request
def _send_broker_aware_request(self, payloads, encoder_fn, decode_fn, consumer_group=None): """ Group a list of request payloads by topic+partition and send them to the leader broker for that partition using the supplied encode/decode functions Params ====== payloads: list of object-like entities with a topic and partition attribute. payloads must be grouped by (topic, partition) tuples. encode_fn: a method to encode the list of payloads to a request body, must accept client_id, correlation_id, and payloads as keyword arguments decode_fn: a method to decode a response body into response objects. The response objects must be object-like and have topic and partition attributes consumer_group: [string], optional. Indicates the request should be directed to the Offset Coordinator for the specified consumer_group. Return ====== deferred yielding a list of response objects in the same order as the supplied payloads, or None if decode_fn is None. Raises ====== FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError, """ # Calling this without payloads is nonsensical if not payloads: raise ValueError("Payloads parameter is empty") # Group the requests by topic+partition original_keys = [] payloads_by_broker = collections.defaultdict(list) # Go through all the payloads, lookup the leader/coordinator for that # payload's topic/partition or consumer group. If there's no # leader/coordinator (broker), raise. For each broker, keep # a list of the payloads to be sent to it. Also, for each payload in # the list of payloads, make a corresponding list (original_keys) with # the topic/partition in the same order, so we can lookup the returned # result(s) by that topic/partition key in the set of returned results # and return them in a list the same order the payloads were supplied for payload in payloads: # get leader/coordinator, depending on consumer_group if consumer_group is None: leader = yield self._get_leader_for_partition( payload.topic, payload.partition) if leader is None: raise LeaderUnavailableError( "Leader not available for topic %s partition %s" % (payload.topic, payload.partition)) else: leader = yield self._get_coordinator_for_group(consumer_group) if leader is None: raise ConsumerCoordinatorNotAvailableError( "Coordinator not available for group: %s" % (consumer_group)) payloads_by_broker[leader].append(payload) original_keys.append((payload.topic, payload.partition)) # Accumulate the responses in a dictionary acc = {} # The kafka server doesn't send replies to produce requests # with acks=0. In that case, our decode_fn will be # None, and we need to let the brokerclient know not # to expect a reply. makeRequest() returns a deferred # regardless, but in the expectResponse=False case, it will # fire as soon as the request is sent, and it can errBack() # due to being cancelled prior to the broker being able to # send the request. 
expectResponse = decode_fn is not None # keep a list of payloads that were failed to be sent to brokers failed_payloads = [] # Keep track of outstanding requests in a list of deferreds inFlight = [] # and the payloads that go along with them payloadsList = [] # For each broker, send the list of request payloads, for broker_meta, payloads in payloads_by_broker.items(): broker = self._get_brokerclient(broker_meta.node_id) requestId = self._next_id() request = encoder_fn(client_id=self._clientIdBytes, correlation_id=requestId, payloads=payloads) # Make the request d = self._make_request_to_broker(broker, requestId, request, expectResponse=expectResponse) inFlight.append(d) payloadsList.append(payloads) # Wait for all the responses to come back, or the requests to fail results = yield DeferredList(inFlight, consumeErrors=True) # We now have a list of (succeeded, response/Failure) tuples. Check 'em for (success, response), payloads in zip(results, payloadsList): if not success: # The brokerclient deferred was errback()'d: # The send failed, or this request was cancelled (by timeout) log.debug("%r: request:%r to broker failed: %r", self, payloads, response) failed_payloads.extend([(p, response) for p in payloads]) continue if not expectResponse: continue # Successful request/response. Decode it and store by topic/part for response in decode_fn(response): acc[(response.topic, response.partition)] = response # Order the accumulated responses by the original key order # Note that this scheme will throw away responses which we did # not request. See test_send_fetch_request, where the response # includes an error, but for a topic/part we didn't request. # Since that topic/partition isn't in original_keys, we don't pass # it back from here and it doesn't error out. # If any of the payloads failed, fail responses = [acc[k] for k in original_keys if k in acc] if acc else [] if failed_payloads: self.reset_all_metadata() raise FailedPayloadsError(responses, failed_payloads) returnValue(responses)
python
def _send_broker_aware_request(self, payloads, encoder_fn, decode_fn, consumer_group=None): """ Group a list of request payloads by topic+partition and send them to the leader broker for that partition using the supplied encode/decode functions Params ====== payloads: list of object-like entities with a topic and partition attribute. payloads must be grouped by (topic, partition) tuples. encode_fn: a method to encode the list of payloads to a request body, must accept client_id, correlation_id, and payloads as keyword arguments decode_fn: a method to decode a response body into response objects. The response objects must be object-like and have topic and partition attributes consumer_group: [string], optional. Indicates the request should be directed to the Offset Coordinator for the specified consumer_group. Return ====== deferred yielding a list of response objects in the same order as the supplied payloads, or None if decode_fn is None. Raises ====== FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError, """ # Calling this without payloads is nonsensical if not payloads: raise ValueError("Payloads parameter is empty") # Group the requests by topic+partition original_keys = [] payloads_by_broker = collections.defaultdict(list) # Go through all the payloads, lookup the leader/coordinator for that # payload's topic/partition or consumer group. If there's no # leader/coordinator (broker), raise. For each broker, keep # a list of the payloads to be sent to it. Also, for each payload in # the list of payloads, make a corresponding list (original_keys) with # the topic/partition in the same order, so we can lookup the returned # result(s) by that topic/partition key in the set of returned results # and return them in a list the same order the payloads were supplied for payload in payloads: # get leader/coordinator, depending on consumer_group if consumer_group is None: leader = yield self._get_leader_for_partition( payload.topic, payload.partition) if leader is None: raise LeaderUnavailableError( "Leader not available for topic %s partition %s" % (payload.topic, payload.partition)) else: leader = yield self._get_coordinator_for_group(consumer_group) if leader is None: raise ConsumerCoordinatorNotAvailableError( "Coordinator not available for group: %s" % (consumer_group)) payloads_by_broker[leader].append(payload) original_keys.append((payload.topic, payload.partition)) # Accumulate the responses in a dictionary acc = {} # The kafka server doesn't send replies to produce requests # with acks=0. In that case, our decode_fn will be # None, and we need to let the brokerclient know not # to expect a reply. makeRequest() returns a deferred # regardless, but in the expectResponse=False case, it will # fire as soon as the request is sent, and it can errBack() # due to being cancelled prior to the broker being able to # send the request. 
expectResponse = decode_fn is not None # keep a list of payloads that were failed to be sent to brokers failed_payloads = [] # Keep track of outstanding requests in a list of deferreds inFlight = [] # and the payloads that go along with them payloadsList = [] # For each broker, send the list of request payloads, for broker_meta, payloads in payloads_by_broker.items(): broker = self._get_brokerclient(broker_meta.node_id) requestId = self._next_id() request = encoder_fn(client_id=self._clientIdBytes, correlation_id=requestId, payloads=payloads) # Make the request d = self._make_request_to_broker(broker, requestId, request, expectResponse=expectResponse) inFlight.append(d) payloadsList.append(payloads) # Wait for all the responses to come back, or the requests to fail results = yield DeferredList(inFlight, consumeErrors=True) # We now have a list of (succeeded, response/Failure) tuples. Check 'em for (success, response), payloads in zip(results, payloadsList): if not success: # The brokerclient deferred was errback()'d: # The send failed, or this request was cancelled (by timeout) log.debug("%r: request:%r to broker failed: %r", self, payloads, response) failed_payloads.extend([(p, response) for p in payloads]) continue if not expectResponse: continue # Successful request/response. Decode it and store by topic/part for response in decode_fn(response): acc[(response.topic, response.partition)] = response # Order the accumulated responses by the original key order # Note that this scheme will throw away responses which we did # not request. See test_send_fetch_request, where the response # includes an error, but for a topic/part we didn't request. # Since that topic/partition isn't in original_keys, we don't pass # it back from here and it doesn't error out. # If any of the payloads failed, fail responses = [acc[k] for k in original_keys if k in acc] if acc else [] if failed_payloads: self.reset_all_metadata() raise FailedPayloadsError(responses, failed_payloads) returnValue(responses)
[ "def", "_send_broker_aware_request", "(", "self", ",", "payloads", ",", "encoder_fn", ",", "decode_fn", ",", "consumer_group", "=", "None", ")", ":", "# Calling this without payloads is nonsensical", "if", "not", "payloads", ":", "raise", "ValueError", "(", "\"Payloads parameter is empty\"", ")", "# Group the requests by topic+partition", "original_keys", "=", "[", "]", "payloads_by_broker", "=", "collections", ".", "defaultdict", "(", "list", ")", "# Go through all the payloads, lookup the leader/coordinator for that", "# payload's topic/partition or consumer group. If there's no", "# leader/coordinator (broker), raise. For each broker, keep", "# a list of the payloads to be sent to it. Also, for each payload in", "# the list of payloads, make a corresponding list (original_keys) with", "# the topic/partition in the same order, so we can lookup the returned", "# result(s) by that topic/partition key in the set of returned results", "# and return them in a list the same order the payloads were supplied", "for", "payload", "in", "payloads", ":", "# get leader/coordinator, depending on consumer_group", "if", "consumer_group", "is", "None", ":", "leader", "=", "yield", "self", ".", "_get_leader_for_partition", "(", "payload", ".", "topic", ",", "payload", ".", "partition", ")", "if", "leader", "is", "None", ":", "raise", "LeaderUnavailableError", "(", "\"Leader not available for topic %s partition %s\"", "%", "(", "payload", ".", "topic", ",", "payload", ".", "partition", ")", ")", "else", ":", "leader", "=", "yield", "self", ".", "_get_coordinator_for_group", "(", "consumer_group", ")", "if", "leader", "is", "None", ":", "raise", "ConsumerCoordinatorNotAvailableError", "(", "\"Coordinator not available for group: %s\"", "%", "(", "consumer_group", ")", ")", "payloads_by_broker", "[", "leader", "]", ".", "append", "(", "payload", ")", "original_keys", ".", "append", "(", "(", "payload", ".", "topic", ",", "payload", ".", "partition", ")", ")", "# Accumulate the responses in a dictionary", "acc", "=", "{", "}", "# The kafka server doesn't send replies to produce requests", "# with acks=0. In that case, our decode_fn will be", "# None, and we need to let the brokerclient know not", "# to expect a reply. 
makeRequest() returns a deferred", "# regardless, but in the expectResponse=False case, it will", "# fire as soon as the request is sent, and it can errBack()", "# due to being cancelled prior to the broker being able to", "# send the request.", "expectResponse", "=", "decode_fn", "is", "not", "None", "# keep a list of payloads that were failed to be sent to brokers", "failed_payloads", "=", "[", "]", "# Keep track of outstanding requests in a list of deferreds", "inFlight", "=", "[", "]", "# and the payloads that go along with them", "payloadsList", "=", "[", "]", "# For each broker, send the list of request payloads,", "for", "broker_meta", ",", "payloads", "in", "payloads_by_broker", ".", "items", "(", ")", ":", "broker", "=", "self", ".", "_get_brokerclient", "(", "broker_meta", ".", "node_id", ")", "requestId", "=", "self", ".", "_next_id", "(", ")", "request", "=", "encoder_fn", "(", "client_id", "=", "self", ".", "_clientIdBytes", ",", "correlation_id", "=", "requestId", ",", "payloads", "=", "payloads", ")", "# Make the request", "d", "=", "self", ".", "_make_request_to_broker", "(", "broker", ",", "requestId", ",", "request", ",", "expectResponse", "=", "expectResponse", ")", "inFlight", ".", "append", "(", "d", ")", "payloadsList", ".", "append", "(", "payloads", ")", "# Wait for all the responses to come back, or the requests to fail", "results", "=", "yield", "DeferredList", "(", "inFlight", ",", "consumeErrors", "=", "True", ")", "# We now have a list of (succeeded, response/Failure) tuples. Check 'em", "for", "(", "success", ",", "response", ")", ",", "payloads", "in", "zip", "(", "results", ",", "payloadsList", ")", ":", "if", "not", "success", ":", "# The brokerclient deferred was errback()'d:", "# The send failed, or this request was cancelled (by timeout)", "log", ".", "debug", "(", "\"%r: request:%r to broker failed: %r\"", ",", "self", ",", "payloads", ",", "response", ")", "failed_payloads", ".", "extend", "(", "[", "(", "p", ",", "response", ")", "for", "p", "in", "payloads", "]", ")", "continue", "if", "not", "expectResponse", ":", "continue", "# Successful request/response. Decode it and store by topic/part", "for", "response", "in", "decode_fn", "(", "response", ")", ":", "acc", "[", "(", "response", ".", "topic", ",", "response", ".", "partition", ")", "]", "=", "response", "# Order the accumulated responses by the original key order", "# Note that this scheme will throw away responses which we did", "# not request. See test_send_fetch_request, where the response", "# includes an error, but for a topic/part we didn't request.", "# Since that topic/partition isn't in original_keys, we don't pass", "# it back from here and it doesn't error out.", "# If any of the payloads failed, fail", "responses", "=", "[", "acc", "[", "k", "]", "for", "k", "in", "original_keys", "if", "k", "in", "acc", "]", "if", "acc", "else", "[", "]", "if", "failed_payloads", ":", "self", ".", "reset_all_metadata", "(", ")", "raise", "FailedPayloadsError", "(", "responses", ",", "failed_payloads", ")", "returnValue", "(", "responses", ")" ]
Group a list of request payloads by topic+partition and send them to the leader broker for that partition using the supplied encode/decode functions Params ====== payloads: list of object-like entities with a topic and partition attribute. payloads must be grouped by (topic, partition) tuples. encode_fn: a method to encode the list of payloads to a request body, must accept client_id, correlation_id, and payloads as keyword arguments decode_fn: a method to decode a response body into response objects. The response objects must be object-like and have topic and partition attributes consumer_group: [string], optional. Indicates the request should be directed to the Offset Coordinator for the specified consumer_group. Return ====== deferred yielding a list of response objects in the same order as the supplied payloads, or None if decode_fn is None. Raises ====== FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError,
[ "Group", "a", "list", "of", "request", "payloads", "by", "topic", "+", "partition", "and", "send", "them", "to", "the", "leader", "broker", "for", "that", "partition", "using", "the", "supplied", "encode", "/", "decode", "functions" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L950-L1080
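Stripped of the Twisted plumbing, _send_broker_aware_request is a group-scatter-gather: bucket payloads by their partition leader (or group coordinator), fire one request per broker, collect responses keyed by topic and partition, then replay them in the caller's original order and fail loudly if any bucket failed. The sketch below shows only that bookkeeping; leader_for and send_batch are hypothetical callables, and a plain RuntimeError stands in for FailedPayloadsError.

    import collections

    def send_broker_aware(payloads, leader_for, send_batch):
        original_keys = []
        by_broker = collections.defaultdict(list)
        for p in payloads:                                  # group by partition leader
            by_broker[leader_for(p.topic, p.partition)].append(p)
            original_keys.append((p.topic, p.partition))
        acc, failed = {}, []
        for broker, batch in by_broker.items():
            try:
                for resp in send_batch(broker, batch):      # one request per broker
                    acc[(resp.topic, resp.partition)] = resp
            except Exception as e:
                failed.extend((p, e) for p in batch)
        responses = [acc[k] for k in original_keys if k in acc]  # restore original order
        if failed:
            raise RuntimeError("failed payloads: %r" % (failed,))
        return responses

    if __name__ == "__main__":
        Payload = collections.namedtuple("Payload", "topic partition")
        Resp = collections.namedtuple("Resp", "topic partition value")
        def send(broker, batch):
            return [Resp(p.topic, p.partition, broker) for p in batch]
        print(send_broker_aware([Payload("t", 0), Payload("t", 1)],
                                leader_for=lambda t, p: p % 2, send_batch=send))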
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
get_description
def get_description(expression, options=None): """Generates a human readable string for the Cron Expression Args: expression: The cron expression string options: Options to control the output description Returns: The cron expression description """ descripter = ExpressionDescriptor(expression, options) return descripter.get_description(DescriptionTypeEnum.FULL)
python
def get_description(expression, options=None): """Generates a human readable string for the Cron Expression Args: expression: The cron expression string options: Options to control the output description Returns: The cron expression description """ descripter = ExpressionDescriptor(expression, options) return descripter.get_description(DescriptionTypeEnum.FULL)
[ "def", "get_description", "(", "expression", ",", "options", "=", "None", ")", ":", "descripter", "=", "ExpressionDescriptor", "(", "expression", ",", "options", ")", "return", "descripter", ".", "get_description", "(", "DescriptionTypeEnum", ".", "FULL", ")" ]
Generates a human readable string for the Cron Expression Args: expression: The cron expression string options: Options to control the output description Returns: The cron expression description
[ "Generates", "a", "human", "readable", "string", "for", "the", "Cron", "Expression", "Args", ":", "expression", ":", "The", "cron", "expression", "string", "options", ":", "Options", "to", "control", "the", "output", "description", "Returns", ":", "The", "cron", "expression", "description" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L605-L615
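For callers, the module-level helper above is the one-line entry point into cron-descriptor. Assuming the package is installed, usage looks roughly like the following; the exact wording of the output depends on the library version and configured locale, so the expected strings are only indicated in comments.

    from cron_descriptor import get_description

    print(get_description("*/5 * * * *"))    # e.g. "Every 5 minutes"
    print(get_description("0 9 * * 1-5"))    # e.g. "At 09:00 AM, Monday through Friday"
    print(get_description("0 0 1 1 *"))      # e.g. "At 12:00 AM, on day 1 of the month, only in January"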
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_description
def get_description(self, description_type=DescriptionTypeEnum.FULL): """Generates a human readable string for the Cron Expression Args: description_type: Which part(s) of the expression to describe Returns: The cron expression description Raises: Exception: if throw_exception_on_parse_error is True """ try: if self._parsed is False: parser = ExpressionParser(self._expression, self._options) self._expression_parts = parser.parse() self._parsed = True choices = { DescriptionTypeEnum.FULL: self.get_full_description, DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description, DescriptionTypeEnum.HOURS: self.get_hours_description, DescriptionTypeEnum.MINUTES: self.get_minutes_description, DescriptionTypeEnum.SECONDS: self.get_seconds_description, DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description, DescriptionTypeEnum.MONTH: self.get_month_description, DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description, DescriptionTypeEnum.YEAR: self.get_year_description, } description = choices.get(description_type, self.get_seconds_description)() except Exception as ex: if self._options.throw_exception_on_parse_error: raise else: description = str(ex) return description
python
def get_description(self, description_type=DescriptionTypeEnum.FULL): """Generates a human readable string for the Cron Expression Args: description_type: Which part(s) of the expression to describe Returns: The cron expression description Raises: Exception: if throw_exception_on_parse_error is True """ try: if self._parsed is False: parser = ExpressionParser(self._expression, self._options) self._expression_parts = parser.parse() self._parsed = True choices = { DescriptionTypeEnum.FULL: self.get_full_description, DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description, DescriptionTypeEnum.HOURS: self.get_hours_description, DescriptionTypeEnum.MINUTES: self.get_minutes_description, DescriptionTypeEnum.SECONDS: self.get_seconds_description, DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description, DescriptionTypeEnum.MONTH: self.get_month_description, DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description, DescriptionTypeEnum.YEAR: self.get_year_description, } description = choices.get(description_type, self.get_seconds_description)() except Exception as ex: if self._options.throw_exception_on_parse_error: raise else: description = str(ex) return description
[ "def", "get_description", "(", "self", ",", "description_type", "=", "DescriptionTypeEnum", ".", "FULL", ")", ":", "try", ":", "if", "self", ".", "_parsed", "is", "False", ":", "parser", "=", "ExpressionParser", "(", "self", ".", "_expression", ",", "self", ".", "_options", ")", "self", ".", "_expression_parts", "=", "parser", ".", "parse", "(", ")", "self", ".", "_parsed", "=", "True", "choices", "=", "{", "DescriptionTypeEnum", ".", "FULL", ":", "self", ".", "get_full_description", ",", "DescriptionTypeEnum", ".", "TIMEOFDAY", ":", "self", ".", "get_time_of_day_description", ",", "DescriptionTypeEnum", ".", "HOURS", ":", "self", ".", "get_hours_description", ",", "DescriptionTypeEnum", ".", "MINUTES", ":", "self", ".", "get_minutes_description", ",", "DescriptionTypeEnum", ".", "SECONDS", ":", "self", ".", "get_seconds_description", ",", "DescriptionTypeEnum", ".", "DAYOFMONTH", ":", "self", ".", "get_day_of_month_description", ",", "DescriptionTypeEnum", ".", "MONTH", ":", "self", ".", "get_month_description", ",", "DescriptionTypeEnum", ".", "DAYOFWEEK", ":", "self", ".", "get_day_of_week_description", ",", "DescriptionTypeEnum", ".", "YEAR", ":", "self", ".", "get_year_description", ",", "}", "description", "=", "choices", ".", "get", "(", "description_type", ",", "self", ".", "get_seconds_description", ")", "(", ")", "except", "Exception", "as", "ex", ":", "if", "self", ".", "_options", ".", "throw_exception_on_parse_error", ":", "raise", "else", ":", "description", "=", "str", "(", "ex", ")", "return", "description" ]
Generates a human readable string for the Cron Expression Args: description_type: Which part(s) of the expression to describe Returns: The cron expression description Raises: Exception: if throw_exception_on_parse_error is True
[ "Generates", "a", "human", "readable", "string", "for", "the", "Cron", "Expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L76-L112
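When only part of an expression needs describing, ExpressionDescriptor can be driven directly with a DescriptionTypeEnum member, as the dispatch table in the record above shows. A minimal sketch, assuming Options, DescriptionTypeEnum and ExpressionDescriptor are importable from the package root:

    from cron_descriptor import DescriptionTypeEnum, ExpressionDescriptor, Options

    options = Options()
    options.throw_exception_on_parse_error = True      # raise instead of returning the error text

    descriptor = ExpressionDescriptor("*/10 8-17 * * 1-5", options)
    print(descriptor.get_description(DescriptionTypeEnum.FULL))       # whole expression
    print(descriptor.get_description(DescriptionTypeEnum.MINUTES))    # minutes field only
    print(descriptor.get_description(DescriptionTypeEnum.DAYOFWEEK))  # day-of-week field only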
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_full_description
def get_full_description(self): """Generates the FULL description Returns: The FULL description Raises: FormatException: if formating fails and throw_exception_on_parse_error is True """ try: time_segment = self.get_time_of_day_description() day_of_month_desc = self.get_day_of_month_description() month_desc = self.get_month_description() day_of_week_desc = self.get_day_of_week_description() year_desc = self.get_year_description() description = "{0}{1}{2}{3}{4}".format( time_segment, day_of_month_desc, day_of_week_desc, month_desc, year_desc) description = self.transform_verbosity( description, self._options.verbose) description = self.transform_case( description, self._options.casing_type) except Exception: description = _( "An error occured when generating the expression description. Check the cron expression syntax.") if self._options.throw_exception_on_parse_error: raise FormatException(description) return description
python
def get_full_description(self): """Generates the FULL description Returns: The FULL description Raises: FormatException: if formating fails and throw_exception_on_parse_error is True """ try: time_segment = self.get_time_of_day_description() day_of_month_desc = self.get_day_of_month_description() month_desc = self.get_month_description() day_of_week_desc = self.get_day_of_week_description() year_desc = self.get_year_description() description = "{0}{1}{2}{3}{4}".format( time_segment, day_of_month_desc, day_of_week_desc, month_desc, year_desc) description = self.transform_verbosity( description, self._options.verbose) description = self.transform_case( description, self._options.casing_type) except Exception: description = _( "An error occured when generating the expression description. Check the cron expression syntax.") if self._options.throw_exception_on_parse_error: raise FormatException(description) return description
[ "def", "get_full_description", "(", "self", ")", ":", "try", ":", "time_segment", "=", "self", ".", "get_time_of_day_description", "(", ")", "day_of_month_desc", "=", "self", ".", "get_day_of_month_description", "(", ")", "month_desc", "=", "self", ".", "get_month_description", "(", ")", "day_of_week_desc", "=", "self", ".", "get_day_of_week_description", "(", ")", "year_desc", "=", "self", ".", "get_year_description", "(", ")", "description", "=", "\"{0}{1}{2}{3}{4}\"", ".", "format", "(", "time_segment", ",", "day_of_month_desc", ",", "day_of_week_desc", ",", "month_desc", ",", "year_desc", ")", "description", "=", "self", ".", "transform_verbosity", "(", "description", ",", "self", ".", "_options", ".", "verbose", ")", "description", "=", "self", ".", "transform_case", "(", "description", ",", "self", ".", "_options", ".", "casing_type", ")", "except", "Exception", ":", "description", "=", "_", "(", "\"An error occured when generating the expression description. Check the cron expression syntax.\"", ")", "if", "self", ".", "_options", ".", "throw_exception_on_parse_error", ":", "raise", "FormatException", "(", "description", ")", "return", "description" ]
Generates the FULL description Returns: The FULL description Raises: FormatException: if formating fails and throw_exception_on_parse_error is True
[ "Generates", "the", "FULL", "description" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L114-L149
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_time_of_day_description
def get_time_of_day_description(self): """Generates a description for only the TIMEOFDAY portion of the expression Returns: The TIMEOFDAY description """ seconds_expression = self._expression_parts[0] minute_expression = self._expression_parts[1] hour_expression = self._expression_parts[2] description = StringBuilder() # handle special cases first if any(exp in minute_expression for exp in self._special_characters) is False and \ any(exp in hour_expression for exp in self._special_characters) is False and \ any(exp in seconds_expression for exp in self._special_characters) is False: # specific time of day (i.e. 10 14) description.append(_("At ")) description.append( self.format_time( hour_expression, minute_expression, seconds_expression)) elif "-" in minute_expression and \ "," not in minute_expression and \ any(exp in hour_expression for exp in self._special_characters) is False: # minute range in single hour (i.e. 0-10 11) minute_parts = minute_expression.split('-') description.append(_("Every minute between {0} and {1}").format( self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1]))) elif "," in hour_expression and "-" not in hour_expression and \ any(exp in minute_expression for exp in self._special_characters) is False: # hours list with single minute (o.e. 30 6,14,16) hour_parts = hour_expression.split(',') description.append(_("At")) for i, hour_part in enumerate(hour_parts): description.append(" ") description.append( self.format_time(hour_part, minute_expression)) if i < (len(hour_parts) - 2): description.append(",") if i == len(hour_parts) - 2: description.append(_(" and")) else: # default time description seconds_description = self.get_seconds_description() minutes_description = self.get_minutes_description() hours_description = self.get_hours_description() description.append(seconds_description) if description: description.append(", ") description.append(minutes_description) if description: description.append(", ") description.append(hours_description) return str(description)
python
def get_time_of_day_description(self): """Generates a description for only the TIMEOFDAY portion of the expression Returns: The TIMEOFDAY description """ seconds_expression = self._expression_parts[0] minute_expression = self._expression_parts[1] hour_expression = self._expression_parts[2] description = StringBuilder() # handle special cases first if any(exp in minute_expression for exp in self._special_characters) is False and \ any(exp in hour_expression for exp in self._special_characters) is False and \ any(exp in seconds_expression for exp in self._special_characters) is False: # specific time of day (i.e. 10 14) description.append(_("At ")) description.append( self.format_time( hour_expression, minute_expression, seconds_expression)) elif "-" in minute_expression and \ "," not in minute_expression and \ any(exp in hour_expression for exp in self._special_characters) is False: # minute range in single hour (i.e. 0-10 11) minute_parts = minute_expression.split('-') description.append(_("Every minute between {0} and {1}").format( self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1]))) elif "," in hour_expression and "-" not in hour_expression and \ any(exp in minute_expression for exp in self._special_characters) is False: # hours list with single minute (o.e. 30 6,14,16) hour_parts = hour_expression.split(',') description.append(_("At")) for i, hour_part in enumerate(hour_parts): description.append(" ") description.append( self.format_time(hour_part, minute_expression)) if i < (len(hour_parts) - 2): description.append(",") if i == len(hour_parts) - 2: description.append(_(" and")) else: # default time description seconds_description = self.get_seconds_description() minutes_description = self.get_minutes_description() hours_description = self.get_hours_description() description.append(seconds_description) if description: description.append(", ") description.append(minutes_description) if description: description.append(", ") description.append(hours_description) return str(description)
[ "def", "get_time_of_day_description", "(", "self", ")", ":", "seconds_expression", "=", "self", ".", "_expression_parts", "[", "0", "]", "minute_expression", "=", "self", ".", "_expression_parts", "[", "1", "]", "hour_expression", "=", "self", ".", "_expression_parts", "[", "2", "]", "description", "=", "StringBuilder", "(", ")", "# handle special cases first", "if", "any", "(", "exp", "in", "minute_expression", "for", "exp", "in", "self", ".", "_special_characters", ")", "is", "False", "and", "any", "(", "exp", "in", "hour_expression", "for", "exp", "in", "self", ".", "_special_characters", ")", "is", "False", "and", "any", "(", "exp", "in", "seconds_expression", "for", "exp", "in", "self", ".", "_special_characters", ")", "is", "False", ":", "# specific time of day (i.e. 10 14)", "description", ".", "append", "(", "_", "(", "\"At \"", ")", ")", "description", ".", "append", "(", "self", ".", "format_time", "(", "hour_expression", ",", "minute_expression", ",", "seconds_expression", ")", ")", "elif", "\"-\"", "in", "minute_expression", "and", "\",\"", "not", "in", "minute_expression", "and", "any", "(", "exp", "in", "hour_expression", "for", "exp", "in", "self", ".", "_special_characters", ")", "is", "False", ":", "# minute range in single hour (i.e. 0-10 11)", "minute_parts", "=", "minute_expression", ".", "split", "(", "'-'", ")", "description", ".", "append", "(", "_", "(", "\"Every minute between {0} and {1}\"", ")", ".", "format", "(", "self", ".", "format_time", "(", "hour_expression", ",", "minute_parts", "[", "0", "]", ")", ",", "self", ".", "format_time", "(", "hour_expression", ",", "minute_parts", "[", "1", "]", ")", ")", ")", "elif", "\",\"", "in", "hour_expression", "and", "\"-\"", "not", "in", "hour_expression", "and", "any", "(", "exp", "in", "minute_expression", "for", "exp", "in", "self", ".", "_special_characters", ")", "is", "False", ":", "# hours list with single minute (o.e. 30 6,14,16)", "hour_parts", "=", "hour_expression", ".", "split", "(", "','", ")", "description", ".", "append", "(", "_", "(", "\"At\"", ")", ")", "for", "i", ",", "hour_part", "in", "enumerate", "(", "hour_parts", ")", ":", "description", ".", "append", "(", "\" \"", ")", "description", ".", "append", "(", "self", ".", "format_time", "(", "hour_part", ",", "minute_expression", ")", ")", "if", "i", "<", "(", "len", "(", "hour_parts", ")", "-", "2", ")", ":", "description", ".", "append", "(", "\",\"", ")", "if", "i", "==", "len", "(", "hour_parts", ")", "-", "2", ":", "description", ".", "append", "(", "_", "(", "\" and\"", ")", ")", "else", ":", "# default time description", "seconds_description", "=", "self", ".", "get_seconds_description", "(", ")", "minutes_description", "=", "self", ".", "get_minutes_description", "(", ")", "hours_description", "=", "self", ".", "get_hours_description", "(", ")", "description", ".", "append", "(", "seconds_description", ")", "if", "description", ":", "description", ".", "append", "(", "\", \"", ")", "description", ".", "append", "(", "minutes_description", ")", "if", "description", ":", "description", ".", "append", "(", "\", \"", ")", "description", ".", "append", "(", "hours_description", ")", "return", "str", "(", "description", ")" ]
Generates a description for only the TIMEOFDAY portion of the expression Returns: The TIMEOFDAY description
[ "Generates", "a", "description", "for", "only", "the", "TIMEOFDAY", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L151-L214
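The TIMEOFDAY logic above tries three special shapes before falling back to composing the seconds, minutes and hours descriptions. The toy function below only classifies which branch an input would take; it mirrors the record's conditions, but it is not cron-descriptor code, and the set of special characters is assumed to be "/", "-", "," and "*".

    SPECIAL = ("/", "-", ",", "*")   # assumed contents of _special_characters

    def time_of_day_case(seconds, minutes, hours):
        plain = lambda expr: not any(ch in expr for ch in SPECIAL)
        if plain(seconds) and plain(minutes) and plain(hours):
            return "specific time"                 # e.g. seconds=5, minutes=30, hours=14
        if "-" in minutes and "," not in minutes and plain(hours):
            return "minute range in one hour"      # e.g. minutes=0-10, hours=11
        if "," in hours and "-" not in hours and plain(minutes):
            return "hour list, single minute"      # e.g. minutes=30, hours=6,14,16
        return "composed from seconds/minutes/hours descriptions"

    print(time_of_day_case("0", "30", "6,14,16"))  # hour list, single minute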
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_seconds_description
def get_seconds_description(self): """Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description """ return self.get_segment_description( self._expression_parts[0], _("every second"), lambda s: s, lambda s: _("every {0} seconds").format(s), lambda s: _("seconds {0} through {1} past the minute"), lambda s: _("at {0} seconds past the minute") )
python
def get_seconds_description(self): """Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description """ return self.get_segment_description( self._expression_parts[0], _("every second"), lambda s: s, lambda s: _("every {0} seconds").format(s), lambda s: _("seconds {0} through {1} past the minute"), lambda s: _("at {0} seconds past the minute") )
[ "def", "get_seconds_description", "(", "self", ")", ":", "return", "self", ".", "get_segment_description", "(", "self", ".", "_expression_parts", "[", "0", "]", ",", "_", "(", "\"every second\"", ")", ",", "lambda", "s", ":", "s", ",", "lambda", "s", ":", "_", "(", "\"every {0} seconds\"", ")", ".", "format", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\"seconds {0} through {1} past the minute\"", ")", ",", "lambda", "s", ":", "_", "(", "\"at {0} seconds past the minute\"", ")", ")" ]
Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description
[ "Generates", "a", "description", "for", "only", "the", "SECONDS", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L216-L231
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_minutes_description
def get_minutes_description(self): """Generates a description for only the MINUTE portion of the expression Returns: The MINUTE description """ return self.get_segment_description( self._expression_parts[1], _("every minute"), lambda s: s, lambda s: _("every {0} minutes").format(s), lambda s: _("minutes {0} through {1} past the hour"), lambda s: '' if s == "0" else _("at {0} minutes past the hour") )
python
def get_minutes_description(self): """Generates a description for only the MINUTE portion of the expression Returns: The MINUTE description """ return self.get_segment_description( self._expression_parts[1], _("every minute"), lambda s: s, lambda s: _("every {0} minutes").format(s), lambda s: _("minutes {0} through {1} past the hour"), lambda s: '' if s == "0" else _("at {0} minutes past the hour") )
[ "def", "get_minutes_description", "(", "self", ")", ":", "return", "self", ".", "get_segment_description", "(", "self", ".", "_expression_parts", "[", "1", "]", ",", "_", "(", "\"every minute\"", ")", ",", "lambda", "s", ":", "s", ",", "lambda", "s", ":", "_", "(", "\"every {0} minutes\"", ")", ".", "format", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\"minutes {0} through {1} past the hour\"", ")", ",", "lambda", "s", ":", "''", "if", "s", "==", "\"0\"", "else", "_", "(", "\"at {0} minutes past the hour\"", ")", ")" ]
Generates a description for only the MINUTE portion of the expression Returns: The MINUTE description
[ "Generates", "a", "description", "for", "only", "the", "MINUTE", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L233-L248
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_hours_description
def get_hours_description(self): """Generates a description for only the HOUR portion of the expression Returns: The HOUR description """ expression = self._expression_parts[2] return self.get_segment_description( expression, _("every hour"), lambda s: self.format_time(s, "0"), lambda s: _("every {0} hours").format(s), lambda s: _("between {0} and {1}"), lambda s: _("at {0}") )
python
def get_hours_description(self): """Generates a description for only the HOUR portion of the expression Returns: The HOUR description """ expression = self._expression_parts[2] return self.get_segment_description( expression, _("every hour"), lambda s: self.format_time(s, "0"), lambda s: _("every {0} hours").format(s), lambda s: _("between {0} and {1}"), lambda s: _("at {0}") )
[ "def", "get_hours_description", "(", "self", ")", ":", "expression", "=", "self", ".", "_expression_parts", "[", "2", "]", "return", "self", ".", "get_segment_description", "(", "expression", ",", "_", "(", "\"every hour\"", ")", ",", "lambda", "s", ":", "self", ".", "format_time", "(", "s", ",", "\"0\"", ")", ",", "lambda", "s", ":", "_", "(", "\"every {0} hours\"", ")", ".", "format", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\"between {0} and {1}\"", ")", ",", "lambda", "s", ":", "_", "(", "\"at {0}\"", ")", ")" ]
Generates a description for only the HOUR portion of the expression Returns: The HOUR description
[ "Generates", "a", "description", "for", "only", "the", "HOUR", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L250-L265
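The seconds, minutes and hours records above are all thin wrappers that feed different wording callbacks into one shared get_segment_description routine. The standalone sketch below shows that delegation pattern in a heavily reduced form (no comma lists, no "between" ranges inside steps); it illustrates the structure only and is not the library's implementation.

    def describe_segment(expr, every, single, step_fmt, range_fmt, single_fmt):
        if expr in ("", None):
            return ""
        if expr == "*":
            return every
        if "/" in expr:
            base, step = expr.split("/", 1)         # e.g. "*/5" -> step of 5
            return step_fmt.format(step)
        if "-" in expr:
            lo, hi = expr.split("-", 1)
            return range_fmt.format(single(lo), single(hi))
        return single_fmt.format(single(expr))

    print(describe_segment("*/5", "every minute", str, "every {0} minutes",
                           "minutes {0} through {1} past the hour",
                           "at {0} minutes past the hour"))   # every 5 minutes
    print(describe_segment("0-10", "every minute", str, "every {0} minutes",
                           "minutes {0} through {1} past the hour",
                           "at {0} minutes past the hour"))   # minutes 0 through 10 past the hour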
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_day_of_week_description
def get_day_of_week_description(self): """Generates a description for only the DAYOFWEEK portion of the expression Returns: The DAYOFWEEK description """ if self._expression_parts[5] == "*" and self._expression_parts[3] != "*": # DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day" # we will not specified a DOW description. return "" def get_day_name(s): exp = s if "#" in s: exp, useless = s.split("#", 2) elif "L" in s: exp = exp.replace("L", '') return self.number_to_day(int(exp)) def get_format(s): if "#" in s: day_of_week_of_month = s[s.find("#") + 1:] try: day_of_week_of_month_number = int(day_of_week_of_month) choices = { 1: _("first"), 2: _("second"), 3: _("third"), 4: _("forth"), 5: _("fifth"), } day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '') except ValueError: day_of_week_of_month_description = '' formated = "{}{}{}".format(_(", on the "), day_of_week_of_month_description, _(" {0} of the month")) elif "L" in s: formated = _(", on the last {0} of the month") else: formated = _(", only on {0}") return formated return self.get_segment_description( self._expression_parts[5], _(", every day"), lambda s: get_day_name(s), lambda s: _(", every {0} days of the week").format(s), lambda s: _(", {0} through {1}"), lambda s: get_format(s) )
python
def get_day_of_week_description(self): """Generates a description for only the DAYOFWEEK portion of the expression Returns: The DAYOFWEEK description """ if self._expression_parts[5] == "*" and self._expression_parts[3] != "*": # DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day" # we will not specified a DOW description. return "" def get_day_name(s): exp = s if "#" in s: exp, useless = s.split("#", 2) elif "L" in s: exp = exp.replace("L", '') return self.number_to_day(int(exp)) def get_format(s): if "#" in s: day_of_week_of_month = s[s.find("#") + 1:] try: day_of_week_of_month_number = int(day_of_week_of_month) choices = { 1: _("first"), 2: _("second"), 3: _("third"), 4: _("forth"), 5: _("fifth"), } day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '') except ValueError: day_of_week_of_month_description = '' formated = "{}{}{}".format(_(", on the "), day_of_week_of_month_description, _(" {0} of the month")) elif "L" in s: formated = _(", on the last {0} of the month") else: formated = _(", only on {0}") return formated return self.get_segment_description( self._expression_parts[5], _(", every day"), lambda s: get_day_name(s), lambda s: _(", every {0} days of the week").format(s), lambda s: _(", {0} through {1}"), lambda s: get_format(s) )
[ "def", "get_day_of_week_description", "(", "self", ")", ":", "if", "self", ".", "_expression_parts", "[", "5", "]", "==", "\"*\"", "and", "self", ".", "_expression_parts", "[", "3", "]", "!=", "\"*\"", ":", "# DOM is specified and DOW is * so to prevent contradiction like \"on day 1 of the month, every day\"", "# we will not specified a DOW description.", "return", "\"\"", "def", "get_day_name", "(", "s", ")", ":", "exp", "=", "s", "if", "\"#\"", "in", "s", ":", "exp", ",", "useless", "=", "s", ".", "split", "(", "\"#\"", ",", "2", ")", "elif", "\"L\"", "in", "s", ":", "exp", "=", "exp", ".", "replace", "(", "\"L\"", ",", "''", ")", "return", "self", ".", "number_to_day", "(", "int", "(", "exp", ")", ")", "def", "get_format", "(", "s", ")", ":", "if", "\"#\"", "in", "s", ":", "day_of_week_of_month", "=", "s", "[", "s", ".", "find", "(", "\"#\"", ")", "+", "1", ":", "]", "try", ":", "day_of_week_of_month_number", "=", "int", "(", "day_of_week_of_month", ")", "choices", "=", "{", "1", ":", "_", "(", "\"first\"", ")", ",", "2", ":", "_", "(", "\"second\"", ")", ",", "3", ":", "_", "(", "\"third\"", ")", ",", "4", ":", "_", "(", "\"forth\"", ")", ",", "5", ":", "_", "(", "\"fifth\"", ")", ",", "}", "day_of_week_of_month_description", "=", "choices", ".", "get", "(", "day_of_week_of_month_number", ",", "''", ")", "except", "ValueError", ":", "day_of_week_of_month_description", "=", "''", "formated", "=", "\"{}{}{}\"", ".", "format", "(", "_", "(", "\", on the \"", ")", ",", "day_of_week_of_month_description", ",", "_", "(", "\" {0} of the month\"", ")", ")", "elif", "\"L\"", "in", "s", ":", "formated", "=", "_", "(", "\", on the last {0} of the month\"", ")", "else", ":", "formated", "=", "_", "(", "\", only on {0}\"", ")", "return", "formated", "return", "self", ".", "get_segment_description", "(", "self", ".", "_expression_parts", "[", "5", "]", ",", "_", "(", "\", every day\"", ")", ",", "lambda", "s", ":", "get_day_name", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\", every {0} days of the week\"", ")", ".", "format", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\", {0} through {1}\"", ")", ",", "lambda", "s", ":", "get_format", "(", "s", ")", ")" ]
Generates a description for only the DAYOFWEEK portion of the expression Returns: The DAYOFWEEK description
[ "Generates", "a", "description", "for", "only", "the", "DAYOFWEEK", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L267-L321
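The day-of-week record above adds two cron extensions on top of the generic segment logic: "#" picks the nth occurrence of a weekday in the month and "L" picks the last one. The snippet below decodes just those two forms; the ORDINALS/DAYS tables and the 0=Sunday numbering are assumptions made for this example, and the library's number_to_day helper is not reused.

    ORDINALS = {1: "first", 2: "second", 3: "third", 4: "fourth", 5: "fifth"}
    DAYS = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]

    def describe_dow(field):
        if "#" in field:
            day, nth = field.split("#", 1)          # e.g. "5#3" -> weekday 5, 3rd occurrence
            return "on the %s %s of the month" % (ORDINALS[int(nth)], DAYS[int(day)])
        if "L" in field:
            return "on the last %s of the month" % DAYS[int(field.replace("L", ""))]
        return "only on %s" % DAYS[int(field)]

    print(describe_dow("5#3"))   # on the third Friday of the month
    print(describe_dow("1L"))    # on the last Monday of the month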
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_month_description
def get_month_description(self): """Generates a description for only the MONTH portion of the expression Returns: The MONTH description """ return self.get_segment_description( self._expression_parts[4], '', lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"), lambda s: _(", every {0} months").format(s), lambda s: _(", {0} through {1}"), lambda s: _(", only in {0}") )
python
def get_month_description(self): """Generates a description for only the MONTH portion of the expression Returns: The MONTH description """ return self.get_segment_description( self._expression_parts[4], '', lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"), lambda s: _(", every {0} months").format(s), lambda s: _(", {0} through {1}"), lambda s: _(", only in {0}") )
[ "def", "get_month_description", "(", "self", ")", ":", "return", "self", ".", "get_segment_description", "(", "self", ".", "_expression_parts", "[", "4", "]", ",", "''", ",", "lambda", "s", ":", "datetime", ".", "date", "(", "datetime", ".", "date", ".", "today", "(", ")", ".", "year", ",", "int", "(", "s", ")", ",", "1", ")", ".", "strftime", "(", "\"%B\"", ")", ",", "lambda", "s", ":", "_", "(", "\", every {0} months\"", ")", ".", "format", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\", {0} through {1}\"", ")", ",", "lambda", "s", ":", "_", "(", "\", only in {0}\"", ")", ")" ]
Generates a description for only the MONTH portion of the expression Returns: The MONTH description
[ "Generates", "a", "description", "for", "only", "the", "MONTH", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L323-L337
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_day_of_month_description
def get_day_of_month_description(self): """Generates a description for only the DAYOFMONTH portion of the expression Returns: The DAYOFMONTH description """ expression = self._expression_parts[3] expression = expression.replace("?", "*") if expression == "L": description = _(", on the last day of the month") elif expression == "LW" or expression == "WL": description = _(", on the last weekday of the month") else: regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})") if regex.match(expression): m = regex.match(expression) day_number = int(m.group().replace("W", "")) day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format( day_number) description = _(", on the {0} of the month").format( day_string) else: description = self.get_segment_description( expression, _(", every day"), lambda s: s, lambda s: _(", every day") if s == "1" else _(", every {0} days"), lambda s: _(", between day {0} and {1} of the month"), lambda s: _(", on day {0} of the month") ) return description
python
def get_day_of_month_description(self): """Generates a description for only the DAYOFMONTH portion of the expression Returns: The DAYOFMONTH description """ expression = self._expression_parts[3] expression = expression.replace("?", "*") if expression == "L": description = _(", on the last day of the month") elif expression == "LW" or expression == "WL": description = _(", on the last weekday of the month") else: regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})") if regex.match(expression): m = regex.match(expression) day_number = int(m.group().replace("W", "")) day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format( day_number) description = _(", on the {0} of the month").format( day_string) else: description = self.get_segment_description( expression, _(", every day"), lambda s: s, lambda s: _(", every day") if s == "1" else _(", every {0} days"), lambda s: _(", between day {0} and {1} of the month"), lambda s: _(", on day {0} of the month") ) return description
[ "def", "get_day_of_month_description", "(", "self", ")", ":", "expression", "=", "self", ".", "_expression_parts", "[", "3", "]", "expression", "=", "expression", ".", "replace", "(", "\"?\"", ",", "\"*\"", ")", "if", "expression", "==", "\"L\"", ":", "description", "=", "_", "(", "\", on the last day of the month\"", ")", "elif", "expression", "==", "\"LW\"", "or", "expression", "==", "\"WL\"", ":", "description", "=", "_", "(", "\", on the last weekday of the month\"", ")", "else", ":", "regex", "=", "re", ".", "compile", "(", "\"(\\\\d{1,2}W)|(W\\\\d{1,2})\"", ")", "if", "regex", ".", "match", "(", "expression", ")", ":", "m", "=", "regex", ".", "match", "(", "expression", ")", "day_number", "=", "int", "(", "m", ".", "group", "(", ")", ".", "replace", "(", "\"W\"", ",", "\"\"", ")", ")", "day_string", "=", "_", "(", "\"first weekday\"", ")", "if", "day_number", "==", "1", "else", "_", "(", "\"weekday nearest day {0}\"", ")", ".", "format", "(", "day_number", ")", "description", "=", "_", "(", "\", on the {0} of the month\"", ")", ".", "format", "(", "day_string", ")", "else", ":", "description", "=", "self", ".", "get_segment_description", "(", "expression", ",", "_", "(", "\", every day\"", ")", ",", "lambda", "s", ":", "s", ",", "lambda", "s", ":", "_", "(", "\", every day\"", ")", "if", "s", "==", "\"1\"", "else", "_", "(", "\", every {0} days\"", ")", ",", "lambda", "s", ":", "_", "(", "\", between day {0} and {1} of the month\"", ")", ",", "lambda", "s", ":", "_", "(", "\", on day {0} of the month\"", ")", ")", "return", "description" ]
Generates a description for only the DAYOFMONTH portion of the expression Returns: The DAYOFMONTH description
[ "Generates", "a", "description", "for", "only", "the", "DAYOFMONTH", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L339-L373
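Besides plain numbers and ranges, the day-of-month field above understands "L" (last day), "LW" (last weekday) and "nW"/"Wn" (weekday nearest day n). The small demo below applies the same regex idea to print an approximate description for each form; the phrasing is my own, not the library's exact output.

    import re

    def describe_dom(field):
        field = field.replace("?", "*")
        if field == "L":
            return "on the last day of the month"
        if field in ("LW", "WL"):
            return "on the last weekday of the month"
        m = re.match(r"(\d{1,2}W)|(W\d{1,2})", field)
        if m:
            day = int(m.group().replace("W", ""))
            target = "first weekday" if day == 1 else "weekday nearest day %d" % day
            return "on the %s of the month" % target
        return "on day %s of the month" % field

    for f in ("L", "LW", "15W", "W1", "7"):
        print(f, "->", describe_dom(f))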
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_year_description
def get_year_description(self): """Generates a description for only the YEAR portion of the expression Returns: The YEAR description """ def format_year(s): regex = re.compile(r"^\d+$") if regex.match(s): year_int = int(s) if year_int < 1900: return year_int return datetime.date(year_int, 1, 1).strftime("%Y") else: return s return self.get_segment_description( self._expression_parts[6], '', lambda s: format_year(s), lambda s: _(", every {0} years").format(s), lambda s: _(", {0} through {1}"), lambda s: _(", only in {0}") )
python
def get_year_description(self): """Generates a description for only the YEAR portion of the expression Returns: The YEAR description """ def format_year(s): regex = re.compile(r"^\d+$") if regex.match(s): year_int = int(s) if year_int < 1900: return year_int return datetime.date(year_int, 1, 1).strftime("%Y") else: return s return self.get_segment_description( self._expression_parts[6], '', lambda s: format_year(s), lambda s: _(", every {0} years").format(s), lambda s: _(", {0} through {1}"), lambda s: _(", only in {0}") )
[ "def", "get_year_description", "(", "self", ")", ":", "def", "format_year", "(", "s", ")", ":", "regex", "=", "re", ".", "compile", "(", "r\"^\\d+$\"", ")", "if", "regex", ".", "match", "(", "s", ")", ":", "year_int", "=", "int", "(", "s", ")", "if", "year_int", "<", "1900", ":", "return", "year_int", "return", "datetime", ".", "date", "(", "year_int", ",", "1", ",", "1", ")", ".", "strftime", "(", "\"%Y\"", ")", "else", ":", "return", "s", "return", "self", ".", "get_segment_description", "(", "self", ".", "_expression_parts", "[", "6", "]", ",", "''", ",", "lambda", "s", ":", "format_year", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\", every {0} years\"", ")", ".", "format", "(", "s", ")", ",", "lambda", "s", ":", "_", "(", "\", {0} through {1}\"", ")", ",", "lambda", "s", ":", "_", "(", "\", only in {0}\"", ")", ")" ]
Generates a description for only the YEAR portion of the expression Returns: The YEAR description
[ "Generates", "a", "description", "for", "only", "the", "YEAR", "portion", "of", "the", "expression" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L375-L400
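The interesting piece of get_year_description is the format_year helper: numeric years from 1900 onward are normalized through strftime("%Y"), while earlier years are returned as plain ints, presumably because strftime historically rejected pre-1900 years on some Python versions. A standalone copy of the helper (illustration only) behaves like this:

import datetime
import re

def format_year(s):
    # Standalone copy of the inner helper shown above.
    if re.match(r"^\d+$", s):
        year_int = int(s)
        if year_int < 1900:
            return year_int
        return datetime.date(year_int, 1, 1).strftime("%Y")
    return s  # non-numeric segments pass through unchanged

print(format_year("2023"))  # "2023"
print(format_year("1899"))  # 1899 (returned as an int, not strftime-formatted)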
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.get_segment_description
def get_segment_description( self, expression, all_description, get_single_item_description, get_interval_description_format, get_between_description_format, get_description_format ): """Returns segment description Args: expression: Segment to descript all_description: * get_single_item_description: 1 get_interval_description_format: 1/2 get_between_description_format: 1-2 get_description_format: format get_single_item_description Returns: segment description """ description = None if expression is None or expression == '': description = '' elif expression == "*": description = all_description elif any(ext in expression for ext in ['/', '-', ',']) is False: description = get_description_format(expression).format( get_single_item_description(expression)) elif "/" in expression: segments = expression.split('/') description = get_interval_description_format( segments[1]).format(get_single_item_description(segments[1])) # interval contains 'between' piece (i.e. 2-59/3 ) if "-" in segments[0]: between_segment_description = self.generate_between_segment_description( segments[0], get_between_description_format, get_single_item_description) if not between_segment_description.startswith(", "): description += ", " description += between_segment_description elif any(ext in segments[0] for ext in ['*', ',']) is False: range_item_description = get_description_format(segments[0]).format( get_single_item_description(segments[0]) ) range_item_description = range_item_description.replace(", ", "") description += _(", starting {0}").format(range_item_description) elif "," in expression: segments = expression.split(',') description_content = '' for i, segment in enumerate(segments): if i > 0 and len(segments) > 2: description_content += "," if i < len(segments) - 1: description_content += " " if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2): description_content += _(" and ") if "-" in segment: between_description = self.generate_between_segment_description( segment, lambda s: _(", {0} through {1}"), get_single_item_description ) between_description = between_description.replace(", ", "") description_content += between_description else: description_content += get_single_item_description(segment) description = get_description_format( expression).format( description_content) elif "-" in expression: description = self.generate_between_segment_description( expression, get_between_description_format, get_single_item_description) return description
python
def get_segment_description( self, expression, all_description, get_single_item_description, get_interval_description_format, get_between_description_format, get_description_format ): """Returns segment description Args: expression: Segment to descript all_description: * get_single_item_description: 1 get_interval_description_format: 1/2 get_between_description_format: 1-2 get_description_format: format get_single_item_description Returns: segment description """ description = None if expression is None or expression == '': description = '' elif expression == "*": description = all_description elif any(ext in expression for ext in ['/', '-', ',']) is False: description = get_description_format(expression).format( get_single_item_description(expression)) elif "/" in expression: segments = expression.split('/') description = get_interval_description_format( segments[1]).format(get_single_item_description(segments[1])) # interval contains 'between' piece (i.e. 2-59/3 ) if "-" in segments[0]: between_segment_description = self.generate_between_segment_description( segments[0], get_between_description_format, get_single_item_description) if not between_segment_description.startswith(", "): description += ", " description += between_segment_description elif any(ext in segments[0] for ext in ['*', ',']) is False: range_item_description = get_description_format(segments[0]).format( get_single_item_description(segments[0]) ) range_item_description = range_item_description.replace(", ", "") description += _(", starting {0}").format(range_item_description) elif "," in expression: segments = expression.split(',') description_content = '' for i, segment in enumerate(segments): if i > 0 and len(segments) > 2: description_content += "," if i < len(segments) - 1: description_content += " " if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2): description_content += _(" and ") if "-" in segment: between_description = self.generate_between_segment_description( segment, lambda s: _(", {0} through {1}"), get_single_item_description ) between_description = between_description.replace(", ", "") description_content += between_description else: description_content += get_single_item_description(segment) description = get_description_format( expression).format( description_content) elif "-" in expression: description = self.generate_between_segment_description( expression, get_between_description_format, get_single_item_description) return description
[ "def", "get_segment_description", "(", "self", ",", "expression", ",", "all_description", ",", "get_single_item_description", ",", "get_interval_description_format", ",", "get_between_description_format", ",", "get_description_format", ")", ":", "description", "=", "None", "if", "expression", "is", "None", "or", "expression", "==", "''", ":", "description", "=", "''", "elif", "expression", "==", "\"*\"", ":", "description", "=", "all_description", "elif", "any", "(", "ext", "in", "expression", "for", "ext", "in", "[", "'/'", ",", "'-'", ",", "','", "]", ")", "is", "False", ":", "description", "=", "get_description_format", "(", "expression", ")", ".", "format", "(", "get_single_item_description", "(", "expression", ")", ")", "elif", "\"/\"", "in", "expression", ":", "segments", "=", "expression", ".", "split", "(", "'/'", ")", "description", "=", "get_interval_description_format", "(", "segments", "[", "1", "]", ")", ".", "format", "(", "get_single_item_description", "(", "segments", "[", "1", "]", ")", ")", "# interval contains 'between' piece (i.e. 2-59/3 )", "if", "\"-\"", "in", "segments", "[", "0", "]", ":", "between_segment_description", "=", "self", ".", "generate_between_segment_description", "(", "segments", "[", "0", "]", ",", "get_between_description_format", ",", "get_single_item_description", ")", "if", "not", "between_segment_description", ".", "startswith", "(", "\", \"", ")", ":", "description", "+=", "\", \"", "description", "+=", "between_segment_description", "elif", "any", "(", "ext", "in", "segments", "[", "0", "]", "for", "ext", "in", "[", "'*'", ",", "','", "]", ")", "is", "False", ":", "range_item_description", "=", "get_description_format", "(", "segments", "[", "0", "]", ")", ".", "format", "(", "get_single_item_description", "(", "segments", "[", "0", "]", ")", ")", "range_item_description", "=", "range_item_description", ".", "replace", "(", "\", \"", ",", "\"\"", ")", "description", "+=", "_", "(", "\", starting {0}\"", ")", ".", "format", "(", "range_item_description", ")", "elif", "\",\"", "in", "expression", ":", "segments", "=", "expression", ".", "split", "(", "','", ")", "description_content", "=", "''", "for", "i", ",", "segment", "in", "enumerate", "(", "segments", ")", ":", "if", "i", ">", "0", "and", "len", "(", "segments", ")", ">", "2", ":", "description_content", "+=", "\",\"", "if", "i", "<", "len", "(", "segments", ")", "-", "1", ":", "description_content", "+=", "\" \"", "if", "i", ">", "0", "and", "len", "(", "segments", ")", ">", "1", "and", "(", "i", "==", "len", "(", "segments", ")", "-", "1", "or", "len", "(", "segments", ")", "==", "2", ")", ":", "description_content", "+=", "_", "(", "\" and \"", ")", "if", "\"-\"", "in", "segment", ":", "between_description", "=", "self", ".", "generate_between_segment_description", "(", "segment", ",", "lambda", "s", ":", "_", "(", "\", {0} through {1}\"", ")", ",", "get_single_item_description", ")", "between_description", "=", "between_description", ".", "replace", "(", "\", \"", ",", "\"\"", ")", "description_content", "+=", "between_description", "else", ":", "description_content", "+=", "get_single_item_description", "(", "segment", ")", "description", "=", "get_description_format", "(", "expression", ")", ".", "format", "(", "description_content", ")", "elif", "\"-\"", "in", "expression", ":", "description", "=", "self", ".", "generate_between_segment_description", "(", "expression", ",", "get_between_description_format", ",", "get_single_item_description", ")", "return", "description" ]
Returns segment description Args: expression: Segment to describe all_description: * get_single_item_description: 1 get_interval_description_format: 1/2 get_between_description_format: 1-2 get_description_format: format get_single_item_description Returns: segment description
[ "Returns", "segment", "description", "Args", ":", "expression", ":", "Segment", "to", "descript", "all_description", ":", "*", "get_single_item_description", ":", "1", "get_interval_description_format", ":", "1", "/", "2", "get_between_description_format", ":", "1", "-", "2", "get_description_format", ":", "format", "get_single_item_description", "Returns", ":", "segment", "description" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L402-L484
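The comma branch is the least obvious part of this dispatcher, so here is a small standalone sketch of just that joining loop; the indentation of the flattened source above is reconstructed here, and this is an illustration rather than the library's API.

def join_segments(items):
    # Mirrors the comma branch of get_segment_description.
    out = ""
    for i, seg in enumerate(items):
        if i > 0 and len(items) > 2:
            out += ","
            if i < len(items) - 1:
                out += " "
        if i > 0 and len(items) > 1 and (i == len(items) - 1 or len(items) == 2):
            out += " and "
        out += seg
    return out

print(join_segments(["1", "15"]))        # "1 and 15"
print(join_segments(["1", "15", "30"]))  # "1, 15, and 30"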
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.generate_between_segment_description
def generate_between_segment_description( self, between_expression, get_between_description_format, get_single_item_description ): """ Generates the between segment description :param between_expression: :param get_between_description_format: :param get_single_item_description: :return: The between segment description """ description = "" between_segments = between_expression.split('-') between_segment_1_description = get_single_item_description(between_segments[0]) between_segment_2_description = get_single_item_description(between_segments[1]) between_segment_2_description = between_segment_2_description.replace( ":00", ":59") between_description_format = get_between_description_format(between_expression) description += between_description_format.format(between_segment_1_description, between_segment_2_description) return description
python
def generate_between_segment_description( self, between_expression, get_between_description_format, get_single_item_description ): """ Generates the between segment description :param between_expression: :param get_between_description_format: :param get_single_item_description: :return: The between segment description """ description = "" between_segments = between_expression.split('-') between_segment_1_description = get_single_item_description(between_segments[0]) between_segment_2_description = get_single_item_description(between_segments[1]) between_segment_2_description = between_segment_2_description.replace( ":00", ":59") between_description_format = get_between_description_format(between_expression) description += between_description_format.format(between_segment_1_description, between_segment_2_description) return description
[ "def", "generate_between_segment_description", "(", "self", ",", "between_expression", ",", "get_between_description_format", ",", "get_single_item_description", ")", ":", "description", "=", "\"\"", "between_segments", "=", "between_expression", ".", "split", "(", "'-'", ")", "between_segment_1_description", "=", "get_single_item_description", "(", "between_segments", "[", "0", "]", ")", "between_segment_2_description", "=", "get_single_item_description", "(", "between_segments", "[", "1", "]", ")", "between_segment_2_description", "=", "between_segment_2_description", ".", "replace", "(", "\":00\"", ",", "\":59\"", ")", "between_description_format", "=", "get_between_description_format", "(", "between_expression", ")", "description", "+=", "between_description_format", ".", "format", "(", "between_segment_1_description", ",", "between_segment_2_description", ")", "return", "description" ]
Generates the between segment description :param between_expression: :param get_between_description_format: :param get_single_item_description: :return: The between segment description
[ "Generates", "the", "between", "segment", "description", ":", "param", "between_expression", ":", ":", "param", "get_between_description_format", ":", ":", "param", "get_single_item_description", ":", ":", "return", ":", "The", "between", "segment", "description" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L486-L509
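Since generate_between_segment_description never reads self, it can be exercised unbound for a quick illustration; the lambdas below are made-up stand-ins for the formatters that get_segment_description normally supplies, and the import path is an assumption.

from cron_descriptor import ExpressionDescriptor  # import path assumed

text = ExpressionDescriptor.generate_between_segment_description(
    None,                                   # self is never used by this method
    "2-5",
    lambda expr: ", {0} through {1}",       # stand-in between-format callback
    lambda s: "{0}:00".format(s.zfill(2)),  # stand-in single-item formatter (hour-style)
)
print(text)  # ", 02:00 through 05:59" - note the ":00" -> ":59" rewrite on the upper bound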
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.format_time
def format_time( self, hour_expression, minute_expression, second_expression='' ): """Given time parts, will contruct a formatted time description Args: hour_expression: Hours part minute_expression: Minutes part second_expression: Seconds part Returns: Formatted time description """ hour = int(hour_expression) period = '' if self._options.use_24hour_time_format is False: period = " PM" if (hour >= 12) else " AM" if hour > 12: hour -= 12 minute = str(int(minute_expression)) # !FIXME WUT ??? second = '' if second_expression is not None and second_expression: second = "{}{}".format(":", str(int(second_expression)).zfill(2)) return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
python
def format_time( self, hour_expression, minute_expression, second_expression='' ): """Given time parts, will contruct a formatted time description Args: hour_expression: Hours part minute_expression: Minutes part second_expression: Seconds part Returns: Formatted time description """ hour = int(hour_expression) period = '' if self._options.use_24hour_time_format is False: period = " PM" if (hour >= 12) else " AM" if hour > 12: hour -= 12 minute = str(int(minute_expression)) # !FIXME WUT ??? second = '' if second_expression is not None and second_expression: second = "{}{}".format(":", str(int(second_expression)).zfill(2)) return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
[ "def", "format_time", "(", "self", ",", "hour_expression", ",", "minute_expression", ",", "second_expression", "=", "''", ")", ":", "hour", "=", "int", "(", "hour_expression", ")", "period", "=", "''", "if", "self", ".", "_options", ".", "use_24hour_time_format", "is", "False", ":", "period", "=", "\" PM\"", "if", "(", "hour", ">=", "12", ")", "else", "\" AM\"", "if", "hour", ">", "12", ":", "hour", "-=", "12", "minute", "=", "str", "(", "int", "(", "minute_expression", ")", ")", "# !FIXME WUT ???", "second", "=", "''", "if", "second_expression", "is", "not", "None", "and", "second_expression", ":", "second", "=", "\"{}{}\"", ".", "format", "(", "\":\"", ",", "str", "(", "int", "(", "second_expression", ")", ")", ".", "zfill", "(", "2", ")", ")", "return", "\"{0}:{1}{2}{3}\"", ".", "format", "(", "str", "(", "hour", ")", ".", "zfill", "(", "2", ")", ",", "minute", ".", "zfill", "(", "2", ")", ",", "second", ",", "period", ")" ]
Given time parts, will construct a formatted time description Args: hour_expression: Hours part minute_expression: Minutes part second_expression: Seconds part Returns: Formatted time description
[ "Given", "time", "parts", "will", "contruct", "a", "formatted", "time", "description", "Args", ":", "hour_expression", ":", "Hours", "part", "minute_expression", ":", "Minutes", "part", "second_expression", ":", "Seconds", "part", "Returns", ":", "Formatted", "time", "description" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L511-L539
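format_time only touches self._options.use_24hour_time_format, so a tiny stub object is enough to show its behavior, including the quirk that midnight renders as "00:00 AM" in 12-hour mode. The import path is assumed; this is an illustration, not the public API.

from types import SimpleNamespace
from cron_descriptor import ExpressionDescriptor  # import path assumed

ampm = SimpleNamespace(_options=SimpleNamespace(use_24hour_time_format=False))
h24 = SimpleNamespace(_options=SimpleNamespace(use_24hour_time_format=True))

print(ExpressionDescriptor.format_time(ampm, "13", "5"))      # "01:05 PM"
print(ExpressionDescriptor.format_time(h24, "13", "5", "7"))  # "13:05:07"
print(ExpressionDescriptor.format_time(ampm, "0", "0"))       # "00:00 AM" (midnight quirk)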
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.transform_verbosity
def transform_verbosity(self, description, use_verbose_format): """Transforms the verbosity of the expression description by stripping verbosity from original description Args: description: The description to transform use_verbose_format: If True, will leave description as it, if False, will strip verbose parts second_expression: Seconds part Returns: The transformed description with proper verbosity """ if use_verbose_format is False: description = description.replace( _(", every minute"), '') description = description.replace(_(", every hour"), '') description = description.replace(_(", every day"), '') return description
python
def transform_verbosity(self, description, use_verbose_format): """Transforms the verbosity of the expression description by stripping verbosity from original description Args: description: The description to transform use_verbose_format: If True, will leave description as it, if False, will strip verbose parts second_expression: Seconds part Returns: The transformed description with proper verbosity """ if use_verbose_format is False: description = description.replace( _(", every minute"), '') description = description.replace(_(", every hour"), '') description = description.replace(_(", every day"), '') return description
[ "def", "transform_verbosity", "(", "self", ",", "description", ",", "use_verbose_format", ")", ":", "if", "use_verbose_format", "is", "False", ":", "description", "=", "description", ".", "replace", "(", "_", "(", "\", every minute\"", ")", ",", "''", ")", "description", "=", "description", ".", "replace", "(", "_", "(", "\", every hour\"", ")", ",", "''", ")", "description", "=", "description", ".", "replace", "(", "_", "(", "\", every day\"", ")", ",", "''", ")", "return", "description" ]
Transforms the verbosity of the expression description by stripping verbosity from original description Args: description: The description to transform use_verbose_format: If True, will leave description as is, if False, will strip verbose parts Returns: The transformed description with proper verbosity
[ "Transforms", "the", "verbosity", "of", "the", "expression", "description", "by", "stripping", "verbosity", "from", "original", "description", "Args", ":", "description", ":", "The", "description", "to", "transform", "use_verbose_format", ":", "If", "True", "will", "leave", "description", "as", "it", "if", "False", "will", "strip", "verbose", "parts", "second_expression", ":", "Seconds", "part", "Returns", ":", "The", "transformed", "description", "with", "proper", "verbosity" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L541-L556
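A quick illustration of the stripping behavior, calling the method unbound since it ignores self; it assumes the default English locale so the gettext wrapper returns the literal strings, and the import path is assumed.

from cron_descriptor import ExpressionDescriptor  # import path assumed

verbose = "At 10:30 AM, every day"
print(ExpressionDescriptor.transform_verbosity(None, verbose, True))   # unchanged
print(ExpressionDescriptor.transform_verbosity(None, verbose, False))  # "At 10:30 AM"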
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.transform_case
def transform_case(self, description, case_type): """Transforms the case of the expression description, based on options Args: description: The description to transform case_type: The casing type that controls the output casing second_expression: Seconds part Returns: The transformed description with proper casing """ if case_type == CasingTypeEnum.Sentence: description = "{}{}".format( description[0].upper(), description[1:]) elif case_type == CasingTypeEnum.Title: description = description.title() else: description = description.lower() return description
python
def transform_case(self, description, case_type): """Transforms the case of the expression description, based on options Args: description: The description to transform case_type: The casing type that controls the output casing second_expression: Seconds part Returns: The transformed description with proper casing """ if case_type == CasingTypeEnum.Sentence: description = "{}{}".format( description[0].upper(), description[1:]) elif case_type == CasingTypeEnum.Title: description = description.title() else: description = description.lower() return description
[ "def", "transform_case", "(", "self", ",", "description", ",", "case_type", ")", ":", "if", "case_type", "==", "CasingTypeEnum", ".", "Sentence", ":", "description", "=", "\"{}{}\"", ".", "format", "(", "description", "[", "0", "]", ".", "upper", "(", ")", ",", "description", "[", "1", ":", "]", ")", "elif", "case_type", "==", "CasingTypeEnum", ".", "Title", ":", "description", "=", "description", ".", "title", "(", ")", "else", ":", "description", "=", "description", ".", "lower", "(", ")", "return", "description" ]
Transforms the case of the expression description, based on options Args: description: The description to transform case_type: The casing type that controls the output casing Returns: The transformed description with proper casing
[ "Transforms", "the", "case", "of", "the", "expression", "description", "based", "on", "options", "Args", ":", "description", ":", "The", "description", "to", "transform", "case_type", ":", "The", "casing", "type", "that", "controls", "the", "output", "casing", "second_expression", ":", "Seconds", "part", "Returns", ":", "The", "transformed", "description", "with", "proper", "casing" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L558-L576
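transform_case can likewise be shown unbound. CasingTypeEnum.Sentence and CasingTypeEnum.Title appear in the code above; the LowerCase member used below is an assumption about the enum, though any other value also falls through to lower().

from cron_descriptor import CasingTypeEnum, ExpressionDescriptor  # imports assumed

desc = "every 5 minutes"
print(ExpressionDescriptor.transform_case(None, desc, CasingTypeEnum.Sentence))   # "Every 5 minutes"
print(ExpressionDescriptor.transform_case(None, desc, CasingTypeEnum.Title))      # "Every 5 Minutes"
print(ExpressionDescriptor.transform_case(None, desc, CasingTypeEnum.LowerCase))  # "every 5 minutes"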
Salamek/cron-descriptor
cron_descriptor/ExpressionDescriptor.py
ExpressionDescriptor.number_to_day
def number_to_day(self, day_number): """Returns localized day name by its CRON number Args: day_number: Number of a day Returns: Day corresponding to day_number Raises: IndexError: When day_number is not found """ return [ calendar.day_name[6], calendar.day_name[0], calendar.day_name[1], calendar.day_name[2], calendar.day_name[3], calendar.day_name[4], calendar.day_name[5] ][day_number]
python
def number_to_day(self, day_number): """Returns localized day name by its CRON number Args: day_number: Number of a day Returns: Day corresponding to day_number Raises: IndexError: When day_number is not found """ return [ calendar.day_name[6], calendar.day_name[0], calendar.day_name[1], calendar.day_name[2], calendar.day_name[3], calendar.day_name[4], calendar.day_name[5] ][day_number]
[ "def", "number_to_day", "(", "self", ",", "day_number", ")", ":", "return", "[", "calendar", ".", "day_name", "[", "6", "]", ",", "calendar", ".", "day_name", "[", "0", "]", ",", "calendar", ".", "day_name", "[", "1", "]", ",", "calendar", ".", "day_name", "[", "2", "]", ",", "calendar", ".", "day_name", "[", "3", "]", ",", "calendar", ".", "day_name", "[", "4", "]", ",", "calendar", ".", "day_name", "[", "5", "]", "]", "[", "day_number", "]" ]
Returns localized day name by its CRON number Args: day_number: Number of a day Returns: Day corresponding to day_number Raises: IndexError: When day_number is not found
[ "Returns", "localized", "day", "name", "by", "its", "CRON", "number" ]
train
https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L578-L596
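A short illustration of the CRON-number-to-day mapping (0 is Sunday); the output depends on calendar.day_name, which is locale-dependent, and the import path is assumed.

from cron_descriptor import ExpressionDescriptor  # import path assumed

print(ExpressionDescriptor.number_to_day(None, 0))  # "Sunday" (CRON 0 maps to calendar.day_name[6])
print(ExpressionDescriptor.number_to_day(None, 6))  # "Saturday"
# Out-of-range values such as 7 raise IndexError, as documented above.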
ciena/afkak
afkak/consumer.py
Consumer.start
def start(self, start_offset): """ Starts fetching messages from Kafka and delivering them to the :attr:`.processor` function. :param int start_offset: The offset within the partition from which to start fetching. Special values include: :const:`OFFSET_EARLIEST`, :const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the supplied offset is :const:`OFFSET_EARLIEST` or :const:`OFFSET_LATEST` the :class:`Consumer` will use the OffsetRequest Kafka API to retrieve the actual offset used for fetching. In the case :const:`OFFSET_COMMITTED` is used, `commit_policy` MUST be set on the Consumer, and the Consumer will use the OffsetFetchRequest Kafka API to retrieve the actual offset used for fetching. :returns: A :class:`~twisted.internet.defer.Deferred` which will resolve successfully when the consumer is cleanly stopped, or with a failure if the :class:`Consumer` encounters an error from which it is unable to recover. :raises: :exc:`RestartError` if already running. """ # Have we been started already, and not stopped? if self._start_d is not None: raise RestartError("Start called on already-started consumer") # Keep track of state for debugging self._state = '[started]' # Create and return a deferred for alerting on errors/stoppage start_d = self._start_d = Deferred() # Start a new fetch request, possibly just for the starting offset self._fetch_offset = start_offset self._do_fetch() # Set up the auto-commit timer, if needed if self.consumer_group and self.auto_commit_every_s: self._commit_looper = LoopingCall(self._auto_commit) self._commit_looper.clock = self.client.reactor self._commit_looper_d = self._commit_looper.start( self.auto_commit_every_s, now=False) self._commit_looper_d.addCallbacks(self._commit_timer_stopped, self._commit_timer_failed) return start_d
python
def start(self, start_offset): """ Starts fetching messages from Kafka and delivering them to the :attr:`.processor` function. :param int start_offset: The offset within the partition from which to start fetching. Special values include: :const:`OFFSET_EARLIEST`, :const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the supplied offset is :const:`OFFSET_EARLIEST` or :const:`OFFSET_LATEST` the :class:`Consumer` will use the OffsetRequest Kafka API to retrieve the actual offset used for fetching. In the case :const:`OFFSET_COMMITTED` is used, `commit_policy` MUST be set on the Consumer, and the Consumer will use the OffsetFetchRequest Kafka API to retrieve the actual offset used for fetching. :returns: A :class:`~twisted.internet.defer.Deferred` which will resolve successfully when the consumer is cleanly stopped, or with a failure if the :class:`Consumer` encounters an error from which it is unable to recover. :raises: :exc:`RestartError` if already running. """ # Have we been started already, and not stopped? if self._start_d is not None: raise RestartError("Start called on already-started consumer") # Keep track of state for debugging self._state = '[started]' # Create and return a deferred for alerting on errors/stoppage start_d = self._start_d = Deferred() # Start a new fetch request, possibly just for the starting offset self._fetch_offset = start_offset self._do_fetch() # Set up the auto-commit timer, if needed if self.consumer_group and self.auto_commit_every_s: self._commit_looper = LoopingCall(self._auto_commit) self._commit_looper.clock = self.client.reactor self._commit_looper_d = self._commit_looper.start( self.auto_commit_every_s, now=False) self._commit_looper_d.addCallbacks(self._commit_timer_stopped, self._commit_timer_failed) return start_d
[ "def", "start", "(", "self", ",", "start_offset", ")", ":", "# Have we been started already, and not stopped?", "if", "self", ".", "_start_d", "is", "not", "None", ":", "raise", "RestartError", "(", "\"Start called on already-started consumer\"", ")", "# Keep track of state for debugging", "self", ".", "_state", "=", "'[started]'", "# Create and return a deferred for alerting on errors/stoppage", "start_d", "=", "self", ".", "_start_d", "=", "Deferred", "(", ")", "# Start a new fetch request, possibly just for the starting offset", "self", ".", "_fetch_offset", "=", "start_offset", "self", ".", "_do_fetch", "(", ")", "# Set up the auto-commit timer, if needed", "if", "self", ".", "consumer_group", "and", "self", ".", "auto_commit_every_s", ":", "self", ".", "_commit_looper", "=", "LoopingCall", "(", "self", ".", "_auto_commit", ")", "self", ".", "_commit_looper", ".", "clock", "=", "self", ".", "client", ".", "reactor", "self", ".", "_commit_looper_d", "=", "self", ".", "_commit_looper", ".", "start", "(", "self", ".", "auto_commit_every_s", ",", "now", "=", "False", ")", "self", ".", "_commit_looper_d", ".", "addCallbacks", "(", "self", ".", "_commit_timer_stopped", ",", "self", ".", "_commit_timer_failed", ")", "return", "start_d" ]
Starts fetching messages from Kafka and delivering them to the :attr:`.processor` function. :param int start_offset: The offset within the partition from which to start fetching. Special values include: :const:`OFFSET_EARLIEST`, :const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the supplied offset is :const:`OFFSET_EARLIEST` or :const:`OFFSET_LATEST` the :class:`Consumer` will use the OffsetRequest Kafka API to retrieve the actual offset used for fetching. In the case :const:`OFFSET_COMMITTED` is used, `commit_policy` MUST be set on the Consumer, and the Consumer will use the OffsetFetchRequest Kafka API to retrieve the actual offset used for fetching. :returns: A :class:`~twisted.internet.defer.Deferred` which will resolve successfully when the consumer is cleanly stopped, or with a failure if the :class:`Consumer` encounters an error from which it is unable to recover. :raises: :exc:`RestartError` if already running.
[ "Starts", "fetching", "messages", "from", "Kafka", "and", "delivering", "them", "to", "the", ":", "attr", ":", ".", "processor", "function", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L236-L283
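A hedged sketch of wiring up and starting a consumer: the import locations, the KafkaClient/Consumer constructor arguments, and the processor signature are assumptions based on the surrounding docs rather than verbatim afkak API, and it presumes a running Twisted reactor and a reachable broker.

from afkak import Consumer, KafkaClient, OFFSET_LATEST  # imports assumed

def processor(consumer, message_list):
    # Assumed callback shape: called with batches of fetched messages;
    # returning a Deferred would hold off further processing until it fires.
    for message in message_list:
        print(message)

client = KafkaClient("kafka01:9092")                  # hypothetical broker address
consumer = Consumer(client, "my_topic", 0, processor)
d = consumer.start(OFFSET_LATEST)                     # fires only on clean stop or fatal error
d.addErrback(lambda failure: print("consumer failed:", failure))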
ciena/afkak
afkak/consumer.py
Consumer.shutdown
def shutdown(self): """Gracefully shutdown the consumer Consumer will complete any outstanding processing, commit its current offsets (if so configured) and stop. Returns deferred which callbacks with a tuple of: (last processed offset, last committed offset) if it was able to successfully commit, or errbacks with the commit failure, if any, or fail(RestopError) if consumer is not running. """ def _handle_shutdown_commit_success(result): """Handle the result of the commit attempted by shutdown""" self._shutdown_d, d = None, self._shutdown_d self.stop() self._shuttingdown = False # Shutdown complete d.callback((self._last_processed_offset, self._last_committed_offset)) def _handle_shutdown_commit_failure(failure): """Handle failure of commit() attempted by shutdown""" if failure.check(OperationInProgress): failure.value.deferred.addCallback(_commit_and_stop) return self._shutdown_d, d = None, self._shutdown_d self.stop() self._shuttingdown = False # Shutdown complete d.errback(failure) def _commit_and_stop(result): """Commit the current offsets (if needed) and stop the consumer""" if not self.consumer_group: # No consumer group, no committing return _handle_shutdown_commit_success(None) # Need to commit prior to stopping self.commit().addCallbacks(_handle_shutdown_commit_success, _handle_shutdown_commit_failure) # If we're not running, return an failure if self._start_d is None: return fail(Failure( RestopError("Shutdown called on non-running consumer"))) # If we're called multiple times, return a failure if self._shutdown_d: return fail(Failure( RestopError("Shutdown called more than once."))) # Set our _shuttingdown flag, so our _process_message routine will stop # feeding new messages to the processor, and fetches won't be retried self._shuttingdown = True # Keep track of state for debugging self._state = '[shutting down]' # Create a deferred to track the shutdown self._shutdown_d = d = Deferred() # Are we waiting for the processor to complete? If so, when it's done, # commit our offsets and stop. if self._processor_d: self._processor_d.addCallback(_commit_and_stop) else: # No need to wait for the processor, we can commit and stop now _commit_and_stop(None) # return the deferred return d
python
def shutdown(self): """Gracefully shutdown the consumer Consumer will complete any outstanding processing, commit its current offsets (if so configured) and stop. Returns deferred which callbacks with a tuple of: (last processed offset, last committed offset) if it was able to successfully commit, or errbacks with the commit failure, if any, or fail(RestopError) if consumer is not running. """ def _handle_shutdown_commit_success(result): """Handle the result of the commit attempted by shutdown""" self._shutdown_d, d = None, self._shutdown_d self.stop() self._shuttingdown = False # Shutdown complete d.callback((self._last_processed_offset, self._last_committed_offset)) def _handle_shutdown_commit_failure(failure): """Handle failure of commit() attempted by shutdown""" if failure.check(OperationInProgress): failure.value.deferred.addCallback(_commit_and_stop) return self._shutdown_d, d = None, self._shutdown_d self.stop() self._shuttingdown = False # Shutdown complete d.errback(failure) def _commit_and_stop(result): """Commit the current offsets (if needed) and stop the consumer""" if not self.consumer_group: # No consumer group, no committing return _handle_shutdown_commit_success(None) # Need to commit prior to stopping self.commit().addCallbacks(_handle_shutdown_commit_success, _handle_shutdown_commit_failure) # If we're not running, return an failure if self._start_d is None: return fail(Failure( RestopError("Shutdown called on non-running consumer"))) # If we're called multiple times, return a failure if self._shutdown_d: return fail(Failure( RestopError("Shutdown called more than once."))) # Set our _shuttingdown flag, so our _process_message routine will stop # feeding new messages to the processor, and fetches won't be retried self._shuttingdown = True # Keep track of state for debugging self._state = '[shutting down]' # Create a deferred to track the shutdown self._shutdown_d = d = Deferred() # Are we waiting for the processor to complete? If so, when it's done, # commit our offsets and stop. if self._processor_d: self._processor_d.addCallback(_commit_and_stop) else: # No need to wait for the processor, we can commit and stop now _commit_and_stop(None) # return the deferred return d
[ "def", "shutdown", "(", "self", ")", ":", "def", "_handle_shutdown_commit_success", "(", "result", ")", ":", "\"\"\"Handle the result of the commit attempted by shutdown\"\"\"", "self", ".", "_shutdown_d", ",", "d", "=", "None", ",", "self", ".", "_shutdown_d", "self", ".", "stop", "(", ")", "self", ".", "_shuttingdown", "=", "False", "# Shutdown complete", "d", ".", "callback", "(", "(", "self", ".", "_last_processed_offset", ",", "self", ".", "_last_committed_offset", ")", ")", "def", "_handle_shutdown_commit_failure", "(", "failure", ")", ":", "\"\"\"Handle failure of commit() attempted by shutdown\"\"\"", "if", "failure", ".", "check", "(", "OperationInProgress", ")", ":", "failure", ".", "value", ".", "deferred", ".", "addCallback", "(", "_commit_and_stop", ")", "return", "self", ".", "_shutdown_d", ",", "d", "=", "None", ",", "self", ".", "_shutdown_d", "self", ".", "stop", "(", ")", "self", ".", "_shuttingdown", "=", "False", "# Shutdown complete", "d", ".", "errback", "(", "failure", ")", "def", "_commit_and_stop", "(", "result", ")", ":", "\"\"\"Commit the current offsets (if needed) and stop the consumer\"\"\"", "if", "not", "self", ".", "consumer_group", ":", "# No consumer group, no committing", "return", "_handle_shutdown_commit_success", "(", "None", ")", "# Need to commit prior to stopping", "self", ".", "commit", "(", ")", ".", "addCallbacks", "(", "_handle_shutdown_commit_success", ",", "_handle_shutdown_commit_failure", ")", "# If we're not running, return an failure", "if", "self", ".", "_start_d", "is", "None", ":", "return", "fail", "(", "Failure", "(", "RestopError", "(", "\"Shutdown called on non-running consumer\"", ")", ")", ")", "# If we're called multiple times, return a failure", "if", "self", ".", "_shutdown_d", ":", "return", "fail", "(", "Failure", "(", "RestopError", "(", "\"Shutdown called more than once.\"", ")", ")", ")", "# Set our _shuttingdown flag, so our _process_message routine will stop", "# feeding new messages to the processor, and fetches won't be retried", "self", ".", "_shuttingdown", "=", "True", "# Keep track of state for debugging", "self", ".", "_state", "=", "'[shutting down]'", "# Create a deferred to track the shutdown", "self", ".", "_shutdown_d", "=", "d", "=", "Deferred", "(", ")", "# Are we waiting for the processor to complete? If so, when it's done,", "# commit our offsets and stop.", "if", "self", ".", "_processor_d", ":", "self", ".", "_processor_d", ".", "addCallback", "(", "_commit_and_stop", ")", "else", ":", "# No need to wait for the processor, we can commit and stop now", "_commit_and_stop", "(", "None", ")", "# return the deferred", "return", "d" ]
Gracefully shutdown the consumer Consumer will complete any outstanding processing, commit its current offsets (if so configured) and stop. Returns deferred which callbacks with a tuple of: (last processed offset, last committed offset) if it was able to successfully commit, or errbacks with the commit failure, if any, or fail(RestopError) if consumer is not running.
[ "Gracefully", "shutdown", "the", "consumer" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L285-L350
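Continuing that hypothetical consumer, a graceful shutdown might look like the sketch below; the tuple ordering comes straight from the docstring above, and the commit step only happens if a consumer_group was configured.

def _report(offsets):
    last_processed, last_committed = offsets
    print("processed up to", last_processed, "committed up to", last_committed)

# `consumer` is the started Consumer from the sketch after Consumer.start.
consumer.shutdown().addCallbacks(
    _report,
    lambda failure: print("commit during shutdown failed:", failure),
)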
ciena/afkak
afkak/consumer.py
Consumer.stop
def stop(self): """ Stop the consumer and return offset of last processed message. This cancels all outstanding operations. Also, if the deferred returned by `start` hasn't been called, it is called with a tuple consisting of the last processed offset and the last committed offset. :raises: :exc:`RestopError` if the :class:`Consumer` is not running. """ if self._start_d is None: raise RestopError("Stop called on non-running consumer") self._stopping = True # Keep track of state for debugging self._state = '[stopping]' # Are we waiting for a request to come back? if self._request_d: self._request_d.cancel() # Are we working our way through a block of messages? if self._msg_block_d: # Need to add a cancel handler... _msg_block_d, self._msg_block_d = self._msg_block_d, None _msg_block_d.addErrback(lambda fail: fail.trap(CancelledError)) _msg_block_d.cancel() # Are we waiting for the processor to complete? if self._processor_d: self._processor_d.cancel() # Are we waiting to retry a request? if self._retry_call: self._retry_call.cancel() # Are we waiting on a commit request? if self._commit_ds: while self._commit_ds: d = self._commit_ds.pop() d.cancel() if self._commit_req: self._commit_req.cancel() # Are we waiting to retry a commit? if self._commit_call: self._commit_call.cancel() # Do we have an auto-commit looping call? if self._commit_looper is not None: self._commit_looper.stop() # Done stopping self._stopping = False # Keep track of state for debugging self._state = '[stopped]' # Clear and possibly callback our start() Deferred self._start_d, d = None, self._start_d if not d.called: d.callback((self._last_processed_offset, self._last_committed_offset)) # Return the offset of the message we last processed return self._last_processed_offset
python
def stop(self): """ Stop the consumer and return offset of last processed message. This cancels all outstanding operations. Also, if the deferred returned by `start` hasn't been called, it is called with a tuple consisting of the last processed offset and the last committed offset. :raises: :exc:`RestopError` if the :class:`Consumer` is not running. """ if self._start_d is None: raise RestopError("Stop called on non-running consumer") self._stopping = True # Keep track of state for debugging self._state = '[stopping]' # Are we waiting for a request to come back? if self._request_d: self._request_d.cancel() # Are we working our way through a block of messages? if self._msg_block_d: # Need to add a cancel handler... _msg_block_d, self._msg_block_d = self._msg_block_d, None _msg_block_d.addErrback(lambda fail: fail.trap(CancelledError)) _msg_block_d.cancel() # Are we waiting for the processor to complete? if self._processor_d: self._processor_d.cancel() # Are we waiting to retry a request? if self._retry_call: self._retry_call.cancel() # Are we waiting on a commit request? if self._commit_ds: while self._commit_ds: d = self._commit_ds.pop() d.cancel() if self._commit_req: self._commit_req.cancel() # Are we waiting to retry a commit? if self._commit_call: self._commit_call.cancel() # Do we have an auto-commit looping call? if self._commit_looper is not None: self._commit_looper.stop() # Done stopping self._stopping = False # Keep track of state for debugging self._state = '[stopped]' # Clear and possibly callback our start() Deferred self._start_d, d = None, self._start_d if not d.called: d.callback((self._last_processed_offset, self._last_committed_offset)) # Return the offset of the message we last processed return self._last_processed_offset
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_start_d", "is", "None", ":", "raise", "RestopError", "(", "\"Stop called on non-running consumer\"", ")", "self", ".", "_stopping", "=", "True", "# Keep track of state for debugging", "self", ".", "_state", "=", "'[stopping]'", "# Are we waiting for a request to come back?", "if", "self", ".", "_request_d", ":", "self", ".", "_request_d", ".", "cancel", "(", ")", "# Are we working our way through a block of messages?", "if", "self", ".", "_msg_block_d", ":", "# Need to add a cancel handler...", "_msg_block_d", ",", "self", ".", "_msg_block_d", "=", "self", ".", "_msg_block_d", ",", "None", "_msg_block_d", ".", "addErrback", "(", "lambda", "fail", ":", "fail", ".", "trap", "(", "CancelledError", ")", ")", "_msg_block_d", ".", "cancel", "(", ")", "# Are we waiting for the processor to complete?", "if", "self", ".", "_processor_d", ":", "self", ".", "_processor_d", ".", "cancel", "(", ")", "# Are we waiting to retry a request?", "if", "self", ".", "_retry_call", ":", "self", ".", "_retry_call", ".", "cancel", "(", ")", "# Are we waiting on a commit request?", "if", "self", ".", "_commit_ds", ":", "while", "self", ".", "_commit_ds", ":", "d", "=", "self", ".", "_commit_ds", ".", "pop", "(", ")", "d", ".", "cancel", "(", ")", "if", "self", ".", "_commit_req", ":", "self", ".", "_commit_req", ".", "cancel", "(", ")", "# Are we waiting to retry a commit?", "if", "self", ".", "_commit_call", ":", "self", ".", "_commit_call", ".", "cancel", "(", ")", "# Do we have an auto-commit looping call?", "if", "self", ".", "_commit_looper", "is", "not", "None", ":", "self", ".", "_commit_looper", ".", "stop", "(", ")", "# Done stopping", "self", ".", "_stopping", "=", "False", "# Keep track of state for debugging", "self", ".", "_state", "=", "'[stopped]'", "# Clear and possibly callback our start() Deferred", "self", ".", "_start_d", ",", "d", "=", "None", ",", "self", ".", "_start_d", "if", "not", "d", ".", "called", ":", "d", ".", "callback", "(", "(", "self", ".", "_last_processed_offset", ",", "self", ".", "_last_committed_offset", ")", ")", "# Return the offset of the message we last processed", "return", "self", ".", "_last_processed_offset" ]
Stop the consumer and return offset of last processed message. This cancels all outstanding operations. Also, if the deferred returned by `start` hasn't been called, it is called with a tuple consisting of the last processed offset and the last committed offset. :raises: :exc:`RestopError` if the :class:`Consumer` is not running.
[ "Stop", "the", "consumer", "and", "return", "offset", "of", "last", "processed", "message", ".", "This", "cancels", "all", "outstanding", "operations", ".", "Also", "if", "the", "deferred", "returned", "by", "start", "hasn", "t", "been", "called", "it", "is", "called", "with", "a", "tuple", "consisting", "of", "the", "last", "processed", "offset", "and", "the", "last", "committed", "offset", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L352-L407
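By contrast, stop() cancels outstanding work immediately without a final commit and returns the last processed offset; a second call raises RestopError. The exception's import path below is an assumption, and `consumer` is the one from the earlier sketch.

from afkak.common import RestopError  # assumed location of the exception

last_processed = consumer.stop()      # cancels outstanding work, no final commit
try:
    consumer.stop()
except RestopError:
    print("consumer was already stopped")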
ciena/afkak
afkak/consumer.py
Consumer.commit
def commit(self): """ Commit the offset of the message we last processed if it is different from what we believe is the last offset committed to Kafka. .. note:: It is possible to commit a smaller offset than Kafka has stored. This is by design, so we can reprocess a Kafka message stream if desired. On error, will retry according to :attr:`request_retry_max_attempts` (by default, forever). If called while a commit operation is in progress, and new messages have been processed since the last request was sent then the commit will fail with :exc:`OperationInProgress`. The :exc:`OperationInProgress` exception wraps a :class:`~twisted.internet.defer.Deferred` which fires when the outstanding commit operation completes. :returns: A :class:`~twisted.internet.defer.Deferred` which resolves with the committed offset when the operation has completed. It will resolve immediately if the current offset and the last committed offset do not differ. """ # Can't commit without a consumer_group if not self.consumer_group: return fail(Failure(InvalidConsumerGroupError( "Bad Group_id:{0!r}".format(self.consumer_group)))) # short circuit if we are 'up to date', or haven't processed anything if ((self._last_processed_offset is None) or (self._last_processed_offset == self._last_committed_offset)): return succeed(self._last_committed_offset) # If we're currently processing a commit we return a failure # with a deferred we'll fire when the in-progress one completes if self._commit_ds: d = Deferred() self._commit_ds.append(d) return fail(OperationInProgress(d)) # Ok, we have processed messages since our last commit attempt, and # we're not currently waiting on a commit request to complete: # Start a new one d = Deferred() self._commit_ds.append(d) # Send the request self._send_commit_request() # Reset the commit_looper here, rather than on success to give # more stability to the commit interval. if self._commit_looper is not None: self._commit_looper.reset() # return the deferred return d
python
def commit(self): """ Commit the offset of the message we last processed if it is different from what we believe is the last offset committed to Kafka. .. note:: It is possible to commit a smaller offset than Kafka has stored. This is by design, so we can reprocess a Kafka message stream if desired. On error, will retry according to :attr:`request_retry_max_attempts` (by default, forever). If called while a commit operation is in progress, and new messages have been processed since the last request was sent then the commit will fail with :exc:`OperationInProgress`. The :exc:`OperationInProgress` exception wraps a :class:`~twisted.internet.defer.Deferred` which fires when the outstanding commit operation completes. :returns: A :class:`~twisted.internet.defer.Deferred` which resolves with the committed offset when the operation has completed. It will resolve immediately if the current offset and the last committed offset do not differ. """ # Can't commit without a consumer_group if not self.consumer_group: return fail(Failure(InvalidConsumerGroupError( "Bad Group_id:{0!r}".format(self.consumer_group)))) # short circuit if we are 'up to date', or haven't processed anything if ((self._last_processed_offset is None) or (self._last_processed_offset == self._last_committed_offset)): return succeed(self._last_committed_offset) # If we're currently processing a commit we return a failure # with a deferred we'll fire when the in-progress one completes if self._commit_ds: d = Deferred() self._commit_ds.append(d) return fail(OperationInProgress(d)) # Ok, we have processed messages since our last commit attempt, and # we're not currently waiting on a commit request to complete: # Start a new one d = Deferred() self._commit_ds.append(d) # Send the request self._send_commit_request() # Reset the commit_looper here, rather than on success to give # more stability to the commit interval. if self._commit_looper is not None: self._commit_looper.reset() # return the deferred return d
[ "def", "commit", "(", "self", ")", ":", "# Can't commit without a consumer_group", "if", "not", "self", ".", "consumer_group", ":", "return", "fail", "(", "Failure", "(", "InvalidConsumerGroupError", "(", "\"Bad Group_id:{0!r}\"", ".", "format", "(", "self", ".", "consumer_group", ")", ")", ")", ")", "# short circuit if we are 'up to date', or haven't processed anything", "if", "(", "(", "self", ".", "_last_processed_offset", "is", "None", ")", "or", "(", "self", ".", "_last_processed_offset", "==", "self", ".", "_last_committed_offset", ")", ")", ":", "return", "succeed", "(", "self", ".", "_last_committed_offset", ")", "# If we're currently processing a commit we return a failure", "# with a deferred we'll fire when the in-progress one completes", "if", "self", ".", "_commit_ds", ":", "d", "=", "Deferred", "(", ")", "self", ".", "_commit_ds", ".", "append", "(", "d", ")", "return", "fail", "(", "OperationInProgress", "(", "d", ")", ")", "# Ok, we have processed messages since our last commit attempt, and", "# we're not currently waiting on a commit request to complete:", "# Start a new one", "d", "=", "Deferred", "(", ")", "self", ".", "_commit_ds", ".", "append", "(", "d", ")", "# Send the request", "self", ".", "_send_commit_request", "(", ")", "# Reset the commit_looper here, rather than on success to give", "# more stability to the commit interval.", "if", "self", ".", "_commit_looper", "is", "not", "None", ":", "self", ".", "_commit_looper", ".", "reset", "(", ")", "# return the deferred", "return", "d" ]
Commit the offset of the message we last processed if it is different from what we believe is the last offset committed to Kafka. .. note:: It is possible to commit a smaller offset than Kafka has stored. This is by design, so we can reprocess a Kafka message stream if desired. On error, will retry according to :attr:`request_retry_max_attempts` (by default, forever). If called while a commit operation is in progress, and new messages have been processed since the last request was sent then the commit will fail with :exc:`OperationInProgress`. The :exc:`OperationInProgress` exception wraps a :class:`~twisted.internet.defer.Deferred` which fires when the outstanding commit operation completes. :returns: A :class:`~twisted.internet.defer.Deferred` which resolves with the committed offset when the operation has completed. It will resolve immediately if the current offset and the last committed offset do not differ.
[ "Commit", "the", "offset", "of", "the", "message", "we", "last", "processed", "if", "it", "is", "different", "from", "what", "we", "believe", "is", "the", "last", "offset", "committed", "to", "Kafka", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L409-L467
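A manual-commit sketch for the same hypothetical consumer: OperationInProgress wraps the Deferred of the commit already in flight (as the shutdown code above shows via failure.value.deferred), so the error handler can simply chain onto it. The import path is assumed.

from afkak.common import OperationInProgress  # assumed location of the exception

def _committed(offset):
    print("committed offset", offset)

def _commit_failed(failure):
    if failure.check(OperationInProgress):
        # Wait for the commit that is already in flight instead.
        return failure.value.deferred.addCallback(_committed)
    return failure

consumer.commit().addCallbacks(_committed, _commit_failed)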
ciena/afkak
afkak/consumer.py
Consumer._auto_commit
def _auto_commit(self, by_count=False): """Check if we should start a new commit operation and commit""" # Check if we are even supposed to do any auto-committing if (self._stopping or self._shuttingdown or (not self._start_d) or (self._last_processed_offset is None) or (not self.consumer_group) or (by_count and not self.auto_commit_every_n)): return # If we're auto_committing because the timer expired, or by count and # we don't have a record of our last_committed_offset, or we've # processed enough messages since our last commit, then try to commit if (not by_count or self._last_committed_offset is None or (self._last_processed_offset - self._last_committed_offset ) >= self.auto_commit_every_n): if not self._commit_ds: commit_d = self.commit() commit_d.addErrback(self._handle_auto_commit_error) else: # We're waiting on the last commit to complete, so add a # callback to be called when the current request completes d = Deferred() d.addCallback(self._retry_auto_commit, by_count) self._commit_ds.append(d)
python
def _auto_commit(self, by_count=False): """Check if we should start a new commit operation and commit""" # Check if we are even supposed to do any auto-committing if (self._stopping or self._shuttingdown or (not self._start_d) or (self._last_processed_offset is None) or (not self.consumer_group) or (by_count and not self.auto_commit_every_n)): return # If we're auto_committing because the timer expired, or by count and # we don't have a record of our last_committed_offset, or we've # processed enough messages since our last commit, then try to commit if (not by_count or self._last_committed_offset is None or (self._last_processed_offset - self._last_committed_offset ) >= self.auto_commit_every_n): if not self._commit_ds: commit_d = self.commit() commit_d.addErrback(self._handle_auto_commit_error) else: # We're waiting on the last commit to complete, so add a # callback to be called when the current request completes d = Deferred() d.addCallback(self._retry_auto_commit, by_count) self._commit_ds.append(d)
[ "def", "_auto_commit", "(", "self", ",", "by_count", "=", "False", ")", ":", "# Check if we are even supposed to do any auto-committing", "if", "(", "self", ".", "_stopping", "or", "self", ".", "_shuttingdown", "or", "(", "not", "self", ".", "_start_d", ")", "or", "(", "self", ".", "_last_processed_offset", "is", "None", ")", "or", "(", "not", "self", ".", "consumer_group", ")", "or", "(", "by_count", "and", "not", "self", ".", "auto_commit_every_n", ")", ")", ":", "return", "# If we're auto_committing because the timer expired, or by count and", "# we don't have a record of our last_committed_offset, or we've", "# processed enough messages since our last commit, then try to commit", "if", "(", "not", "by_count", "or", "self", ".", "_last_committed_offset", "is", "None", "or", "(", "self", ".", "_last_processed_offset", "-", "self", ".", "_last_committed_offset", ")", ">=", "self", ".", "auto_commit_every_n", ")", ":", "if", "not", "self", ".", "_commit_ds", ":", "commit_d", "=", "self", ".", "commit", "(", ")", "commit_d", ".", "addErrback", "(", "self", ".", "_handle_auto_commit_error", ")", "else", ":", "# We're waiting on the last commit to complete, so add a", "# callback to be called when the current request completes", "d", "=", "Deferred", "(", ")", "d", ".", "addCallback", "(", "self", ".", "_retry_auto_commit", ",", "by_count", ")", "self", ".", "_commit_ds", ".", "append", "(", "d", ")" ]
Check if we should start a new commit operation and commit
[ "Check", "if", "we", "should", "start", "a", "new", "commit", "operation", "and", "commit" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L475-L498
ciena/afkak
afkak/consumer.py
Consumer._retry_fetch
def _retry_fetch(self, after=None): """ Schedule a delayed :meth:`_do_fetch` call after a failure :param float after: The delay in seconds after which to do the retried fetch. If `None`, our internal :attr:`retry_delay` is used, and adjusted by :const:`REQUEST_RETRY_FACTOR`. """ # Have we been told to stop or shutdown? Then don't actually retry. if self._stopping or self._shuttingdown or self._start_d is None: # Stopping, or stopped already? No more fetching. return if self._retry_call is None: if after is None: after = self.retry_delay self.retry_delay = min(self.retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay) self._fetch_attempt_count += 1 self._retry_call = self.client.reactor.callLater( after, self._do_fetch)
python
def _retry_fetch(self, after=None): """ Schedule a delayed :meth:`_do_fetch` call after a failure :param float after: The delay in seconds after which to do the retried fetch. If `None`, our internal :attr:`retry_delay` is used, and adjusted by :const:`REQUEST_RETRY_FACTOR`. """ # Have we been told to stop or shutdown? Then don't actually retry. if self._stopping or self._shuttingdown or self._start_d is None: # Stopping, or stopped already? No more fetching. return if self._retry_call is None: if after is None: after = self.retry_delay self.retry_delay = min(self.retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay) self._fetch_attempt_count += 1 self._retry_call = self.client.reactor.callLater( after, self._do_fetch)
[ "def", "_retry_fetch", "(", "self", ",", "after", "=", "None", ")", ":", "# Have we been told to stop or shutdown? Then don't actually retry.", "if", "self", ".", "_stopping", "or", "self", ".", "_shuttingdown", "or", "self", ".", "_start_d", "is", "None", ":", "# Stopping, or stopped already? No more fetching.", "return", "if", "self", ".", "_retry_call", "is", "None", ":", "if", "after", "is", "None", ":", "after", "=", "self", ".", "retry_delay", "self", ".", "retry_delay", "=", "min", "(", "self", ".", "retry_delay", "*", "REQUEST_RETRY_FACTOR", ",", "self", ".", "retry_max_delay", ")", "self", ".", "_fetch_attempt_count", "+=", "1", "self", ".", "_retry_call", "=", "self", ".", "client", ".", "reactor", ".", "callLater", "(", "after", ",", "self", ".", "_do_fetch", ")" ]
Schedule a delayed :meth:`_do_fetch` call after a failure :param float after: The delay in seconds after which to do the retried fetch. If `None`, our internal :attr:`retry_delay` is used, and adjusted by :const:`REQUEST_RETRY_FACTOR`.
[ "Schedule", "a", "delayed", ":", "meth", ":", "_do_fetch", "call", "after", "a", "failure" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L500-L522
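The delay used by `_retry_fetch` grows geometrically and is capped at a maximum. A small sketch of that growth, assuming a factor of about 1.20205 (Apery's constant, which the `_handle_fetch_error` docstring mentions) and example initial/maximum delays; the exact constants are configuration values, not part of this sketch.

REQUEST_RETRY_FACTOR = 1.20205  # assumed value, per the docstring's mention of Apery's constant

def next_retry_delay(current_delay, max_delay):
    # Each failed fetch multiplies the delay, capped at the maximum.
    return min(current_delay * REQUEST_RETRY_FACTOR, max_delay)

delay = 0.1  # hypothetical retry_init_delay
for attempt in range(1, 6):
    print("attempt", attempt, "delay before next fetch:", round(delay, 4))
    delay = next_retry_delay(delay, max_delay=30.0)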
ciena/afkak
afkak/consumer.py
Consumer._handle_offset_response
def _handle_offset_response(self, response): """ Handle responses to both OffsetRequest and OffsetFetchRequest, since they are similar enough. :param response: A tuple of a single OffsetFetchResponse or OffsetResponse """ # Got a response, clear our outstanding request deferred self._request_d = None # Successful request, reset our retry delay, count, etc self.retry_delay = self.retry_init_delay self._fetch_attempt_count = 1 response = response[0] if hasattr(response, 'offsets'): # It's a response to an OffsetRequest self._fetch_offset = response.offsets[0] else: # It's a response to an OffsetFetchRequest # Make sure we got a valid offset back. Kafka uses -1 to indicate # no committed offset was retrieved if response.offset == OFFSET_NOT_COMMITTED: self._fetch_offset = OFFSET_EARLIEST else: self._fetch_offset = response.offset + 1 self._last_committed_offset = response.offset self._do_fetch()
python
def _handle_offset_response(self, response): """ Handle responses to both OffsetRequest and OffsetFetchRequest, since they are similar enough. :param response: A tuple of a single OffsetFetchResponse or OffsetResponse """ # Got a response, clear our outstanding request deferred self._request_d = None # Successful request, reset our retry delay, count, etc self.retry_delay = self.retry_init_delay self._fetch_attempt_count = 1 response = response[0] if hasattr(response, 'offsets'): # It's a response to an OffsetRequest self._fetch_offset = response.offsets[0] else: # It's a response to an OffsetFetchRequest # Make sure we got a valid offset back. Kafka uses -1 to indicate # no committed offset was retrieved if response.offset == OFFSET_NOT_COMMITTED: self._fetch_offset = OFFSET_EARLIEST else: self._fetch_offset = response.offset + 1 self._last_committed_offset = response.offset self._do_fetch()
[ "def", "_handle_offset_response", "(", "self", ",", "response", ")", ":", "# Got a response, clear our outstanding request deferred", "self", ".", "_request_d", "=", "None", "# Successful request, reset our retry delay, count, etc", "self", ".", "retry_delay", "=", "self", ".", "retry_init_delay", "self", ".", "_fetch_attempt_count", "=", "1", "response", "=", "response", "[", "0", "]", "if", "hasattr", "(", "response", ",", "'offsets'", ")", ":", "# It's a response to an OffsetRequest", "self", ".", "_fetch_offset", "=", "response", ".", "offsets", "[", "0", "]", "else", ":", "# It's a response to an OffsetFetchRequest", "# Make sure we got a valid offset back. Kafka uses -1 to indicate", "# no committed offset was retrieved", "if", "response", ".", "offset", "==", "OFFSET_NOT_COMMITTED", ":", "self", ".", "_fetch_offset", "=", "OFFSET_EARLIEST", "else", ":", "self", ".", "_fetch_offset", "=", "response", ".", "offset", "+", "1", "self", ".", "_last_committed_offset", "=", "response", ".", "offset", "self", ".", "_do_fetch", "(", ")" ]
Handle responses to both OffsetRequest and OffsetFetchRequest, since they are similar enough. :param response: A tuple of a single OffsetFetchResponse or OffsetResponse
[ "Handle", "responses", "to", "both", "OffsetRequest", "and", "OffsetFetchRequest", "since", "they", "are", "similar", "enough", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L524-L552
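The offset bookkeeping in `_handle_offset_response` can be summarized as: an OffsetRequest reply supplies the fetch offset directly, while an OffsetFetchRequest reply of -1 (no committed offset) falls back to the earliest offset, and any other committed offset resumes one past it. A standalone restatement, with sentinel constants chosen for illustration only:

NOT_COMMITTED = -1   # illustrative sentinel for "no committed offset"
EARLIEST = -2        # illustrative sentinel for "start from the earliest offset"

def next_fetch_offset(committed_offset):
    # Kafka reports -1 when the group has never committed for this partition.
    if committed_offset == NOT_COMMITTED:
        return EARLIEST
    # Otherwise resume with the message after the last committed one.
    return committed_offset + 1

assert next_fetch_offset(-1) == EARLIEST
assert next_fetch_offset(41) == 42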
ciena/afkak
afkak/consumer.py
Consumer._handle_offset_error
def _handle_offset_error(self, failure): """ Retry the offset fetch request if appropriate. Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we log a warning. This should perhaps be extended to abort sooner on certain errors. """ # outstanding request got errback'd, clear it self._request_d = None if self._stopping and failure.check(CancelledError): # Not really an error return # Do we need to abort? if (self.request_retry_max_attempts != 0 and self._fetch_attempt_count >= self.request_retry_max_attempts): log.debug( "%r: Exhausted attempts: %d fetching offset from kafka: %r", self, self.request_retry_max_attempts, failure) self._start_d.errback(failure) return # Decide how to log this failure... If we have retried so many times # we're at the retry_max_delay, then we log at warning every other time # debug otherwise if (self.retry_delay < self.retry_max_delay or 0 == (self._fetch_attempt_count % 2)): log.debug("%r: Failure fetching offset from kafka: %r", self, failure) else: # We've retried until we hit the max delay, log at warn log.warning("%r: Still failing fetching offset from kafka: %r", self, failure) self._retry_fetch()
python
def _handle_offset_error(self, failure): """ Retry the offset fetch request if appropriate. Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we log a warning. This should perhaps be extended to abort sooner on certain errors. """ # outstanding request got errback'd, clear it self._request_d = None if self._stopping and failure.check(CancelledError): # Not really an error return # Do we need to abort? if (self.request_retry_max_attempts != 0 and self._fetch_attempt_count >= self.request_retry_max_attempts): log.debug( "%r: Exhausted attempts: %d fetching offset from kafka: %r", self, self.request_retry_max_attempts, failure) self._start_d.errback(failure) return # Decide how to log this failure... If we have retried so many times # we're at the retry_max_delay, then we log at warning every other time # debug otherwise if (self.retry_delay < self.retry_max_delay or 0 == (self._fetch_attempt_count % 2)): log.debug("%r: Failure fetching offset from kafka: %r", self, failure) else: # We've retried until we hit the max delay, log at warn log.warning("%r: Still failing fetching offset from kafka: %r", self, failure) self._retry_fetch()
[ "def", "_handle_offset_error", "(", "self", ",", "failure", ")", ":", "# outstanding request got errback'd, clear it", "self", ".", "_request_d", "=", "None", "if", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ":", "# Not really an error", "return", "# Do we need to abort?", "if", "(", "self", ".", "request_retry_max_attempts", "!=", "0", "and", "self", ".", "_fetch_attempt_count", ">=", "self", ".", "request_retry_max_attempts", ")", ":", "log", ".", "debug", "(", "\"%r: Exhausted attempts: %d fetching offset from kafka: %r\"", ",", "self", ",", "self", ".", "request_retry_max_attempts", ",", "failure", ")", "self", ".", "_start_d", ".", "errback", "(", "failure", ")", "return", "# Decide how to log this failure... If we have retried so many times", "# we're at the retry_max_delay, then we log at warning every other time", "# debug otherwise", "if", "(", "self", ".", "retry_delay", "<", "self", ".", "retry_max_delay", "or", "0", "==", "(", "self", ".", "_fetch_attempt_count", "%", "2", ")", ")", ":", "log", ".", "debug", "(", "\"%r: Failure fetching offset from kafka: %r\"", ",", "self", ",", "failure", ")", "else", ":", "# We've retried until we hit the max delay, log at warn", "log", ".", "warning", "(", "\"%r: Still failing fetching offset from kafka: %r\"", ",", "self", ",", "failure", ")", "self", ".", "_retry_fetch", "(", ")" ]
Retry the offset fetch request if appropriate. Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we log a warning. This should perhaps be extended to abort sooner on certain errors.
[ "Retry", "the", "offset", "fetch", "request", "if", "appropriate", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L554-L587
ciena/afkak
afkak/consumer.py
Consumer._send_commit_request
def _send_commit_request(self, retry_delay=None, attempt=None): """Send a commit request with our last_processed_offset""" # If there's a _commit_call, and it's not active, clear it, it probably # just called us... if self._commit_call and not self._commit_call.active(): self._commit_call = None # Make sure we only have one outstanding commit request at a time if self._commit_req is not None: raise OperationInProgress(self._commit_req) # Handle defaults if retry_delay is None: retry_delay = self.retry_init_delay if attempt is None: attempt = 1 # Create new OffsetCommitRequest with the latest processed offset commit_offset = self._last_processed_offset commit_request = OffsetCommitRequest( self.topic, self.partition, commit_offset, TIMESTAMP_INVALID, self.commit_metadata) log.debug("Committing off=%d grp=%s tpc=%s part=%s req=%r", self._last_processed_offset, self.consumer_group, self.topic, self.partition, commit_request) # Send the request, add our callbacks self._commit_req = d = self.client.send_offset_commit_request( self.consumer_group, [commit_request]) d.addBoth(self._clear_commit_req) d.addCallbacks( self._update_committed_offset, self._handle_commit_error, callbackArgs=(commit_offset,), errbackArgs=(retry_delay, attempt))
python
def _send_commit_request(self, retry_delay=None, attempt=None): """Send a commit request with our last_processed_offset""" # If there's a _commit_call, and it's not active, clear it, it probably # just called us... if self._commit_call and not self._commit_call.active(): self._commit_call = None # Make sure we only have one outstanding commit request at a time if self._commit_req is not None: raise OperationInProgress(self._commit_req) # Handle defaults if retry_delay is None: retry_delay = self.retry_init_delay if attempt is None: attempt = 1 # Create new OffsetCommitRequest with the latest processed offset commit_offset = self._last_processed_offset commit_request = OffsetCommitRequest( self.topic, self.partition, commit_offset, TIMESTAMP_INVALID, self.commit_metadata) log.debug("Committing off=%d grp=%s tpc=%s part=%s req=%r", self._last_processed_offset, self.consumer_group, self.topic, self.partition, commit_request) # Send the request, add our callbacks self._commit_req = d = self.client.send_offset_commit_request( self.consumer_group, [commit_request]) d.addBoth(self._clear_commit_req) d.addCallbacks( self._update_committed_offset, self._handle_commit_error, callbackArgs=(commit_offset,), errbackArgs=(retry_delay, attempt))
[ "def", "_send_commit_request", "(", "self", ",", "retry_delay", "=", "None", ",", "attempt", "=", "None", ")", ":", "# If there's a _commit_call, and it's not active, clear it, it probably", "# just called us...", "if", "self", ".", "_commit_call", "and", "not", "self", ".", "_commit_call", ".", "active", "(", ")", ":", "self", ".", "_commit_call", "=", "None", "# Make sure we only have one outstanding commit request at a time", "if", "self", ".", "_commit_req", "is", "not", "None", ":", "raise", "OperationInProgress", "(", "self", ".", "_commit_req", ")", "# Handle defaults", "if", "retry_delay", "is", "None", ":", "retry_delay", "=", "self", ".", "retry_init_delay", "if", "attempt", "is", "None", ":", "attempt", "=", "1", "# Create new OffsetCommitRequest with the latest processed offset", "commit_offset", "=", "self", ".", "_last_processed_offset", "commit_request", "=", "OffsetCommitRequest", "(", "self", ".", "topic", ",", "self", ".", "partition", ",", "commit_offset", ",", "TIMESTAMP_INVALID", ",", "self", ".", "commit_metadata", ")", "log", ".", "debug", "(", "\"Committing off=%d grp=%s tpc=%s part=%s req=%r\"", ",", "self", ".", "_last_processed_offset", ",", "self", ".", "consumer_group", ",", "self", ".", "topic", ",", "self", ".", "partition", ",", "commit_request", ")", "# Send the request, add our callbacks", "self", ".", "_commit_req", "=", "d", "=", "self", ".", "client", ".", "send_offset_commit_request", "(", "self", ".", "consumer_group", ",", "[", "commit_request", "]", ")", "d", ".", "addBoth", "(", "self", ".", "_clear_commit_req", ")", "d", ".", "addCallbacks", "(", "self", ".", "_update_committed_offset", ",", "self", ".", "_handle_commit_error", ",", "callbackArgs", "=", "(", "commit_offset", ",", ")", ",", "errbackArgs", "=", "(", "retry_delay", ",", "attempt", ")", ")" ]
Send a commit request with our last_processed_offset
[ "Send", "a", "commit", "request", "with", "our", "last_processed_offset" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L616-L650
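For context, the request built by `_send_commit_request` pairs the topic/partition with the last processed offset. A rough sketch of issuing one such commit through an afkak client follows; the `afkak.common` import path and the `TIMESTAMP_INVALID` placeholder mirror what the method above uses, but treat them as assumptions rather than a verified recipe.

# Assumed import path for the request tuple and timestamp sentinel.
from afkak.common import OffsetCommitRequest, TIMESTAMP_INVALID

def commit_offset(client, group, topic, partition, offset, metadata=''):
    # Mirrors the positional construction used in _send_commit_request above.
    request = OffsetCommitRequest(topic, partition, offset,
                                  TIMESTAMP_INVALID, metadata)
    # Returns a Deferred that fires when the broker acknowledges the commit.
    return client.send_offset_commit_request(group, [request])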
ciena/afkak
afkak/consumer.py
Consumer._handle_commit_error
def _handle_commit_error(self, failure, retry_delay, attempt): """ Retry the commit request, depending on failure type Depending on the type of the failure, we retry the commit request with the latest processed offset, or callback/errback self._commit_ds """ # Check if we are stopping and the request was cancelled if self._stopping and failure.check(CancelledError): # Not really an error return self._deliver_commit_result(self._last_committed_offset) # Check that the failure type is a Kafka error...this could maybe be # a tighter check to determine whether a retry will succeed... if not failure.check(KafkaError): log.error("Unhandleable failure during commit attempt: %r\n\t%r", failure, failure.getBriefTraceback()) return self._deliver_commit_result(failure) # Do we need to abort? if (self.request_retry_max_attempts != 0 and attempt >= self.request_retry_max_attempts): log.debug("%r: Exhausted attempts: %d to commit offset: %r", self, self.request_retry_max_attempts, failure) return self._deliver_commit_result(failure) # Check the retry_delay to see if we should log at the higher level # Using attempts % 2 gets us 1-warn/minute with defaults timings if retry_delay < self.retry_max_delay or 0 == (attempt % 2): log.debug("%r: Failure committing offset to kafka: %r", self, failure) else: # We've retried until we hit the max delay, log alternately at warn log.warning("%r: Still failing committing offset to kafka: %r", self, failure) # Schedule a delayed call to retry the commit retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay) self._commit_call = self.client.reactor.callLater( retry_delay, self._send_commit_request, retry_delay, attempt + 1)
python
def _handle_commit_error(self, failure, retry_delay, attempt): """ Retry the commit request, depending on failure type Depending on the type of the failure, we retry the commit request with the latest processed offset, or callback/errback self._commit_ds """ # Check if we are stopping and the request was cancelled if self._stopping and failure.check(CancelledError): # Not really an error return self._deliver_commit_result(self._last_committed_offset) # Check that the failure type is a Kafka error...this could maybe be # a tighter check to determine whether a retry will succeed... if not failure.check(KafkaError): log.error("Unhandleable failure during commit attempt: %r\n\t%r", failure, failure.getBriefTraceback()) return self._deliver_commit_result(failure) # Do we need to abort? if (self.request_retry_max_attempts != 0 and attempt >= self.request_retry_max_attempts): log.debug("%r: Exhausted attempts: %d to commit offset: %r", self, self.request_retry_max_attempts, failure) return self._deliver_commit_result(failure) # Check the retry_delay to see if we should log at the higher level # Using attempts % 2 gets us 1-warn/minute with defaults timings if retry_delay < self.retry_max_delay or 0 == (attempt % 2): log.debug("%r: Failure committing offset to kafka: %r", self, failure) else: # We've retried until we hit the max delay, log alternately at warn log.warning("%r: Still failing committing offset to kafka: %r", self, failure) # Schedule a delayed call to retry the commit retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay) self._commit_call = self.client.reactor.callLater( retry_delay, self._send_commit_request, retry_delay, attempt + 1)
[ "def", "_handle_commit_error", "(", "self", ",", "failure", ",", "retry_delay", ",", "attempt", ")", ":", "# Check if we are stopping and the request was cancelled", "if", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ":", "# Not really an error", "return", "self", ".", "_deliver_commit_result", "(", "self", ".", "_last_committed_offset", ")", "# Check that the failure type is a Kafka error...this could maybe be", "# a tighter check to determine whether a retry will succeed...", "if", "not", "failure", ".", "check", "(", "KafkaError", ")", ":", "log", ".", "error", "(", "\"Unhandleable failure during commit attempt: %r\\n\\t%r\"", ",", "failure", ",", "failure", ".", "getBriefTraceback", "(", ")", ")", "return", "self", ".", "_deliver_commit_result", "(", "failure", ")", "# Do we need to abort?", "if", "(", "self", ".", "request_retry_max_attempts", "!=", "0", "and", "attempt", ">=", "self", ".", "request_retry_max_attempts", ")", ":", "log", ".", "debug", "(", "\"%r: Exhausted attempts: %d to commit offset: %r\"", ",", "self", ",", "self", ".", "request_retry_max_attempts", ",", "failure", ")", "return", "self", ".", "_deliver_commit_result", "(", "failure", ")", "# Check the retry_delay to see if we should log at the higher level", "# Using attempts % 2 gets us 1-warn/minute with defaults timings", "if", "retry_delay", "<", "self", ".", "retry_max_delay", "or", "0", "==", "(", "attempt", "%", "2", ")", ":", "log", ".", "debug", "(", "\"%r: Failure committing offset to kafka: %r\"", ",", "self", ",", "failure", ")", "else", ":", "# We've retried until we hit the max delay, log alternately at warn", "log", ".", "warning", "(", "\"%r: Still failing committing offset to kafka: %r\"", ",", "self", ",", "failure", ")", "# Schedule a delayed call to retry the commit", "retry_delay", "=", "min", "(", "retry_delay", "*", "REQUEST_RETRY_FACTOR", ",", "self", ".", "retry_max_delay", ")", "self", ".", "_commit_call", "=", "self", ".", "client", ".", "reactor", ".", "callLater", "(", "retry_delay", ",", "self", ".", "_send_commit_request", ",", "retry_delay", ",", "attempt", "+", "1", ")" ]
Retry the commit request, depending on failure type Depending on the type of the failure, we retry the commit request with the latest processed offset, or callback/errback self._commit_ds
[ "Retry", "the", "commit", "request", "depending", "on", "failure", "type" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L652-L691
ciena/afkak
afkak/consumer.py
Consumer._handle_processor_error
def _handle_processor_error(self, failure): """Handle a failure in the processing of a block of messages This method is called when the processor func fails while processing a block of messages. Since we can't know how best to handle a processor failure, we just :func:`errback` our :func:`start` method's deferred to let our user know about the failure. """ # Check if we're stopping/stopped and the errback of the processor # deferred is just the cancelling we initiated. If so, we skip # notifying via the _start_d deferred, as it will be 'callback'd at the # end of stop() if not (self._stopping and failure.check(CancelledError)): if self._start_d: # Make sure we're not already stopped self._start_d.errback(failure)
python
def _handle_processor_error(self, failure): """Handle a failure in the processing of a block of messages This method is called when the processor func fails while processing a block of messages. Since we can't know how best to handle a processor failure, we just :func:`errback` our :func:`start` method's deferred to let our user know about the failure. """ # Check if we're stopping/stopped and the errback of the processor # deferred is just the cancelling we initiated. If so, we skip # notifying via the _start_d deferred, as it will be 'callback'd at the # end of stop() if not (self._stopping and failure.check(CancelledError)): if self._start_d: # Make sure we're not already stopped self._start_d.errback(failure)
[ "def", "_handle_processor_error", "(", "self", ",", "failure", ")", ":", "# Check if we're stopping/stopped and the errback of the processor", "# deferred is just the cancelling we initiated. If so, we skip", "# notifying via the _start_d deferred, as it will be 'callback'd at the", "# end of stop()", "if", "not", "(", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ")", ":", "if", "self", ".", "_start_d", ":", "# Make sure we're not already stopped", "self", ".", "_start_d", ".", "errback", "(", "failure", ")" ]
Handle a failure in the processing of a block of messages This method is called when the processor func fails while processing a block of messages. Since we can't know how best to handle a processor failure, we just :func:`errback` our :func:`start` method's deferred to let our user know about the failure.
[ "Handle", "a", "failure", "in", "the", "processing", "of", "a", "block", "of", "messages" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L697-L711
ciena/afkak
afkak/consumer.py
Consumer._handle_fetch_error
def _handle_fetch_error(self, failure): """A fetch request resulted in an error. Retry after our current delay When a fetch error occurs, we check to see if the Consumer is being stopped, and if so just return, trapping the CancelledError. If not, we check if the Consumer has a non-zero setting for :attr:`request_retry_max_attempts` and if so and we have reached that limit we errback() the Consumer's start() deferred with the failure. If not, we determine whether to log at debug or warning (we log at warning every other retry after backing off to the max retry delay, resulting in a warning message approximately once per minute with the default timings) We then wait our current :attr:`retry_delay`, and retry the fetch. We also increase our retry_delay by Apery's constant (1.20205) and note the failed fetch by incrementing :attr:`_fetch_attempt_count`. NOTE: this may retry forever. TODO: Possibly make this differentiate based on the failure """ # The _request_d deferred has fired, clear it. self._request_d = None if failure.check(OffsetOutOfRangeError): if self.auto_offset_reset is None: self._start_d.errback(failure) return self._fetch_offset = self.auto_offset_reset if self._stopping and failure.check(CancelledError): # Not really an error return # Do we need to abort? if (self.request_retry_max_attempts != 0 and self._fetch_attempt_count >= self.request_retry_max_attempts): log.debug( "%r: Exhausted attempts: %d fetching messages from kafka: %r", self, self.request_retry_max_attempts, failure) self._start_d.errback(failure) return # Decide how to log this failure... If we have retried so many times # we're at the retry_max_delay, then we log at warning every other time # debug otherwise if (self.retry_delay < self.retry_max_delay or 0 == (self._fetch_attempt_count % 2)): log.debug("%r: Failure fetching messages from kafka: %r", self, failure) else: # We've retried until we hit the max delay, log at warn log.warning("%r: Still failing fetching messages from kafka: %r", self, failure) self._retry_fetch()
python
def _handle_fetch_error(self, failure): """A fetch request resulted in an error. Retry after our current delay When a fetch error occurs, we check to see if the Consumer is being stopped, and if so just return, trapping the CancelledError. If not, we check if the Consumer has a non-zero setting for :attr:`request_retry_max_attempts` and if so and we have reached that limit we errback() the Consumer's start() deferred with the failure. If not, we determine whether to log at debug or warning (we log at warning every other retry after backing off to the max retry delay, resulting in a warning message approximately once per minute with the default timings) We then wait our current :attr:`retry_delay`, and retry the fetch. We also increase our retry_delay by Apery's constant (1.20205) and note the failed fetch by incrementing :attr:`_fetch_attempt_count`. NOTE: this may retry forever. TODO: Possibly make this differentiate based on the failure """ # The _request_d deferred has fired, clear it. self._request_d = None if failure.check(OffsetOutOfRangeError): if self.auto_offset_reset is None: self._start_d.errback(failure) return self._fetch_offset = self.auto_offset_reset if self._stopping and failure.check(CancelledError): # Not really an error return # Do we need to abort? if (self.request_retry_max_attempts != 0 and self._fetch_attempt_count >= self.request_retry_max_attempts): log.debug( "%r: Exhausted attempts: %d fetching messages from kafka: %r", self, self.request_retry_max_attempts, failure) self._start_d.errback(failure) return # Decide how to log this failure... If we have retried so many times # we're at the retry_max_delay, then we log at warning every other time # debug otherwise if (self.retry_delay < self.retry_max_delay or 0 == (self._fetch_attempt_count % 2)): log.debug("%r: Failure fetching messages from kafka: %r", self, failure) else: # We've retried until we hit the max delay, log at warn log.warning("%r: Still failing fetching messages from kafka: %r", self, failure) self._retry_fetch()
[ "def", "_handle_fetch_error", "(", "self", ",", "failure", ")", ":", "# The _request_d deferred has fired, clear it.", "self", ".", "_request_d", "=", "None", "if", "failure", ".", "check", "(", "OffsetOutOfRangeError", ")", ":", "if", "self", ".", "auto_offset_reset", "is", "None", ":", "self", ".", "_start_d", ".", "errback", "(", "failure", ")", "return", "self", ".", "_fetch_offset", "=", "self", ".", "auto_offset_reset", "if", "self", ".", "_stopping", "and", "failure", ".", "check", "(", "CancelledError", ")", ":", "# Not really an error", "return", "# Do we need to abort?", "if", "(", "self", ".", "request_retry_max_attempts", "!=", "0", "and", "self", ".", "_fetch_attempt_count", ">=", "self", ".", "request_retry_max_attempts", ")", ":", "log", ".", "debug", "(", "\"%r: Exhausted attempts: %d fetching messages from kafka: %r\"", ",", "self", ",", "self", ".", "request_retry_max_attempts", ",", "failure", ")", "self", ".", "_start_d", ".", "errback", "(", "failure", ")", "return", "# Decide how to log this failure... If we have retried so many times", "# we're at the retry_max_delay, then we log at warning every other time", "# debug otherwise", "if", "(", "self", ".", "retry_delay", "<", "self", ".", "retry_max_delay", "or", "0", "==", "(", "self", ".", "_fetch_attempt_count", "%", "2", ")", ")", ":", "log", ".", "debug", "(", "\"%r: Failure fetching messages from kafka: %r\"", ",", "self", ",", "failure", ")", "else", ":", "# We've retried until we hit the max delay, log at warn", "log", ".", "warning", "(", "\"%r: Still failing fetching messages from kafka: %r\"", ",", "self", ",", "failure", ")", "self", ".", "_retry_fetch", "(", ")" ]
A fetch request resulted in an error. Retry after our current delay When a fetch error occurs, we check to see if the Consumer is being stopped, and if so just return, trapping the CancelledError. If not, we check if the Consumer has a non-zero setting for :attr:`request_retry_max_attempts` and if so and we have reached that limit we errback() the Consumer's start() deferred with the failure. If not, we determine whether to log at debug or warning (we log at warning every other retry after backing off to the max retry delay, resulting in a warning message approximately once per minute with the default timings) We then wait our current :attr:`retry_delay`, and retry the fetch. We also increase our retry_delay by Apery's constant (1.20205) and note the failed fetch by incrementing :attr:`_fetch_attempt_count`. NOTE: this may retry forever. TODO: Possibly make this differentiate based on the failure
[ "A", "fetch", "request", "resulted", "in", "an", "error", ".", "Retry", "after", "our", "current", "delay" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L713-L763
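The logging policy in `_handle_fetch_error` (and its offset/commit counterparts) is easy to lift out: log at debug while still backing off, and at warning on alternate attempts once the delay has hit its ceiling, which works out to roughly one warning per minute with the default timings. A standalone restatement of just that choice:

import logging

log = logging.getLogger(__name__)

def log_retry_failure(current_delay, max_delay, attempt, failure):
    # Stay quiet at debug while still backing off, or on even-numbered
    # attempts once the delay has reached its ceiling.
    if current_delay < max_delay or attempt % 2 == 0:
        log.debug("Failure fetching messages from kafka: %r", failure)
    else:
        # At the maximum delay, alternate attempts are surfaced at warning.
        log.warning("Still failing fetching messages from kafka: %r", failure)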
ciena/afkak
afkak/consumer.py
Consumer._handle_fetch_response
def _handle_fetch_response(self, responses): """The callback handling the successful response from the fetch request Delivers the message list to the processor, handles per-message errors (ConsumerFetchSizeTooSmall), triggers another fetch request If the processor is still processing the last batch of messages, we defer this processing until it's done. Otherwise, we start another fetch request and submit the messages to the processor """ # Successful fetch, reset our retry delay self.retry_delay = self.retry_init_delay self._fetch_attempt_count = 1 # Check to see if we are still processing the last block we fetched... if self._msg_block_d: # We are still working through the last block of messages... # We have to wait until it's done, then process this response self._msg_block_d.addCallback( lambda _: self._handle_fetch_response(responses)) return # No ongoing processing, great, let's get some started. # Request no longer outstanding, clear the deferred tracker so we # can refetch self._request_d = None messages = [] try: for resp in responses: # We should really only ever get one... if resp.partition != self.partition: log.warning( "%r: Got response with partition: %r not our own: %r", self, resp.partition, self.partition) continue # resp.messages is a KafkaCodec._decode_message_set_iter # Note that 'message' here is really an OffsetAndMessage for message in resp.messages: # Check for messages included which are from prior to our # desired offset: can happen due to compressed message sets if message.offset < self._fetch_offset: log.debug( 'Skipping message at offset: %d, because its ' 'offset is less that our fetch offset: %d.', message.offset, self._fetch_offset) continue # Create a 'SourcedMessage' and add it to the messages list messages.append( SourcedMessage( message=message.message, offset=message.offset, topic=self.topic, partition=self.partition)) # Update our notion of from where to fetch. self._fetch_offset = message.offset + 1 except ConsumerFetchSizeTooSmall: # A message was too large for us to receive, given our current # buffer size. Grow it until it works, or we hit our max # Grow by 16x up to 1MB (could result in 16MB buf), then by 2x factor = 2 if self.buffer_size <= 2**20: factor = 16 if self.max_buffer_size is None: # No limit, increase until we succeed or fail to alloc RAM self.buffer_size *= factor elif (self.max_buffer_size is not None and self.buffer_size < self.max_buffer_size): # Limited, but currently below it. self.buffer_size = min( self.buffer_size * factor, self.max_buffer_size) else: # We failed, and are already at our max. Nothing we can do but # create a Failure and errback() our start() deferred log.error("Max fetch size %d too small", self.max_buffer_size) failure = Failure( ConsumerFetchSizeTooSmall( "Max buffer size:%d too small for message", self.max_buffer_size)) self._start_d.errback(failure) return log.debug( "Next message larger than fetch size, increasing " "to %d (~2x) and retrying", self.buffer_size) finally: # If we were able to extract any messages, deliver them to the # processor now. if messages: self._msg_block_d = Deferred() self._process_messages(messages) # start another fetch, if needed, but use callLater to avoid recursion self._retry_fetch(0)
python
def _handle_fetch_response(self, responses): """The callback handling the successful response from the fetch request Delivers the message list to the processor, handles per-message errors (ConsumerFetchSizeTooSmall), triggers another fetch request If the processor is still processing the last batch of messages, we defer this processing until it's done. Otherwise, we start another fetch request and submit the messages to the processor """ # Successful fetch, reset our retry delay self.retry_delay = self.retry_init_delay self._fetch_attempt_count = 1 # Check to see if we are still processing the last block we fetched... if self._msg_block_d: # We are still working through the last block of messages... # We have to wait until it's done, then process this response self._msg_block_d.addCallback( lambda _: self._handle_fetch_response(responses)) return # No ongoing processing, great, let's get some started. # Request no longer outstanding, clear the deferred tracker so we # can refetch self._request_d = None messages = [] try: for resp in responses: # We should really only ever get one... if resp.partition != self.partition: log.warning( "%r: Got response with partition: %r not our own: %r", self, resp.partition, self.partition) continue # resp.messages is a KafkaCodec._decode_message_set_iter # Note that 'message' here is really an OffsetAndMessage for message in resp.messages: # Check for messages included which are from prior to our # desired offset: can happen due to compressed message sets if message.offset < self._fetch_offset: log.debug( 'Skipping message at offset: %d, because its ' 'offset is less that our fetch offset: %d.', message.offset, self._fetch_offset) continue # Create a 'SourcedMessage' and add it to the messages list messages.append( SourcedMessage( message=message.message, offset=message.offset, topic=self.topic, partition=self.partition)) # Update our notion of from where to fetch. self._fetch_offset = message.offset + 1 except ConsumerFetchSizeTooSmall: # A message was too large for us to receive, given our current # buffer size. Grow it until it works, or we hit our max # Grow by 16x up to 1MB (could result in 16MB buf), then by 2x factor = 2 if self.buffer_size <= 2**20: factor = 16 if self.max_buffer_size is None: # No limit, increase until we succeed or fail to alloc RAM self.buffer_size *= factor elif (self.max_buffer_size is not None and self.buffer_size < self.max_buffer_size): # Limited, but currently below it. self.buffer_size = min( self.buffer_size * factor, self.max_buffer_size) else: # We failed, and are already at our max. Nothing we can do but # create a Failure and errback() our start() deferred log.error("Max fetch size %d too small", self.max_buffer_size) failure = Failure( ConsumerFetchSizeTooSmall( "Max buffer size:%d too small for message", self.max_buffer_size)) self._start_d.errback(failure) return log.debug( "Next message larger than fetch size, increasing " "to %d (~2x) and retrying", self.buffer_size) finally: # If we were able to extract any messages, deliver them to the # processor now. if messages: self._msg_block_d = Deferred() self._process_messages(messages) # start another fetch, if needed, but use callLater to avoid recursion self._retry_fetch(0)
[ "def", "_handle_fetch_response", "(", "self", ",", "responses", ")", ":", "# Successful fetch, reset our retry delay", "self", ".", "retry_delay", "=", "self", ".", "retry_init_delay", "self", ".", "_fetch_attempt_count", "=", "1", "# Check to see if we are still processing the last block we fetched...", "if", "self", ".", "_msg_block_d", ":", "# We are still working through the last block of messages...", "# We have to wait until it's done, then process this response", "self", ".", "_msg_block_d", ".", "addCallback", "(", "lambda", "_", ":", "self", ".", "_handle_fetch_response", "(", "responses", ")", ")", "return", "# No ongoing processing, great, let's get some started.", "# Request no longer outstanding, clear the deferred tracker so we", "# can refetch", "self", ".", "_request_d", "=", "None", "messages", "=", "[", "]", "try", ":", "for", "resp", "in", "responses", ":", "# We should really only ever get one...", "if", "resp", ".", "partition", "!=", "self", ".", "partition", ":", "log", ".", "warning", "(", "\"%r: Got response with partition: %r not our own: %r\"", ",", "self", ",", "resp", ".", "partition", ",", "self", ".", "partition", ")", "continue", "# resp.messages is a KafkaCodec._decode_message_set_iter", "# Note that 'message' here is really an OffsetAndMessage", "for", "message", "in", "resp", ".", "messages", ":", "# Check for messages included which are from prior to our", "# desired offset: can happen due to compressed message sets", "if", "message", ".", "offset", "<", "self", ".", "_fetch_offset", ":", "log", ".", "debug", "(", "'Skipping message at offset: %d, because its '", "'offset is less that our fetch offset: %d.'", ",", "message", ".", "offset", ",", "self", ".", "_fetch_offset", ")", "continue", "# Create a 'SourcedMessage' and add it to the messages list", "messages", ".", "append", "(", "SourcedMessage", "(", "message", "=", "message", ".", "message", ",", "offset", "=", "message", ".", "offset", ",", "topic", "=", "self", ".", "topic", ",", "partition", "=", "self", ".", "partition", ")", ")", "# Update our notion of from where to fetch.", "self", ".", "_fetch_offset", "=", "message", ".", "offset", "+", "1", "except", "ConsumerFetchSizeTooSmall", ":", "# A message was too large for us to receive, given our current", "# buffer size. Grow it until it works, or we hit our max", "# Grow by 16x up to 1MB (could result in 16MB buf), then by 2x", "factor", "=", "2", "if", "self", ".", "buffer_size", "<=", "2", "**", "20", ":", "factor", "=", "16", "if", "self", ".", "max_buffer_size", "is", "None", ":", "# No limit, increase until we succeed or fail to alloc RAM", "self", ".", "buffer_size", "*=", "factor", "elif", "(", "self", ".", "max_buffer_size", "is", "not", "None", "and", "self", ".", "buffer_size", "<", "self", ".", "max_buffer_size", ")", ":", "# Limited, but currently below it.", "self", ".", "buffer_size", "=", "min", "(", "self", ".", "buffer_size", "*", "factor", ",", "self", ".", "max_buffer_size", ")", "else", ":", "# We failed, and are already at our max. 
Nothing we can do but", "# create a Failure and errback() our start() deferred", "log", ".", "error", "(", "\"Max fetch size %d too small\"", ",", "self", ".", "max_buffer_size", ")", "failure", "=", "Failure", "(", "ConsumerFetchSizeTooSmall", "(", "\"Max buffer size:%d too small for message\"", ",", "self", ".", "max_buffer_size", ")", ")", "self", ".", "_start_d", ".", "errback", "(", "failure", ")", "return", "log", ".", "debug", "(", "\"Next message larger than fetch size, increasing \"", "\"to %d (~2x) and retrying\"", ",", "self", ".", "buffer_size", ")", "finally", ":", "# If we were able to extract any messages, deliver them to the", "# processor now.", "if", "messages", ":", "self", ".", "_msg_block_d", "=", "Deferred", "(", ")", "self", ".", "_process_messages", "(", "messages", ")", "# start another fetch, if needed, but use callLater to avoid recursion", "self", ".", "_retry_fetch", "(", "0", ")" ]
The callback handling the successful response from the fetch request Delivers the message list to the processor, handles per-message errors (ConsumerFetchSizeTooSmall), triggers another fetch request If the processor is still processing the last batch of messages, we defer this processing until it's done. Otherwise, we start another fetch request and submit the messages to the processor
[ "The", "callback", "handling", "the", "successful", "response", "from", "the", "fetch", "request" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L765-L856
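The buffer-growth rule inside the `ConsumerFetchSizeTooSmall` handler above is worth seeing in isolation: grow 16x while the buffer is at or below 1 MiB, then 2x, never exceeding `max_buffer_size` when one is set. A small sketch (the function name is made up for illustration):

def grow_buffer(buffer_size, max_buffer_size=None):
    # Grow aggressively (16x) while small, then double, as in the handler above.
    factor = 16 if buffer_size <= 2 ** 20 else 2
    if max_buffer_size is None:
        return buffer_size * factor
    if buffer_size < max_buffer_size:
        return min(buffer_size * factor, max_buffer_size)
    # Already at the configured maximum: no further growth is possible.
    raise ValueError("buffer already at max_buffer_size")

assert grow_buffer(64 * 1024) == 1024 * 1024
assert grow_buffer(2 * 2 ** 20, max_buffer_size=3 * 2 ** 20) == 3 * 2 ** 20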
ciena/afkak
afkak/consumer.py
Consumer._process_messages
def _process_messages(self, messages): """Send messages to the `processor` callback to be processed In the case we have a commit policy, we send messages to the processor in blocks no bigger than auto_commit_every_n (if set). Otherwise, we send the entire message block to be processed. """ # Have we been told to shutdown? if self._shuttingdown: return # Do we have any messages to process? if not messages: # No, we're done with this block. If we had another fetch result # waiting, this callback will trigger the processing thereof. if self._msg_block_d: _msg_block_d, self._msg_block_d = self._msg_block_d, None _msg_block_d.callback(True) return # Yes, we've got some messages to process. # Default to processing the entire block... proc_block_size = sys.maxsize # Unless our auto commit_policy restricts us to process less if self.auto_commit_every_n: proc_block_size = self.auto_commit_every_n # Divide messages into two lists: one to process now, and remainder msgs_to_proc = messages[:proc_block_size] msgs_remainder = messages[proc_block_size:] # Call our processor callable and handle the possibility it returned # a deferred... last_offset = msgs_to_proc[-1].offset self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc) log.debug('self.processor return: %r, last_offset: %r', d, last_offset) # Once the processor completes, clear our _processor_d d.addBoth(self._clear_processor_deferred) # Record the offset of the last processed message and check autocommit d.addCallback(self._update_processed_offset, last_offset) # If we were stopped, cancel the processor deferred. Note, we have to # do this here, in addition to in stop() because the processor func # itself could have called stop(), and then when it returned, we re-set # self._processor_d to the return of maybeDeferred(). if self._stopping or self._start_d is None: d.cancel() else: # Setup to process the rest of our messages d.addCallback(lambda _: self._process_messages(msgs_remainder)) # Add an error handler d.addErrback(self._handle_processor_error)
python
def _process_messages(self, messages): """Send messages to the `processor` callback to be processed In the case we have a commit policy, we send messages to the processor in blocks no bigger than auto_commit_every_n (if set). Otherwise, we send the entire message block to be processed. """ # Have we been told to shutdown? if self._shuttingdown: return # Do we have any messages to process? if not messages: # No, we're done with this block. If we had another fetch result # waiting, this callback will trigger the processing thereof. if self._msg_block_d: _msg_block_d, self._msg_block_d = self._msg_block_d, None _msg_block_d.callback(True) return # Yes, we've got some messages to process. # Default to processing the entire block... proc_block_size = sys.maxsize # Unless our auto commit_policy restricts us to process less if self.auto_commit_every_n: proc_block_size = self.auto_commit_every_n # Divide messages into two lists: one to process now, and remainder msgs_to_proc = messages[:proc_block_size] msgs_remainder = messages[proc_block_size:] # Call our processor callable and handle the possibility it returned # a deferred... last_offset = msgs_to_proc[-1].offset self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc) log.debug('self.processor return: %r, last_offset: %r', d, last_offset) # Once the processor completes, clear our _processor_d d.addBoth(self._clear_processor_deferred) # Record the offset of the last processed message and check autocommit d.addCallback(self._update_processed_offset, last_offset) # If we were stopped, cancel the processor deferred. Note, we have to # do this here, in addition to in stop() because the processor func # itself could have called stop(), and then when it returned, we re-set # self._processor_d to the return of maybeDeferred(). if self._stopping or self._start_d is None: d.cancel() else: # Setup to process the rest of our messages d.addCallback(lambda _: self._process_messages(msgs_remainder)) # Add an error handler d.addErrback(self._handle_processor_error)
[ "def", "_process_messages", "(", "self", ",", "messages", ")", ":", "# Have we been told to shutdown?", "if", "self", ".", "_shuttingdown", ":", "return", "# Do we have any messages to process?", "if", "not", "messages", ":", "# No, we're done with this block. If we had another fetch result", "# waiting, this callback will trigger the processing thereof.", "if", "self", ".", "_msg_block_d", ":", "_msg_block_d", ",", "self", ".", "_msg_block_d", "=", "self", ".", "_msg_block_d", ",", "None", "_msg_block_d", ".", "callback", "(", "True", ")", "return", "# Yes, we've got some messages to process.", "# Default to processing the entire block...", "proc_block_size", "=", "sys", ".", "maxsize", "# Unless our auto commit_policy restricts us to process less", "if", "self", ".", "auto_commit_every_n", ":", "proc_block_size", "=", "self", ".", "auto_commit_every_n", "# Divide messages into two lists: one to process now, and remainder", "msgs_to_proc", "=", "messages", "[", ":", "proc_block_size", "]", "msgs_remainder", "=", "messages", "[", "proc_block_size", ":", "]", "# Call our processor callable and handle the possibility it returned", "# a deferred...", "last_offset", "=", "msgs_to_proc", "[", "-", "1", "]", ".", "offset", "self", ".", "_processor_d", "=", "d", "=", "maybeDeferred", "(", "self", ".", "processor", ",", "self", ",", "msgs_to_proc", ")", "log", ".", "debug", "(", "'self.processor return: %r, last_offset: %r'", ",", "d", ",", "last_offset", ")", "# Once the processor completes, clear our _processor_d", "d", ".", "addBoth", "(", "self", ".", "_clear_processor_deferred", ")", "# Record the offset of the last processed message and check autocommit", "d", ".", "addCallback", "(", "self", ".", "_update_processed_offset", ",", "last_offset", ")", "# If we were stopped, cancel the processor deferred. Note, we have to", "# do this here, in addition to in stop() because the processor func", "# itself could have called stop(), and then when it returned, we re-set", "# self._processor_d to the return of maybeDeferred().", "if", "self", ".", "_stopping", "or", "self", ".", "_start_d", "is", "None", ":", "d", ".", "cancel", "(", ")", "else", ":", "# Setup to process the rest of our messages", "d", ".", "addCallback", "(", "lambda", "_", ":", "self", ".", "_process_messages", "(", "msgs_remainder", ")", ")", "# Add an error handler", "d", ".", "addErrback", "(", "self", ".", "_handle_processor_error", ")" ]
Send messages to the `processor` callback to be processed In the case we have a commit policy, we send messages to the processor in blocks no bigger than auto_commit_every_n (if set). Otherwise, we send the entire message block to be processed.
[ "Send", "messages", "to", "the", "processor", "callback", "to", "be", "processed" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L858-L906
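`_process_messages` caps each delivery to the processor at `auto_commit_every_n` messages when count-based auto-commit is configured. The chunking itself is plain list slicing, as this illustrative sketch shows:

import sys

def split_block(messages, auto_commit_every_n=None):
    # Without a count-based commit policy, hand over the whole block at once.
    block_size = auto_commit_every_n or sys.maxsize
    return messages[:block_size], messages[block_size:]

to_process, remainder = split_block(list(range(25)), auto_commit_every_n=10)
assert len(to_process) == 10 and len(remainder) == 15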
ciena/afkak
afkak/consumer.py
Consumer._do_fetch
def _do_fetch(self): """Send a fetch request if there isn't a request outstanding Sends a fetch request to the Kafka cluster to get messages at the current offset. When the response comes back, if there are messages, it delivers them to the :attr:`processor` callback and initiates another fetch request. If there is a recoverable error, the fetch is retried after :attr:`retry_delay`. In the case of an unrecoverable error, :func:`errback` is called on the :class:`Deferred` returned by :meth:`start()`. """ # Check for outstanding request. if self._request_d: log.debug("_do_fetch: Outstanding request: %r", self._request_d) return # Cleanup our _retry_call, if we have one if self._retry_call is not None: if self._retry_call.active(): self._retry_call.cancel() self._retry_call = None # Do we know our offset yet, or do we need to figure it out? if (self._fetch_offset == OFFSET_EARLIEST or self._fetch_offset == OFFSET_LATEST): # We need to fetch the offset for our topic/partition offset_request = OffsetRequest( self.topic, self.partition, self._fetch_offset, 1) self._request_d = self.client.send_offset_request([offset_request]) self._request_d.addCallbacks( self._handle_offset_response, self._handle_offset_error) elif self._fetch_offset == OFFSET_COMMITTED: # We need to fetch the committed offset for our topic/partition # Note we use the same callbacks, as the responses are "close # enough" for our needs here if not self.consumer_group: # consumer_group must be set for OFFSET_COMMITTED failure = Failure( InvalidConsumerGroupError("Bad Group_id:{0!r}".format( self.consumer_group))) self._start_d.errback(failure) request = OffsetFetchRequest(self.topic, self.partition) self._request_d = self.client.send_offset_fetch_request( self.consumer_group, [request]) self._request_d.addCallbacks( self._handle_offset_response, self._handle_offset_error) else: # Create fetch request payload for our partition request = FetchRequest( self.topic, self.partition, self._fetch_offset, self.buffer_size) # Send request and add handlers for the response self._request_d = self.client.send_fetch_request( [request], max_wait_time=self.fetch_max_wait_time, min_bytes=self.fetch_min_bytes) # We need a temp for this because if the response is already # available, _handle_fetch_response() will clear self._request_d d = self._request_d d.addCallback(self._handle_fetch_response) d.addErrback(self._handle_fetch_error)
python
def _do_fetch(self): """Send a fetch request if there isn't a request outstanding Sends a fetch request to the Kafka cluster to get messages at the current offset. When the response comes back, if there are messages, it delivers them to the :attr:`processor` callback and initiates another fetch request. If there is a recoverable error, the fetch is retried after :attr:`retry_delay`. In the case of an unrecoverable error, :func:`errback` is called on the :class:`Deferred` returned by :meth:`start()`. """ # Check for outstanding request. if self._request_d: log.debug("_do_fetch: Outstanding request: %r", self._request_d) return # Cleanup our _retry_call, if we have one if self._retry_call is not None: if self._retry_call.active(): self._retry_call.cancel() self._retry_call = None # Do we know our offset yet, or do we need to figure it out? if (self._fetch_offset == OFFSET_EARLIEST or self._fetch_offset == OFFSET_LATEST): # We need to fetch the offset for our topic/partition offset_request = OffsetRequest( self.topic, self.partition, self._fetch_offset, 1) self._request_d = self.client.send_offset_request([offset_request]) self._request_d.addCallbacks( self._handle_offset_response, self._handle_offset_error) elif self._fetch_offset == OFFSET_COMMITTED: # We need to fetch the committed offset for our topic/partition # Note we use the same callbacks, as the responses are "close # enough" for our needs here if not self.consumer_group: # consumer_group must be set for OFFSET_COMMITTED failure = Failure( InvalidConsumerGroupError("Bad Group_id:{0!r}".format( self.consumer_group))) self._start_d.errback(failure) request = OffsetFetchRequest(self.topic, self.partition) self._request_d = self.client.send_offset_fetch_request( self.consumer_group, [request]) self._request_d.addCallbacks( self._handle_offset_response, self._handle_offset_error) else: # Create fetch request payload for our partition request = FetchRequest( self.topic, self.partition, self._fetch_offset, self.buffer_size) # Send request and add handlers for the response self._request_d = self.client.send_fetch_request( [request], max_wait_time=self.fetch_max_wait_time, min_bytes=self.fetch_min_bytes) # We need a temp for this because if the response is already # available, _handle_fetch_response() will clear self._request_d d = self._request_d d.addCallback(self._handle_fetch_response) d.addErrback(self._handle_fetch_error)
[ "def", "_do_fetch", "(", "self", ")", ":", "# Check for outstanding request.", "if", "self", ".", "_request_d", ":", "log", ".", "debug", "(", "\"_do_fetch: Outstanding request: %r\"", ",", "self", ".", "_request_d", ")", "return", "# Cleanup our _retry_call, if we have one", "if", "self", ".", "_retry_call", "is", "not", "None", ":", "if", "self", ".", "_retry_call", ".", "active", "(", ")", ":", "self", ".", "_retry_call", ".", "cancel", "(", ")", "self", ".", "_retry_call", "=", "None", "# Do we know our offset yet, or do we need to figure it out?", "if", "(", "self", ".", "_fetch_offset", "==", "OFFSET_EARLIEST", "or", "self", ".", "_fetch_offset", "==", "OFFSET_LATEST", ")", ":", "# We need to fetch the offset for our topic/partition", "offset_request", "=", "OffsetRequest", "(", "self", ".", "topic", ",", "self", ".", "partition", ",", "self", ".", "_fetch_offset", ",", "1", ")", "self", ".", "_request_d", "=", "self", ".", "client", ".", "send_offset_request", "(", "[", "offset_request", "]", ")", "self", ".", "_request_d", ".", "addCallbacks", "(", "self", ".", "_handle_offset_response", ",", "self", ".", "_handle_offset_error", ")", "elif", "self", ".", "_fetch_offset", "==", "OFFSET_COMMITTED", ":", "# We need to fetch the committed offset for our topic/partition", "# Note we use the same callbacks, as the responses are \"close", "# enough\" for our needs here", "if", "not", "self", ".", "consumer_group", ":", "# consumer_group must be set for OFFSET_COMMITTED", "failure", "=", "Failure", "(", "InvalidConsumerGroupError", "(", "\"Bad Group_id:{0!r}\"", ".", "format", "(", "self", ".", "consumer_group", ")", ")", ")", "self", ".", "_start_d", ".", "errback", "(", "failure", ")", "request", "=", "OffsetFetchRequest", "(", "self", ".", "topic", ",", "self", ".", "partition", ")", "self", ".", "_request_d", "=", "self", ".", "client", ".", "send_offset_fetch_request", "(", "self", ".", "consumer_group", ",", "[", "request", "]", ")", "self", ".", "_request_d", ".", "addCallbacks", "(", "self", ".", "_handle_offset_response", ",", "self", ".", "_handle_offset_error", ")", "else", ":", "# Create fetch request payload for our partition", "request", "=", "FetchRequest", "(", "self", ".", "topic", ",", "self", ".", "partition", ",", "self", ".", "_fetch_offset", ",", "self", ".", "buffer_size", ")", "# Send request and add handlers for the response", "self", ".", "_request_d", "=", "self", ".", "client", ".", "send_fetch_request", "(", "[", "request", "]", ",", "max_wait_time", "=", "self", ".", "fetch_max_wait_time", ",", "min_bytes", "=", "self", ".", "fetch_min_bytes", ")", "# We need a temp for this because if the response is already", "# available, _handle_fetch_response() will clear self._request_d", "d", "=", "self", ".", "_request_d", "d", ".", "addCallback", "(", "self", ".", "_handle_fetch_response", ")", "d", ".", "addErrback", "(", "self", ".", "_handle_fetch_error", ")" ]
Send a fetch request if there isn't a request outstanding Sends a fetch request to the Kafka cluster to get messages at the current offset. When the response comes back, if there are messages, it delivers them to the :attr:`processor` callback and initiates another fetch request. If there is a recoverable error, the fetch is retried after :attr:`retry_delay`. In the case of an unrecoverable error, :func:`errback` is called on the :class:`Deferred` returned by :meth:`start()`.
[ "Send", "a", "fetch", "request", "if", "there", "isn", "t", "a", "request", "outstanding" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L908-L968
ciena/afkak
afkak/consumer.py
Consumer._commit_timer_failed
def _commit_timer_failed(self, fail): """Handle an error in the commit() function Our commit() function called by the LoopingCall failed. Some error probably came back from Kafka and _check_error() raised the exception For now, just log the failure and restart the loop """ log.warning( '_commit_timer_failed: uncaught error %r: %s in _auto_commit', fail, fail.getBriefTraceback()) self._commit_looper_d = self._commit_looper.start( self.auto_commit_every_s, now=False)
python
def _commit_timer_failed(self, fail): """Handle an error in the commit() function Our commit() function called by the LoopingCall failed. Some error probably came back from Kafka and _check_error() raised the exception For now, just log the failure and restart the loop """ log.warning( '_commit_timer_failed: uncaught error %r: %s in _auto_commit', fail, fail.getBriefTraceback()) self._commit_looper_d = self._commit_looper.start( self.auto_commit_every_s, now=False)
[ "def", "_commit_timer_failed", "(", "self", ",", "fail", ")", ":", "log", ".", "warning", "(", "'_commit_timer_failed: uncaught error %r: %s in _auto_commit'", ",", "fail", ",", "fail", ".", "getBriefTraceback", "(", ")", ")", "self", ".", "_commit_looper_d", "=", "self", ".", "_commit_looper", ".", "start", "(", "self", ".", "auto_commit_every_s", ",", "now", "=", "False", ")" ]
Handle an error in the commit() function Our commit() function called by the LoopingCall failed. Some error probably came back from Kafka and _check_error() raised the exception For now, just log the failure and restart the loop
[ "Handle", "an", "error", "in", "the", "commit", "()", "function" ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L970-L981
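`_commit_timer_failed` recovers by restarting the `LoopingCall` that drives periodic commits. A minimal Twisted sketch of that restart-on-failure pattern, using a stand-in task instead of the consumer's commit path; treat it as an illustration of the idea rather than afkak's actual wiring.

from twisted.internet import task

def start_commit_timer(do_commit, interval):
    loop = task.LoopingCall(do_commit)

    def on_failure(fail):
        # The LoopingCall stops itself when do_commit raises; log and
        # restart, mirroring _commit_timer_failed above.
        print("commit timer failed: %s -- restarting" % (fail.getBriefTraceback(),))
        loop.start(interval, now=False).addErrback(on_failure)

    loop.start(interval, now=False).addErrback(on_failure)
    return loop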
ciena/afkak
afkak/consumer.py
Consumer._commit_timer_stopped
def _commit_timer_stopped(self, lCall): """We're shutting down, clean up our looping call...""" if self._commit_looper is not lCall: log.warning('_commit_timer_stopped with wrong timer:%s not:%s', lCall, self._commit_looper) else: log.debug('_commit_timer_stopped: %s %s', lCall, self._commit_looper) self._commit_looper = None self._commit_looper_d = None
python
def _commit_timer_stopped(self, lCall): """We're shutting down, clean up our looping call...""" if self._commit_looper is not lCall: log.warning('_commit_timer_stopped with wrong timer:%s not:%s', lCall, self._commit_looper) else: log.debug('_commit_timer_stopped: %s %s', lCall, self._commit_looper) self._commit_looper = None self._commit_looper_d = None
[ "def", "_commit_timer_stopped", "(", "self", ",", "lCall", ")", ":", "if", "self", ".", "_commit_looper", "is", "not", "lCall", ":", "log", ".", "warning", "(", "'_commit_timer_stopped with wrong timer:%s not:%s'", ",", "lCall", ",", "self", ".", "_commit_looper", ")", "else", ":", "log", ".", "debug", "(", "'_commit_timer_stopped: %s %s'", ",", "lCall", ",", "self", ".", "_commit_looper", ")", "self", ".", "_commit_looper", "=", "None", "self", ".", "_commit_looper_d", "=", "None" ]
We're shutting down, clean up our looping call...
[ "We", "re", "shutting", "down", "clean", "up", "our", "looping", "call", "..." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/consumer.py#L983-L992
ciena/afkak
afkak/partitioner.py
pure_murmur2
def pure_murmur2(byte_array, seed=0x9747b28c): """Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244 Args: byte_array: bytearray - Raises TypeError otherwise Returns: MurmurHash2 of byte_array bytearray Raises: TypeError if byte_array arg is not of type bytearray """ # Ensure byte_array arg is a bytearray if not isinstance(byte_array, bytearray): raise TypeError("Type: %r of 'byte_array' arg must be 'bytearray'", type(byte_array)) length = len(byte_array) # 'm' and 'r' are mixing constants generated offline. # They're not really 'magic', they just happen to work well. m = 0x5bd1e995 r = 24 mod32bits = 0xffffffff # Initialize the hash to a random value h = seed ^ length length4 = length // 4 for i in range(length4): i4 = i * 4 k = ((byte_array[i4 + 0] & 0xff) + ((byte_array[i4 + 1] & 0xff) << 8) + ((byte_array[i4 + 2] & 0xff) << 16) + ((byte_array[i4 + 3] & 0xff) << 24)) k &= mod32bits k *= m k &= mod32bits k ^= (k % 0x100000000) >> r # k ^= k >>> r k &= mod32bits k *= m k &= mod32bits h *= m h &= mod32bits h ^= k h &= mod32bits # Handle the last few bytes of the input array extra_bytes = length % 4 if extra_bytes == 3: h ^= (byte_array[(length & ~3) + 2] & 0xff) << 16 h &= mod32bits if extra_bytes >= 2: h ^= (byte_array[(length & ~3) + 1] & 0xff) << 8 h &= mod32bits if extra_bytes >= 1: h ^= (byte_array[length & ~3] & 0xff) h &= mod32bits h *= m h &= mod32bits h ^= (h % 0x100000000) >> 13 # h >>> 13; h &= mod32bits h *= m h &= mod32bits h ^= (h % 0x100000000) >> 15 # h >>> 15; h &= mod32bits return h
python
def pure_murmur2(byte_array, seed=0x9747b28c): """Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244 Args: byte_array: bytearray - Raises TypeError otherwise Returns: MurmurHash2 of byte_array bytearray Raises: TypeError if byte_array arg is not of type bytearray """ # Ensure byte_array arg is a bytearray if not isinstance(byte_array, bytearray): raise TypeError("Type: %r of 'byte_array' arg must be 'bytearray'", type(byte_array)) length = len(byte_array) # 'm' and 'r' are mixing constants generated offline. # They're not really 'magic', they just happen to work well. m = 0x5bd1e995 r = 24 mod32bits = 0xffffffff # Initialize the hash to a random value h = seed ^ length length4 = length // 4 for i in range(length4): i4 = i * 4 k = ((byte_array[i4 + 0] & 0xff) + ((byte_array[i4 + 1] & 0xff) << 8) + ((byte_array[i4 + 2] & 0xff) << 16) + ((byte_array[i4 + 3] & 0xff) << 24)) k &= mod32bits k *= m k &= mod32bits k ^= (k % 0x100000000) >> r # k ^= k >>> r k &= mod32bits k *= m k &= mod32bits h *= m h &= mod32bits h ^= k h &= mod32bits # Handle the last few bytes of the input array extra_bytes = length % 4 if extra_bytes == 3: h ^= (byte_array[(length & ~3) + 2] & 0xff) << 16 h &= mod32bits if extra_bytes >= 2: h ^= (byte_array[(length & ~3) + 1] & 0xff) << 8 h &= mod32bits if extra_bytes >= 1: h ^= (byte_array[length & ~3] & 0xff) h &= mod32bits h *= m h &= mod32bits h ^= (h % 0x100000000) >> 13 # h >>> 13; h &= mod32bits h *= m h &= mod32bits h ^= (h % 0x100000000) >> 15 # h >>> 15; h &= mod32bits return h
[ "def", "pure_murmur2", "(", "byte_array", ",", "seed", "=", "0x9747b28c", ")", ":", "# Ensure byte_array arg is a bytearray", "if", "not", "isinstance", "(", "byte_array", ",", "bytearray", ")", ":", "raise", "TypeError", "(", "\"Type: %r of 'byte_array' arg must be 'bytearray'\"", ",", "type", "(", "byte_array", ")", ")", "length", "=", "len", "(", "byte_array", ")", "# 'm' and 'r' are mixing constants generated offline.", "# They're not really 'magic', they just happen to work well.", "m", "=", "0x5bd1e995", "r", "=", "24", "mod32bits", "=", "0xffffffff", "# Initialize the hash to a random value", "h", "=", "seed", "^", "length", "length4", "=", "length", "//", "4", "for", "i", "in", "range", "(", "length4", ")", ":", "i4", "=", "i", "*", "4", "k", "=", "(", "(", "byte_array", "[", "i4", "+", "0", "]", "&", "0xff", ")", "+", "(", "(", "byte_array", "[", "i4", "+", "1", "]", "&", "0xff", ")", "<<", "8", ")", "+", "(", "(", "byte_array", "[", "i4", "+", "2", "]", "&", "0xff", ")", "<<", "16", ")", "+", "(", "(", "byte_array", "[", "i4", "+", "3", "]", "&", "0xff", ")", "<<", "24", ")", ")", "k", "&=", "mod32bits", "k", "*=", "m", "k", "&=", "mod32bits", "k", "^=", "(", "k", "%", "0x100000000", ")", ">>", "r", "# k ^= k >>> r", "k", "&=", "mod32bits", "k", "*=", "m", "k", "&=", "mod32bits", "h", "*=", "m", "h", "&=", "mod32bits", "h", "^=", "k", "h", "&=", "mod32bits", "# Handle the last few bytes of the input array", "extra_bytes", "=", "length", "%", "4", "if", "extra_bytes", "==", "3", ":", "h", "^=", "(", "byte_array", "[", "(", "length", "&", "~", "3", ")", "+", "2", "]", "&", "0xff", ")", "<<", "16", "h", "&=", "mod32bits", "if", "extra_bytes", ">=", "2", ":", "h", "^=", "(", "byte_array", "[", "(", "length", "&", "~", "3", ")", "+", "1", "]", "&", "0xff", ")", "<<", "8", "h", "&=", "mod32bits", "if", "extra_bytes", ">=", "1", ":", "h", "^=", "(", "byte_array", "[", "length", "&", "~", "3", "]", "&", "0xff", ")", "h", "&=", "mod32bits", "h", "*=", "m", "h", "&=", "mod32bits", "h", "^=", "(", "h", "%", "0x100000000", ")", ">>", "13", "# h >>> 13;", "h", "&=", "mod32bits", "h", "*=", "m", "h", "&=", "mod32bits", "h", "^=", "(", "h", "%", "0x100000000", ")", ">>", "15", "# h >>> 15;", "h", "&=", "mod32bits", "return", "h" ]
Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244 Args: byte_array: bytearray - Raises TypeError otherwise Returns: MurmurHash2 of byte_array bytearray Raises: TypeError if byte_array arg is not of type bytearray
[ "Pure", "-", "python", "Murmur2", "implementation", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/partitioner.py#L31-L98
ciena/afkak
afkak/partitioner.py
HashedPartitioner.partition
def partition(self, key, partitions): """ Select a partition based on the hash of the key. :param key: Partition key :type key: text string or UTF-8 `bytes` or `bytearray` :param list partitions: An indexed sequence of partition identifiers. :returns: One of the given partition identifiers. The result will be the same each time the same key and partition list is passed. """ return partitions[(self._hash(key) & 0x7FFFFFFF) % len(partitions)]
python
def partition(self, key, partitions): """ Select a partition based on the hash of the key. :param key: Partition key :type key: text string or UTF-8 `bytes` or `bytearray` :param list partitions: An indexed sequence of partition identifiers. :returns: One of the given partition identifiers. The result will be the same each time the same key and partition list is passed. """ return partitions[(self._hash(key) & 0x7FFFFFFF) % len(partitions)]
[ "def", "partition", "(", "self", ",", "key", ",", "partitions", ")", ":", "return", "partitions", "[", "(", "self", ".", "_hash", "(", "key", ")", "&", "0x7FFFFFFF", ")", "%", "len", "(", "partitions", ")", "]" ]
Select a partition based on the hash of the key. :param key: Partition key :type key: text string or UTF-8 `bytes` or `bytearray` :param list partitions: An indexed sequence of partition identifiers. :returns: One of the given partition identifiers. The result will be the same each time the same key and partition list is passed.
[ "Select", "a", "partition", "based", "on", "the", "hash", "of", "the", "key", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/partitioner.py#L196-L208
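For illustration, a minimal usage sketch tying the two afkak records above together (pure_murmur2 and HashedPartitioner.partition). It assumes afkak is installed and that pure_murmur2 is importable from afkak.partitioner, as the file path in the record suggests; the key and partition list are made up.

from afkak.partitioner import pure_murmur2

key = bytearray(b"example-key")        # pure_murmur2 requires a bytearray, not bytes
partitions = [0, 1, 2, 3, 4, 5, 6, 7]  # hypothetical partition ids

h = pure_murmur2(key)                  # 32-bit MurmurHash2 with the default seed
# same masking and modulo as HashedPartitioner.partition() in the record above
chosen = partitions[(h & 0x7FFFFFFF) % len(partitions)]
print(h, chosen)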
ciena/afkak
afkak/codec.py
snappy_encode
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024): """ Compress the given data with the Snappy algorithm. :param bytes payload: Data to compress. :param bool xerial_compatible: If set then the stream is broken into length-prefixed blocks in a fashion compatible with the xerial snappy library. The format winds up being:: +-------------+------------+--------------+------------+--------------+ | Header | Block1_len | Block1 data | BlockN len | BlockN data | |-------------+------------+--------------+------------+--------------| | 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes | +-------------+------------+--------------+------------+--------------+ :param int xerial_blocksize: Number of bytes per chunk to independently Snappy encode. 32k is the default in the xerial library. :returns: Compressed bytes. :rtype: :class:`bytes` """ if not has_snappy(): # FIXME This should be static, not checked every call. raise NotImplementedError("Snappy codec is not available") if xerial_compatible: def _chunker(): for i in range(0, len(payload), xerial_blocksize): yield payload[i:i+xerial_blocksize] out = BytesIO() out.write(_XERIAL_HEADER) for chunk in _chunker(): block = snappy.compress(chunk) out.write(struct.pack('!i', len(block))) out.write(block) out.seek(0) return out.read() else: return snappy.compress(payload)
python
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024): """ Compress the given data with the Snappy algorithm. :param bytes payload: Data to compress. :param bool xerial_compatible: If set then the stream is broken into length-prefixed blocks in a fashion compatible with the xerial snappy library. The format winds up being:: +-------------+------------+--------------+------------+--------------+ | Header | Block1_len | Block1 data | BlockN len | BlockN data | |-------------+------------+--------------+------------+--------------| | 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes | +-------------+------------+--------------+------------+--------------+ :param int xerial_blocksize: Number of bytes per chunk to independently Snappy encode. 32k is the default in the xerial library. :returns: Compressed bytes. :rtype: :class:`bytes` """ if not has_snappy(): # FIXME This should be static, not checked every call. raise NotImplementedError("Snappy codec is not available") if xerial_compatible: def _chunker(): for i in range(0, len(payload), xerial_blocksize): yield payload[i:i+xerial_blocksize] out = BytesIO() out.write(_XERIAL_HEADER) for chunk in _chunker(): block = snappy.compress(chunk) out.write(struct.pack('!i', len(block))) out.write(block) out.seek(0) return out.read() else: return snappy.compress(payload)
[ "def", "snappy_encode", "(", "payload", ",", "xerial_compatible", "=", "False", ",", "xerial_blocksize", "=", "32", "*", "1024", ")", ":", "if", "not", "has_snappy", "(", ")", ":", "# FIXME This should be static, not checked every call.", "raise", "NotImplementedError", "(", "\"Snappy codec is not available\"", ")", "if", "xerial_compatible", ":", "def", "_chunker", "(", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "payload", ")", ",", "xerial_blocksize", ")", ":", "yield", "payload", "[", "i", ":", "i", "+", "xerial_blocksize", "]", "out", "=", "BytesIO", "(", ")", "out", ".", "write", "(", "_XERIAL_HEADER", ")", "for", "chunk", "in", "_chunker", "(", ")", ":", "block", "=", "snappy", ".", "compress", "(", "chunk", ")", "out", ".", "write", "(", "struct", ".", "pack", "(", "'!i'", ",", "len", "(", "block", ")", ")", ")", "out", ".", "write", "(", "block", ")", "out", ".", "seek", "(", "0", ")", "return", "out", ".", "read", "(", ")", "else", ":", "return", "snappy", ".", "compress", "(", "payload", ")" ]
Compress the given data with the Snappy algorithm. :param bytes payload: Data to compress. :param bool xerial_compatible: If set then the stream is broken into length-prefixed blocks in a fashion compatible with the xerial snappy library. The format winds up being:: +-------------+------------+--------------+------------+--------------+ | Header | Block1_len | Block1 data | BlockN len | BlockN data | |-------------+------------+--------------+------------+--------------| | 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes | +-------------+------------+--------------+------------+--------------+ :param int xerial_blocksize: Number of bytes per chunk to independently Snappy encode. 32k is the default in the xerial library. :returns: Compressed bytes. :rtype: :class:`bytes`
[ "Compress", "the", "given", "data", "with", "the", "Snappy", "algorithm", "." ]
train
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/codec.py#L69-L114
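As a companion to the snappy_encode record above, a rough decoder sketch for the xerial-style framing it produces. It assumes the python-snappy package and takes the 16-byte header length and the big-endian int32 block-length layout from the docstring, so it is an illustration rather than afkak's actual decode path.

import struct
import snappy

def xerial_decode(data, header_len=16):
    # skip the 16-byte header, then read (length, snappy block) pairs
    out = []
    pos = header_len
    while pos < len(data):
        (block_len,) = struct.unpack('!i', data[pos:pos + 4])
        pos += 4
        out.append(snappy.decompress(data[pos:pos + block_len]))
        pos += block_len
    return b''.join(out)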
escaped/django-video-encoding
video_encoding/files.py
VideoFile._get_video_info
def _get_video_info(self): """ Returns basic information about the video as dictionary. """ if not hasattr(self, '_info_cache'): encoding_backend = get_backend() try: path = os.path.abspath(self.path) except AttributeError: path = os.path.abspath(self.name) self._info_cache = encoding_backend.get_media_info(path) return self._info_cache
python
def _get_video_info(self): """ Returns basic information about the video as dictionary. """ if not hasattr(self, '_info_cache'): encoding_backend = get_backend() try: path = os.path.abspath(self.path) except AttributeError: path = os.path.abspath(self.name) self._info_cache = encoding_backend.get_media_info(path) return self._info_cache
[ "def", "_get_video_info", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_info_cache'", ")", ":", "encoding_backend", "=", "get_backend", "(", ")", "try", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "path", ")", "except", "AttributeError", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "name", ")", "self", ".", "_info_cache", "=", "encoding_backend", ".", "get_media_info", "(", "path", ")", "return", "self", ".", "_info_cache" ]
Returns basic information about the video as dictionary.
[ "Returns", "basic", "information", "about", "the", "video", "as", "dictionary", "." ]
train
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/files.py#L35-L46
escaped/django-video-encoding
video_encoding/backends/ffmpeg.py
FFmpegBackend.encode
def encode(self, source_path, target_path, params): # NOQA: C901 """ Encodes a video to a specified file. All encoder specific options are passed in using `params`. """ total_time = self.get_media_info(source_path)['duration'] cmds = [self.ffmpeg_path, '-i', source_path] cmds.extend(self.params) cmds.extend(params) cmds.extend([target_path]) process = self._spawn(cmds) buf = output = '' # update progress while True: # any more data? out = process.stderr.read(10) if not out: break out = out.decode(console_encoding) output += out buf += out try: line, buf = buf.split('\r', 1) except ValueError: continue try: time_str = RE_TIMECODE.findall(line)[0] except IndexError: continue # convert progress to percent time = 0 for part in time_str.split(':'): time = 60 * time + float(part) percent = time / total_time logger.debug('yield {}%'.format(percent)) yield percent if os.path.getsize(target_path) == 0: raise exceptions.FFmpegError("File size of generated file is 0") # wait for process to exit self._check_returncode(process) logger.debug(output) if not output: raise exceptions.FFmpegError("No output from FFmpeg.") yield 100
python
def encode(self, source_path, target_path, params): # NOQA: C901 """ Encodes a video to a specified file. All encoder specific options are passed in using `params`. """ total_time = self.get_media_info(source_path)['duration'] cmds = [self.ffmpeg_path, '-i', source_path] cmds.extend(self.params) cmds.extend(params) cmds.extend([target_path]) process = self._spawn(cmds) buf = output = '' # update progress while True: # any more data? out = process.stderr.read(10) if not out: break out = out.decode(console_encoding) output += out buf += out try: line, buf = buf.split('\r', 1) except ValueError: continue try: time_str = RE_TIMECODE.findall(line)[0] except IndexError: continue # convert progress to percent time = 0 for part in time_str.split(':'): time = 60 * time + float(part) percent = time / total_time logger.debug('yield {}%'.format(percent)) yield percent if os.path.getsize(target_path) == 0: raise exceptions.FFmpegError("File size of generated file is 0") # wait for process to exit self._check_returncode(process) logger.debug(output) if not output: raise exceptions.FFmpegError("No output from FFmpeg.") yield 100
[ "def", "encode", "(", "self", ",", "source_path", ",", "target_path", ",", "params", ")", ":", "# NOQA: C901", "total_time", "=", "self", ".", "get_media_info", "(", "source_path", ")", "[", "'duration'", "]", "cmds", "=", "[", "self", ".", "ffmpeg_path", ",", "'-i'", ",", "source_path", "]", "cmds", ".", "extend", "(", "self", ".", "params", ")", "cmds", ".", "extend", "(", "params", ")", "cmds", ".", "extend", "(", "[", "target_path", "]", ")", "process", "=", "self", ".", "_spawn", "(", "cmds", ")", "buf", "=", "output", "=", "''", "# update progress", "while", "True", ":", "# any more data?", "out", "=", "process", ".", "stderr", ".", "read", "(", "10", ")", "if", "not", "out", ":", "break", "out", "=", "out", ".", "decode", "(", "console_encoding", ")", "output", "+=", "out", "buf", "+=", "out", "try", ":", "line", ",", "buf", "=", "buf", ".", "split", "(", "'\\r'", ",", "1", ")", "except", "ValueError", ":", "continue", "try", ":", "time_str", "=", "RE_TIMECODE", ".", "findall", "(", "line", ")", "[", "0", "]", "except", "IndexError", ":", "continue", "# convert progress to percent", "time", "=", "0", "for", "part", "in", "time_str", ".", "split", "(", "':'", ")", ":", "time", "=", "60", "*", "time", "+", "float", "(", "part", ")", "percent", "=", "time", "/", "total_time", "logger", ".", "debug", "(", "'yield {}%'", ".", "format", "(", "percent", ")", ")", "yield", "percent", "if", "os", ".", "path", ".", "getsize", "(", "target_path", ")", "==", "0", ":", "raise", "exceptions", ".", "FFmpegError", "(", "\"File size of generated file is 0\"", ")", "# wait for process to exit", "self", ".", "_check_returncode", "(", "process", ")", "logger", ".", "debug", "(", "output", ")", "if", "not", "output", ":", "raise", "exceptions", ".", "FFmpegError", "(", "\"No output from FFmpeg.\"", ")", "yield", "100" ]
Encodes a video to a specified file. All encoder specific options are passed in using `params`.
[ "Encodes", "a", "video", "to", "a", "specified", "file", ".", "All", "encoder", "specific", "options", "are", "passed", "in", "using", "params", "." ]
train
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/backends/ffmpeg.py#L84-L139
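The progress loop in the encode() record above folds an ffmpeg timecode such as "HH:MM:SS.ss" into seconds with the repeated step time = 60 * time + float(part). A tiny standalone illustration (the timecode value is made up):

def timecode_to_seconds(time_str):
    # "00:01:30.50" -> 0 -> 1 -> 90.5
    seconds = 0.0
    for part in time_str.split(':'):
        seconds = 60 * seconds + float(part)
    return seconds

print(timecode_to_seconds('00:01:30.50'))  # 90.5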
escaped/django-video-encoding
video_encoding/backends/ffmpeg.py
FFmpegBackend.get_media_info
def get_media_info(self, video_path): """ Returns information about the given video as dict. """ cmds = [self.ffprobe_path, '-i', video_path] cmds.extend(['-print_format', 'json']) cmds.extend(['-show_format', '-show_streams']) process = self._spawn(cmds) stdout, __ = self._check_returncode(process) media_info = self._parse_media_info(stdout) return { 'duration': float(media_info['format']['duration']), 'width': int(media_info['video'][0]['width']), 'height': int(media_info['video'][0]['height']), }
python
def get_media_info(self, video_path): """ Returns information about the given video as dict. """ cmds = [self.ffprobe_path, '-i', video_path] cmds.extend(['-print_format', 'json']) cmds.extend(['-show_format', '-show_streams']) process = self._spawn(cmds) stdout, __ = self._check_returncode(process) media_info = self._parse_media_info(stdout) return { 'duration': float(media_info['format']['duration']), 'width': int(media_info['video'][0]['width']), 'height': int(media_info['video'][0]['height']), }
[ "def", "get_media_info", "(", "self", ",", "video_path", ")", ":", "cmds", "=", "[", "self", ".", "ffprobe_path", ",", "'-i'", ",", "video_path", "]", "cmds", ".", "extend", "(", "[", "'-print_format'", ",", "'json'", "]", ")", "cmds", ".", "extend", "(", "[", "'-show_format'", ",", "'-show_streams'", "]", ")", "process", "=", "self", ".", "_spawn", "(", "cmds", ")", "stdout", ",", "__", "=", "self", ".", "_check_returncode", "(", "process", ")", "media_info", "=", "self", ".", "_parse_media_info", "(", "stdout", ")", "return", "{", "'duration'", ":", "float", "(", "media_info", "[", "'format'", "]", "[", "'duration'", "]", ")", ",", "'width'", ":", "int", "(", "media_info", "[", "'video'", "]", "[", "0", "]", "[", "'width'", "]", ")", ",", "'height'", ":", "int", "(", "media_info", "[", "'video'", "]", "[", "0", "]", "[", "'height'", "]", ")", ",", "}" ]
Returns information about the given video as dict.
[ "Returns", "information", "about", "the", "given", "video", "as", "dict", "." ]
train
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/backends/ffmpeg.py#L152-L169
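A standalone sketch of the ffprobe call that get_media_info() above wraps, using only the standard library. It assumes an ffprobe binary on PATH and the usual JSON layout produced by -show_format/-show_streams; the video path and helper name are illustrative.

import json
import subprocess

def probe(video_path, ffprobe='ffprobe'):
    cmd = [ffprobe, '-i', video_path,
           '-print_format', 'json', '-show_format', '-show_streams']
    raw = subprocess.run(cmd, capture_output=True, check=True).stdout
    info = json.loads(raw)
    # pick the first video stream, mirroring the width/height lookup above
    video = next(s for s in info['streams'] if s['codec_type'] == 'video')
    return {
        'duration': float(info['format']['duration']),
        'width': int(video['width']),
        'height': int(video['height']),
    }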
escaped/django-video-encoding
video_encoding/backends/ffmpeg.py
FFmpegBackend.get_thumbnail
def get_thumbnail(self, video_path, at_time=0.5): """ Extracts an image of a video and returns its path. If the requested thumbnail is not within the duration of the video an `InvalidTimeError` is thrown. """ filename = os.path.basename(video_path) filename, __ = os.path.splitext(filename) _, image_path = tempfile.mkstemp(suffix='_{}.jpg'.format(filename)) video_duration = self.get_media_info(video_path)['duration'] if at_time > video_duration: raise exceptions.InvalidTimeError() thumbnail_time = at_time cmds = [self.ffmpeg_path, '-i', video_path, '-vframes', '1'] cmds.extend(['-ss', str(thumbnail_time), '-y', image_path]) process = self._spawn(cmds) self._check_returncode(process) if not os.path.getsize(image_path): # we somehow failed to generate thumbnail os.unlink(image_path) raise exceptions.InvalidTimeError() return image_path
python
def get_thumbnail(self, video_path, at_time=0.5): """ Extracts an image of a video and returns its path. If the requested thumbnail is not within the duration of the video an `InvalidTimeError` is thrown. """ filename = os.path.basename(video_path) filename, __ = os.path.splitext(filename) _, image_path = tempfile.mkstemp(suffix='_{}.jpg'.format(filename)) video_duration = self.get_media_info(video_path)['duration'] if at_time > video_duration: raise exceptions.InvalidTimeError() thumbnail_time = at_time cmds = [self.ffmpeg_path, '-i', video_path, '-vframes', '1'] cmds.extend(['-ss', str(thumbnail_time), '-y', image_path]) process = self._spawn(cmds) self._check_returncode(process) if not os.path.getsize(image_path): # we somehow failed to generate thumbnail os.unlink(image_path) raise exceptions.InvalidTimeError() return image_path
[ "def", "get_thumbnail", "(", "self", ",", "video_path", ",", "at_time", "=", "0.5", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "video_path", ")", "filename", ",", "__", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "_", ",", "image_path", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'_{}.jpg'", ".", "format", "(", "filename", ")", ")", "video_duration", "=", "self", ".", "get_media_info", "(", "video_path", ")", "[", "'duration'", "]", "if", "at_time", ">", "video_duration", ":", "raise", "exceptions", ".", "InvalidTimeError", "(", ")", "thumbnail_time", "=", "at_time", "cmds", "=", "[", "self", ".", "ffmpeg_path", ",", "'-i'", ",", "video_path", ",", "'-vframes'", ",", "'1'", "]", "cmds", ".", "extend", "(", "[", "'-ss'", ",", "str", "(", "thumbnail_time", ")", ",", "'-y'", ",", "image_path", "]", ")", "process", "=", "self", ".", "_spawn", "(", "cmds", ")", "self", ".", "_check_returncode", "(", "process", ")", "if", "not", "os", ".", "path", ".", "getsize", "(", "image_path", ")", ":", "# we somehow failed to generate thumbnail", "os", ".", "unlink", "(", "image_path", ")", "raise", "exceptions", ".", "InvalidTimeError", "(", ")", "return", "image_path" ]
Extracts an image of a video and returns its path. If the requested thumbnail is not within the duration of the video an `InvalidTimeError` is thrown.
[ "Extracts", "an", "image", "of", "a", "video", "and", "returns", "its", "path", "." ]
train
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/backends/ffmpeg.py#L171-L198
escaped/django-video-encoding
video_encoding/tasks.py
convert_all_videos
def convert_all_videos(app_label, model_name, object_pk): """ Automatically converts all videos of a given instance. """ # get instance Model = apps.get_model(app_label=app_label, model_name=model_name) instance = Model.objects.get(pk=object_pk) # search for `VideoFields` fields = instance._meta.fields for field in fields: if isinstance(field, VideoField): if not getattr(instance, field.name): # ignore empty fields continue # trigger conversion fieldfile = getattr(instance, field.name) convert_video(fieldfile)
python
def convert_all_videos(app_label, model_name, object_pk): """ Automatically converts all videos of a given instance. """ # get instance Model = apps.get_model(app_label=app_label, model_name=model_name) instance = Model.objects.get(pk=object_pk) # search for `VideoFields` fields = instance._meta.fields for field in fields: if isinstance(field, VideoField): if not getattr(instance, field.name): # ignore empty fields continue # trigger conversion fieldfile = getattr(instance, field.name) convert_video(fieldfile)
[ "def", "convert_all_videos", "(", "app_label", ",", "model_name", ",", "object_pk", ")", ":", "# get instance", "Model", "=", "apps", ".", "get_model", "(", "app_label", "=", "app_label", ",", "model_name", "=", "model_name", ")", "instance", "=", "Model", ".", "objects", ".", "get", "(", "pk", "=", "object_pk", ")", "# search for `VideoFields`", "fields", "=", "instance", ".", "_meta", ".", "fields", "for", "field", "in", "fields", ":", "if", "isinstance", "(", "field", ",", "VideoField", ")", ":", "if", "not", "getattr", "(", "instance", ",", "field", ".", "name", ")", ":", "# ignore empty fields", "continue", "# trigger conversion", "fieldfile", "=", "getattr", "(", "instance", ",", "field", ".", "name", ")", "convert_video", "(", "fieldfile", ")" ]
Automatically converts all videos of a given instance.
[ "Automatically", "converts", "all", "videos", "of", "a", "given", "instance", "." ]
train
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/tasks.py#L15-L33
escaped/django-video-encoding
video_encoding/tasks.py
convert_video
def convert_video(fieldfile, force=False): """ Converts a given video file into all defined formats. """ instance = fieldfile.instance field = fieldfile.field filename = os.path.basename(fieldfile.path) source_path = fieldfile.path encoding_backend = get_backend() for options in settings.VIDEO_ENCODING_FORMATS[encoding_backend.name]: video_format, created = Format.objects.get_or_create( object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance), field_name=field.name, format=options['name']) # do not reencode if not requested if video_format.file and not force: continue else: # set progress to 0 video_format.reset_progress() # TODO do not upscale videos _, target_path = tempfile.mkstemp( suffix='_{name}.{extension}'.format(**options)) try: encoding = encoding_backend.encode( source_path, target_path, options['params']) while encoding: try: progress = next(encoding) except StopIteration: break video_format.update_progress(progress) except VideoEncodingError: # TODO handle with more care video_format.delete() os.remove(target_path) continue # save encoded file video_format.file.save( '{filename}_{name}.{extension}'.format(filename=filename, **options), File(open(target_path, mode='rb'))) video_format.update_progress(100) # now we are ready # remove temporary file os.remove(target_path)
python
def convert_video(fieldfile, force=False): """ Converts a given video file into all defined formats. """ instance = fieldfile.instance field = fieldfile.field filename = os.path.basename(fieldfile.path) source_path = fieldfile.path encoding_backend = get_backend() for options in settings.VIDEO_ENCODING_FORMATS[encoding_backend.name]: video_format, created = Format.objects.get_or_create( object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance), field_name=field.name, format=options['name']) # do not reencode if not requested if video_format.file and not force: continue else: # set progress to 0 video_format.reset_progress() # TODO do not upscale videos _, target_path = tempfile.mkstemp( suffix='_{name}.{extension}'.format(**options)) try: encoding = encoding_backend.encode( source_path, target_path, options['params']) while encoding: try: progress = next(encoding) except StopIteration: break video_format.update_progress(progress) except VideoEncodingError: # TODO handle with more care video_format.delete() os.remove(target_path) continue # save encoded file video_format.file.save( '{filename}_{name}.{extension}'.format(filename=filename, **options), File(open(target_path, mode='rb'))) video_format.update_progress(100) # now we are ready # remove temporary file os.remove(target_path)
[ "def", "convert_video", "(", "fieldfile", ",", "force", "=", "False", ")", ":", "instance", "=", "fieldfile", ".", "instance", "field", "=", "fieldfile", ".", "field", "filename", "=", "os", ".", "path", ".", "basename", "(", "fieldfile", ".", "path", ")", "source_path", "=", "fieldfile", ".", "path", "encoding_backend", "=", "get_backend", "(", ")", "for", "options", "in", "settings", ".", "VIDEO_ENCODING_FORMATS", "[", "encoding_backend", ".", "name", "]", ":", "video_format", ",", "created", "=", "Format", ".", "objects", ".", "get_or_create", "(", "object_id", "=", "instance", ".", "pk", ",", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "instance", ")", ",", "field_name", "=", "field", ".", "name", ",", "format", "=", "options", "[", "'name'", "]", ")", "# do not reencode if not requested", "if", "video_format", ".", "file", "and", "not", "force", ":", "continue", "else", ":", "# set progress to 0", "video_format", ".", "reset_progress", "(", ")", "# TODO do not upscale videos", "_", ",", "target_path", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'_{name}.{extension}'", ".", "format", "(", "*", "*", "options", ")", ")", "try", ":", "encoding", "=", "encoding_backend", ".", "encode", "(", "source_path", ",", "target_path", ",", "options", "[", "'params'", "]", ")", "while", "encoding", ":", "try", ":", "progress", "=", "next", "(", "encoding", ")", "except", "StopIteration", ":", "break", "video_format", ".", "update_progress", "(", "progress", ")", "except", "VideoEncodingError", ":", "# TODO handle with more care", "video_format", ".", "delete", "(", ")", "os", ".", "remove", "(", "target_path", ")", "continue", "# save encoded file", "video_format", ".", "file", ".", "save", "(", "'{filename}_{name}.{extension}'", ".", "format", "(", "filename", "=", "filename", ",", "*", "*", "options", ")", ",", "File", "(", "open", "(", "target_path", ",", "mode", "=", "'rb'", ")", ")", ")", "video_format", ".", "update_progress", "(", "100", ")", "# now we are ready", "# remove temporary file", "os", ".", "remove", "(", "target_path", ")" ]
Converts a given video file into all defined formats.
[ "Converts", "a", "given", "video", "file", "into", "all", "defined", "formats", "." ]
train
https://github.com/escaped/django-video-encoding/blob/50d228dd91aca40acc7f9293808b1e87cb645e5d/video_encoding/tasks.py#L36-L90
benvanwerkhoven/kernel_tuner
kernel_tuner/strategies/firefly_algorithm.py
tune
def tune(runner, kernel_options, device_options, tuning_options): """ Find the best performing kernel configuration in the parameter space :params runner: A runner from kernel_tuner.runners :type runner: kernel_tuner.runner :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: dict :param device_options: A dictionary with all options for the device on which the kernel should be tuned. :type device_options: dict :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: dict :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains a information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict() """ results = [] cache = {} #scale variables in x because PSO works with velocities to visit different configurations tuning_options["scaling"] = True #using this instead of get_bounds because scaling is used bounds, _, _ = get_bounds_x0_eps(tuning_options) args = (kernel_options, tuning_options, runner, results, cache) num_particles = 20 maxiter = 100 #parameters needed by the Firefly Algorithm B0 = 1.0 gamma = 1.0 alpha = 0.20 best_time_global = 1e20 best_position_global = [] # init particle swarm swarm = [] for i in range(0, num_particles): swarm.append(Firefly(bounds, args)) # compute initial intensities for j in range(num_particles): swarm[j].compute_intensity(_cost_func) for c in range(maxiter): if tuning_options.verbose: print("start iteration ", c, "best time global", best_time_global) # compare all to all and compute attractiveness for i in range(num_particles): for j in range(num_particles): if swarm[i].intensity < swarm[j].intensity: dist = swarm[i].distance_to(swarm[j]) beta = B0 * np.exp(-gamma * dist * dist) swarm[i].move_towards(swarm[j], beta, alpha) swarm[i].compute_intensity(_cost_func) # update global best if needed, actually only used for printing if swarm[i].time <= best_time_global: best_position_global = swarm[i].position best_time_global = swarm[i].time swarm.sort(key=lambda x: x.time) if tuning_options.verbose: print('Final result:') print(best_position_global) print(best_time_global) return results, runner.dev.get_environment()
python
def tune(runner, kernel_options, device_options, tuning_options): """ Find the best performing kernel configuration in the parameter space :params runner: A runner from kernel_tuner.runners :type runner: kernel_tuner.runner :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: dict :param device_options: A dictionary with all options for the device on which the kernel should be tuned. :type device_options: dict :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: dict :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains a information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict() """ results = [] cache = {} #scale variables in x because PSO works with velocities to visit different configurations tuning_options["scaling"] = True #using this instead of get_bounds because scaling is used bounds, _, _ = get_bounds_x0_eps(tuning_options) args = (kernel_options, tuning_options, runner, results, cache) num_particles = 20 maxiter = 100 #parameters needed by the Firefly Algorithm B0 = 1.0 gamma = 1.0 alpha = 0.20 best_time_global = 1e20 best_position_global = [] # init particle swarm swarm = [] for i in range(0, num_particles): swarm.append(Firefly(bounds, args)) # compute initial intensities for j in range(num_particles): swarm[j].compute_intensity(_cost_func) for c in range(maxiter): if tuning_options.verbose: print("start iteration ", c, "best time global", best_time_global) # compare all to all and compute attractiveness for i in range(num_particles): for j in range(num_particles): if swarm[i].intensity < swarm[j].intensity: dist = swarm[i].distance_to(swarm[j]) beta = B0 * np.exp(-gamma * dist * dist) swarm[i].move_towards(swarm[j], beta, alpha) swarm[i].compute_intensity(_cost_func) # update global best if needed, actually only used for printing if swarm[i].time <= best_time_global: best_position_global = swarm[i].position best_time_global = swarm[i].time swarm.sort(key=lambda x: x.time) if tuning_options.verbose: print('Final result:') print(best_position_global) print(best_time_global) return results, runner.dev.get_environment()
[ "def", "tune", "(", "runner", ",", "kernel_options", ",", "device_options", ",", "tuning_options", ")", ":", "results", "=", "[", "]", "cache", "=", "{", "}", "#scale variables in x because PSO works with velocities to visit different configurations", "tuning_options", "[", "\"scaling\"", "]", "=", "True", "#using this instead of get_bounds because scaling is used", "bounds", ",", "_", ",", "_", "=", "get_bounds_x0_eps", "(", "tuning_options", ")", "args", "=", "(", "kernel_options", ",", "tuning_options", ",", "runner", ",", "results", ",", "cache", ")", "num_particles", "=", "20", "maxiter", "=", "100", "#parameters needed by the Firefly Algorithm", "B0", "=", "1.0", "gamma", "=", "1.0", "alpha", "=", "0.20", "best_time_global", "=", "1e20", "best_position_global", "=", "[", "]", "# init particle swarm", "swarm", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "num_particles", ")", ":", "swarm", ".", "append", "(", "Firefly", "(", "bounds", ",", "args", ")", ")", "# compute initial intensities", "for", "j", "in", "range", "(", "num_particles", ")", ":", "swarm", "[", "j", "]", ".", "compute_intensity", "(", "_cost_func", ")", "for", "c", "in", "range", "(", "maxiter", ")", ":", "if", "tuning_options", ".", "verbose", ":", "print", "(", "\"start iteration \"", ",", "c", ",", "\"best time global\"", ",", "best_time_global", ")", "# compare all to all and compute attractiveness", "for", "i", "in", "range", "(", "num_particles", ")", ":", "for", "j", "in", "range", "(", "num_particles", ")", ":", "if", "swarm", "[", "i", "]", ".", "intensity", "<", "swarm", "[", "j", "]", ".", "intensity", ":", "dist", "=", "swarm", "[", "i", "]", ".", "distance_to", "(", "swarm", "[", "j", "]", ")", "beta", "=", "B0", "*", "np", ".", "exp", "(", "-", "gamma", "*", "dist", "*", "dist", ")", "swarm", "[", "i", "]", ".", "move_towards", "(", "swarm", "[", "j", "]", ",", "beta", ",", "alpha", ")", "swarm", "[", "i", "]", ".", "compute_intensity", "(", "_cost_func", ")", "# update global best if needed, actually only used for printing", "if", "swarm", "[", "i", "]", ".", "time", "<=", "best_time_global", ":", "best_position_global", "=", "swarm", "[", "i", "]", ".", "position", "best_time_global", "=", "swarm", "[", "i", "]", ".", "time", "swarm", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "time", ")", "if", "tuning_options", ".", "verbose", ":", "print", "(", "'Final result:'", ")", "print", "(", "best_position_global", ")", "print", "(", "best_time_global", ")", "return", "results", ",", "runner", ".", "dev", ".", "get_environment", "(", ")" ]
Find the best performing kernel configuration in the parameter space :params runner: A runner from kernel_tuner.runners :type runner: kernel_tuner.runner :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: dict :param device_options: A dictionary with all options for the device on which the kernel should be tuned. :type device_options: dict :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: dict :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains a information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict()
[ "Find", "the", "best", "performing", "kernel", "configuration", "in", "the", "parameter", "space" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/firefly_algorithm.py#L8-L90
benvanwerkhoven/kernel_tuner
kernel_tuner/strategies/firefly_algorithm.py
Firefly.distance_to
def distance_to(self, other): """Return Euclidian distance between self and other Firefly""" return np.linalg.norm(self.position-other.position)
python
def distance_to(self, other): """Return Euclidian distance between self and other Firefly""" return np.linalg.norm(self.position-other.position)
[ "def", "distance_to", "(", "self", ",", "other", ")", ":", "return", "np", ".", "linalg", ".", "norm", "(", "self", ".", "position", "-", "other", ".", "position", ")" ]
Return Euclidian distance between self and other Firefly
[ "Return", "Euclidian", "distance", "between", "self", "and", "other", "Firefly" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/firefly_algorithm.py#L102-L104
benvanwerkhoven/kernel_tuner
kernel_tuner/strategies/firefly_algorithm.py
Firefly.compute_intensity
def compute_intensity(self, _cost_func): """Evaluate cost function and compute intensity at this position""" self.evaluate(_cost_func) self.intensity = 1 / self.time
python
def compute_intensity(self, _cost_func): """Evaluate cost function and compute intensity at this position""" self.evaluate(_cost_func) self.intensity = 1 / self.time
[ "def", "compute_intensity", "(", "self", ",", "_cost_func", ")", ":", "self", ".", "evaluate", "(", "_cost_func", ")", "self", ".", "intensity", "=", "1", "/", "self", ".", "time" ]
Evaluate cost function and compute intensity at this position
[ "Evaluate", "cost", "function", "and", "compute", "intensity", "at", "this", "position" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/firefly_algorithm.py#L106-L109
benvanwerkhoven/kernel_tuner
kernel_tuner/strategies/firefly_algorithm.py
Firefly.move_towards
def move_towards(self, other, beta, alpha): """Move firefly towards another given beta and alpha values""" self.position += beta * (other.position - self.position) self.position += alpha * (np.random.uniform(-0.5, 0.5, len(self.position))) self.position = np.minimum(self.position, [b[1] for b in self.bounds]) self.position = np.maximum(self.position, [b[0] for b in self.bounds])
python
def move_towards(self, other, beta, alpha): """Move firefly towards another given beta and alpha values""" self.position += beta * (other.position - self.position) self.position += alpha * (np.random.uniform(-0.5, 0.5, len(self.position))) self.position = np.minimum(self.position, [b[1] for b in self.bounds]) self.position = np.maximum(self.position, [b[0] for b in self.bounds])
[ "def", "move_towards", "(", "self", ",", "other", ",", "beta", ",", "alpha", ")", ":", "self", ".", "position", "+=", "beta", "*", "(", "other", ".", "position", "-", "self", ".", "position", ")", "self", ".", "position", "+=", "alpha", "*", "(", "np", ".", "random", ".", "uniform", "(", "-", "0.5", ",", "0.5", ",", "len", "(", "self", ".", "position", ")", ")", ")", "self", ".", "position", "=", "np", ".", "minimum", "(", "self", ".", "position", ",", "[", "b", "[", "1", "]", "for", "b", "in", "self", ".", "bounds", "]", ")", "self", ".", "position", "=", "np", ".", "maximum", "(", "self", ".", "position", ",", "[", "b", "[", "0", "]", "for", "b", "in", "self", ".", "bounds", "]", ")" ]
Move firefly towards another given beta and alpha values
[ "Move", "firefly", "towards", "another", "given", "beta", "and", "alpha", "values" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/strategies/firefly_algorithm.py#L111-L116
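A small numeric sketch of one attractiveness-weighted move, combining the distance_to(), beta = B0 * exp(-gamma * d^2) and move_towards() pieces from the kernel_tuner records above. All positions, bounds and constants below are illustrative; np.clip here stands in for the minimum/maximum pair used in the record.

import numpy as np

B0, gamma, alpha = 1.0, 1.0, 0.20
bounds = [(0.0, 1.0), (0.0, 1.0)]

dim = np.array([0.2, 0.8])      # dimmer (worse) firefly
bright = np.array([0.5, 0.5])   # brighter (better) firefly

dist = np.linalg.norm(dim - bright)
beta = B0 * np.exp(-gamma * dist * dist)

new_pos = dim + beta * (bright - dim)
new_pos += alpha * np.random.uniform(-0.5, 0.5, len(new_pos))
new_pos = np.clip(new_pos, [b[0] for b in bounds], [b[1] for b in bounds])
print(new_pos)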
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
_default_verify_function
def _default_verify_function(instance, answer, result_host, atol, verbose): """default verify function based on numpy.allclose""" #first check if the length is the same if len(instance.arguments) != len(answer): raise TypeError("The length of argument list and provided results do not match.") #for each element in the argument list, check if the types match for i, arg in enumerate(instance.arguments): if answer[i] is not None: #skip None elements in the answer list if isinstance(answer[i], numpy.ndarray) and isinstance(arg, numpy.ndarray): if answer[i].dtype != arg.dtype: raise TypeError("Element " + str(i) + " of the expected results list is not of the same dtype as the kernel output: " + str(answer[i].dtype) + " != " + str(arg.dtype) + ".") if answer[i].size != arg.size: raise TypeError("Element " + str(i) + " of the expected results list has a size different from " + "the kernel argument: " + str(answer[i].size) + " != " + str(arg.size) + ".") elif isinstance(answer[i], numpy.number) and isinstance(arg, numpy.number): if answer[i].dtype != arg.dtype: raise TypeError("Element " + str(i) + " of the expected results list is not the same as the kernel output: " + str(answer[i].dtype) + " != " + str(arg.dtype) + ".") else: #either answer[i] and argument have different types or answer[i] is not a numpy type if not isinstance(answer[i], numpy.ndarray) or not isinstance(answer[i], numpy.number): raise TypeError("Element " + str(i) + " of expected results list is not a numpy array or numpy scalar.") else: raise TypeError("Element " + str(i) + " of expected results list and kernel arguments have different types.") def _ravel(a): if hasattr(a, 'ravel') and len(a.shape) > 1: return a.ravel() return a def _flatten(a): if hasattr(a, 'flatten'): return a.flatten() return a correct = True for i, arg in enumerate(instance.arguments): expected = answer[i] if expected is not None: result = _ravel(result_host[i]) expected = _flatten(expected) output_test = numpy.allclose(expected, result, atol=atol) if not output_test and verbose: print("Error: " + util.get_config_string(instance.params) + " detected during correctness check") print("this error occured when checking value of the %oth kernel argument" % (i,)) print("Printing kernel output and expected result, set verbose=False to suppress this debug print") numpy.set_printoptions(edgeitems=50) print("Kernel output:") print(result) print("Expected:") print(expected) correct = correct and output_test if not correct: logging.debug('correctness check has found a correctness issue') raise Exception("Error: " + util.get_config_string(instance.params) + " failed correctness check") return correct
python
def _default_verify_function(instance, answer, result_host, atol, verbose): """default verify function based on numpy.allclose""" #first check if the length is the same if len(instance.arguments) != len(answer): raise TypeError("The length of argument list and provided results do not match.") #for each element in the argument list, check if the types match for i, arg in enumerate(instance.arguments): if answer[i] is not None: #skip None elements in the answer list if isinstance(answer[i], numpy.ndarray) and isinstance(arg, numpy.ndarray): if answer[i].dtype != arg.dtype: raise TypeError("Element " + str(i) + " of the expected results list is not of the same dtype as the kernel output: " + str(answer[i].dtype) + " != " + str(arg.dtype) + ".") if answer[i].size != arg.size: raise TypeError("Element " + str(i) + " of the expected results list has a size different from " + "the kernel argument: " + str(answer[i].size) + " != " + str(arg.size) + ".") elif isinstance(answer[i], numpy.number) and isinstance(arg, numpy.number): if answer[i].dtype != arg.dtype: raise TypeError("Element " + str(i) + " of the expected results list is not the same as the kernel output: " + str(answer[i].dtype) + " != " + str(arg.dtype) + ".") else: #either answer[i] and argument have different types or answer[i] is not a numpy type if not isinstance(answer[i], numpy.ndarray) or not isinstance(answer[i], numpy.number): raise TypeError("Element " + str(i) + " of expected results list is not a numpy array or numpy scalar.") else: raise TypeError("Element " + str(i) + " of expected results list and kernel arguments have different types.") def _ravel(a): if hasattr(a, 'ravel') and len(a.shape) > 1: return a.ravel() return a def _flatten(a): if hasattr(a, 'flatten'): return a.flatten() return a correct = True for i, arg in enumerate(instance.arguments): expected = answer[i] if expected is not None: result = _ravel(result_host[i]) expected = _flatten(expected) output_test = numpy.allclose(expected, result, atol=atol) if not output_test and verbose: print("Error: " + util.get_config_string(instance.params) + " detected during correctness check") print("this error occured when checking value of the %oth kernel argument" % (i,)) print("Printing kernel output and expected result, set verbose=False to suppress this debug print") numpy.set_printoptions(edgeitems=50) print("Kernel output:") print(result) print("Expected:") print(expected) correct = correct and output_test if not correct: logging.debug('correctness check has found a correctness issue') raise Exception("Error: " + util.get_config_string(instance.params) + " failed correctness check") return correct
[ "def", "_default_verify_function", "(", "instance", ",", "answer", ",", "result_host", ",", "atol", ",", "verbose", ")", ":", "#first check if the length is the same", "if", "len", "(", "instance", ".", "arguments", ")", "!=", "len", "(", "answer", ")", ":", "raise", "TypeError", "(", "\"The length of argument list and provided results do not match.\"", ")", "#for each element in the argument list, check if the types match", "for", "i", ",", "arg", "in", "enumerate", "(", "instance", ".", "arguments", ")", ":", "if", "answer", "[", "i", "]", "is", "not", "None", ":", "#skip None elements in the answer list", "if", "isinstance", "(", "answer", "[", "i", "]", ",", "numpy", ".", "ndarray", ")", "and", "isinstance", "(", "arg", ",", "numpy", ".", "ndarray", ")", ":", "if", "answer", "[", "i", "]", ".", "dtype", "!=", "arg", ".", "dtype", ":", "raise", "TypeError", "(", "\"Element \"", "+", "str", "(", "i", ")", "+", "\" of the expected results list is not of the same dtype as the kernel output: \"", "+", "str", "(", "answer", "[", "i", "]", ".", "dtype", ")", "+", "\" != \"", "+", "str", "(", "arg", ".", "dtype", ")", "+", "\".\"", ")", "if", "answer", "[", "i", "]", ".", "size", "!=", "arg", ".", "size", ":", "raise", "TypeError", "(", "\"Element \"", "+", "str", "(", "i", ")", "+", "\" of the expected results list has a size different from \"", "+", "\"the kernel argument: \"", "+", "str", "(", "answer", "[", "i", "]", ".", "size", ")", "+", "\" != \"", "+", "str", "(", "arg", ".", "size", ")", "+", "\".\"", ")", "elif", "isinstance", "(", "answer", "[", "i", "]", ",", "numpy", ".", "number", ")", "and", "isinstance", "(", "arg", ",", "numpy", ".", "number", ")", ":", "if", "answer", "[", "i", "]", ".", "dtype", "!=", "arg", ".", "dtype", ":", "raise", "TypeError", "(", "\"Element \"", "+", "str", "(", "i", ")", "+", "\" of the expected results list is not the same as the kernel output: \"", "+", "str", "(", "answer", "[", "i", "]", ".", "dtype", ")", "+", "\" != \"", "+", "str", "(", "arg", ".", "dtype", ")", "+", "\".\"", ")", "else", ":", "#either answer[i] and argument have different types or answer[i] is not a numpy type", "if", "not", "isinstance", "(", "answer", "[", "i", "]", ",", "numpy", ".", "ndarray", ")", "or", "not", "isinstance", "(", "answer", "[", "i", "]", ",", "numpy", ".", "number", ")", ":", "raise", "TypeError", "(", "\"Element \"", "+", "str", "(", "i", ")", "+", "\" of expected results list is not a numpy array or numpy scalar.\"", ")", "else", ":", "raise", "TypeError", "(", "\"Element \"", "+", "str", "(", "i", ")", "+", "\" of expected results list and kernel arguments have different types.\"", ")", "def", "_ravel", "(", "a", ")", ":", "if", "hasattr", "(", "a", ",", "'ravel'", ")", "and", "len", "(", "a", ".", "shape", ")", ">", "1", ":", "return", "a", ".", "ravel", "(", ")", "return", "a", "def", "_flatten", "(", "a", ")", ":", "if", "hasattr", "(", "a", ",", "'flatten'", ")", ":", "return", "a", ".", "flatten", "(", ")", "return", "a", "correct", "=", "True", "for", "i", ",", "arg", "in", "enumerate", "(", "instance", ".", "arguments", ")", ":", "expected", "=", "answer", "[", "i", "]", "if", "expected", "is", "not", "None", ":", "result", "=", "_ravel", "(", "result_host", "[", "i", "]", ")", "expected", "=", "_flatten", "(", "expected", ")", "output_test", "=", "numpy", ".", "allclose", "(", "expected", ",", "result", ",", "atol", "=", "atol", ")", "if", "not", "output_test", "and", "verbose", ":", "print", "(", "\"Error: \"", "+", "util", ".", 
"get_config_string", "(", "instance", ".", "params", ")", "+", "\" detected during correctness check\"", ")", "print", "(", "\"this error occured when checking value of the %oth kernel argument\"", "%", "(", "i", ",", ")", ")", "print", "(", "\"Printing kernel output and expected result, set verbose=False to suppress this debug print\"", ")", "numpy", ".", "set_printoptions", "(", "edgeitems", "=", "50", ")", "print", "(", "\"Kernel output:\"", ")", "print", "(", "result", ")", "print", "(", "\"Expected:\"", ")", "print", "(", "expected", ")", "correct", "=", "correct", "and", "output_test", "if", "not", "correct", ":", "logging", ".", "debug", "(", "'correctness check has found a correctness issue'", ")", "raise", "Exception", "(", "\"Error: \"", "+", "util", ".", "get_config_string", "(", "instance", ".", "params", ")", "+", "\" failed correctness check\"", ")", "return", "correct" ]
default verify function based on numpy.allclose
[ "default", "verify", "function", "based", "on", "numpy", ".", "allclose" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L278-L345
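For contrast with the default verify function in the record above, a sketch of a user-supplied verify callable. The (answer, result_host, atol=...) signature follows the call in DeviceInterface.check_kernel_output further down; the partial-comparison behaviour and the function name are assumptions for illustration.

import numpy

def verify_partial(answer, result_host, atol=1e-6):
    # compare only the arguments for which an expected answer was supplied
    return all(
        numpy.allclose(expected, numpy.ravel(result), atol=atol)
        for expected, result in zip(answer, result_host)
        if expected is not None
    )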
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
DeviceInterface.benchmark
def benchmark(self, func, gpu_args, instance, times, verbose): """benchmark the kernel instance""" logging.debug('benchmark ' + instance.name) logging.debug('thread block dimensions x,y,z=%d,%d,%d', *instance.threads) logging.debug('grid dimensions x,y,z=%d,%d,%d', *instance.grid) time = None try: time = self.dev.benchmark(func, gpu_args, instance.threads, instance.grid, times) except Exception as e: #some launches may fail because too many registers are required #to run the kernel given the current thread block size #the desired behavior is to simply skip over this configuration #and proceed to try the next one skippable_exceptions = ["too many resources requested for launch", "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"] if any([skip_str in str(e) for skip_str in skippable_exceptions]): logging.debug('benchmark fails due to runtime failure too many resources required') if verbose: print("skipping config", instance.name, "reason: too many resources requested for launch") else: logging.debug('benchmark encountered runtime failure: ' + str(e)) print("Error while benchmarking:", instance.name) raise e return time
python
def benchmark(self, func, gpu_args, instance, times, verbose): """benchmark the kernel instance""" logging.debug('benchmark ' + instance.name) logging.debug('thread block dimensions x,y,z=%d,%d,%d', *instance.threads) logging.debug('grid dimensions x,y,z=%d,%d,%d', *instance.grid) time = None try: time = self.dev.benchmark(func, gpu_args, instance.threads, instance.grid, times) except Exception as e: #some launches may fail because too many registers are required #to run the kernel given the current thread block size #the desired behavior is to simply skip over this configuration #and proceed to try the next one skippable_exceptions = ["too many resources requested for launch", "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"] if any([skip_str in str(e) for skip_str in skippable_exceptions]): logging.debug('benchmark fails due to runtime failure too many resources required') if verbose: print("skipping config", instance.name, "reason: too many resources requested for launch") else: logging.debug('benchmark encountered runtime failure: ' + str(e)) print("Error while benchmarking:", instance.name) raise e return time
[ "def", "benchmark", "(", "self", ",", "func", ",", "gpu_args", ",", "instance", ",", "times", ",", "verbose", ")", ":", "logging", ".", "debug", "(", "'benchmark '", "+", "instance", ".", "name", ")", "logging", ".", "debug", "(", "'thread block dimensions x,y,z=%d,%d,%d'", ",", "*", "instance", ".", "threads", ")", "logging", ".", "debug", "(", "'grid dimensions x,y,z=%d,%d,%d'", ",", "*", "instance", ".", "grid", ")", "time", "=", "None", "try", ":", "time", "=", "self", ".", "dev", ".", "benchmark", "(", "func", ",", "gpu_args", ",", "instance", ".", "threads", ",", "instance", ".", "grid", ",", "times", ")", "except", "Exception", "as", "e", ":", "#some launches may fail because too many registers are required", "#to run the kernel given the current thread block size", "#the desired behavior is to simply skip over this configuration", "#and proceed to try the next one", "skippable_exceptions", "=", "[", "\"too many resources requested for launch\"", ",", "\"OUT_OF_RESOURCES\"", ",", "\"INVALID_WORK_GROUP_SIZE\"", "]", "if", "any", "(", "[", "skip_str", "in", "str", "(", "e", ")", "for", "skip_str", "in", "skippable_exceptions", "]", ")", ":", "logging", ".", "debug", "(", "'benchmark fails due to runtime failure too many resources required'", ")", "if", "verbose", ":", "print", "(", "\"skipping config\"", ",", "instance", ".", "name", ",", "\"reason: too many resources requested for launch\"", ")", "else", ":", "logging", ".", "debug", "(", "'benchmark encountered runtime failure: '", "+", "str", "(", "e", ")", ")", "print", "(", "\"Error while benchmarking:\"", ",", "instance", ".", "name", ")", "raise", "e", "return", "time" ]
benchmark the kernel instance
[ "benchmark", "the", "kernel", "instance" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L68-L91
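benchmark treats launch failures caused by resource exhaustion as skippable and returns None for that configuration instead of raising. A small sketch of that pattern in isolation (try_benchmark and failing_launch are made-up names, not kernel_tuner API):

# Illustrative sketch of the skip-on-resource-failure pattern used by benchmark.
SKIPPABLE = ["too many resources requested for launch",
             "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"]

def try_benchmark(launch, verbose=True):
    time = None
    try:
        time = launch()
    except Exception as e:
        if any(skip_str in str(e) for skip_str in SKIPPABLE):
            if verbose:
                print("skipping config, reason:", e)
        else:
            raise
    return time

def failing_launch():
    raise RuntimeError("OUT_OF_RESOURCES")

print(try_benchmark(failing_launch))   # prints the skip message, then None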
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
DeviceInterface.check_kernel_output
def check_kernel_output(self, func, gpu_args, instance, answer, atol, verify, verbose):
        """runs the kernel once and checks the result against answer"""
        logging.debug('check_kernel_output')

        #if not using custom verify function, check if the length is the same
        if not verify and len(instance.arguments) != len(answer):
            raise TypeError("The length of argument list and provided results do not match.")

        #zero GPU memory for output arguments
        for i, arg in enumerate(instance.arguments):
            if verify or answer[i] is not None:
                if isinstance(arg, numpy.ndarray):
                    self.dev.memcpy_htod(gpu_args[i], arg)

        #run the kernel
        check = self.run_kernel(func, gpu_args, instance)
        if not check:
            return True #runtime failure occured that should be ignored, skip correctness check

        #retrieve gpu results to host memory
        result_host = []
        for i, arg in enumerate(instance.arguments):
            if verify or answer[i] is not None:
                if isinstance(arg, numpy.ndarray):
                    result_host.append(numpy.zeros_like(arg))
                    self.dev.memcpy_dtoh(result_host[-1], gpu_args[i])
            else:
                result_host.append(None)

        #if the user has specified a custom verify function, then call it, else use default based on numpy allclose
        if verify:
            try:
                return verify(answer, result_host, atol=atol)
            except TypeError:
                return verify(answer, result_host)
        else:
            return _default_verify_function(instance, answer, result_host, atol, verbose)
python
def check_kernel_output(self, func, gpu_args, instance, answer, atol, verify, verbose):
        """runs the kernel once and checks the result against answer"""
        logging.debug('check_kernel_output')

        #if not using custom verify function, check if the length is the same
        if not verify and len(instance.arguments) != len(answer):
            raise TypeError("The length of argument list and provided results do not match.")

        #zero GPU memory for output arguments
        for i, arg in enumerate(instance.arguments):
            if verify or answer[i] is not None:
                if isinstance(arg, numpy.ndarray):
                    self.dev.memcpy_htod(gpu_args[i], arg)

        #run the kernel
        check = self.run_kernel(func, gpu_args, instance)
        if not check:
            return True #runtime failure occured that should be ignored, skip correctness check

        #retrieve gpu results to host memory
        result_host = []
        for i, arg in enumerate(instance.arguments):
            if verify or answer[i] is not None:
                if isinstance(arg, numpy.ndarray):
                    result_host.append(numpy.zeros_like(arg))
                    self.dev.memcpy_dtoh(result_host[-1], gpu_args[i])
            else:
                result_host.append(None)

        #if the user has specified a custom verify function, then call it, else use default based on numpy allclose
        if verify:
            try:
                return verify(answer, result_host, atol=atol)
            except TypeError:
                return verify(answer, result_host)
        else:
            return _default_verify_function(instance, answer, result_host, atol, verbose)
[ "def", "check_kernel_output", "(", "self", ",", "func", ",", "gpu_args", ",", "instance", ",", "answer", ",", "atol", ",", "verify", ",", "verbose", ")", ":", "logging", ".", "debug", "(", "'check_kernel_output'", ")", "#if not using custom verify function, check if the length is the same", "if", "not", "verify", "and", "len", "(", "instance", ".", "arguments", ")", "!=", "len", "(", "answer", ")", ":", "raise", "TypeError", "(", "\"The length of argument list and provided results do not match.\"", ")", "#zero GPU memory for output arguments", "for", "i", ",", "arg", "in", "enumerate", "(", "instance", ".", "arguments", ")", ":", "if", "verify", "or", "answer", "[", "i", "]", "is", "not", "None", ":", "if", "isinstance", "(", "arg", ",", "numpy", ".", "ndarray", ")", ":", "self", ".", "dev", ".", "memcpy_htod", "(", "gpu_args", "[", "i", "]", ",", "arg", ")", "#run the kernel", "check", "=", "self", ".", "run_kernel", "(", "func", ",", "gpu_args", ",", "instance", ")", "if", "not", "check", ":", "return", "True", "#runtime failure occured that should be ignored, skip correctness check", "#retrieve gpu results to host memory", "result_host", "=", "[", "]", "for", "i", ",", "arg", "in", "enumerate", "(", "instance", ".", "arguments", ")", ":", "if", "verify", "or", "answer", "[", "i", "]", "is", "not", "None", ":", "if", "isinstance", "(", "arg", ",", "numpy", ".", "ndarray", ")", ":", "result_host", ".", "append", "(", "numpy", ".", "zeros_like", "(", "arg", ")", ")", "self", ".", "dev", ".", "memcpy_dtoh", "(", "result_host", "[", "-", "1", "]", ",", "gpu_args", "[", "i", "]", ")", "else", ":", "result_host", ".", "append", "(", "None", ")", "#if the user has specified a custom verify function, then call it, else use default based on numpy allclose", "if", "verify", ":", "try", ":", "return", "verify", "(", "answer", ",", "result_host", ",", "atol", "=", "atol", ")", "except", "TypeError", ":", "return", "verify", "(", "answer", ",", "result_host", ")", "else", ":", "return", "_default_verify_function", "(", "instance", ",", "answer", ",", "result_host", ",", "atol", ",", "verbose", ")" ]
runs the kernel once and checks the result against answer
[ "runs", "the", "kernel", "once", "and", "checks", "the", "result", "against", "answer" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L93-L129
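check_kernel_output first tries verify(answer, result_host, atol=atol) and falls back to verify(answer, result_host) if the function does not accept atol. A sketch of a user-supplied verify function matching that calling convention (my_verify is a made-up name; it would be passed through the tuner's verify option):

# Sketch of a user-supplied verify function compatible with the call above:
# it receives the expected answers and the host copies of the kernel outputs,
# plus atol when it accepts that keyword.
import numpy

def my_verify(answer, result_host, atol=1e-6):
    # compare only the positions for which an expected answer was provided
    for expected, actual in zip(answer, result_host):
        if expected is None:
            continue
        if not numpy.allclose(expected, numpy.asarray(actual).ravel(), atol=atol):
            return False
    return True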
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
DeviceInterface.compile_and_benchmark
def compile_and_benchmark(self, gpu_args, params, kernel_options, tuning_options):
        """ Compile and benchmark a kernel instance based on kernel strings and parameters """
        instance_string = util.get_instance_string(params)

        logging.debug('compile_and_benchmark ' + instance_string)
        mem_usage = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024.0, 1)
        logging.debug('Memory usage : %2.2f MB', mem_usage)

        verbose = tuning_options.verbose

        instance = self.create_kernel_instance(kernel_options, params, verbose)
        if instance is None:
            return None

        try:
            #compile the kernel
            func = self.compile_kernel(instance, verbose)
            if func is None:
                return None

            #add constant memory arguments to compiled module
            if kernel_options.cmem_args is not None:
                self.dev.copy_constant_memory_args(kernel_options.cmem_args)
            #add texture memory arguments to compiled module
            if kernel_options.texmem_args is not None:
                self.dev.copy_texture_memory_args(kernel_options.texmem_args)

            #test kernel for correctness and benchmark
            if tuning_options.answer is not None:
                self.check_kernel_output(func, gpu_args, instance, tuning_options.answer, tuning_options.atol, tuning_options.verify, verbose)

            #benchmark
            time = self.benchmark(func, gpu_args, instance, tuning_options.times, verbose)

        except Exception as e:
            #dump kernel_string to temp file
            temp_filename = util.get_temp_filename(suffix=".c")
            util.write_file(temp_filename, instance.kernel_string)
            print("Error while compiling or benchmarking, see source files: " + temp_filename + " ".join(instance.temp_files.values()))
            raise e

        #clean up any temporary files, if no error occured
        for v in instance.temp_files.values():
            util.delete_temp_file(v)

        return time
python
def compile_and_benchmark(self, gpu_args, params, kernel_options, tuning_options):
        """ Compile and benchmark a kernel instance based on kernel strings and parameters """
        instance_string = util.get_instance_string(params)

        logging.debug('compile_and_benchmark ' + instance_string)
        mem_usage = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024.0, 1)
        logging.debug('Memory usage : %2.2f MB', mem_usage)

        verbose = tuning_options.verbose

        instance = self.create_kernel_instance(kernel_options, params, verbose)
        if instance is None:
            return None

        try:
            #compile the kernel
            func = self.compile_kernel(instance, verbose)
            if func is None:
                return None

            #add constant memory arguments to compiled module
            if kernel_options.cmem_args is not None:
                self.dev.copy_constant_memory_args(kernel_options.cmem_args)
            #add texture memory arguments to compiled module
            if kernel_options.texmem_args is not None:
                self.dev.copy_texture_memory_args(kernel_options.texmem_args)

            #test kernel for correctness and benchmark
            if tuning_options.answer is not None:
                self.check_kernel_output(func, gpu_args, instance, tuning_options.answer, tuning_options.atol, tuning_options.verify, verbose)

            #benchmark
            time = self.benchmark(func, gpu_args, instance, tuning_options.times, verbose)

        except Exception as e:
            #dump kernel_string to temp file
            temp_filename = util.get_temp_filename(suffix=".c")
            util.write_file(temp_filename, instance.kernel_string)
            print("Error while compiling or benchmarking, see source files: " + temp_filename + " ".join(instance.temp_files.values()))
            raise e

        #clean up any temporary files, if no error occured
        for v in instance.temp_files.values():
            util.delete_temp_file(v)

        return time
[ "def", "compile_and_benchmark", "(", "self", ",", "gpu_args", ",", "params", ",", "kernel_options", ",", "tuning_options", ")", ":", "instance_string", "=", "util", ".", "get_instance_string", "(", "params", ")", "logging", ".", "debug", "(", "'compile_and_benchmark '", "+", "instance_string", ")", "mem_usage", "=", "round", "(", "resource", ".", "getrusage", "(", "resource", ".", "RUSAGE_SELF", ")", ".", "ru_maxrss", "/", "1024.0", ",", "1", ")", "logging", ".", "debug", "(", "'Memory usage : %2.2f MB'", ",", "mem_usage", ")", "verbose", "=", "tuning_options", ".", "verbose", "instance", "=", "self", ".", "create_kernel_instance", "(", "kernel_options", ",", "params", ",", "verbose", ")", "if", "instance", "is", "None", ":", "return", "None", "try", ":", "#compile the kernel", "func", "=", "self", ".", "compile_kernel", "(", "instance", ",", "verbose", ")", "if", "func", "is", "None", ":", "return", "None", "#add constant memory arguments to compiled module", "if", "kernel_options", ".", "cmem_args", "is", "not", "None", ":", "self", ".", "dev", ".", "copy_constant_memory_args", "(", "kernel_options", ".", "cmem_args", ")", "#add texture memory arguments to compiled module", "if", "kernel_options", ".", "texmem_args", "is", "not", "None", ":", "self", ".", "dev", ".", "copy_texture_memory_args", "(", "kernel_options", ".", "texmem_args", ")", "#test kernel for correctness and benchmark", "if", "tuning_options", ".", "answer", "is", "not", "None", ":", "self", ".", "check_kernel_output", "(", "func", ",", "gpu_args", ",", "instance", ",", "tuning_options", ".", "answer", ",", "tuning_options", ".", "atol", ",", "tuning_options", ".", "verify", ",", "verbose", ")", "#benchmark", "time", "=", "self", ".", "benchmark", "(", "func", ",", "gpu_args", ",", "instance", ",", "tuning_options", ".", "times", ",", "verbose", ")", "except", "Exception", "as", "e", ":", "#dump kernel_string to temp file", "temp_filename", "=", "util", ".", "get_temp_filename", "(", "suffix", "=", "\".c\"", ")", "util", ".", "write_file", "(", "temp_filename", ",", "instance", ".", "kernel_string", ")", "print", "(", "\"Error while compiling or benchmarking, see source files: \"", "+", "temp_filename", "+", "\" \"", ".", "join", "(", "instance", ".", "temp_files", ".", "values", "(", ")", ")", ")", "raise", "e", "#clean up any temporary files, if no error occured", "for", "v", "in", "instance", ".", "temp_files", ".", "values", "(", ")", ":", "util", ".", "delete_temp_file", "(", "v", ")", "return", "time" ]
Compile and benchmark a kernel instance based on kernel strings and parameters
[ "Compile", "and", "benchmark", "a", "kernel", "instance", "based", "on", "kernel", "strings", "and", "parameters" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L132-L178
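compile_and_benchmark is the per-configuration driver: create the kernel instance, compile it, copy constant/texture memory, optionally check correctness, then benchmark. A hedged usage sketch of how answer, atol and verbose would typically reach these tuning options, assuming kernel_tuner's public tune_kernel entry point with these keyword names:

# Hedged usage sketch, assuming the public tune_kernel entry point: the
# answer/atol/verbose keywords end up in tuning_options and drive the
# check_kernel_output and benchmark calls made by compile_and_benchmark.
import numpy
import kernel_tuner

kernel_string = """
__global__ void vector_add(float *c, const float *a, const float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

n = numpy.int32(10000)
a = numpy.random.randn(10000).astype(numpy.float32)
b = numpy.random.randn(10000).astype(numpy.float32)
c = numpy.zeros_like(a)

results, env = kernel_tuner.tune_kernel(
    "vector_add", kernel_string, int(n), [c, a, b, n],
    {"block_size_x": [32, 64, 128, 256]},
    answer=[a + b, None, None, None],   # checked by check_kernel_output
    atol=1e-6, verbose=True)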
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
DeviceInterface.compile_kernel
def compile_kernel(self, instance, verbose):
        """compile the kernel for this specific instance"""
        logging.debug('compile_kernel ' + instance.name)

        #compile kernel_string into device func
        func = None
        try:
            func = self.dev.compile(instance.name, instance.kernel_string)
        except Exception as e:
            #compiles may fail because certain kernel configurations use too
            #much shared memory for example, the desired behavior is to simply
            #skip over this configuration and try the next one
            if "uses too much shared data" in str(e):
                logging.debug('compile_kernel failed due to kernel using too much shared memory')
                if verbose:
                    print("skipping config", instance.name, "reason: too much shared memory used")
            else:
                logging.debug('compile_kernel failed due to error: ' + str(e))
                print("Error while compiling:", instance.name)
                raise e
        return func
python
def compile_kernel(self, instance, verbose):
        """compile the kernel for this specific instance"""
        logging.debug('compile_kernel ' + instance.name)

        #compile kernel_string into device func
        func = None
        try:
            func = self.dev.compile(instance.name, instance.kernel_string)
        except Exception as e:
            #compiles may fail because certain kernel configurations use too
            #much shared memory for example, the desired behavior is to simply
            #skip over this configuration and try the next one
            if "uses too much shared data" in str(e):
                logging.debug('compile_kernel failed due to kernel using too much shared memory')
                if verbose:
                    print("skipping config", instance.name, "reason: too much shared memory used")
            else:
                logging.debug('compile_kernel failed due to error: ' + str(e))
                print("Error while compiling:", instance.name)
                raise e
        return func
[ "def", "compile_kernel", "(", "self", ",", "instance", ",", "verbose", ")", ":", "logging", ".", "debug", "(", "'compile_kernel '", "+", "instance", ".", "name", ")", "#compile kernel_string into device func", "func", "=", "None", "try", ":", "func", "=", "self", ".", "dev", ".", "compile", "(", "instance", ".", "name", ",", "instance", ".", "kernel_string", ")", "except", "Exception", "as", "e", ":", "#compiles may fail because certain kernel configurations use too", "#much shared memory for example, the desired behavior is to simply", "#skip over this configuration and try the next one", "if", "\"uses too much shared data\"", "in", "str", "(", "e", ")", ":", "logging", ".", "debug", "(", "'compile_kernel failed due to kernel using too much shared memory'", ")", "if", "verbose", ":", "print", "(", "\"skipping config\"", ",", "instance", ".", "name", ",", "\"reason: too much shared memory used\"", ")", "else", ":", "logging", ".", "debug", "(", "'compile_kernel failed due to error: '", "+", "str", "(", "e", ")", ")", "print", "(", "\"Error while compiling:\"", ",", "instance", ".", "name", ")", "raise", "e", "return", "func" ]
compile the kernel for this specific instance
[ "compile", "the", "kernel", "for", "this", "specific", "instance" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L180-L200
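Compile errors containing "uses too much shared data" are treated as skippable, so compile_kernel returns None and the configuration is dropped rather than aborting the tuning run. An illustrative sketch of that compile-or-skip contract (compile_or_skip and its compile_fn argument are made-up names, not kernel_tuner API):

# Illustrative sketch: None signals "configuration skipped",
# any unrelated exception propagates to the caller.
def compile_or_skip(compile_fn, name, verbose=True):
    try:
        return compile_fn()
    except Exception as e:
        if "uses too much shared data" in str(e):
            if verbose:
                print("skipping config", name, "reason: too much shared memory used")
            return None
        raise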
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
DeviceInterface.copy_constant_memory_args
def copy_constant_memory_args(self, cmem_args):
        """adds constant memory arguments to the most recently compiled module, if using CUDA"""
        if self.lang == "CUDA":
            self.dev.copy_constant_memory_args(cmem_args)
        else:
            raise Exception("Error cannot copy constant memory arguments when language is not CUDA")
python
def copy_constant_memory_args(self, cmem_args):
        """adds constant memory arguments to the most recently compiled module, if using CUDA"""
        if self.lang == "CUDA":
            self.dev.copy_constant_memory_args(cmem_args)
        else:
            raise Exception("Error cannot copy constant memory arguments when language is not CUDA")
[ "def", "copy_constant_memory_args", "(", "self", ",", "cmem_args", ")", ":", "if", "self", ".", "lang", "==", "\"CUDA\"", ":", "self", ".", "dev", ".", "copy_constant_memory_args", "(", "cmem_args", ")", "else", ":", "raise", "Exception", "(", "\"Error cannot copy constant memory arguments when language is not CUDA\"", ")" ]
adds constant memory arguments to the most recently compiled module, if using CUDA
[ "adds", "constant", "memory", "arguments", "to", "the", "most", "recently", "compiled", "module", "if", "using", "CUDA" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L202-L207
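copy_constant_memory_args simply forwards cmem_args to the CUDA backend and refuses other languages. A hedged sketch of what such a cmem_args dict could look like, assuming a CUDA kernel that declares a matching constant-memory symbol (the symbol name "filter" is hypothetical):

# Hedged sketch: cmem_args maps constant-memory symbol names to numpy arrays,
# e.g. for a kernel declaring  __constant__ float filter[9];
import numpy

filter_weights = numpy.ones(9, dtype=numpy.float32) / 9.0
cmem_args = {"filter": filter_weights}
# forwarded to the device as e.g. kernel_tuner.tune_kernel(..., cmem_args=cmem_args, lang="CUDA")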
benvanwerkhoven/kernel_tuner
kernel_tuner/core.py
DeviceInterface.copy_texture_memory_args
def copy_texture_memory_args(self, texmem_args):
        """adds texture memory arguments to the most recently compiled module, if using CUDA"""
        if self.lang == "CUDA":
            self.dev.copy_texture_memory_args(texmem_args)
        else:
            raise Exception("Error cannot copy texture memory arguments when language is not CUDA")
python
def copy_texture_memory_args(self, texmem_args):
        """adds texture memory arguments to the most recently compiled module, if using CUDA"""
        if self.lang == "CUDA":
            self.dev.copy_texture_memory_args(texmem_args)
        else:
            raise Exception("Error cannot copy texture memory arguments when language is not CUDA")
[ "def", "copy_texture_memory_args", "(", "self", ",", "texmem_args", ")", ":", "if", "self", ".", "lang", "==", "\"CUDA\"", ":", "self", ".", "dev", ".", "copy_texture_memory_args", "(", "texmem_args", ")", "else", ":", "raise", "Exception", "(", "\"Error cannot copy texture memory arguments when language is not CUDA\"", ")" ]
adds texture memory arguments to the most recently compiled module, if using CUDA
[ "adds", "texture", "memory", "arguments", "to", "the", "most", "recently", "compiled", "module", "if", "using", "CUDA" ]
train
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/core.py#L209-L214
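copy_texture_memory_args mirrors the constant-memory case for texmem_args, again CUDA-only. A hedged sketch, assuming texture reference names map directly to numpy arrays (the name "tex_input" is hypothetical; check the backend's expected schema before relying on this):

# Hedged sketch of a texmem_args dict under the assumption stated above.
import numpy

image = numpy.random.randn(512, 512).astype(numpy.float32)
texmem_args = {"tex_input": image}
# forwarded to the device as e.g. kernel_tuner.tune_kernel(..., texmem_args=texmem_args, lang="CUDA")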