code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if num is None:
    # Show None when data is missing
    return "None"
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
    if abs(num) < 1024.0:
        return "%3.1f%s%s" % (num, unit, suffix)
    num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def to_h(num, suffix='B')
Converts a byte value into human-readable form.
1.810375
1.692813
1.069448
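A short usage sketch of the to_h function in the record above (assuming it is in scope); the expected strings follow from the 1024-based unit ladder in the function body.

# Minimal sketch exercising to_h from the record above.
assert to_h(None) == "None"        # missing data is rendered as the string "None"
assert to_h(0) == "0.0B"           # below 1024, the raw byte count is kept
assert to_h(10240) == "10.0KiB"    # 10 * 1024 bytes
assert to_h(5 * 1024 ** 3) == "5.0GiB"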
if sys.stdout.isatty():
    return json.dumps(data, indent=4, separators=(',', ': '))
else:
    return json.dumps(data)
def format_to_json(data)
Converts `data` into JSON. If stdout is a tty it performs a pretty print.
2.407888
2.048075
1.175684
for broker_id, metadata in six.iteritems(brokers): self.brokers[broker_id] = self._create_broker(broker_id, metadata)
def _build_brokers(self, brokers)
Build broker objects using broker-ids.
3.524285
2.850286
1.236467
broker = Broker(broker_id, metadata)
if not metadata:
    broker.mark_inactive()
rg_id = self.extract_group(broker)
group = self.rgs.setdefault(rg_id, ReplicationGroup(rg_id))
group.add_broker(broker)
broker.replication_group = group
return broker
def _create_broker(self, broker_id, metadata=None)
Create a broker object and assign to a replication group. A broker object with no metadata is considered inactive. An inactive broker may or may not belong to a group.
4.359276
3.289475
1.325219
self.partitions = {} for partition_name, replica_ids in six.iteritems(assignment): # Get topic topic_id = partition_name[0] partition_id = partition_name[1] topic = self.topics.setdefault( topic_id, Topic(topic_id, replication_factor=len(replica_ids)) ) # Creating partition object partition = Partition( topic, partition_id, weight=self.partition_measurer.get_weight(partition_name), size=self.partition_measurer.get_size(partition_name), ) self.partitions[partition_name] = partition topic.add_partition(partition) # Updating corresponding broker objects for broker_id in replica_ids: # Check if broker-id is present in current active brokers if broker_id not in list(self.brokers.keys()): self.log.warning( "Broker %s containing partition %s is not in " "active brokers.", broker_id, partition, ) self.brokers[broker_id] = self._create_broker(broker_id) self.brokers[broker_id].add_partition(partition)
def _build_partitions(self, assignment)
Builds all partition objects and updates the corresponding broker and topic objects.
2.884298
2.68024
1.076134
return { broker for broker in six.itervalues(self.brokers) if not broker.inactive and not broker.decommissioned }
def active_brokers(self)
Set of brokers that are neither inactive nor decommissioned.
6.097746
3.918907
1.555981
try:
    source = self.brokers[source_id]
    dest = self.brokers[dest_id]
    # Move all partitions from source to destination broker
    for partition in source.partitions.copy():  # Partitions set changes
        # We cannot move partition directly since that re-orders the
        # replicas for the partition
        source.partitions.remove(partition)
        dest.partitions.add(partition)
        # Replace broker in replica
        partition.replace(source, dest)
except KeyError as e:
    self.log.error("Invalid broker id %s.", e.args[0])
    raise InvalidBrokerIdError(
        "Broker id {} does not exist in cluster".format(e.args[0])
    )
def replace_broker(self, source_id, dest_id)
Move all partitions in source broker to destination broker. :param source_id: source broker-id :param dest_id: destination broker-id :raises: InvalidBrokerIdError, when either of given broker-ids is invalid.
4.208715
4.074975
1.03282
try: for partition_name, replica_ids in six.iteritems(assignment): try: new_replicas = [self.brokers[b_id] for b_id in replica_ids] except KeyError: self.log.error( "Invalid replicas %s for topic-partition %s-%s.", ', '.join([str(id) for id in replica_ids]), partition_name[0], partition_name[1], ) raise InvalidBrokerIdError( "Invalid replicas {0}.".format( ', '.join([str(id) for id in replica_ids]) ), ) try: partition = self.partitions[partition_name] old_replicas = [broker for broker in partition.replicas] # No change needed. Save ourself some CPU time. # Replica order matters as the first one is the leader. if new_replicas == old_replicas: continue # Remove old partitions from broker # This also updates partition replicas for broker in old_replicas: broker.remove_partition(partition) # Add new partition to brokers for broker in new_replicas: broker.add_partition(partition) except KeyError: self.log.error( "Invalid topic-partition %s-%s.", partition_name[0], partition_name[1], ) raise InvalidPartitionError( "Invalid topic-partition {0}-{1}." .format(partition_name[0], partition_name[1]), ) except KeyError: self.log.error("Could not parse given assignment {0}".format(assignment)) raise
def update_cluster_topology(self, assignment)
Modify the cluster-topology with given assignment. Change the replica set of partitions as in given assignment. :param assignment: dict representing actions to be used to update the current cluster-topology :raises: InvalidBrokerIdError when broker-id is invalid :raises: InvalidPartitionError when partition-name is invalid
2.850398
2.648517
1.076224
assignment = {}
for elem in plan['partitions']:
    assignment[
        (elem['topic'], elem['partition'])
    ] = elem['replicas']
return assignment
def plan_to_assignment(plan)
Convert the plan to the format used by cluster-topology.
6.093931
5.250883
1.160554
return { 'version': 1, 'partitions': [{'topic': t_p[0], 'partition': t_p[1], 'replicas': replica } for t_p, replica in six.iteritems(assignment)] }
def assignment_to_plan(assignment)
Convert an assignment to the format used by Kafka to describe a reassignment plan.
5.196504
4.619957
1.124795
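A small illustration of the two formats that plan_to_assignment and assignment_to_plan convert between; the topic, partition, and broker ids below are made up for the example.

# Hypothetical reassignment plan in Kafka's JSON plan format.
plan = {
    'version': 1,
    'partitions': [
        {'topic': u't1', 'partition': 0, 'replicas': [1, 2]},
        {'topic': u't1', 'partition': 1, 'replicas': [2, 3]},
    ],
}

# plan_to_assignment keys the replicas by a (topic, partition) tuple:
# {('t1', 0): [1, 2], ('t1', 1): [2, 3]}
assignment = plan_to_assignment(plan)

# assignment_to_plan converts back; the order of entries inside
# 'partitions' may differ, but the mapping itself is preserved.
round_trip = assignment_to_plan(assignment)
assert plan_to_assignment(round_trip) == assignment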
if not _validate_plan(new_plan):
    _log.error('Invalid proposed-plan.')
    return False
# Validate given plan in reference to base-plan
if base_plan:
    if not _validate_plan(base_plan):
        _log.error('Invalid assignment from cluster.')
        return False
    if not _validate_plan_base(
        new_plan, base_plan, is_partition_subset, allow_rf_change
    ):
        return False
# Plan validation successful
return True
def validate_plan( new_plan, base_plan=None, is_partition_subset=True, allow_rf_change=False, )
Verify that the new plan is valid for execution. The given kafka-reassignment plan should conform to the following rules: - Plan should have at least one partition for re-assignment - Partition-name list should be a subset of the base-plan partition-list - Replication-factor for each partition of the same topic is the same - Replication-factor for each partition remains unchanged - No duplicate broker-ids in each replica list
4.341083
4.698713
0.923888
# Verify that partitions in plan are subset of base plan. new_partitions = set([ (p_data['topic'], p_data['partition']) for p_data in new_plan['partitions'] ]) base_partitions = set([ (p_data['topic'], p_data['partition']) for p_data in base_plan['partitions'] ]) if is_partition_subset: invalid_partitions = list(new_partitions - base_partitions) else: # partition set should be equal invalid_partitions = list( new_partitions.union(base_partitions) - new_partitions.intersection(base_partitions), ) if invalid_partitions: _log.error( 'Invalid partition(s) found: {p_list}'.format( p_list=invalid_partitions, ) ) return False # Verify replication-factor remains consistent base_partition_replicas = { (p_data['topic'], p_data['partition']): p_data['replicas'] for p_data in base_plan['partitions'] } new_partition_replicas = { (p_data['topic'], p_data['partition']): p_data['replicas'] for p_data in new_plan['partitions'] } if not allow_rf_change: invalid_replication_factor = False for new_partition, replicas in six.iteritems(new_partition_replicas): base_replica_cnt = len(base_partition_replicas[new_partition]) if len(replicas) != base_replica_cnt: invalid_replication_factor = True _log.error( 'Replication-factor Mismatch: Partition: {partition}: ' 'Base-replicas: {expected}, Proposed-replicas: {actual}' .format( partition=new_partition, expected=base_partition_replicas[new_partition], actual=replicas, ), ) if invalid_replication_factor: return False # Validation successful return True
def _validate_plan_base( new_plan, base_plan, is_partition_subset=True, allow_rf_change=False, )
Validate if given plan is valid comparing with given base-plan. Validate following assertions: - Partition-check: New partition-set should be subset of base-partition set - Replica-count check: Replication-factor for each partition remains same - Broker-check: New broker-set should be subset of base broker-set
2.112819
2.04128
1.035046
# Verify presence of required keys if set(plan.keys()) != set(['version', 'partitions']): _log.error( 'Invalid or incomplete keys in given plan. Expected: "version", ' '"partitions". Found:{keys}' .format(keys=', '.join(list(plan.keys()))), ) return False # Invalid version if plan['version'] != 1: _log.error( 'Invalid version of plan {version}' .format(version=plan['version']), ) return False # Empty partitions if not plan['partitions']: _log.error( '"partitions" list found empty"' .format(version=plan['partitions']), ) return False # Invalid partitions type if not isinstance(plan['partitions'], list): _log.error('"partitions" of type list expected.') return False # Invalid partition-data for p_data in plan['partitions']: if set(p_data.keys()) != set(['topic', 'partition', 'replicas']): _log.error( 'Invalid keys in partition-data {keys}' .format(keys=', '.join(list(p_data.keys()))), ) return False # Check types if not isinstance(p_data['topic'], six.text_type): _log.error( '"topic" of type unicode expected {p_data}, found {t_type}' .format(p_data=p_data, t_type=type(p_data['topic'])), ) return False if not isinstance(p_data['partition'], int): _log.error( '"partition" of type int expected {p_data}, found {p_type}' .format(p_data=p_data, p_type=type(p_data['partition'])), ) return False if not isinstance(p_data['replicas'], list): _log.error( '"replicas" of type list expected {p_data}, found {r_type}' .format(p_data=p_data, r_type=type(p_data['replicas'])), ) return False if not p_data['replicas']: _log.error( 'Non-empty "replicas" expected: {p_data}' .format(p_data=p_data), ) return False # Invalid broker-type for broker in p_data['replicas']: if not isinstance(broker, int): _log.error( '"replicas" of type integer list expected {p_data}' .format(p_data=p_data), ) return False return True
def _validate_format(plan)
Validate that the format of the plan is as expected. Validate the format of the plan against the following rules: a) Verify that it has ONLY the keys 'version' and 'partitions' b) Verify that each entry of 'partitions' has ONLY the keys 'replicas', 'partition', 'topic' c) Verify the desired type of each value d) Verify non-empty partitions and replicas Sample-plan format: { "version": 1, "partitions": [ {"partition":0, "topic":'t1', "replicas":[0,1,2]}, {"partition":0, "topic":'t2', "replicas":[1,2]}, ... ]}
2.014891
1.91742
1.050835
# Validate format of plan if not _validate_format(plan): return False # Verify no duplicate partitions partition_names = [ (p_data['topic'], p_data['partition']) for p_data in plan['partitions'] ] duplicate_partitions = [ partition for partition, count in six.iteritems(Counter(partition_names)) if count > 1 ] if duplicate_partitions: _log.error( 'Duplicate partitions in plan {p_list}' .format(p_list=duplicate_partitions), ) return False # Verify no duplicate brokers in partition-replicas dup_replica_brokers = [] for p_data in plan['partitions']: dup_replica_brokers = [ broker for broker, count in Counter(p_data['replicas']).items() if count > 1 ] if dup_replica_brokers: _log.error( 'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}' .format( topic=p_data['topic'], p_id=p_data['partition'], replicas=p_data['replicas'], ) ) return False # Verify same replication-factor for every topic topic_replication_factor = {} for partition_info in plan['partitions']: topic = partition_info['topic'] replication_factor = len(partition_info['replicas']) if topic in list(topic_replication_factor.keys()): if topic_replication_factor[topic] != replication_factor: _log.error( 'Mismatch in replication-factor of partitions for topic ' '{topic}'.format(topic=topic), ) return False else: topic_replication_factor[topic] = replication_factor return True
def _validate_plan(plan)
Validate whether the given plan is valid based on kafka-cluster-assignment protocols. Validate the following parameters: - Correct format of the plan - Partition-list should be unique - Every partition of a topic should have the same replication-factor - Replicas of a partition should have a unique broker-set
2.321091
2.224782
1.043289
sorted_offsets = sorted( list(consumer_offsets_metadata.items()), key=lambda topic_offsets: sum([o.highmark - o.current for o in topic_offsets[1]]) ) return OrderedDict(sorted_offsets)
def sort_by_distance(cls, consumer_offsets_metadata)
Receives a dict of (topic_name: ConsumerPartitionOffset) and returns a similar dict where the topics are sorted by total offset distance.
4.468403
3.943188
1.133196
sorted_offsets = sorted( list(consumer_offsets_metadata.items()), key=lambda topic_offsets1: sum( [cls.percentage_distance(o.highmark, o.current) for o in topic_offsets1[1]] ) ) return OrderedDict(sorted_offsets)
def sort_by_distance_percentage(cls, consumer_offsets_metadata)
Receives a dict of (topic_name: ConsumerPartitionOffset) and returns a similar dict where the topics are sorted by average offset distance in percentage.
4.478616
4.519619
0.990928
highmark = int(highmark)
current = int(current)
if highmark > 0:
    return round(
        (highmark - current) * 100.0 / highmark,
        2,
    )
else:
    return 0.0
def percentage_distance(cls, highmark, current)
Percentage of distance the current offset is behind the highmark.
2.56053
2.419116
1.058457
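A worked example of the percentage_distance arithmetic above, with made-up highmark/current offsets; it is shown as a plain call, although in the source it is a classmethod on the offset-sorting helper.

# With highmark=150 and current=100 the consumer is 50 messages behind,
# i.e. (150 - 100) * 100.0 / 150 = 33.33 percent of the partition.
assert percentage_distance(150, 100) == 33.33
# A zero (or negative) highmark short-circuits to 0.0 to avoid division by zero.
assert percentage_distance(0, 0) == 0.0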
_log.debug( "ZK: Getting children of {path}".format(path=path), ) return self.zk.get_children(path, watch)
def get_children(self, path, watch=None)
Returns the children of the specified node.
5.809478
5.393952
1.077036
_log.debug( "ZK: Getting {path}".format(path=path), ) return self.zk.get(path, watch)
def get(self, path, watch=None)
Returns the data of the specified node.
7.33235
6.964991
1.052744
_log.debug( "ZK: Setting {path} to {value}".format(path=path, value=value) ) return self.zk.set(path, value)
def set(self, path, value)
Sets and returns new data for the specified node.
4.458139
4.520037
0.986306
data, _ = self.get(path, watch) return load_json(data) if data else None
def get_json(self, path, watch=None)
Reads the data of the specified node and converts it to json.
5.873008
5.649786
1.03951
try:
    broker_ids = self.get_children("/brokers/ids")
except NoNodeError:
    _log.info(
        "cluster is empty."
    )
    return {}
# Return broker-ids only
if names_only:
    return {int(b_id): None for b_id in broker_ids}
return {int(b_id): self.get_broker_metadata(b_id) for b_id in broker_ids}
def get_brokers(self, names_only=False)
Get information on all the available brokers. :rtype : dict of brokers
3.836635
4.084472
0.939322
try: config_data = load_json( self.get( "/config/topics/{topic}".format(topic=topic) )[0] ) except NoNodeError as e: # Kafka version before 0.8.1 does not have "/config/topics/<topic_name>" path in ZK and # if the topic exists, return default dict instead of raising an Exception. # Ref: https://cwiki.apache.org/confluence/display/KAFKA/Kafka+data+structures+in+Zookeeper. topics = self.get_topics(topic_name=topic, fetch_partition_state=False) if len(topics) > 0: _log.info("Configuration not available for topic {topic}.".format(topic=topic)) config_data = {"config": {}} else: _log.error( "topic {topic} not found.".format(topic=topic) ) raise e return config_data
def get_topic_config(self, topic)
Get configuration information for specified topic. :rtype : dict of configuration
4.376017
4.418653
0.990351
config_data = dump_json(value) try: # Change value return_value = self.set( "/config/topics/{topic}".format(topic=topic), config_data ) # Create change version = kafka_version[1] # this feature is supported in kafka 9 and kafka 10 assert version in (9, 10), "Feature supported with kafka 9 and kafka 10" if version == 9: # https://github.com/apache/kafka/blob/0.9.0.1/ # core/src/main/scala/kafka/admin/AdminUtils.scala#L334 change_node = dump_json({ "version": 1, "entity_type": "topics", "entity_name": topic }) else: # kafka 10 # https://github.com/apache/kafka/blob/0.10.2.1/ # core/src/main/scala/kafka/admin/AdminUtils.scala#L574 change_node = dump_json({ "version": 2, "entity_path": "topics/" + topic, }) self.create( '/config/changes/config_change_', change_node, sequence=True ) except NoNodeError as e: _log.error( "topic {topic} not found.".format(topic=topic) ) raise e return return_value
def set_topic_config(self, topic, value, kafka_version=(0, 10, ))
Set configuration information for specified topic. :topic : topic whose configuration needs to be changed :value : config value with which the topic needs to be updated. This would be of the form key=value. Example 'cleanup.policy=compact' :kafka_version :tuple kafka version the brokers are running on. Defaults to (0, 10, x). Kafka version 9 and kafka 10 support this feature.
3.745776
3.760302
0.996137
try: topic_ids = [topic_name] if topic_name else self.get_children( "/brokers/topics", ) except NoNodeError: _log.error( "Cluster is empty." ) return {} if names_only: return topic_ids topics_data = {} for topic_id in topic_ids: try: topic_info = self.get("/brokers/topics/{id}".format(id=topic_id)) topic_data = load_json(topic_info[0]) topic_ctime = topic_info[1].ctime / 1000.0 topic_data['ctime'] = topic_ctime except NoNodeError: _log.info( "topic '{topic}' not found.".format(topic=topic_id), ) return {} # Prepare data for each partition partitions_data = {} for p_id, replicas in six.iteritems(topic_data['partitions']): partitions_data[p_id] = {} if fetch_partition_state: # Fetch partition-state from zookeeper partition_state = self._fetch_partition_state(topic_id, p_id) partitions_data[p_id] = load_json(partition_state[0]) partitions_data[p_id]['ctime'] = partition_state[1].ctime / 1000.0 else: # Fetch partition-info from zookeeper partition_info = self._fetch_partition_info(topic_id, p_id) partitions_data[p_id]['ctime'] = partition_info.ctime / 1000.0 partitions_data[p_id]['replicas'] = replicas topic_data['partitions'] = partitions_data topics_data[topic_id] = topic_data return topics_data
def get_topics( self, topic_name=None, names_only=False, fetch_partition_state=True, )
Get information on all the available topics. Topic-data format with fetch_partition_state as False :- topic_data = { 'version': 1, 'partitions': { <p_id>: { replicas: <broker-ids> } } } Topic-data format with fetch_partition_state as True:- topic_data = { 'version': 1, 'ctime': <timestamp>, 'partitions': { <p_id>:{ replicas: [<broker_id>, <broker_id>, ...], isr: [<broker_id>, <broker_id>, ...], controller_epoch: <val>, leader_epoch: <val>, version: 1, leader: <broker-id>, ctime: <timestamp>, } } } Note: By default we also fetch partition-state which results in accessing the zookeeper twice. If just partition-replica information is required fetch_partition_state should be set to False.
2.291237
2.099976
1.091078
if consumer_group_id is None:
    group_ids = self.get_children("/consumers")
else:
    group_ids = [consumer_group_id]
# Return consumer-group-ids only
if names_only:
    return {g_id: None for g_id in group_ids}
consumer_offsets = {}
for g_id in group_ids:
    consumer_offsets[g_id] = self.get_group_offsets(g_id)
return consumer_offsets
def get_consumer_groups(self, consumer_group_id=None, names_only=False)
Get information on all the available consumer-groups. If names_only is True, only the list of consumer-group ids is returned. If names_only is False, consumer-group offset details are returned for all consumer-groups, or for the given consumer-group, in dict format as:- { 'group-id': { 'topic': { 'partition': offset-value, ... ... } } } :rtype: dict of consumer-group offset details
2.67719
2.518602
1.062967
group_offsets = {} try: all_topics = self.get_my_subscribed_topics(group) except NoNodeError: # No offset information of given consumer-group _log.warning( "No topics subscribed to consumer-group {group}.".format( group=group, ), ) return group_offsets if topic: if topic in all_topics: topics = [topic] else: _log.error( "Topic {topic} not found in topic list {topics} for consumer" "-group {consumer_group}.".format( topic=topic, topics=', '.join(topic for topic in all_topics), consumer_group=group, ), ) return group_offsets else: topics = all_topics for topic in topics: group_offsets[topic] = {} try: partitions = self.get_my_subscribed_partitions(group, topic) except NoNodeError: _log.warning( "No partition offsets found for topic {topic}. " "Continuing to next one...".format(topic=topic), ) continue # Fetch offsets for each partition for partition in partitions: path = "/consumers/{group_id}/offsets/{topic}/{partition}".format( group_id=group, topic=topic, partition=partition, ) try: # Get current offset offset_json, _ = self.get(path) group_offsets[topic][partition] = load_json(offset_json) except NoNodeError: _log.error("Path {path} not found".format(path=path)) raise return group_offsets
def get_group_offsets(self, group, topic=None)
Fetch group offsets for the given topic if specified, otherwise for all subscribed topics and partitions. { 'topic': { 'partition': offset-value, ... ... } }
2.45608
2.403612
1.021829
state_path = "/brokers/topics/{topic_id}/partitions/{p_id}/state" try: partition_state = self.get( state_path.format(topic_id=topic_id, p_id=partition_id), ) return partition_state except NoNodeError: return {}
def _fetch_partition_state(self, topic_id, partition_id)
Fetch partition-state for given topic-partition.
3.168543
2.989216
1.059991
info_path = "/brokers/topics/{topic_id}/partitions/{p_id}" try: _, partition_info = self.get( info_path.format(topic_id=topic_id, p_id=partition_id), ) return partition_info except NoNodeError: return {}
def _fetch_partition_info(self, topic_id, partition_id)
Fetch partition info for given topic-partition.
3.346204
3.185004
1.050612
path = "/consumers/{group_id}/offsets".format(group_id=groupid) return self.get_children(path)
def get_my_subscribed_topics(self, groupid)
Get the list of topics that a consumer is subscribed to :param: groupid: The consumer group ID for the consumer :returns list of kafka topics :rtype: list
5.859273
6.243835
0.938409
path = "/consumers/{group_id}/offsets/{topic}".format( group_id=groupid, topic=topic, ) return self.get_children(path)
def get_my_subscribed_partitions(self, groupid, topic)
Get the list of partitions of a topic that a consumer is subscribed to :param: groupid: The consumer group ID for the consumer :param: topic: The topic name :returns list of partitions :rtype: list
4.009365
4.398355
0.91156
plan = self.get_cluster_plan() assignment = {} for elem in plan['partitions']: assignment[ (elem['topic'], elem['partition']) ] = elem['replicas'] return assignment
def get_cluster_assignment(self)
Fetch the cluster layout from Zookeeper in the form of an assignment.
6.261844
5.28931
1.183868
_log.debug("ZK: Creating node " + path) return self.zk.create(path, value, acl, ephemeral, sequence, makepath)
def create( self, path, value='', acl=None, ephemeral=False, sequence=False, makepath=False )
Creates a Zookeeper node. :param: path: The zookeeper node path :param: value: Zookeeper node value :param: acl: ACL list :param: ephemeral: Boolean indicating whether this node is tied to this session. :param: sequence: Boolean indicating whether path is suffixed with a unique index. :param: makepath: Whether the path should be created if it doesn't exist.
4.96396
5.080407
0.977079
_log.debug("ZK: Deleting node " + path) return self.zk.delete(path, recursive=recursive)
def delete(self, path, recursive=False)
Deletes a Zookeeper node. :param: path: The zookeeper node path :param: recursive: Recursively delete node and all its children.
7.105911
7.046541
1.008425
reassignment_path = '{admin}/{reassignment_node}'\ .format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE) plan_json = dump_json(plan) base_plan = self.get_cluster_plan() if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change): _log.error('Given plan is invalid. Aborting new reassignment plan ... {plan}'.format(plan=plan)) return False # Send proposed-plan to zookeeper try: _log.info('Sending plan to Zookeeper...') self.create(reassignment_path, plan_json, makepath=True) _log.info( 'Re-assign partitions node in Zookeeper updated successfully ' 'with {plan}'.format(plan=plan), ) return True except NodeExistsError: _log.warning('Previous plan in progress. Exiting..') _log.warning('Aborting new reassignment plan... {plan}'.format(plan=plan)) in_progress_plan = load_json(self.get(reassignment_path)[0]) in_progress_partitions = [ '{topic}-{p_id}'.format( topic=p_data['topic'], p_id=str(p_data['partition']), ) for p_data in in_progress_plan['partitions'] ] _log.warning( '{count} partition(s) reassignment currently in progress:-' .format(count=len(in_progress_partitions)), ) _log.warning( '{partitions}. In Progress reassignment plan...'.format( partitions=', '.join(in_progress_partitions), ), ) return False except Exception as e: _log.error( 'Could not re-assign partitions {plan}. Error: {e}' .format(plan=plan, e=e), ) return False
def execute_plan(self, plan, allow_rf_change=False)
Submit reassignment plan for execution.
3.699786
3.541151
1.044798
_log.info('Fetching current cluster-topology from Zookeeper...') cluster_layout = self.get_topics(fetch_partition_state=False) # Re-format cluster-layout partitions = [ { 'topic': topic_id, 'partition': int(p_id), 'replicas': partitions_data['replicas'] } for topic_id, topic_info in six.iteritems(cluster_layout) for p_id, partitions_data in six.iteritems(topic_info['partitions']) ] return { 'version': 1, 'partitions': partitions }
def get_cluster_plan(self)
Fetch cluster plan from zookeeper.
4.645053
4.125381
1.12597
reassignment_path = '{admin}/{reassignment_node}'\ .format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE) try: result = self.get(reassignment_path) return load_json(result[0]) except NoNodeError: return {}
def get_pending_plan(self)
Read the currently running plan on reassign_partitions node.
5.769791
4.338571
1.329883
out = {} partitions_count = len(partitions) out['raw'] = { 'offline_count': partitions_count, } if partitions_count == 0: out['message'] = 'No offline partitions.' else: out['message'] = "{count} offline partitions.".format(count=partitions_count) if verbose: lines = ( '{}:{}'.format(topic, partition) for (topic, partition) in partitions ) out['verbose'] = "Partitions:\n" + "\n".join(lines) else: cmdline = sys.argv[:] cmdline.insert(1, '-v') out['message'] += '\nTo see all offline partitions run: ' + ' '.join(cmdline) if verbose: out['raw']['partitions'] = [ {'topic': topic, 'partition': partition} for (topic, partition) in partitions ] return out
def _prepare_output(partitions, verbose)
Returns dict with 'raw' and 'message' keys filled.
3.111928
2.827708
1.100512
offline = get_topic_partition_with_error( self.cluster_config, LEADER_NOT_AVAILABLE_ERROR, ) errcode = status_code.OK if not offline else status_code.CRITICAL out = _prepare_output(offline, self.args.verbose) return errcode, out
def run_command(self)
Checks the number of offline partitions
12.09763
9.383515
1.289243
'''Get the group_id of groups committed into Kafka.''' kafka_group_reader = KafkaGroupReader(cluster_config) return list(kafka_group_reader.read_groups().keys())
def get_kafka_groups(cls, cluster_config)
Get the group_id of groups committed into Kafka.
7.850148
4.393604
1.786722
with closing(SSHClient()) as client: client.set_missing_host_key_policy(AutoAddPolicy()) cfg = { "hostname": host, "timeout": max_timeout, } if ssh_password: cfg['password'] = ssh_password ssh_config = SSHConfig() user_config_file = os.path.expanduser("~/.ssh/config") if os.path.exists(user_config_file): with open(user_config_file) as f: ssh_config.parse(f) host_config = ssh_config.lookup(host) if "user" in host_config: cfg["username"] = host_config["user"] if "proxycommand" in host_config: cfg["sock"] = ProxyCommand(host_config["proxycommand"]) if "identityfile" in host_config: cfg['key_filename'] = host_config['identityfile'] if "port" in host_config: cfg["port"] = int(host_config["port"]) attempts = 0 while attempts < max_attempts: try: attempts += 1 client.connect(**cfg) break except socket.error as e: if attempts < max_attempts: print("SSH to host {0} failed, retrying...".format(host)) time.sleep(max_timeout) else: print("SSH Exception: {0}".format(e)) else: raise MaxConnectionAttemptsError( "Exceeded max attempts to connect to host {0} after {1} retries".format(host, max_attempts) ) yield Connection(client, forward_agent, sudoable)
def ssh(host, forward_agent=False, sudoable=False, max_attempts=1, max_timeout=5, ssh_password=None)
Manages an SSH connection to the desired host. Will leverage your ssh config at ~/.ssh/config if available :param host: the server to connect to :type host: str :param forward_agent: forward the local agents :type forward_agent: bool :param sudoable: allow sudo commands :type sudoable: bool :param max_attempts: the maximum attempts to connect to the desired host :type max_attempts: int :param max_timeout: the maximum timeout in seconds to sleep between attempts :type max_timeout: int :param ssh_password: SSH password to use if needed :type ssh_password: str :returns an SSH connection to the desired host :rtype: Connection :raises MaxConnectionAttemptsError: Exceeded the maximum attempts to establish the SSH connection.
1.96627
1.963371
1.001477
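Since the function above ends with a yield of a Connection, it is presumably a contextmanager-decorated generator; a hedged usage sketch follows, with the host name and command as placeholders.

# Sketch only: assumes ssh() is wrapped with contextlib.contextmanager,
# as the trailing `yield Connection(...)` suggests.
with ssh("broker-1.example.com", forward_agent=True, max_attempts=3) as conn:
    stdin, stdout, stderr = conn.exec_command("uptime")
    report_stdout("broker-1.example.com", stdout)
    report_stderr("broker-1.example.com", stderr)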
lines = stdout.readlines() if lines: print("STDOUT from {host}:".format(host=host)) for line in lines: print(line.rstrip(), file=sys.stdout)
def report_stdout(host, stdout)
Take a stdout stream and print its lines to output if lines are present. :param host: the host where the process is running :type host: str :param stdout: the stdout of that process :type stdout: paramiko.channel.Channel
3.056203
3.422935
0.89286
lines = stderr.readlines() if lines: print("STDERR from {host}:".format(host=host)) for line in lines: print(line.rstrip(), file=sys.stderr)
def report_stderr(host, stderr)
Take a stderr stream and print its lines to output if lines are present. :param host: the host where the process is running :type host: str :param stderr: the stderr of that process :type stderr: paramiko.channel.Channel
2.985548
3.305669
0.90316
new_command = "sudo {0}".format(command) return self.exec_command(new_command, bufsize)
def sudo_command(self, command, bufsize=-1)
Sudo a command on the SSH server. Delegates to :func:`~ssh.Connection.exec_command` :param command: the command to execute :type command: str :param bufsize: interpreted the same way as by the built-in C{file()} function in python :type bufsize: int :returns the stdin, stdout, and stderr of the executing command :rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) :raises SSHException: if the server fails to execute the command
3.409538
5.380651
0.633666
channel = self.transport.open_session() if self.forward_agent: AgentRequestHandler(channel) if self.sudoable: channel.get_pty() channel.exec_command(command) if check_status and channel.recv_exit_status() != 0: raise RuntimeError("Command execution error: {}".format(command)) stdin = channel.makefile('wb', bufsize) stdout = channel.makefile('rb', bufsize) stderr = channel.makefile_stderr('rb', bufsize) return (stdin, stdout, stderr)
def exec_command(self, command, bufsize=-1, check_status=True)
Execute a command on the SSH server while preserving underlying agent forwarding and sudo privileges. https://github.com/paramiko/paramiko/blob/1.8/paramiko/client.py#L348 :param command: the command to execute :type command: str :param bufsize: interpreted the same way as by the built-in C{file()} function in python :type bufsize: int :param check_status: if enabled, waits for the command to complete and raises an exception if the exit status is non-zero. :type check_status: bool :returns the stdin, stdout, and stderr of the executing command :rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) :raises SSHException: if the server fails to execute the command
2.451626
2.419378
1.013329
# Build consumer-offset data in desired format
current_consumer_offsets = defaultdict(dict)
for topic, topic_offsets in six.iteritems(consumer_offsets_metadata):
    for partition_offset in topic_offsets:
        current_consumer_offsets[topic][partition_offset.partition] = \
            partition_offset.current
consumer_offsets_data = {'groupid': groupid, 'offsets': current_consumer_offsets}
cls.write_offsets_to_file(json_file, consumer_offsets_data)
def save_offsets( cls, consumer_offsets_metadata, topics_dict, json_file, groupid, )
Build offsets for the given topic-partitions in the required format from current offsets metadata and write them to the given json-file. :param consumer_offsets_metadata: Fetched consumer offsets from kafka. :param topics_dict: Dictionary of topic-partitions. :param json_file: Filename to store consumer-offsets. :param groupid: Current consumer-group.
2.999738
2.967907
1.010725
# Save consumer-offsets to file
with open(json_file_name, "w") as json_file:
    try:
        json.dump(consumer_offsets_data, json_file)
    except ValueError:
        print("Error: Invalid json data {data}".format(data=consumer_offsets_data))
        raise
    print("Consumer offset data saved in json-file {file}".format(file=json_file_name))
def write_offsets_to_file(cls, json_file_name, consumer_offsets_data)
Save built consumer-offsets data to given json file.
2.821776
2.656767
1.062109
groups = set()
for b_id in broker_ids:
    try:
        broker = self.cluster_topology.brokers[b_id]
    except KeyError:
        self.log.error("Invalid broker id %s.", b_id)
        # Raise an error for now. As alternative we may ignore the
        # invalid id and continue with the others.
        raise InvalidBrokerIdError(
            "Broker id {} does not exist in cluster".format(b_id),
        )
    broker.mark_decommissioned()
    groups.add(broker.replication_group)
for group in groups:
    self._decommission_brokers_in_group(group)
def decommission_brokers(self, broker_ids)
Decommission a list of brokers trying to keep the replication group the brokers belong to balanced. :param broker_ids: list of string representing valid broker ids in the cluster :raises: InvalidBrokerIdError when the id is invalid.
4.209375
3.852952
1.092507
try: group.rebalance_brokers() except EmptyReplicationGroupError: self.log.warning("No active brokers left in replication group %s", group) for broker in group.brokers: if broker.decommissioned and not broker.empty(): # In this case we need to reassign the remaining partitions # to other replication groups self.log.info( "Broker %s can't be decommissioned within the same " "replication group %s. Moving partitions to other " "replication groups.", broker, broker.replication_group, ) self._force_broker_decommission(broker) # Broker should be empty now if not broker.empty(): # Decommission may be impossible if there are not enough # brokers to redistributed the replicas. self.log.error( "Could not decommission broker %s. " "Partitions %s cannot be reassigned.", broker, broker.partitions, ) raise BrokerDecommissionError("Broker decommission failed.")
def _decommission_brokers_in_group(self, group)
Decommission the marked brokers of a group.
4.488208
4.450014
1.008583
# Balance replicas over replication-groups for each partition
if any(b.inactive for b in six.itervalues(self.cluster_topology.brokers)):
    self.log.error(
        "Impossible to rebalance replication groups because of inactive "
        "brokers."
    )
    raise RebalanceError(
        "Impossible to rebalance replication groups because of inactive "
        "brokers"
    )
# Balance replica-count over replication-groups
self.rebalance_replicas()
# Balance partition-count over replication-groups
self._rebalance_groups_partition_cnt()
def rebalance_replication_groups(self)
Rebalance partitions over replication groups. First step involves rebalancing replica-count for each partition across replication-groups. Second step involves rebalancing partition-count across replication-groups of the cluster.
5.432662
4.419684
1.229197
for rg in six.itervalues(self.cluster_topology.rgs): rg.rebalance_brokers()
def rebalance_brokers(self)
Rebalance partition-count across brokers within each replication-group.
8.712047
6.719898
1.296455
for b_id in broker_ids:
    try:
        broker = self.cluster_topology.brokers[b_id]
    except KeyError:
        self.log.error("Invalid broker id %s.", b_id)
        raise InvalidBrokerIdError(
            "Broker id {} does not exist in cluster".format(b_id),
        )
    broker.mark_revoked_leadership()
assert(len(self.cluster_topology.brokers) - len(broker_ids) > 0), "Not " \
    "all brokers can be revoked for leadership"
opt_leader_cnt = len(self.cluster_topology.partitions) // (
    len(self.cluster_topology.brokers) - len(broker_ids)
)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt)
# If the broker-ids to be revoked from leadership are still leaders for any
# partitions, try to forcefully move their leadership to followers if possible
pending_brokers = [
    b for b in six.itervalues(self.cluster_topology.brokers)
    if b.revoked_leadership and b.count_preferred_replica() > 0
]
for b in pending_brokers:
    self._force_revoke_leadership(b)
def revoke_leadership(self, broker_ids)
Revoke leadership for given brokers. :param broker_ids: List of broker-ids whose leadership needs to be revoked.
4.349168
4.397992
0.988899
owned_partitions = list(filter(
    lambda p: broker is p.leader,
    broker.partitions,
))
for partition in owned_partitions:
    if len(partition.replicas) == 1:
        self.log.error(
            "Cannot be revoked leadership for broker {b} for partition {p}. Replica count: 1"
            .format(p=partition, b=broker),
        )
        continue
    eligible_followers = [
        follower for follower in partition.followers
        if not follower.revoked_leadership
    ]
    if eligible_followers:
        # Pick follower with least leader-count
        best_fit_follower = min(
            eligible_followers,
            key=lambda follower: follower.count_preferred_replica(),
        )
        partition.swap_leader(best_fit_follower)
    else:
        self.log.error(
            "All replicas for partition {p} on broker {b} are to be revoked for leadership.".format(
                p=partition,
                b=broker,
            )
        )
def _force_revoke_leadership(self, broker)
Revoke the leadership of given broker for any remaining partitions. Algorithm: 1. Find the partitions (owned_partitions) with given broker as leader. 2. For each partition find the eligible followers. Brokers which are not to be revoked from leadership are eligible followers. 3. Select the follower who is leader for minimum partitions. 4. Assign the selected follower as leader. 5. Report any pending owned_partitions whose leader cannot be changed. This can happen when the replica count is 1 or when there are no eligible followers.
3.599738
3.17266
1.134612
opt_leader_cnt = len(self.cluster_topology.partitions) // len(self.cluster_topology.brokers)
# Balanced brokers transfer leadership to their under-balanced followers
self.rebalancing_non_followers(opt_leader_cnt)
def rebalance_leaders(self)
Re-order brokers in replicas such that every broker is assigned as preferred leader evenly.
13.584264
11.031417
1.231416
# Don't include leaders if they are marked for leadership removal
under_brokers = list(filter(
    lambda b: b.count_preferred_replica() < opt_cnt and not b.revoked_leadership,
    six.itervalues(self.cluster_topology.brokers),
))
if under_brokers:
    skip_brokers, skip_partitions = [], []
    for broker in under_brokers:
        skip_brokers.append(broker)
        broker.request_leadership(opt_cnt, skip_brokers, skip_partitions)
over_brokers = list(filter(
    lambda b: b.count_preferred_replica() > opt_cnt + 1,
    six.itervalues(self.cluster_topology.brokers),
))
# Any over-balanced brokers tries to donate their leadership to followers
if over_brokers:
    skip_brokers, used_edges = [], []
    for broker in over_brokers:
        skip_brokers.append(broker)
        broker.donate_leadership(opt_cnt, skip_brokers, used_edges)
def rebalancing_non_followers(self, opt_cnt)
Transfer leadership to any under-balanced followers, provided that they remain leader-balanced or can be recursively balanced through non-followers (followers of other leaders). Context: Consider a graph G: Nodes: Brokers (e.g. b1, b2, b3) Edges: From b1 to b2 s.t. b1 is a leader and b2 is its follower State of nodes: 1. Over-balanced/Optimally-balanced: (OB) if leadership-count(broker) >= opt-count 2. Under-balanced (UB) if leadership-count(broker) < opt-count leader-balanced: leadership-count(broker) is in [opt-count, opt-count+1] Algorithm: 1. Use depth-first search to find a path from some UB-broker to some OB-broker. 2. If a path is found, update the UB-broker and delete the path-edges (skip-partitions). 3. Continue with step 1 until all possible paths are explored.
3.771863
3.501202
1.077305
# Segregate replication-groups based on partition-count total_elements = sum(len(rg.partitions) for rg in six.itervalues(self.cluster_topology.rgs)) over_loaded_rgs, under_loaded_rgs = separate_groups( list(self.cluster_topology.rgs.values()), lambda rg: len(rg.partitions), total_elements, ) if over_loaded_rgs and under_loaded_rgs: self.cluster_topology.log.info( 'Over-loaded replication-groups {over_loaded}, under-loaded ' 'replication-groups {under_loaded} based on partition-count' .format( over_loaded=[rg.id for rg in over_loaded_rgs], under_loaded=[rg.id for rg in under_loaded_rgs], ) ) else: self.cluster_topology.log.info('Replication-groups are balanced based on partition-count.') return # Get optimal partition-count per replication-group opt_partition_cnt, _ = compute_optimum( len(self.cluster_topology.rgs), total_elements, ) # Balance replication-groups for over_loaded_rg in over_loaded_rgs: for under_loaded_rg in under_loaded_rgs: # Filter unique partition with replica-count > opt-replica-count # in over-loaded-rgs and <= opt-replica-count in under-loaded-rgs eligible_partitions = set(filter( lambda partition: over_loaded_rg.count_replica(partition) > len(partition.replicas) // len(self.cluster_topology.rgs) and under_loaded_rg.count_replica(partition) <= len(partition.replicas) // len(self.cluster_topology.rgs), over_loaded_rg.partitions, )) # Move all possible partitions for eligible_partition in eligible_partitions: # The difference of partition-count b/w the over-loaded and under-loaded # replication-groups should be greater than 1 for convergence if len(over_loaded_rg.partitions) - len(under_loaded_rg.partitions) > 1: over_loaded_rg.move_partition_replica( under_loaded_rg, eligible_partition, ) else: break # Move to next replication-group if either of the groups got # balanced, otherwise try with next eligible partition if (len(under_loaded_rg.partitions) == opt_partition_cnt or len(over_loaded_rg.partitions) == opt_partition_cnt): break if len(over_loaded_rg.partitions) == opt_partition_cnt: # Move to next over-loaded replication-group if balanced break
def _rebalance_groups_partition_cnt(self)
Re-balance partition-count across replication-groups. Algorithm: The key constraint is not to create any replica-count imbalance while moving partitions across replication-groups. 1) Divide replication-groups into over- and under-loaded groups in terms of partition-count. 2) For each over-loaded replication-group, select eligible partitions which can be moved to under-loaded groups. Partitions with greater than optimum replica-count for the group have the ability to donate one of their replicas without creating replica-count imbalance. 3) Destination replication-group is selected based on minimum partition-count and ability to accept one of the eligible partition-replicas. 4) Source and destination brokers are selected based on :- * their ability to donate and accept extra partition-replica respectively. * maximum and minimum partition-counts respectively. 5) Move partition-replica from source to destination-broker. 6) Repeat steps 1) to 5) until groups are balanced or cannot be balanced further.
2.828613
2.581626
1.095671
try: partition = self.cluster_topology.partitions[partition_name] except KeyError: raise InvalidPartitionError( "Partition name {name} not found".format(name=partition_name), ) if partition.replication_factor + count > len(self.cluster_topology.brokers): raise InvalidReplicationFactorError( "Cannot increase replication factor to {0}. There are only " "{1} brokers." .format( partition.replication_factor + count, len(self.cluster_topology.brokers), ) ) non_full_rgs = [ rg for rg in self.cluster_topology.rgs.values() if rg.count_replica(partition) < len(rg.brokers) ] for _ in range(count): total_replicas = sum( rg.count_replica(partition) for rg in non_full_rgs ) opt_replicas, _ = compute_optimum( len(non_full_rgs), total_replicas, ) under_replicated_rgs = [ rg for rg in non_full_rgs if rg.count_replica(partition) < opt_replicas ] candidate_rgs = under_replicated_rgs or non_full_rgs rg = min(candidate_rgs, key=lambda rg: len(rg.partitions)) rg.add_replica(partition) if rg.count_replica(partition) >= len(rg.brokers): non_full_rgs.remove(rg)
def add_replica(self, partition_name, count=1)
Increase the replication-factor for a partition. The replication-group to add to is determined as follows: 1. Find all replication-groups that have brokers not already replicating the partition. 2. Of these, find replication-groups that have fewer than the average number of replicas for this partition. 3. Choose the replication-group with the fewest overall partitions. :param partition_name: (topic_id, partition_id) of the partition to add replicas of. :param count: The number of replicas to add. :raises InvalidReplicationFactorError when the resulting replication factor is greater than the number of brokers in the cluster.
2.447947
2.27033
1.078234
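Both add_replica and remove_replica lean on compute_optimum to spread replicas evenly across groups. A minimal sketch of what that helper presumably computes (floor quotient plus remainder); this is an assumption about its behaviour, so the sketch uses its own name.

def compute_optimum_sketch(num_groups, total_replicas):
    # Assumed behaviour: the balanced per-group count and the number of
    # groups that must carry one extra replica.
    return total_replicas // num_groups, total_replicas % num_groups

# e.g. 5 replicas over 3 replication-groups -> each group gets 1,
# and 2 groups carry one extra replica.
assert compute_optimum_sketch(3, 5) == (1, 2)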
try: partition = self.cluster_topology.partitions[partition_name] except KeyError: raise InvalidPartitionError( "Partition name {name} not found".format(name=partition_name), ) if partition.replication_factor <= count: raise InvalidReplicationFactorError( "Cannot remove {0} replicas. Replication factor is only {1}." .format(count, partition.replication_factor) ) osr = [] for broker_id in osr_broker_ids: try: osr.append(self.cluster_topology.brokers[broker_id]) except KeyError: raise InvalidBrokerIdError( "No broker found with id {bid}".format(bid=broker_id), ) non_empty_rgs = [ rg for rg in self.cluster_topology.rgs.values() if rg.count_replica(partition) > 0 ] rgs_with_osr = [ rg for rg in non_empty_rgs if any(b in osr for b in rg.brokers) ] for _ in range(count): candidate_rgs = rgs_with_osr or non_empty_rgs total_replicas = sum( rg.count_replica(partition) for rg in candidate_rgs ) opt_replica_cnt, _ = compute_optimum( len(candidate_rgs), total_replicas, ) over_replicated_rgs = [ rg for rg in candidate_rgs if rg.count_replica(partition) > opt_replica_cnt ] candidate_rgs = over_replicated_rgs or candidate_rgs rg = max(candidate_rgs, key=lambda rg: len(rg.partitions)) osr_in_rg = [b for b in rg.brokers if b in osr] rg.remove_replica(partition, osr_in_rg) osr = [b for b in osr if b in partition.replicas] if rg in rgs_with_osr and len(osr_in_rg) == 1: rgs_with_osr.remove(rg) if rg.count_replica(partition) == 0: non_empty_rgs.remove(rg) new_leader = min( partition.replicas, key=lambda broker: broker.count_preferred_replica(), ) partition.swap_leader(new_leader)
def remove_replica(self, partition_name, osr_broker_ids, count=1)
Remove one replica of a partition from the cluster. The replication-group to remove from is determined as follows: 1. Find all replication-groups that contain at least one out-of-sync replica for this partition. 2. Of these, find replication-groups with more than the average number of replicas of this partition. 3. Choose the replication-group with the most overall partitions. 4. Repeat steps 1-3 with in-sync replicas After this operation, the preferred leader for this partition will be set to the broker that leads the fewest other partitions, even if the current preferred leader is not removed. This is done to keep the number of preferred replicas balanced across brokers in the cluster. :param partition_name: (topic_id, partition_id) of the partition to remove replicas of. :param osr_broker_ids: A list of the partition's out-of-sync broker ids. :param count: The number of replicas to remove. :raises: InvalidReplicationFactorError when count is greater than the replication factor of the partition.
2.28536
2.183086
1.046848
# Is the new consumer already subscribed to any of these topics? common_topics = [topic for topic in topics_dest_group if topic in source_topics] if common_topics: print( "Error: Consumer Group ID: {groupid} is already " "subscribed to following topics: {topic}.\nPlease delete this " "topics from new group before re-running the " "command.".format( groupid=dest_groupid, topic=', '.join(common_topics), ), file=sys.stderr, ) sys.exit(1) # Let's confirm what the user intends to do. if topics_dest_group: in_str = ( "New Consumer Group: {dest_groupid} already " "exists.\nTopics subscribed to by the consumer groups are listed " "below:\n{source_groupid}: {source_group_topics}\n" "{dest_groupid}: {dest_group_topics}\nDo you intend to copy into" "existing consumer destination-group? (y/n)".format( source_groupid=source_groupid, source_group_topics=source_topics, dest_groupid=dest_groupid, dest_group_topics=topics_dest_group, ) ) prompt_user_input(in_str)
def preprocess_topics(source_groupid, source_topics, dest_groupid, topics_dest_group)
Pre-process the topics in source and destination group for duplicates.
3.674887
3.717153
0.98863
# Create new offsets
for topic, partition_offsets in six.iteritems(offsets):
    for partition, offset in six.iteritems(partition_offsets):
        new_path = "/consumers/{groupid}/offsets/{topic}/{partition}".format(
            groupid=consumer_group,
            topic=topic,
            partition=partition,
        )
        try:
            zk.create(new_path, value=offset, makepath=True)
        except NodeExistsError:
            print(
                "Error: Path {path} already exists. Please re-run the "
                "command.".format(path=new_path),
                file=sys.stderr,
            )
            raise
def create_offsets(zk, consumer_group, offsets)
Create path with offset value for each topic-partition of given consumer group. :param zk: Zookeeper client :param consumer_group: Consumer group id for given offsets :type consumer_group: int :param offsets: Offsets of all topic-partitions :type offsets: dict(topic, dict(partition, offset))
2.487237
2.501552
0.994278
source_offsets = defaultdict(dict) for topic, partitions in six.iteritems(topics): for partition in partitions: offset, _ = zk.get( "/consumers/{groupid}/offsets/{topic}/{partition}".format( groupid=consumer_group, topic=topic, partition=partition, ) ) source_offsets[topic][partition] = offset return source_offsets
def fetch_offsets(zk, consumer_group, topics)
Fetch offsets for given topics of given consumer group. :param zk: Zookeeper client :param consumer_group: Consumer group id for given offsets :type consumer_group: int :rtype: dict(topic, dict(partition, offset))
2.400267
2.520161
0.952426
metadata = get_topic_partition_metadata(kafka_config.broker_list) if CONSUMER_OFFSET_TOPIC not in metadata: raise UnknownTopic("Consumer offset topic is missing.") return len(metadata[CONSUMER_OFFSET_TOPIC])
def get_offset_topic_partition_count(kafka_config)
Given a kafka cluster configuration, return the number of partitions in the offset topic. It will raise an UnknownTopic exception if the topic cannot be found.
4.386901
3.844715
1.141021
def java_string_hashcode(s):
    h = 0
    for c in s:
        h = (31 * h + ord(c)) & 0xFFFFFFFF
    return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000

return abs(java_string_hashcode(group)) % partition_count
def get_group_partition(group, partition_count)
Given a group name, return the partition number of the consumer offset topic containing the data associated to that group.
2.286686
2.203542
1.037732
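The group-to-partition mapping above hashes the group id the way Java's String.hashCode does and takes it modulo the offsets-topic partition count; a self-contained sketch, with the group name and partition count made up for illustration.

def java_string_hashcode(s):
    # Signed 32-bit Java String.hashCode reimplemented in Python.
    h = 0
    for c in s:
        h = (31 * h + ord(c)) & 0xFFFFFFFF
    return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000

# A group named "my-group" on a 50-partition offsets topic always maps to
# the same partition, so all of its commits land on one partition.
partition = abs(java_string_hashcode("my-group")) % 50
assert 0 <= partition < 50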
tp_timestamps = {} for topic in topics: topic_partitions = consumer_partitions_for_topic(consumer, topic) for tp in topic_partitions: tp_timestamps[tp] = timestamp return consumer.offsets_for_times(tp_timestamps)
def topic_offsets_for_timestamp(consumer, timestamp, topics)
Given an initialized KafkaConsumer, timestamp, and list of topics, looks up the offsets for the given topics by timestamp. The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. Arguments: consumer (KafkaConsumer): an initialized kafka-python consumer timestamp (int): Unix epoch milliseconds. Unit should be milliseconds since beginning of the epoch (midnight Jan 1, 1970 (UTC)) topics (list): List of topics whose offsets are to be fetched. :returns: ``{TopicPartition: OffsetAndTimestamp}``: mapping from partition to the timestamp and offset of the first message with timestamp greater than or equal to the target timestamp. Returns ``{TopicPartition: None}`` for specific topic-partitions if: 1. Timestamps are not supported in messages 2. No offsets in the partition after the given timestamp 3. No data in the topic-partition :raises: ValueError: If the target timestamp is negative UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms
3.041888
3.572919
0.851373
topic_partitions = [] partitions = consumer.partitions_for_topic(topic) if partitions is not None: for partition in partitions: topic_partitions.append(TopicPartition(topic, partition)) else: logging.error( "No partitions found for topic {}. Maybe it doesn't exist?".format(topic), ) return topic_partitions
def consumer_partitions_for_topic(consumer, topic)
Returns a list of all TopicPartitions for a given topic. Arguments: consumer: an initialized KafkaConsumer topic: a topic name to fetch TopicPartitions for :returns: list(TopicPartition): A list of TopicPartitions that belong to the given topic
2.365952
2.453436
0.964342
no_offsets = set() for tp, offset in six.iteritems(partition_to_offset): if offset is None: logging.error( "No offsets found for topic-partition {tp}. Either timestamps not supported" " for the topic {tp}, or no offsets found after timestamp specified, or there is no" " data in the topic-partition.".format(tp=tp), ) no_offsets.add(tp) if atomic and len(no_offsets) > 0: logging.error( "Commit aborted; offsets were not found for timestamps in" " topics {}".format(",".join([str(tp) for tp in no_offsets])), ) return offsets_metadata = { tp: OffsetAndMetadata(partition_to_offset[tp].offset, metadata=None) for tp in six.iterkeys(partition_to_offset) if tp not in no_offsets } if len(offsets_metadata) != 0: consumer.commit(offsets_metadata)
def consumer_commit_for_times(consumer, partition_to_offset, atomic=False)
Commits offsets to Kafka using the given KafkaConsumer and offsets, a mapping of TopicPartition to OffsetAndTimestamp. Arguments: consumer (KafkaConsumer): an initialized kafka-python consumer. partition_to_offset (dict TopicPartition: OffsetAndTimestamp): Map of TopicPartition to OffsetAndTimestamp. Return value of offsets_for_times. atomic (bool): Flag to specify whether the commit should fail if offsets are not found for some TopicPartition: timestamp pairs.
3.511414
3.577892
0.98142
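Putting topic_offsets_for_timestamp and consumer_commit_for_times together gives a rewind-to-timestamp flow. A hedged sketch, assuming both helpers are importable and with the broker address, group id, topic, and timestamp as placeholders.

from kafka import KafkaConsumer

# Sketch: rewind a consumer group to the offsets in effect at a given time.
consumer = KafkaConsumer(
    bootstrap_servers="broker-1.example.com:9092",  # placeholder broker
    group_id="my-group",                            # placeholder group
    enable_auto_commit=False,
)
target_ts_ms = 1600000000000  # placeholder Unix-epoch milliseconds

offsets = topic_offsets_for_timestamp(consumer, target_ts_ms, topics=["t1"])
# atomic=True aborts the commit if any partition has no offset for the timestamp.
consumer_commit_for_times(consumer, offsets, atomic=True)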
if not kafka_topology_base_path: config_dirs = get_conf_dirs() else: config_dirs = [kafka_topology_base_path] topology = None for config_dir in config_dirs: try: topology = TopologyConfiguration( cluster_type, config_dir, ) except MissingConfigurationError: pass if not topology: raise MissingConfigurationError( "No available configuration for type {0}".format(cluster_type), ) if cluster_name: return topology.get_cluster_by_name(cluster_name) else: return topology.get_local_cluster()
def get_cluster_config( cluster_type, cluster_name=None, kafka_topology_base_path=None, )
Return the cluster configuration. Use the local cluster if cluster_name is not specified. :param cluster_type: the type of the cluster :type cluster_type: string :param cluster_name: the name of the cluster :type cluster_name: string :param kafka_topology_base_path: base path to look for <cluster_type>.yaml :type kafka_topology_base_path: string :returns: the cluster :rtype: ClusterConfig
2.545051
2.493117
1.020831
if not kafka_topology_base_path: config_dirs = get_conf_dirs() else: config_dirs = [kafka_topology_base_path] types = set() for config_dir in config_dirs: new_types = [x for x in map( lambda x: os.path.basename(x)[:-5], glob.glob('{0}/*.yaml'.format(config_dir)), ) if x not in types] for cluster_type in new_types: try: topology = TopologyConfiguration( cluster_type, config_dir, ) except ConfigurationError: continue types.add(cluster_type) yield topology
def iter_configurations(kafka_topology_base_path=None)
Cluster topology iterator. Iterate over all the topologies available in config.
2.813891
2.769552
1.01601
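A short sketch of iterating over every discovered topology; it assumes TopologyConfiguration exposes a cluster_type attribute, as used elsewhere in this module:

for topology in iter_configurations('/etc/kafka_discovery'):
    print(topology.cluster_type)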
config_path = os.path.join(
    self.kafka_topology_path,
    '{id}.yaml'.format(id=self.cluster_type),
)
self.log.debug("Loading configuration from %s", config_path)
if os.path.isfile(config_path):
    topology_config = load_yaml_config(config_path)
else:
    raise MissingConfigurationError(
        "Topology configuration {0} for cluster {1} "
        "does not exist".format(
            config_path,
            self.cluster_type,
        )
    )
self.log.debug("Topology configuration %s", topology_config)
try:
    self.clusters = topology_config['clusters']
except KeyError:
    self.log.exception("Invalid topology file")
    raise InvalidConfigurationError("Invalid topology file {0}".format(
        config_path))
if 'local_config' in topology_config:
    self.local_config = topology_config['local_config']
def load_topology_config(self)
Load the topology configuration
2.557699
2.514648
1.01712
error_msg = 'Positive integer or -1 required, {string} given.'.format(string=string)
try:
    value = int(string)
except ValueError:
    raise argparse.ArgumentTypeError(error_msg)
if value <= 0 and value != -1:
    raise argparse.ArgumentTypeError(error_msg)
return value
def convert_to_broker_id(string)
Convert string to kafka broker_id.
3.003138
2.842932
1.056353
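Illustrative calls showing which inputs are accepted and which are rejected (the function is also wired into argparse as a type converter in parse_args below):

convert_to_broker_id("3")    # -> 3
convert_to_broker_id("-1")   # -> -1 (broker id will be read from --data-path)
convert_to_broker_id("0")    # raises argparse.ArgumentTypeError
convert_to_broker_id("abc")  # raises argparse.ArgumentTypeError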
parser = argparse.ArgumentParser( description='Check kafka current status', ) parser.add_argument( "--cluster-type", "-t", dest='cluster_type', required=True, help='Type of cluster', default=None, ) parser.add_argument( "--cluster-name", "-c", dest='cluster_name', help='Name of the cluster', ) parser.add_argument( '--discovery-base-path', dest='discovery_base_path', type=str, help='Path of the directory containing the <cluster_type>.yaml config', ) parser.add_argument( "--broker-id", help='The broker id where the check is running. Set to -1 if you use automatic ' 'broker ids, and it will read the id from data-path instead. This parameter is ' 'required only in case controller-only or first-broker-only are used.', type=convert_to_broker_id, ) parser.add_argument( "--data-path", help='Path to the Kafka data folder.', ) parser.add_argument( '--controller-only', action="store_true", help='If this parameter is specified, it will do nothing and succeed on ' 'non-controller brokers. Default: %(default)s', ) parser.add_argument( '--first-broker-only', action='store_true', help='If specified, the command will only perform the check if ' 'broker_id is the lowest broker id in the cluster. If it is not the lowest, ' 'it will not perform any check and succeed immediately. ' 'Default: %(default)s', ) parser.add_argument( '-v', '--verbose', help='print verbose execution information. Default: %(default)s', action="store_true", default=False, ) parser.add_argument( '-j', '--json', help='Print output in json format. Default: %(default)s', action="store_true", default=False, ) subparsers = parser.add_subparsers() MinIsrCmd().add_subparser(subparsers) ReplicaUnavailabilityCmd().add_subparser(subparsers) ReplicationFactorCmd().add_subparser(subparsers) OfflineCmd().add_subparser(subparsers) return parser.parse_args()
def parse_args()
Parse the command line arguments.
3.145116
3.114693
1.009767
args = parse_args()
logging.basicConfig(level=logging.WARN)
# to prevent flooding for sensu-check.
logging.getLogger('kafka').setLevel(logging.CRITICAL)
if args.controller_only and args.first_broker_only:
    terminate(
        status_code.WARNING,
        prepare_terminate_message(
            "Only one of controller_only and first_broker_only should be used",
        ),
        args.json,
    )

if args.controller_only or args.first_broker_only:
    if args.broker_id is None:
        terminate(
            status_code.WARNING,
            prepare_terminate_message("broker_id is not specified"),
            args.json,
        )
    elif args.broker_id == -1:
        try:
            args.broker_id = get_broker_id(args.data_path)
        except Exception as e:
            terminate(
                status_code.WARNING,
                prepare_terminate_message("{}".format(e)),
                args.json,
            )

try:
    cluster_config = config.get_cluster_config(
        args.cluster_type,
        args.cluster_name,
        args.discovery_base_path,
    )
    code, msg = args.command(cluster_config, args)
except ConfigurationError as e:
    terminate(
        status_code.CRITICAL,
        prepare_terminate_message("ConfigurationError {0}".format(e)),
        args.json,
    )

terminate(code, msg, args.json)
def run()
Verify command-line arguments and run commands
3.169836
3.134746
1.011194
parser = argparse.ArgumentParser( description='Manage and describe partition layout over brokers of' ' a cluster.', ) parser.add_argument( '--cluster-type', '-t', dest='cluster_type', help='Type of the cluster.', type=str, required=True, ) parser.add_argument( '--cluster-name', '-c', dest='cluster_name', help='Name of the cluster (Default to local cluster).', ) parser.add_argument( '--discovery-base-path', dest='discovery_base_path', type=str, help='Path of the directory containing the <cluster_type>.yaml config', ) parser.add_argument( '--logconf', type=str, help='Path to logging configuration file. Default: log to console.', ) parser.add_argument( '--apply', action='store_true', help='Proposed-plan will be executed on confirmation.', ) parser.add_argument( '--no-confirm', action='store_true', help='Proposed-plan will be executed without confirmation.' ' --apply flag also required.', ) parser.add_argument( '--write-to-file', dest='proposed_plan_file', metavar='<reassignment-plan-file-path>', type=str, help='Write the partition reassignment plan ' 'to a json file.', ) parser.add_argument( '--group-parser', type=str, help='Module containing an implementation of ReplicationGroupParser. ' 'The module should be specified as path_to_include_to_py_path:module. ' 'Ex: "/module/path:module.parser". ' 'If not specified the default replication group parser will create ' 'only one group for all brokers.', ) parser.add_argument( '--partition-measurer', type=str, help='Module containing an implementation of PartitionMeasurer. ' 'The module should be specified as path_to_include_to_py_path:module. ' 'Default: Assign each partition a weight and size of 1.' ) parser.add_argument( '--measurer-args', type=str, action='append', default=[], help='Argument list that is passed to the chosen PartitionMeasurer. ' 'Ex: --measurer-args "--n 10" will pass ["--n", "10"] to the ' 'PartitionMeasurer\'s parse_args method.' ) parser.add_argument( '--cluster-balancer', type=str, help='Module containing an implementation of ClusterBalancer. ' 'The module should be specified as path_to_include_to_py_path:module. ' 'Default: PartitionCountBalancer.', ) parser.add_argument( '--balancer-args', type=str, action='append', default=[], help='Argument list that is passed to the chosen ClusterBalancer. ' 'Ex: --balancer-args "--n 10" will pass ["--n", "10"] to the ' 'ClusterBalancer\'s parse_args method.' ) parser.add_argument( '--partition-count-balancer', action='store_const', const=PARTITION_COUNT_BALANCER_MODULE, dest='cluster_balancer', help='Use the number of partitions on each broker to balance the ' 'cluster.', ) parser.add_argument( '--genetic-balancer', action='store_const', const=GENETIC_BALANCER_MODULE, dest='cluster_balancer', help='Use partition metrics and a genetic algorithm to balance the ' 'cluster.', ) subparsers = parser.add_subparsers() RebalanceCmd().add_subparser(subparsers) DecommissionCmd().add_subparser(subparsers) RevokeLeadershipCmd().add_subparser(subparsers) StatsCmd().add_subparser(subparsers) StoreAssignmentsCmd().add_subparser(subparsers) ReplaceBrokerCmd().add_subparser(subparsers) SetReplicationFactorCmd().add_subparser(subparsers) return parser.parse_args()
def parse_args()
Parse the arguments.
2.802136
2.788443
1.004911
if not issubclass(exc_type, KeyboardInterrupt):  # do not log Ctrl-C
    _log.critical(
        "Uncaught exception:",
        exc_info=(exc_type, exc_value, exc_traceback)
    )
sys.__excepthook__(exc_type, exc_value, exc_traceback)
def exception_logger(exc_type, exc_value, exc_traceback)
Log unhandled exceptions
2.288816
2.278615
1.004477
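A minimal sketch of installing the handler as the process-wide exception hook; _log is assumed to be a module-level logger:

import sys

sys.excepthook = exception_logger  # uncaught exceptions are logged before the default hook runs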
topics_with_wrong_rf = []

for topic_name, partitions in topics.items():
    min_isr = get_min_isr(zk, topic_name) or default_min_isr
    replication_factor = len(partitions[0].replicas)

    if replication_factor >= min_isr + 1:
        continue

    topics_with_wrong_rf.append({
        'replication_factor': replication_factor,
        'min_isr': min_isr,
        'topic': topic_name,
    })

return topics_with_wrong_rf
def _find_topics_with_wrong_rp(topics, zk, default_min_isr)
Returns topics with wrong replication factor.
2.279562
2.111926
1.079376
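The flagging rule can be checked in isolation; a worked example with hypothetical numbers:

replication_factor = 2
min_isr = 2
# A topic is reported when it cannot lose a single replica and still satisfy min.insync.replicas.
needs_attention = replication_factor < min_isr + 1  # True here: 2 < 3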
out = {}
topics_count = len(topics_with_wrong_rf)
out['raw'] = {
    'topics_with_wrong_replication_factor_count': topics_count,
}

if topics_count == 0:
    out['message'] = 'All topics have proper replication factor.'
else:
    out['message'] = (
        "{0} topic(s) have replication factor lower than specified min ISR + 1."
    ).format(topics_count)
    if verbose:
        lines = (
            "replication_factor={replication_factor} is lower than min_isr={min_isr} + 1 for {topic}"
            .format(
                min_isr=topic['min_isr'],
                topic=topic['topic'],
                replication_factor=topic['replication_factor'],
            )
            for topic in topics_with_wrong_rf
        )
        out['verbose'] = "Topics:\n" + "\n".join(lines)

if verbose:
    out['raw']['topics'] = topics_with_wrong_rf

return out
def _prepare_output(topics_with_wrong_rf, verbose)
Returns dict with 'raw' and 'message' keys filled.
3.00228
2.816234
1.066062
topics = get_topic_partition_metadata(self.cluster_config.broker_list)
topics_with_wrong_rf = _find_topics_with_wrong_rp(
    topics,
    self.zk,
    self.args.default_min_isr,
)
errcode = status_code.OK if not topics_with_wrong_rf else status_code.CRITICAL
out = _prepare_output(topics_with_wrong_rf, self.args.verbose)
return errcode, out
def run_command(self)
Replication factor command: checks the replication factor settings and compares them with min.isr in the cluster.
6.896889
6.16631
1.118479
return kafka.protocol.commit.OffsetCommitRequest[2](
    consumer_group=group,
    consumer_group_generation_id=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_GENERATION_ID,
    consumer_id='',
    retention_time=kafka.protocol.commit.OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
    topics=[(
        topic,
        [(
            partition,
            payload.offset,
            payload.metadata)
            for partition, payload in six.iteritems(topic_payloads)])
        for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
def encode_offset_commit_request_kafka(cls, group, payloads)
Encode an OffsetCommitRequest struct Arguments: group: string, the consumer group you are committing offsets for payloads: list of OffsetCommitRequestPayload
3.648733
3.459114
1.054817
return ConsumerMetadataResponse(
    response.error_code,
    response.coordinator_id,
    response.host,
    response.port,
)
def decode_consumer_metadata_response(cls, response)
Decode GroupCoordinatorResponse. Note that ConsumerMetadataResponse is renamed to GroupCoordinatorResponse in 0.9+ Arguments: response: response to decode
5.219951
4.252683
1.227449
if self.args.num_gens < self.args.max_partition_movements: self.log.warning( "num-gens ({num_gens}) is less than max-partition-movements" " ({max_partition_movements}). max-partition-movements will" " never be reached.".format( num_gens=self.args.num_gens, max_partition_movements=self.args.max_partition_movements, ) ) if self.args.replication_groups: self.log.info("Rebalancing replicas across replication groups...") rg_movement_count, rg_movement_size = self.rebalance_replicas( max_movement_count=self.args.max_partition_movements, max_movement_size=self.args.max_movement_size, ) self.log.info( "Done rebalancing replicas. %d partitions moved.", rg_movement_count, ) else: rg_movement_size = 0 rg_movement_count = 0 # Use a fixed random seed to make results reproducible. random.seed(RANDOM_SEED) # NOTE: only active brokers are considered when rebalancing state = _State( self.cluster_topology, brokers=self.cluster_topology.active_brokers ) state.movement_size = rg_movement_size pop = {state} do_rebalance = self.args.brokers or self.args.leaders # Cannot rebalance when all partitions have zero weight because the # score function is undefined. if do_rebalance and not state.total_weight: self.log.error( "Rebalance impossible. All partitions have zero weight.", ) do_rebalance = False if do_rebalance: self.log.info("Rebalancing with genetic algorithm.") # Run the genetic algorithm for a fixed number of generations. for i in range(self.args.num_gens): start = time.time() pop_candidates = self._explore(pop) pop = self._prune(pop_candidates) end = time.time() self.log.debug( "Generation %d: keeping %d of %d assignment(s) in %f seconds", i, len(pop), len(pop_candidates), end - start, ) # Choose the state with the greatest score. state = sorted(pop, key=self._score, reverse=True)[0] self.log.info( "Done rebalancing. %d partitions moved.", state.movement_count, ) self.log.info("Total movement size: %f", state.movement_size) assignment = state.assignment # Since only active brokers are considered when rebalancing, inactive # brokers need to be added back to the new assignment. all_brokers = set(self.cluster_topology.brokers.values()) inactive_brokers = all_brokers - set(state.brokers) for partition_name, replicas in assignment: for broker in inactive_brokers: if broker in self.cluster_topology.partitions[partition_name].replicas: replicas.append(broker.id) self.cluster_topology.update_cluster_topology(assignment)
def rebalance(self)
The genetic rebalancing algorithm runs for a fixed number of generations. Each generation has two phases: exploration and pruning. In exploration, a large set of possible states are found by randomly applying assignment changes to the existing states. In pruning, each state is given a score based on the balance of the cluster and the states with the highest scores are chosen as the starting states for the next generation.
3.139132
2.975099
1.055135
decommission_brokers = []
for broker_id in broker_ids:
    try:
        broker = self.cluster_topology.brokers[broker_id]
        broker.mark_decommissioned()
        decommission_brokers.append(broker)
    except KeyError:
        raise InvalidBrokerIdError(
            "No broker found with id {broker_id}".format(broker_id=broker_id)
        )

partitions = defaultdict(int)

# Remove all partitions from decommissioned brokers.
for broker in decommission_brokers:
    broker_partitions = list(broker.partitions)
    for partition in broker_partitions:
        broker.remove_partition(partition)
        partitions[partition.name] += 1

active_brokers = self.cluster_topology.active_brokers

# Create state from the initial cluster topology.
self.state = _State(self.cluster_topology, brokers=active_brokers)

# Add partition replicas to active brokers one-by-one.
for partition_name in sorted(six.iterkeys(partitions)):  # repeatability
    partition = self.cluster_topology.partitions[partition_name]
    replica_count = partitions[partition_name]
    try:
        self.add_replica(partition_name, replica_count)
    except InvalidReplicationFactorError:
        raise BrokerDecommissionError(
            "Not enough active brokers in the cluster. "
            "Partition {partition} has replication-factor {rf}, "
            "but only {brokers} active brokers remain."
            .format(
                partition=partition_name,
                rf=partition.replication_factor + replica_count,
                brokers=len(active_brokers)
            )
        )
def decommission_brokers(self, broker_ids)
Decommissioning brokers is done by removing all partitions from the decommissioned brokers and adding them, one-by-one, back to the cluster. :param broker_ids: List of broker ids that should be decommissioned.
2.926543
2.823784
1.03639
try: partition = self.cluster_topology.partitions[partition_name] except KeyError: raise InvalidPartitionError( "Partition name {name} not found.".format(name=partition_name), ) active_brokers = self.cluster_topology.active_brokers if partition.replication_factor + count > len(active_brokers): raise InvalidReplicationFactorError( "Cannot increase replication factor from {rf} to {new_rf}." " There are only {brokers} active brokers." .format( rf=partition.replication_factor, new_rf=partition.replication_factor + count, brokers=len(active_brokers), ) ) partition_index = self.state.partition_indices[partition] for _ in range(count): # Find eligible replication-groups. non_full_rgs = [ rg for rg in six.itervalues(self.cluster_topology.rgs) if rg.count_replica(partition) < len(rg.active_brokers) ] # Since replicas can only be added to non-full rgs, only consider # replicas on those rgs when determining which rgs are # under-replicated. replica_count = sum( rg.count_replica(partition) for rg in non_full_rgs ) opt_replicas, _ = compute_optimum( len(non_full_rgs), replica_count, ) under_replicated_rgs = [ rg for rg in non_full_rgs if rg.count_replica(partition) < opt_replicas ] or non_full_rgs # Add the replica to every eligible broker, as follower and leader new_states = [] for rg in under_replicated_rgs: for broker in rg.active_brokers: if broker not in partition.replicas: broker_index = self.state.brokers.index(broker) new_state = self.state.add_replica( partition_index, broker_index, ) new_state_leader = new_state.move_leadership( partition_index, broker_index, ) new_states.extend([new_state, new_state_leader]) # Update cluster topology with highest scoring state. self.state = sorted(new_states, key=self._score, reverse=True)[0] self.cluster_topology.update_cluster_topology(self.state.pending_assignment) # Update the internal state to match. self.state.clear_pending_assignment()
def add_replica(self, partition_name, count=1)
Adding a replica is done by trying to add the replica to every broker in the cluster and choosing the resulting state with the highest fitness score. :param partition_name: (topic_id, partition_id) of the partition to add replicas of. :param count: The number of replicas to add.
3.092904
3.091048
1.0006
try: partition = self.cluster_topology.partitions[partition_name] except KeyError: raise InvalidPartitionError( "Partition name {name} not found.".format(name=partition_name), ) if partition.replication_factor - count < 1: raise InvalidReplicationFactorError( "Cannot decrease replication factor from {rf} to {new_rf}." "Replication factor must be at least 1." .format( rf=partition.replication_factor, new_rf=partition.replication_factor - count, ) ) osr = { broker for broker in partition.replicas if broker.id in osr_broker_ids } # Create state from current cluster topology. state = _State(self.cluster_topology) partition_index = state.partitions.index(partition) for _ in range(count): # Find eligible replication groups. non_empty_rgs = [ rg for rg in six.itervalues(self.cluster_topology.rgs) if rg.count_replica(partition) > 0 ] rgs_with_osr = [ rg for rg in non_empty_rgs if any(b in osr for b in rg.brokers) ] candidate_rgs = rgs_with_osr or non_empty_rgs # Since replicas will only be removed from the candidate rgs, only # count replicas on those rgs when determining which rgs are # over-replicated. replica_count = sum( rg.count_replica(partition) for rg in candidate_rgs ) opt_replicas, _ = compute_optimum( len(candidate_rgs), replica_count, ) over_replicated_rgs = [ rg for rg in candidate_rgs if rg.count_replica(partition) > opt_replicas ] or candidate_rgs candidate_rgs = over_replicated_rgs or candidate_rgs # Remove the replica from every eligible broker. new_states = [] for rg in candidate_rgs: osr_brokers = { broker for broker in rg.brokers if broker in osr } candidate_brokers = osr_brokers or rg.brokers for broker in candidate_brokers: if broker in partition.replicas: broker_index = state.brokers.index(broker) new_states.append( state.remove_replica(partition_index, broker_index) ) # Update cluster topology with highest scoring state. state = sorted(new_states, key=self._score, reverse=True)[0] self.cluster_topology.update_cluster_topology(state.assignment) osr = {b for b in osr if b in partition.replicas}
def remove_replica(self, partition_name, osr_broker_ids, count=1)
Removing a replica is done by trying to remove a replica from every broker and choosing the resulting state with the highest fitness score. Out-of-sync replicas will always be removed before in-sync replicas. :param partition_name: (topic_id, partition_id) of the partition to remove replicas of. :param osr_broker_ids: A list of the partition's out-of-sync broker ids. :param count: The number of replicas to remove.
2.754645
2.734431
1.007392
new_pop = set(pop)
exploration_per_state = self.args.max_exploration // len(pop)

mutations = []
if self.args.brokers:
    mutations.append(self._move_partition)
if self.args.leaders:
    mutations.append(self._move_leadership)

for state in pop:
    for _ in range(exploration_per_state):
        new_state = random.choice(mutations)(state)
        if new_state:
            new_pop.add(new_state)

return new_pop
def _explore(self, pop)
Exploration phase: Find a set of candidate states based on the current population. :param pop: The starting population for this generation.
3.796149
3.767831
1.007516
partition = random.randint(0, len(self.cluster_topology.partitions) - 1)

# Choose distinct source and destination brokers.
source = random.choice(state.replicas[partition])
dest = random.randint(0, len(self.cluster_topology.brokers) - 1)
if dest in state.replicas[partition]:
    return None
source_rg = state.broker_rg[source]
dest_rg = state.broker_rg[dest]

# Ensure replicas remain balanced across replication groups.
if source_rg != dest_rg:
    source_rg_replicas = state.rg_replicas[source_rg][partition]
    dest_rg_replicas = state.rg_replicas[dest_rg][partition]
    if source_rg_replicas <= dest_rg_replicas:
        return None

# Ensure movement size capacity is not surpassed
partition_size = state.partition_sizes[partition]
if (self.args.max_movement_size is not None and
        state.movement_size + partition_size > self.args.max_movement_size):
    return None

return state.move(partition, source, dest)
def _move_partition(self, state)
Attempt to move a random partition to a random broker. If the chosen movement is not possible, None is returned. :param state: The starting state. :return: The resulting State object if a movement is found. None if no movement is found.
3.152644
3.074427
1.025441
partition = random.randint(0, len(self.cluster_topology.partitions) - 1)

# Moving zero weight partitions will not improve balance for any of the
# balance criteria. Disallow these movements here to avoid wasted
# effort.
if state.partition_weights[partition] == 0:
    return None
if len(state.replicas[partition]) <= 1:
    return None
dest_index = random.randint(1, len(state.replicas[partition]) - 1)
dest = state.replicas[partition][dest_index]
if (self.args.max_leader_changes is not None and
        state.leader_movement_count >= self.args.max_leader_changes):
    return None

return state.move_leadership(partition, dest)
def _move_leadership(self, state)
Attempt to move the leadership of a random partition to one of its other replicas. If the chosen movement is not possible, None is returned. :param state: The starting state. :return: The resulting State object if a leader change is found. None if no change is found.
4.547363
4.381415
1.037876
return set(
    sorted(pop_candidates, key=self._score, reverse=True)
    [:self.args.max_pop]
)
def _prune(self, pop_candidates)
Choose a subset of the candidate states to continue on to the next generation. :param pop_candidates: The set of candidate states.
6.343013
8.220838
0.771577
score = 0
max_score = 0

if state.total_weight:
    # Coefficient of variation is a value between 0 and sqrt(n)
    # where n is the length of the series (the number of brokers)
    # so those parameters are scaled by (1 / sqrt(# of brokers)) to
    # get a value between 0 and 1.
    #
    # Since smaller imbalance values are preferred use 1 - x so that
    # higher scores correspond to more balanced states.
    score += self.args.partition_weight_cv_score_weight * \
        (1 - state.broker_weight_cv / sqrt(len(state.brokers)))
    score += self.args.leader_weight_cv_score_weight * \
        (1 - state.broker_leader_weight_cv / sqrt(len(state.brokers)))
    score += self.args.topic_broker_imbalance_score_weight * \
        (1 - state.weighted_topic_broker_imbalance)
    score += self.args.broker_partition_count_score_weight * \
        (1 - state.broker_partition_count_cv / sqrt(len(state.brokers)))
    score += self.args.broker_leader_count_score_weight * \
        (1 - state.broker_leader_count_cv / sqrt(len(state.brokers)))

    max_score += self.args.partition_weight_cv_score_weight
    max_score += self.args.leader_weight_cv_score_weight
    max_score += self.args.topic_broker_imbalance_score_weight
    max_score += self.args.broker_partition_count_score_weight
    max_score += self.args.broker_leader_count_score_weight

if self.args.max_movement_size is not None and score_movement:
    # Avoid potential divide-by-zero error
    max_movement = max(self.args.max_movement_size, 1)
    score += self.args.movement_size_score_weight * \
        (1 - state.movement_size / max_movement)
    max_score += self.args.movement_size_score_weight

if self.args.max_leader_changes is not None and score_movement:
    # Avoid potential divide-by-zero error
    max_leader = max(self.args.max_leader_changes, 1)
    score += self.args.leader_change_score_weight * \
        (1 - state.leader_movement_count / max_leader)
    max_score += self.args.leader_change_score_weight

return score / max_score
def _score(self, state, score_movement=True)
Score a state based on how balanced it is. A higher score represents a more balanced state. :param state: The state to score.
2.481297
2.494949
0.994528
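A simplified numeric sketch of the scoring idea: each criterion contributes weight * (1 - imbalance) and the sum is divided by the total weight, so the result stays in [0, 1]. The weights and imbalance values below are made up for illustration:

weights = {'partition_weight_cv': 1.0, 'leader_weight_cv': 0.5}      # hypothetical score weights
imbalance = {'partition_weight_cv': 0.10, 'leader_weight_cv': 0.30}  # hypothetical normalized CVs

score = sum(w * (1 - imbalance[k]) for k, w in weights.items())
max_score = sum(weights.values())
normalized = score / max_score  # (0.9 + 0.35) / 1.5 = 0.8333...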
new_state = copy(self) # Update the partition replica tuple source_index = self.replicas[partition].index(source) new_state.replicas = tuple_alter( self.replicas, (partition, lambda replicas: tuple_replace( replicas, (source_index, dest), )), ) new_state.pending_partitions = self.pending_partitions + (partition, ) # Update the broker weights partition_weight = self.partition_weights[partition] new_state.broker_weights = tuple_alter( self.broker_weights, (source, lambda broker_weight: broker_weight - partition_weight), (dest, lambda broker_weight: broker_weight + partition_weight), ) # Update the broker partition count new_state.broker_partition_counts = tuple_alter( self.broker_partition_counts, (source, lambda partition_count: partition_count - 1), (dest, lambda partition_count: partition_count + 1), ) # Update the broker leader weights if source_index == 0: new_state.broker_leader_weights = tuple_alter( self.broker_leader_weights, (source, lambda lw: lw - partition_weight), (dest, lambda lw: lw + partition_weight), ) new_state.broker_leader_counts = tuple_alter( self.broker_leader_counts, (source, lambda leader_count: leader_count - 1), (dest, lambda leader_count: leader_count + 1), ) new_state.leader_movement_count += 1 # Update the topic broker counts topic = self.partition_topic[partition] new_state.topic_broker_count = tuple_alter( self.topic_broker_count, (topic, lambda broker_count: tuple_alter( broker_count, (source, lambda count: count - 1), (dest, lambda count: count + 1), )), ) # Update the topic broker imbalance new_state.topic_broker_imbalance = tuple_replace( self.topic_broker_imbalance, (topic, new_state._calculate_topic_imbalance(topic)), ) new_state._weighted_topic_broker_imbalance = ( self._weighted_topic_broker_imbalance + self.topic_weights[topic] * ( new_state.topic_broker_imbalance[topic] - self.topic_broker_imbalance[topic] ) ) # Update the replication group replica counts source_rg = self.broker_rg[source] dest_rg = self.broker_rg[dest] if source_rg != dest_rg: new_state.rg_replicas = tuple_alter( self.rg_replicas, (source_rg, lambda replica_counts: tuple_alter( replica_counts, (partition, lambda replica_count: replica_count - 1), )), (dest_rg, lambda replica_counts: tuple_alter( replica_counts, (partition, lambda replica_count: replica_count + 1), )), ) # Update the movement sizes new_state.movement_size += self.partition_sizes[partition] new_state.movement_count += 1 return new_state
def move(self, partition, source, dest)
Return a new state that is the result of moving a single partition. :param partition: The partition index of the partition to move. :param source: The broker index of the broker to move the partition from. :param dest: The broker index of the broker to move the partition to.
2.056271
2.004094
1.026035
new_state = copy(self)

# Update the partition replica tuple
source = new_state.replicas[partition][0]
new_leader_index = self.replicas[partition].index(new_leader)
new_state.replicas = tuple_alter(
    self.replicas,
    (partition, lambda replicas: tuple_replace(
        replicas,
        (0, replicas[new_leader_index]),
        (new_leader_index, replicas[0]),
    )),
)
new_state.pending_partitions = self.pending_partitions + (partition, )

# Update the leader count
new_state.broker_leader_counts = tuple_alter(
    self.broker_leader_counts,
    (source, lambda leader_count: leader_count - 1),
    (new_leader, lambda leader_count: leader_count + 1),
)

# Update the broker leader weights
partition_weight = self.partition_weights[partition]
new_state.broker_leader_weights = tuple_alter(
    self.broker_leader_weights,
    (source, lambda leader_weight: leader_weight - partition_weight),
    (new_leader, lambda leader_weight: leader_weight + partition_weight),
)

# Update the total leader movement size
new_state.leader_movement_count += 1

return new_state
def move_leadership(self, partition, new_leader)
Return a new state that is the result of changing the leadership of a single partition. :param partition: The partition index of the partition to change the leadership of. :param new_leader: The broker index of the new leader replica.
2.890439
2.902985
0.995678
return {
    partition.name: [
        self.brokers[bid].id for bid in self.replicas[pid]
    ]
    for pid, partition in enumerate(self.partitions)
}
def assignment(self)
Return the partition assignment that this state represents.
7.590373
5.745683
1.321057
return {
    self.partitions[pid].name: [
        self.brokers[bid].id for bid in self.replicas[pid]
    ]
    for pid in set(self.pending_partitions)
}
def pending_assignment(self)
Return the pending partition assignment that this state represents.
7.27937
5.36028
1.358021
parser = argparse.ArgumentParser(
    description='Show available clusters.'
)
parser.add_argument(
    '-v',
    '--version',
    action='version',
    version="%(prog)s {0}".format(__version__),
)
parser.add_argument(
    '--discovery-base-path',
    dest='discovery_base_path',
    type=str,
    help='Path of the directory containing the <cluster_type>.yaml config.'
         ' Default try: '
         '$KAFKA_DISCOVERY_DIR, $HOME/.kafka_discovery, /etc/kafka_discovery',
)
return parser.parse_args()
def parse_args()
Parse the arguments.
3.760906
3.598149
1.045233
partitions_count = len(partitions)
out = {}
out['raw'] = {
    'replica_unavailability_count': partitions_count,
}

if partitions_count == 0:
    out['message'] = 'All replicas available for communication.'
else:
    out['message'] = "{replica_unavailability} replicas unavailable for communication. " \
        "Unavailable Brokers: {unavailable_brokers}".format(
            replica_unavailability=partitions_count,
            unavailable_brokers=', '.join([str(e) for e in unavailable_brokers]),
        )
    if verbose:
        lines = (
            '{}:{}'.format(topic, partition)
            for (topic, partition) in partitions
        )
        out['verbose'] = "Partitions:\n" + "\n".join(lines)

if verbose:
    out['raw']['partitions'] = [
        {'topic': topic, 'partition': partition}
        for (topic, partition) in partitions
    ]

return out
def _prepare_output(partitions, unavailable_brokers, verbose)
Returns dict with 'raw' and 'message' keys filled.
3.115446
2.894923
1.076176
fetch_unavailable_brokers = True
result = get_topic_partition_with_error(
    self.cluster_config,
    REPLICA_NOT_AVAILABLE_ERROR,
    fetch_unavailable_brokers=fetch_unavailable_brokers,
)
if fetch_unavailable_brokers:
    replica_unavailability, unavailable_brokers = result
else:
    replica_unavailability = result
errcode = status_code.OK if not replica_unavailability else status_code.CRITICAL
out = _prepare_output(replica_unavailability, unavailable_brokers, self.args.verbose)
return errcode, out
def run_command(self)
Replica-unavailability command: checks the number of replicas not available for communication across all brokers in the Kafka cluster.
5.079827
4.109071
1.236247
ISR_CONF_NAME = 'min.insync.replicas'
try:
    config = zk.get_topic_config(topic)
except NoNodeError:
    return None

if ISR_CONF_NAME in config['config']:
    return int(config['config'][ISR_CONF_NAME])
else:
    return None
def get_min_isr(zk, topic)
Return the min-isr for topic, or None if not specified
3.077882
2.674392
1.150871
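A hedged usage sketch, assuming an already-connected ZooKeeper helper object (as used by the check commands) and a hypothetical fallback value:

min_isr = get_min_isr(zk, 'example-topic')  # None if the topic sets no min.insync.replicas
effective_min_isr = min_isr if min_isr is not None else 1  # hypothetical cluster-wide default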
not_in_sync_partitions = []
for topic_name, partitions in topics.items():
    min_isr = get_min_isr(zk, topic_name) or default_min_isr
    if min_isr is None:
        continue

    for metadata in partitions.values():
        cur_isr = len(metadata.isr)
        if cur_isr < min_isr:
            not_in_sync_partitions.append({
                'isr': cur_isr,
                'min_isr': min_isr,
                'topic': metadata.topic,
                'partition': metadata.partition,
            })

return not_in_sync_partitions
def _process_metadata_response(topics, zk, default_min_isr)
Returns the partitions that are not in sync (current ISR size lower than min ISR).
2.391678
2.02904
1.178724
out = {}
partitions_count = len(partitions)
out['raw'] = {
    'not_enough_replicas_count': partitions_count,
}

if partitions_count == 0:
    out['message'] = 'All replicas in sync.'
else:
    out['message'] = (
        "{0} partition(s) have the number of replicas in "
        "sync that is lower than the specified min ISR."
    ).format(partitions_count)
    if verbose:
        lines = (
            "isr={isr} is lower than min_isr={min_isr} for {topic}:{partition}"
            .format(
                isr=p['isr'],
                min_isr=p['min_isr'],
                topic=p['topic'],
                partition=p['partition'],
            )
            for p in partitions
        )
        out['verbose'] = "Partitions:\n" + "\n".join(lines)

if verbose:
    out['raw']['partitions'] = partitions

return out
def _prepare_output(partitions, verbose)
Returns dict with 'raw' and 'message' keys filled.
3.775996
3.537349
1.067465
if partition in self._partitions:
    # Remove partition from set
    self._partitions.remove(partition)
    # Remove broker from replica list of partition
    partition.replicas.remove(self)
else:
    raise ValueError(
        'Partition: {topic_id}:{partition_id} not found in broker '
        '{broker_id}'.format(
            topic_id=partition.topic.id,
            partition_id=partition.partition_id,
            broker_id=self._id,
        )
    )
def remove_partition(self, partition)
Remove partition from partition list.
3.165676
3.032835
1.043801
assert(partition not in self._partitions)
# Add partition to existing set
self._partitions.add(partition)
# Add broker to replica list
partition.add_replica(self)
def add_partition(self, partition)
Add partition to partition list.
6.197332
5.574266
1.111775
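A small sketch of the invariant the two methods maintain between a broker and a partition's replica list; broker and partition are assumed to be existing Broker and Partition objects from this cluster model:

broker.add_partition(partition)
assert partition in broker.partitions and broker in partition.replicas

broker.remove_partition(partition)
assert partition not in broker.partitions and broker not in partition.replicas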