code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
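The schema above suggests each row pairs a function's source, signature, and docstring with a language-model loss measured with and without the docstring; numerically, factor matches loss_without_docstring / loss_with_docstring (e.g. 7.074056 / 5.901814 ≈ 1.198624 in the first row). A minimal pandas sketch of how that column could be recomputed, assuming the rows are available as a DataFrame; the file name below is hypothetical:

import pandas as pd

# Hypothetical file name; the dump does not say where these rows are stored.
df = pd.read_json("code_docstring_losses.jsonl", lines=True)

# Recompute the ratio and sanity-check it against the stored factor column.
recomputed = df["loss_without_docstring"] / df["loss_with_docstring"]
assert (recomputed - df["factor"]).abs().max() < 1e-4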
edgepaths = super(Graph, self).edgepaths
edgepaths.crs = self.crs
return edgepaths
def edgepaths(self)
Returns the fixed EdgePaths or computes direct connections between supplied nodes.
7.074056
5.901814
1.198624
edgepaths = super(TriMesh, self).edgepaths
edgepaths.crs = self.crs
return edgepaths
def edgepaths(self)
Returns the fixed EdgePaths or computes direct connections between supplied nodes.
7.514316
6.962255
1.079293
reader = Reader(shapefile)
return cls.from_records(reader.records(), *args, **kwargs)
def from_shapefile(cls, shapefile, *args, **kwargs)
Loads a shapefile from disk and optionally merges it with a dataset. See ``from_records`` for full signature. Parameters ---------- records: list of cartopy.io.shapereader.Record Iterator containing Records. dataset: holoviews.Dataset Any HoloViews Dataset type. on: str or list or dict A mapping between the attribute names in the records and the dimensions in the dataset. value: str The value dimension in the dataset the values will be drawn from. index: str or list One or more dimensions in the dataset the Shapes will be indexed by. drop_missing: boolean Whether to drop shapes which are missing from the provided dataset. Returns ------- shapes: Polygons or Path object A Polygons or Path object containing the geometries
3.894839
10.101747
0.385561
if dataset is not None and not on: raise ValueError('To merge dataset with shapes mapping ' 'must define attribute(s) to merge on.') if util.pd and isinstance(dataset, util.pd.DataFrame): dataset = Dataset(dataset) if not isinstance(on, (dict, list)): on = [on] if on and not isinstance(on, dict): on = {o: o for o in on} if not isinstance(index, list): index = [index] kdims = [] for ind in index: if dataset and dataset.get_dimension(ind): dim = dataset.get_dimension(ind) else: dim = Dimension(ind) kdims.append(dim) ddims = [] if dataset: if value: vdims = [dataset.get_dimension(value)] else: vdims = dataset.vdims ddims = dataset.dimensions() if None in vdims: raise ValueError('Value dimension %s not found ' 'in dataset dimensions %s' % (value, ddims) ) else: vdims = [] data = [] for i, rec in enumerate(records): geom = {} if dataset: selection = {dim: rec.attributes.get(attr, None) for attr, dim in on.items()} row = dataset.select(**selection) if len(row): values = {k: v[0] for k, v in row.iloc[0].columns().items()} elif drop_missing: continue else: values = {vd.name: np.nan for vd in vdims} geom.update(values) if index: for kdim in kdims: if kdim in ddims and len(row): k = row[kdim.name][0] elif kdim.name in rec.attributes: k = rec.attributes[kdim.name] else: k = None geom[kdim.name] = k geom['geometry'] = rec.geometry data.append(geom) if element is not None: pass elif data and data[0]: if isinstance(data[0]['geometry'], poly_types): element = Polygons else: element = Path else: element = Polygons return element(data, vdims=kdims+vdims, **kwargs).opts(color=value)
def from_records(cls, records, dataset=None, on=None, value=None, index=[], drop_missing=False, element=None, **kwargs)
Load data from a collection of `cartopy.io.shapereader.Record` objects and optionally merge it with a dataset to assign values to each polygon and form a choropleth. Supplying just records will return an NdOverlay of Shape Elements with a numeric index. If a dataset is supplied, a mapping between the attribute names in the records and the dimension names in the dataset must be supplied. The values assigned to each shape can then be drawn from the dataset by supplying a ``value``, and the keys the Shapes are indexed by can be set by specifying one or more index dimensions. Parameters ---------- records: list of cartopy.io.shapereader.Record Iterator containing Records. dataset: holoviews.Dataset Any HoloViews Dataset type. on: str or list or dict A mapping between the attribute names in the records and the dimensions in the dataset. value: str The value dimension in the dataset the values will be drawn from. index: str or list One or more dimensions in the dataset the Shapes will be indexed by. drop_missing: boolean Whether to drop shapes which are missing from the provided dataset. Returns ------- shapes: Polygons or Path object A Polygons or Path object containing the geometries
3.119084
2.898668
1.07604
plot = plot or cb.plot
if isinstance(plot, GeoOverlayPlot):
    plots = [get_cb_plot(cb, p) for p in plot.subplots.values()]
    plots = [p for p in plots if any(s in cb.streams and getattr(s, '_triggering', False) for s in p.streams)]
    if plots:
        plot = plots[0]
return plot
def get_cb_plot(cb, plot=None)
Finds the subplot with the corresponding stream.
4.201634
3.676056
1.142973
if not all(a in msg for a in attributes):
    return True
plot = get_cb_plot(cb)
return (not getattr(plot, 'geographic', False) or not hasattr(plot.current_frame, 'crs'))
def skip(cb, msg, attributes)
Skips applying transforms if data is not geographic.
6.916313
5.864422
1.179368
if skip(cb, msg, attributes):
    return msg
plot = get_cb_plot(cb)
x0, x1 = msg.get('x_range', (0, 1000))
y0, y1 = msg.get('y_range', (0, 1000))
extents = x0, y0, x1, y1
x0, y0, x1, y1 = project_extents(extents, plot.projection, plot.current_frame.crs)
coords = {'x_range': (x0, x1), 'y_range': (y0, y1)}
return {k: v for k, v in coords.items() if k in attributes}
def project_ranges(cb, msg, attributes)
Projects ranges supplied by a callback.
2.981854
3.033801
0.982877
if skip(cb, msg, attributes):
    return msg
plot = get_cb_plot(cb)
x, y = msg.get('x', 0), msg.get('y', 0)
crs = plot.current_frame.crs
coordinates = crs.transform_points(plot.projection, np.array([x]), np.array([y]))
msg['x'], msg['y'] = coordinates[0, :2]
return {k: v for k, v in msg.items() if k in attributes}
def project_point(cb, msg, attributes=('x', 'y'))
Projects a single point supplied by a callback
3.237332
3.303154
0.980073
stream = cb.streams[0]
old_data = stream.data
stream.update(data=msg['data'])
element = stream.element
stream.update(data=old_data)
proj = cb.plot.projection
if not isinstance(element, _Element) or element.crs == proj:
    return None
crs = element.crs
element.crs = proj
return project(element, projection=crs)
def project_drawn(cb, msg)
Projects a drawn element to the declared coordinate system
6.116119
5.991758
1.020755
deleted = []
for f in cls._files:
    try:
        os.remove(f)
        deleted.append(f)
    except FileNotFoundError:
        pass
print('Deleted %d weight files' % len(deleted))
cls._files = []
def clean_weight_files(cls)
Cleans existing weight files.
2.940858
2.810874
1.046243
result = None
if hasattr(el, 'crs'):
    result = (int(el._auxiliary_component), el.crs)
return result
def _get_projection(el)
Get coordinate reference system from non-auxiliary elements. Return value is a tuple of a precedence integer and the projection, to allow non-auxiliary components to take precedence.
10.23632
5.38085
1.902361
proj = self.projection
if self.global_extent and range_type in ('combined', 'data'):
    (x0, x1), (y0, y1) = proj.x_limits, proj.y_limits
    return (x0, y0, x1, y1)
extents = super(ProjectionPlot, self).get_extents(element, ranges, range_type)
if not getattr(element, 'crs', None) or not self.geographic:
    return extents
elif any(e is None or not np.isfinite(e) for e in extents):
    extents = None
else:
    extents = project_extents(extents, element.crs, proj)
return (np.NaN,)*4 if not extents else extents
def get_extents(self, element, ranges, range_type='combined')
Subclasses the get_extents method using the GeoAxes set_extent method to project the extents to the Element's coordinate reference system.
3.044012
2.922024
1.041748
lons = lons.astype(np.float64)
return ((lons - base + period * 2) % period) + base
def wrap_lons(lons, base, period)
Wrap longitude values into the range between base and base+period.
4.113277
3.933217
1.045779
x, y = coord_names
geom = geom_dict['geometry']
new_dict = {k: v for k, v in geom_dict.items() if k != 'geometry'}
array = geom_to_array(geom)
new_dict[x] = array[:, 0]
new_dict[y] = array[:, 1]
if geom.geom_type == 'Polygon':
    holes = []
    for interior in geom.interiors:
        holes.append(geom_to_array(interior))
    if holes:
        new_dict['holes'] = [holes]
elif geom.geom_type == 'MultiPolygon':
    outer_holes = []
    for g in geom:
        holes = []
        for interior in g.interiors:
            holes.append(geom_to_array(interior))
        outer_holes.append(holes)
    if any(hs for hs in outer_holes):
        new_dict['holes'] = outer_holes
return new_dict
def geom_dict_to_array_dict(geom_dict, coord_names=['Longitude', 'Latitude'])
Converts a dictionary containing a geometry key to a dictionary of x- and y-coordinate arrays and, if present, a list-of-lists of hole arrays.
1.925053
1.805533
1.066196
interface = polygons.interface.datatype if interface == 'geodataframe': return [row.to_dict() for _, row in polygons.data.iterrows()] elif interface == 'geom_dictionary': return polygons.data polys = [] xdim, ydim = polygons.kdims has_holes = polygons.has_holes holes = polygons.holes() if has_holes else None for i, polygon in enumerate(polygons.split(datatype='columns')): array = np.column_stack([polygon.pop(xdim.name), polygon.pop(ydim.name)]) splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0] arrays = np.split(array, splits+1) if len(splits) else [array] invalid = False subpolys = [] subholes = None if has_holes: subholes = [[LinearRing(h) for h in hs] for hs in holes[i]] for j, arr in enumerate(arrays): if j != (len(arrays)-1): arr = arr[:-1] # Drop nan if len(arr) == 0: continue elif len(arr) == 1: if skip_invalid: continue poly = Point(arr[0]) invalid = True elif len(arr) == 2: if skip_invalid: continue poly = LineString(arr) invalid = True elif not len(splits): poly = Polygon(arr, (subholes[j] if has_holes else [])) else: poly = Polygon(arr) hs = [h for h in subholes[j]] if has_holes else [] poly = Polygon(poly.exterior, holes=hs) subpolys.append(poly) if invalid: polys += [dict(polygon, geometry=sp) for sp in subpolys] continue elif len(subpolys) == 1: geom = subpolys[0] elif subpolys: geom = MultiPolygon(subpolys) else: continue polygon['geometry'] = geom polys.append(polygon) return polys
def polygons_to_geom_dicts(polygons, skip_invalid=True)
Converts a Polygons element into a list of geometry dictionaries, preserving all value dimensions. For array conversion the following conventions are applied: * Any nan-separated array is converted into a MultiPolygon * Any array without nans is converted to a Polygon * If there are holes associated with a nan-separated array the holes are assigned to the polygons by testing for an intersection * If any single array does not have at least three coordinates it is skipped by default * If skip_invalid=False and an array has fewer than three coordinates it will be converted to a LineString
2.965796
2.970835
0.998304
interface = path.interface.datatype if interface == 'geodataframe': return [row.to_dict() for _, row in path.data.iterrows()] elif interface == 'geom_dictionary': return path.data geoms = [] invalid = False xdim, ydim = path.kdims for i, path in enumerate(path.split(datatype='columns')): array = np.column_stack([path.pop(xdim.name), path.pop(ydim.name)]) splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0] arrays = np.split(array, splits+1) if len(splits) else [array] subpaths = [] for j, arr in enumerate(arrays): if j != (len(arrays)-1): arr = arr[:-1] # Drop nan if len(arr) == 0: continue elif len(arr) == 1: if skip_invalid: continue g = Point(arr[0]) invalid = True else: g = LineString(arr) subpaths.append(g) if invalid: geoms += [dict(path, geometry=sp) for sp in subpaths] continue elif len(subpaths) == 1: geom = subpaths[0] elif subpaths: geom = MultiLineString(subpaths) path['geometry'] = geom geoms.append(path) return geoms
def path_to_geom_dicts(path, skip_invalid=True)
Converts a Path element into a list of geometry dictionaries, preserving all value dimensions.
3.302156
3.268219
1.010384
if isinstance(geom, sgeom.Polygon) and not geom.exterior.is_ccw:
    geom = sgeom.polygon.orient(geom)
return geom
def to_ccw(geom)
Reorients polygon to be wound counter-clockwise.
3.614331
3.499156
1.032915
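to_ccw delegates to shapely's orient helper; a short usage sketch copying the record above, assuming shapely is installed:

import shapely.geometry as sgeom

def to_ccw(geom):
    # Reorient a polygon to be wound counter-clockwise.
    if isinstance(geom, sgeom.Polygon) and not geom.exterior.is_ccw:
        geom = sgeom.polygon.orient(geom)
    return geom

# A clockwise unit square: its exterior ring only becomes CCW after reorienting.
square = sgeom.Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
print(square.exterior.is_ccw)          # False
print(to_ccw(square).exterior.is_ccw)  # True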
if geom.geom_type == 'Point':
    return 1
if hasattr(geom, 'exterior'):
    geom = geom.exterior
if not geom.geom_type.startswith('Multi') and hasattr(geom, 'array_interface_base'):
    return len(geom.array_interface_base['data'])//2
else:
    length = 0
    for g in geom:
        length += geom_length(g)
    return length
def geom_length(geom)
Calculates the length of coordinates in a shapely geometry.
3.086004
3.054782
1.010221
if len(element.vdims) > 1:
    xs, ys = (element.dimension_values(i, False, False) for i in range(2))
    zs = np.dstack([element.dimension_values(i, False, False) for i in range(2, 2+len(element.vdims))])
else:
    xs, ys, zs = (element.dimension_values(i, False, False) for i in range(3))
lon0, lon1 = element.range(0)
if isinstance(element.crs, ccrs._CylindricalProjection) and (lon1 - lon0) == 360:
    xs = np.append(xs, xs[0:1] + 360, axis=0)
    zs = np.ma.concatenate([zs, zs[:, 0:1]], axis=1)
return xs, ys, zs
def geo_mesh(element)
Get mesh data from a 2D Element ensuring that if the data is on a cylindrical coordinate system and wraps globally that data actually wraps around.
2.664939
2.613713
1.019599
import pyproj
if isinstance(crs, pyproj.Proj):
    out = crs
elif isinstance(crs, dict) or isinstance(crs, basestring):
    try:
        out = pyproj.Proj(crs)
    except RuntimeError:
        try:
            out = pyproj.Proj(init=crs)
        except RuntimeError:
            out = None
else:
    out = None
return out
def check_crs(crs)
Checks if the crs represents a valid grid, projection or EPSG string. (Code copied from https://github.com/fmaussion/salem) Examples -------- >>> p = check_crs('+units=m +init=epsg:26915') >>> p.srs '+units=m +init=epsg:26915 ' >>> p = check_crs('wrong') >>> p is None True Returns ------- A valid crs if possible, otherwise None
2.308523
2.54701
0.906366
import cartopy.crs as ccrs try: from osgeo import osr has_gdal = True except ImportError: has_gdal = False proj = check_crs(proj) if proj.is_latlong(): return ccrs.PlateCarree() srs = proj.srs if has_gdal: # this is more robust, as srs could be anything (espg, etc.) s1 = osr.SpatialReference() s1.ImportFromProj4(proj.srs) srs = s1.ExportToProj4() km_proj = {'lon_0': 'central_longitude', 'lat_0': 'central_latitude', 'x_0': 'false_easting', 'y_0': 'false_northing', 'k': 'scale_factor', 'zone': 'zone', } km_globe = {'a': 'semimajor_axis', 'b': 'semiminor_axis', } km_std = {'lat_1': 'lat_1', 'lat_2': 'lat_2', } kw_proj = dict() kw_globe = dict() kw_std = dict() for s in srs.split('+'): s = s.split('=') if len(s) != 2: continue k = s[0].strip() v = s[1].strip() try: v = float(v) except: pass if k == 'proj': if v == 'tmerc': cl = ccrs.TransverseMercator if v == 'lcc': cl = ccrs.LambertConformal if v == 'merc': cl = ccrs.Mercator if v == 'utm': cl = ccrs.UTM if k in km_proj: kw_proj[km_proj[k]] = v if k in km_globe: kw_globe[km_globe[k]] = v if k in km_std: kw_std[km_std[k]] = v globe = None if kw_globe: globe = ccrs.Globe(**kw_globe) if kw_std: kw_proj['standard_parallels'] = (kw_std['lat_1'], kw_std['lat_2']) # mercatoooor if cl.__name__ == 'Mercator': kw_proj.pop('false_easting', None) kw_proj.pop('false_northing', None) return cl(globe=globe, **kw_proj)
def proj_to_cartopy(proj)
Converts a pyproj.Proj to a cartopy.crs.Projection (Code copied from https://github.com/fmaussion/salem) Parameters ---------- proj: pyproj.Proj the projection to convert Returns ------- a cartopy.crs.Projection object
1.998262
1.989196
1.004558
try: import cartopy.crs as ccrs import geoviews as gv # noqa import pyproj except: raise ImportError('Geographic projection support requires GeoViews and cartopy.') if crs is None: return ccrs.PlateCarree() if isinstance(crs, basestring) and crs.lower().startswith('epsg'): try: crs = ccrs.epsg(crs[5:].lstrip().rstrip()) except: raise ValueError("Could not parse EPSG code as CRS, must be of the format 'EPSG: {code}.'") elif isinstance(crs, int): crs = ccrs.epsg(crs) elif isinstance(crs, (basestring, pyproj.Proj)): try: crs = proj_to_cartopy(crs) except: raise ValueError("Could not parse EPSG code as CRS, must be of the format 'proj4: {proj4 string}.'") elif not isinstance(crs, ccrs.CRS): raise ValueError("Projection must be defined as a EPSG code, proj4 string, cartopy CRS or pyproj.Proj.") return crs
def process_crs(crs)
Parses cartopy CRS definitions defined in one of a few formats: 1. EPSG codes: Defined as string of the form "EPSG: {code}" or an integer 2. proj.4 string: Defined as string of the form "{proj.4 string}" 3. cartopy.crs.CRS instance 4. None defaults to crs.PlateCarree
2.856904
2.610278
1.094483
try:
    import xarray as xr
except:
    raise ImportError('Loading tiffs requires xarray to be installed')
with warnings.catch_warnings():
    warnings.filterwarnings('ignore')
    da = xr.open_rasterio(filename)
return from_xarray(da, crs, apply_transform, nan_nodata, **kwargs)
def load_tiff(filename, crs=None, apply_transform=False, nan_nodata=False, **kwargs)
Returns an RGB or Image element loaded from a geotiff file. The data is loaded using xarray and rasterio. If a crs attribute is present on the loaded data it will attempt to decode it into a cartopy projection otherwise it will default to a non-geographic HoloViews element. Parameters ---------- filename: string Filename pointing to geotiff file to load crs: Cartopy CRS or EPSG string (optional) Overrides CRS inferred from the data apply_transform: boolean Whether to apply affine transform if defined on the data nan_nodata: boolean If data contains nodata values convert them to NaNs **kwargs: Keyword arguments passed to the HoloViews/GeoViews element Returns ------- element: Image/RGB/QuadMesh element
2.629839
2.938559
0.894942
if crs: kwargs['crs'] = crs elif hasattr(da, 'crs'): try: kwargs['crs'] = process_crs(da.crs) except: param.main.warning('Could not decode projection from crs string %r, ' 'defaulting to non-geographic element.' % da.crs) coords = list(da.coords) if coords not in (['band', 'y', 'x'], ['y', 'x']): from .element.geo import Dataset, HvDataset el = Dataset if 'crs' in kwargs else HvDataset return el(da, **kwargs) if len(coords) == 2: y, x = coords bands = 1 else: y, x = coords[1:] bands = len(da.coords[coords[0]]) if apply_transform: from affine import Affine transform = Affine.from_gdal(*da.attrs['transform'][:6]) nx, ny = da.sizes[x], da.sizes[y] xs, ys = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform data = (xs, ys) else: xres, yres = da.attrs['res'] if 'res' in da.attrs else (1, 1) xs = da.coords[x][::-1] if xres < 0 else da.coords[x] ys = da.coords[y][::-1] if yres < 0 else da.coords[y] data = (xs, ys) for b in range(bands): values = da[b].values if nan_nodata and da.attrs.get('nodatavals', []): values = values.astype(float) for d in da.attrs['nodatavals']: values[values==d] = np.NaN data += (values,) if 'datatype' not in kwargs: kwargs['datatype'] = ['xarray', 'grid', 'image'] if xs.ndim > 1: from .element.geo import QuadMesh, HvQuadMesh el = QuadMesh if 'crs' in kwargs else HvQuadMesh el = el(data, [x, y], **kwargs) elif bands < 3: from .element.geo import Image, HvImage el = Image if 'crs' in kwargs else HvImage el = el(data, [x, y], **kwargs) else: from .element.geo import RGB, HvRGB el = RGB if 'crs' in kwargs else HvRGB vdims = el.vdims[:bands] el = el(data, [x, y], vdims, **kwargs) if hasattr(el.data, 'attrs'): el.data.attrs = da.attrs return el
def from_xarray(da, crs=None, apply_transform=False, nan_nodata=False, **kwargs)
Returns an RGB or Image element given an xarray DataArray loaded using xr.open_rasterio. If a crs attribute is present on the loaded data it will attempt to decode it into a cartopy projection otherwise it will default to a non-geographic HoloViews element. Parameters ---------- da: xarray.DataArray DataArray to convert to element crs: Cartopy CRS or EPSG string (optional) Overrides CRS inferred from the data apply_transform: boolean Whether to apply affine transform if defined on the data nan_nodata: boolean If data contains nodata values convert them to NaNs **kwargs: Keyword arguments passed to the HoloViews/GeoViews element Returns ------- element: Image/RGB/QuadMesh element
2.630765
2.527446
1.040879
if not isinstance(self.handles.get('artist'), GoogleTiles):
    self.handles['artist'].remove()
def teardown_handles(self)
If no custom update_handles method is supplied this method is called to tear down any previous handles before replacing them.
16.722113
17.011747
0.982974
for i, g in enumerate(geoms):
    if g is geom:
        return i
def find_geom(geom, geoms)
Returns the index of a geometry in a list of geometries, avoiding the expensive equality checks of the `in` operator.
3.656793
3.433084
1.065163
area_fraction = min(bounds.area/domain.area, 1)
return int(min(round(np.log2(1/area_fraction)), levels))
def compute_zoom_level(bounds, domain, levels)
Computes a zoom level given a bounds polygon, a polygon of the overall domain and the number of zoom levels to divide the data into. Parameters ---------- bounds: shapely.geometry.Polygon Polygon representing the area of the current viewport domain: shapely.geometry.Polygon Polygon representing the overall bounding region of the data levels: int Number of zoom levels to divide the domain into Returns ------- zoom_level: int Integer zoom level
5.280281
6.114902
0.86351
x0, y0, x1, y1 = bounds
return Polygon([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])
def bounds_to_poly(bounds)
Constructs a shapely Polygon from the provided bounds tuple. Parameters ---------- bounds: tuple Tuple representing the (left, bottom, right, top) coordinates Returns ------- polygon: shapely.geometry.Polygon Shapely Polygon geometry of the bounds
1.788991
2.874707
0.622321
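bounds_to_poly and compute_zoom_level (both copied from the records above) together map a viewport onto a tile zoom level; a small sketch assuming shapely and numpy are available:

import numpy as np
from shapely.geometry import Polygon

def bounds_to_poly(bounds):
    x0, y0, x1, y1 = bounds
    return Polygon([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])

def compute_zoom_level(bounds, domain, levels):
    area_fraction = min(bounds.area / domain.area, 1)
    return int(min(round(np.log2(1 / area_fraction)), levels))

# A viewport covering 1/16th of the domain maps to zoom level 4 (of 10).
domain = bounds_to_poly((0, 0, 16, 16))
viewport = bounds_to_poly((0, 0, 4, 4))
print(compute_zoom_level(viewport, domain, levels=10))  # 4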
tile_source = mapping['tile_source']
level = properties.pop('level', 'underlay')
renderer = plot.add_tile(tile_source, level=level)
renderer.alpha = properties.get('alpha', 1)

# Remove save tool
plot.tools = [t for t in plot.tools if not isinstance(t, SaveTool)]
return renderer, tile_source
def _init_glyph(self, plot, mapping, properties)
Returns a Bokeh glyph object.
4.419827
4.147783
1.065588
try:
    plan = json.loads(open(self.args.plan_file_path).read())
    return plan_to_assignment(plan)
except IOError:
    self.log.exception(
        'Given json file {file} not found.'
        .format(file=self.args.plan_file_path),
    )
    raise
except ValueError:
    self.log.exception(
        'Given json file {file} could not be decoded.'
        .format(file=self.args.plan_file_path),
    )
    raise
except KeyError:
    self.log.exception(
        'Given json file {file} could not be parsed in desired format.'
        .format(file=self.args.plan_file_path),
    )
    raise
def get_assignment(self)
Parse the given json plan in dict format.
2.514736
2.199722
1.143206
# encoders / decoders do not maintain ordering currently # so we need to keep this so we can rebuild order before returning original_ordering = [(p.topic, p.partition) for p in payloads] retries = 0 broker = None while not broker: try: broker = self._get_coordinator_for_group(group) except (GroupCoordinatorNotAvailableError, GroupLoadInProgressError) as e: if retries == CONSUMER_OFFSET_TOPIC_CREATION_RETRIES: raise e time.sleep(CONSUMER_OFFSET_RETRY_INTERVAL_SEC) retries += 1 # Send the list of request payloads and collect the responses and # errors responses = {} def failed_payloads(payloads): for payload in payloads: topic_partition = (str(payload.topic), payload.partition) responses[topic_partition] = FailedPayloadsError(payload) host, port, afi = get_ip_port_afi(broker.host) try: conn = self._get_conn(host, broker.port, afi) except ConnectionError: failed_payloads(payloads) else: request = encoder_fn(payloads=payloads) # decoder_fn=None signal that the server is expected to not # send a response. This probably only applies to # ProduceRequest w/ acks = 0 future = conn.send(request) while not future.is_done: for r, f in conn.recv(): f.success(r) if future.failed(): failed_payloads(payloads) elif not request.expect_response(): failed_payloads(payloads) else: for payload_response in decoder_fn(future.value): topic_partition = (str(payload_response.topic), payload_response.partition) responses[topic_partition] = payload_response # Return responses in the same order as provided return [responses[tp] for tp in original_ordering]
def _send_consumer_aware_request(self, group, payloads, encoder_fn, decoder_fn)
Send a list of requests to the consumer coordinator for the group specified using the supplied encode/decode functions. As the payloads that use consumer-aware requests do not contain the group (e.g. OffsetFetchRequest), all payloads must be for a single group. Arguments: group: the name of the consumer group (str) the payloads are for payloads: list of object-like entities with topic (str) and partition (int) attributes; payloads with duplicate topic+partition are not supported. encode_fn: a method to encode the list of payloads to a request body, must accept client_id, correlation_id, and payloads as keyword arguments decode_fn: a method to decode a response body into response objects. The response objects must be object-like and have topic and partition attributes Returns: List of response objects in the same order as the supplied payloads
4.352613
4.348905
1.000853
with ZK(cluster_config) as zk:
    brokers = sorted(list(zk.get_brokers().items()), key=itemgetter(0))
    return [(id, data['host']) for id, data in brokers]
def get_broker_list(cluster_config)
Returns a list of brokers in the form [(id, host)] :param cluster_config: the configuration of the cluster :type cluster_config: map
4.512273
4.712114
0.95759
filter_by_set = set(filter_by)
return [(id, host) for id, host in brokers if id in filter_by_set]
def filter_broker_list(brokers, filter_by)
Returns a sorted list, a subset of elements from brokers in the form [(id, host)]. Passing an empty list for filter_by will return an empty list. :param brokers: list of brokers to filter, assumes the data is in sorted order :type brokers: list of (id, host) :param filter_by: the list of ids of brokers to keep :type filter_by: list of integers
3.561453
2.98993
1.191149
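A tiny usage sketch of filter_broker_list, copied from the record above:

def filter_broker_list(brokers, filter_by):
    # Keep only the (id, host) pairs whose id appears in filter_by.
    filter_by_set = set(filter_by)
    return [(id, host) for id, host in brokers if id in filter_by_set]

brokers = [(1, "kafka-1"), (2, "kafka-2"), (3, "kafka-3")]
print(filter_broker_list(brokers, [3, 1]))  # [(1, 'kafka-1'), (3, 'kafka-3')]
print(filter_broker_list(brokers, []))      # []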
session = FuturesSession()
for host in hosts:
    url = "http://{host}:{port}/{prefix}/read/{key}".format(
        host=host,
        port=jolokia_port,
        prefix=jolokia_prefix,
        key=UNDER_REPL_KEY,
    )
    yield host, session.get(url)
def generate_requests(hosts, jolokia_port, jolokia_prefix)
Return a generator of requests to fetch the under replicated partition number from the specified hosts. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :returns: generator of requests
3.51214
3.398395
1.03347
under_replicated = 0
missing_brokers = 0
for host, request in generate_requests(hosts, jolokia_port, jolokia_prefix):
    try:
        response = request.result()
        if 400 <= response.status_code <= 599:
            print("Got status code {0}. Exiting.".format(response.status_code))
            sys.exit(1)
        json = response.json()
        under_replicated += json['value']
    except RequestException as e:
        print("Broker {0} is down: {1}."
              "This maybe because it is starting up".format(host, e),
              file=sys.stderr)
        missing_brokers += 1
    except KeyError:
        print("Cannot find the key, Kafka is probably still starting up",
              file=sys.stderr)
        missing_brokers += 1
return under_replicated, missing_brokers
def read_cluster_status(hosts, jolokia_port, jolokia_prefix)
Read and return the number of under replicated partitions and missing brokers from the specified hosts. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :returns: tuple of integers
3.200999
3.067932
1.043374
print("Will restart the following brokers in {0}:".format(cluster_config.name)) for id, host in brokers: print(" {0}: {1}".format(id, host))
def print_brokers(cluster_config, brokers)
Print the list of brokers that will be restarted. :param cluster_config: the cluster configuration :type cluster_config: map :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names
3.995519
3.492829
1.143921
while True: print("Do you want to restart these brokers? ", end="") choice = input().lower() if choice in ['yes', 'y']: return True elif choice in ['no', 'n']: return False else: print("Please respond with 'yes' or 'no'")
def ask_confirmation()
Ask for confirmation to the user. Return true if the user confirmed the execution, false otherwise. :returns: bool
2.833099
3.049378
0.929074
_, stdout, stderr = connection.sudo_command(start_command)
if verbose:
    report_stdout(host, stdout)
    report_stderr(host, stderr)
def start_broker(host, connection, start_command, verbose)
Execute the start command on the broker.
3.987805
4.842483
0.823504
_, stdout, stderr = connection.sudo_command(stop_command)
if verbose:
    report_stdout(host, stdout)
    report_stderr(host, stderr)
def stop_broker(host, connection, stop_command, verbose)
Execute the stop command on the broker.
3.874691
4.6792
0.828067
stable_counter = 0 max_checks = int(math.ceil(unhealthy_time_limit / check_interval)) for i in itertools.count(): partitions, brokers = read_cluster_status( hosts, jolokia_port, jolokia_prefix, ) if partitions or brokers: stable_counter = 0 else: stable_counter += 1 print( "Under replicated partitions: {p_count}, missing brokers: {b_count} ({stable}/{limit})".format( p_count=partitions, b_count=brokers, stable=stable_counter, limit=check_count, )) if stable_counter >= check_count: print("The cluster is stable") return if i >= max_checks: raise WaitTimeoutException() time.sleep(check_interval)
def wait_for_stable_cluster( hosts, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, )
Block the caller until the cluster can be considered stable. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer
2.835737
3.03534
0.93424
all_hosts = [b[1] for b in brokers] for n, host in enumerate(all_hosts[skip:]): with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2, ssh_password=ssh_password) as connection: execute_task(pre_stop_task, host) wait_for_stable_cluster( all_hosts, jolokia_port, jolokia_prefix, check_interval, 1 if n == 0 else check_count, unhealthy_time_limit, ) print("Stopping {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip)) stop_broker(host, connection, stop_command, verbose) execute_task(post_stop_task, host) # we open a new SSH connection in case the hostname has a new IP with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2, ssh_password=ssh_password) as connection: print("Starting {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip)) start_broker(host, connection, start_command, verbose) # Wait before terminating the script wait_for_stable_cluster( all_hosts, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, )
def execute_rolling_restart( brokers, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, skip, verbose, pre_stop_task, post_stop_task, start_command, stop_command, ssh_password=None )
Execute the rolling restart on the specified brokers. It checks the number of under replicated partitions on each broker, using Jolokia. The check is performed at constant intervals, and a broker will be restarted when all the brokers are answering and are reporting zero under replicated partitions. :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer :param skip: the number of brokers to skip :type skip: integer :param verbose: print commend execution information :type verbose: bool :param pre_stop_task: a list of tasks to execute before running stop :type pre_stop_task: list :param post_stop_task: a list of task to execute after running stop :type post_stop_task: list :param start_command: the start command for kafka :type start_command: string :param stop_command: the stop command for kafka :type stop_command: string :param ssh_password: The ssh password to use if needed :type ssh_password: string
2.526661
2.540288
0.994636
if opts.skip < 0 or opts.skip >= brokers_num:
    print("Error: --skip must be >= 0 and < #brokers")
    return True
if opts.check_count < 0:
    print("Error: --check-count must be >= 0")
    return True
if opts.unhealthy_time_limit < 0:
    print("Error: --unhealthy-time-limit must be >= 0")
    return True
if opts.check_count == 0:
    print("Warning: no check will be performed")
if opts.check_interval < 0:
    print("Error: --check-interval must be >= 0")
    return True
return False
def validate_opts(opts, brokers_num)
Basic option validation. Returns True if the options are not valid, False otherwise. :param opts: the command line options :type opts: map :param brokers_num: the number of brokers :type brokers_num: integer :returns: bool
2.179513
2.254951
0.966546
all_ids = set(broker_ids)
valid = True
for subset_id in subset_ids:
    valid = valid and subset_id in all_ids
    if subset_id not in all_ids:
        print("Error: user specified broker id {0} does not exist in cluster.".format(subset_id))
return valid
def validate_broker_ids_subset(broker_ids, subset_ids)
Validate that user specified broker ids to restart exist in the broker ids retrieved from cluster config. :param broker_ids: all broker IDs in a cluster :type broker_ids: list of integers :param subset_ids: broker IDs specified by user :type subset_ids: list of integers :returns: bool
3.057297
2.704661
1.130381
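A short usage sketch of validate_broker_ids_subset, copied from the record above:

def validate_broker_ids_subset(broker_ids, subset_ids):
    # Return True only if every requested id exists in the cluster.
    all_ids = set(broker_ids)
    valid = True
    for subset_id in subset_ids:
        valid = valid and subset_id in all_ids
        if subset_id not in all_ids:
            print("Error: user specified broker id {0} does not exist in cluster.".format(subset_id))
    return valid

print(validate_broker_ids_subset([1, 2, 3], [1, 3]))  # True
print(validate_broker_ids_subset([1, 2, 3], [5]))     # prints the error, then False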
pre_stop_tasks = []
post_stop_tasks = []
task_to_task_args = dict(list(zip(tasks, task_args)))
tasks_classes = [PreStopTask, PostStopTask]
for func, task_args in task_to_task_args.items():
    for task_class in tasks_classes:
        imported_class = dynamic_import(func, task_class)
        if imported_class:
            if task_class is PreStopTask:
                pre_stop_tasks.append(imported_class(task_args))
            elif task_class is PostStopTask:
                post_stop_tasks.append(imported_class(task_args))
            else:
                print("ERROR: Class is not a type of Pre/Post StopTask:" + func)
                sys.exit(1)
return pre_stop_tasks, post_stop_tasks
def get_task_class(tasks, task_args)
Reads in a list of tasks provided by the user, loads the appropriate task, and returns two lists, pre_stop_tasks and post_stop_tasks :param tasks: list of strings locating tasks to load :type tasks: list :param task_args: list of strings to be used as args :type task_args: list
2.465314
2.389665
1.031656
self.cluster_config = cluster_config self.args = args with ZK(self.cluster_config) as self.zk: self.log.debug( 'Starting %s for cluster: %s and zookeeper: %s', self.__class__.__name__, self.cluster_config.name, self.cluster_config.zookeeper, ) brokers = self.zk.get_brokers() assignment = self.zk.get_cluster_assignment() pm = partition_measurer( self.cluster_config, brokers, assignment, args, ) ct = ClusterTopology( assignment, brokers, pm, rg_parser.get_replication_group, ) if len(ct.partitions) == 0: self.log.info("The cluster is empty. No actions to perform.") return # Exit if there is an on-going reassignment if self.is_reassignment_pending(): self.log.error('Previous reassignment pending.') sys.exit(1) self.run_command(ct, cluster_balancer(ct, args))
def run( self, cluster_config, rg_parser, partition_measurer, cluster_balancer, args, )
Initialize cluster_config, args, and zk then call run_command.
3.462014
3.218931
1.075517
if self.should_execute():
    result = self.zk.execute_plan(plan, allow_rf_change=allow_rf_change)
    if not result:
        self.log.error('Plan execution unsuccessful.')
        sys.exit(1)
    else:
        self.log.info(
            'Plan sent to zookeeper for reassignment successfully.',
        )
else:
    self.log.info('Proposed plan won\'t be executed (--apply and confirmation needed).')
def execute_plan(self, plan, allow_rf_change=False)
Save the proposed plan and execute it if requested.
5.889122
5.320135
1.10695
return self.args.apply and (self.args.no_confirm or self.confirm_execution())
def should_execute(self)
Confirm if proposed-plan should be executed.
13.566002
8.516402
1.592926
in_progress_plan = self.zk.get_pending_plan()
if in_progress_plan:
    in_progress_partitions = in_progress_plan['partitions']
    self.log.info(
        'Previous re-assignment in progress for {count} partitions.'
        ' Current partitions in re-assignment queue: {partitions}'
        .format(
            count=len(in_progress_partitions),
            partitions=in_progress_partitions,
        )
    )
    return True
else:
    return False
def is_reassignment_pending(self)
Return True if there are reassignment tasks pending.
3.820988
3.765322
1.014784
new_assignment = cluster_topology.assignment if (not original_assignment or not new_assignment or max_partition_movements < 0 or max_leader_only_changes < 0 or max_movement_size < 0): return {} # The replica set stays the same for leaders only changes leaders_changes = [ (t_p, new_assignment[t_p]) for t_p, replica in six.iteritems(original_assignment) if replica != new_assignment[t_p] and set(replica) == set(new_assignment[t_p]) ] # The replica set is different for partitions changes # Here we create a list of tuple ((topic, partion), # replica movements) partition_change_count = [ ( t_p, len(set(replica) - set(new_assignment[t_p])), ) for t_p, replica in six.iteritems(original_assignment) if set(replica) != set(new_assignment[t_p]) ] self.log.info( "Total number of actions before reduction: %s.", len(partition_change_count) + len(leaders_changes), ) # Extract reduced plan maximizing uniqueness of topics and ensuring we do not # go over the max_movement_size reduced_actions = self._extract_actions_unique_topics( partition_change_count, max_partition_movements, cluster_topology, max_movement_size, ) # Ensure progress is made if force_progress=True if len(reduced_actions) == 0 and force_progress: smallest_size = min([cluster_topology.partitions[t_p[0]].size for t_p in partition_change_count]) self.log.warning( '--max-movement-size={max_movement_size} is too small, using smallest size' ' in set of partitions to move, {smallest_size} instead to force progress'.format( max_movement_size=max_movement_size, smallest_size=smallest_size, ) ) max_movement_size = smallest_size reduced_actions = self._extract_actions_unique_topics( partition_change_count, max_partition_movements, cluster_topology, max_movement_size, ) reduced_partition_changes = [ (t_p, new_assignment[t_p]) for t_p in reduced_actions ] self.log.info( "Number of partition changes: %s." " Number of leader-only changes: %s", len(reduced_partition_changes), min(max_leader_only_changes, len(leaders_changes)), ) # Merge leaders and partition changes and generate the assignment reduced_assignment = { t_p: replicas for t_p, replicas in ( reduced_partition_changes + leaders_changes[:max_leader_only_changes] ) } return reduced_assignment
def get_reduced_assignment( self, original_assignment, cluster_topology, max_partition_movements, max_leader_only_changes, max_movement_size=DEFAULT_MAX_MOVEMENT_SIZE, force_progress=False, )
Reduce the assignment based on the total actions. Actions represent actual partition movements and/or changes in preferred leader. Get the difference of original and proposed assignment and take the subset of this plan for given limit. Argument(s): original_assignment: Current assignment of cluster in zookeeper cluster_topology: Cluster topology containing the new proposed-assignment of cluster max_partition_movements:Maximum number of partition-movements in final set of actions max_leader_only_changes:Maximum number of actions with leader only changes max_movement_size: Maximum size, in bytes, to move in final set of actions force_progress: Whether to force progress if max_movement_size is too small :return: :reduced_assignment: Final reduced assignment
2.929633
2.889758
1.013799
# Group actions by topic topic_actions = defaultdict(list) for t_p, replica_change_cnt in movement_counts: topic_actions[t_p[0]].append((t_p, replica_change_cnt)) # Create reduced assignment minimizing duplication of topics extracted_actions = [] curr_movements = 0 curr_size = 0 action_available = True while curr_movements < max_movements and curr_size <= max_movement_size and action_available: action_available = False for topic, actions in six.iteritems(topic_actions): for action in actions: action_size = cluster_topology.partitions[action[0]].size if curr_movements + action[1] > max_movements or curr_size + action_size > max_movement_size: # Remove action since it won't be possible to use it actions.remove(action) else: # Append (topic, partition) to the list of movements action_available = True extracted_actions.append(action[0]) curr_movements += action[1] curr_size += action_size actions.remove(action) break return extracted_actions
def _extract_actions_unique_topics(self, movement_counts, max_movements, cluster_topology, max_movement_size)
Extract actions, limited to the given maximum, such that the result has the minimum possible number of duplicate topics. Algorithm: 1. Group actions by topic-name: {topic: action-list} 2. Iterate through the dictionary in circular fashion and keep extracting actions until max_partition_movements are reached. :param movement_counts: list of tuple ((topic, partition), movement count) :param max_movements: max number of movements to extract :param cluster_topology: cluster topology containing the new proposed assignment for the cluster :param max_movement_size: maximum size of data to move at a time in extracted actions :return: list of tuple (topic, partitions) to include in the reduced plan
3.216105
2.80882
1.145002
permit = ''
while permit.lower() not in ('yes', 'no'):
    permit = input('Execute Proposed Plan? [yes/no] ')
if permit.lower() == 'yes':
    return True
else:
    return False
def confirm_execution(self)
Confirm with the user whether the proposed plan should be executed.
4.381073
3.063052
1.430296
with open(proposed_plan_file, 'w') as output:
    json.dump(proposed_layout, output)
def write_json_plan(self, proposed_layout, proposed_plan_file)
Dump proposed json plan to given output file for future usage.
2.379903
2.247459
1.05893
# Replica set cannot be changed
assert(new_leader in self._replicas)
curr_leader = self.leader
idx = self._replicas.index(new_leader)
self._replicas[0], self._replicas[idx] = \
    self._replicas[idx], self._replicas[0]
return curr_leader
def swap_leader(self, new_leader)
Change the preferred leader to the given replica. Note: leaders for all the replicas of the current partition need to be changed.
3.627405
3.540813
1.024455
for i, broker in enumerate(self.replicas):
    if broker == source:
        self.replicas[i] = dest
        return
def replace(self, source, dest)
Replace source broker with destination broker in replica set if found.
5.553222
3.157984
1.758471
count = sum(
    int(self.topic == partition.topic)
    for partition in partitions
)
return count
def count_siblings(self, partitions)
Count siblings of partition in given partition-list. :key-term: sibling: partitions with same topic
7.453388
7.331223
1.016664
kafka_client = KafkaToolClient(hosts, timeout=10)
kafka_client.load_metadata_for_topics()
topic_partitions = kafka_client.topic_partitions
resp = kafka_client.send_metadata_request()
for _, topic, partitions in resp.topics:
    for partition_error, partition, leader, replicas, isr in partitions:
        if topic_partitions.get(topic, {}).get(partition) is not None:
            topic_partitions[topic][partition] = PartitionMetadata(topic, partition, leader, replicas, isr, partition_error)
return topic_partitions
def get_topic_partition_metadata(hosts)
Returns topic-partition metadata from Kafka broker. kafka-python 1.3+ doesn't include partition metadata information in topic_partitions so we extract it from metadata ourselves.
3.013548
2.989979
1.007883
topic_data = zk.get_topics(partition_metadata.topic)
topic = partition_metadata.topic
partition = partition_metadata.partition
expected_replicas = set(topic_data[topic]['partitions'][str(partition)]['replicas'])
available_replicas = set(partition_metadata.replicas)
return expected_replicas - available_replicas
def get_unavailable_brokers(zk, partition_metadata)
Returns the set of unavailable brokers from the difference of replica set of given partition to the set of available replicas.
3.027192
2.482895
1.219218
metadata = get_topic_partition_metadata(cluster_config.broker_list)
affected_partitions = set()
if fetch_unavailable_brokers:
    unavailable_brokers = set()
with ZK(cluster_config) as zk:
    for partitions in metadata.values():
        for partition_metadata in partitions.values():
            if int(partition_metadata.error) == error:
                if fetch_unavailable_brokers:
                    unavailable_brokers |= get_unavailable_brokers(zk, partition_metadata)
                affected_partitions.add((partition_metadata.topic, partition_metadata.partition))
if fetch_unavailable_brokers:
    return affected_partitions, unavailable_brokers
else:
    return affected_partitions
def get_topic_partition_with_error(cluster_config, error, fetch_unavailable_brokers=False)
Fetches the metadata from the cluster and returns the set of (topic, partition) tuples containing all the topic-partitions currently affected by the specified error. It also fetches unavailable-broker list if required.
2.301897
2.181348
1.055264
topics = _verify_topics_and_partitions(kafka_client, topics, raise_on_error) group_offset_reqs = [ OffsetFetchRequestPayload(topic, partition) for topic, partitions in six.iteritems(topics) for partition in partitions ] group_offsets = {} send_api = kafka_client.send_offset_fetch_request_kafka if group_offset_reqs: # fail_on_error = False does not prevent network errors group_resps = send_api( group=group, payloads=group_offset_reqs, fail_on_error=False, callback=pluck_topic_offset_or_zero_on_unknown, ) for resp in group_resps: group_offsets.setdefault( resp.topic, {}, )[resp.partition] = resp.offset return group_offsets
def get_current_consumer_offsets( kafka_client, group, topics, raise_on_error=True, )
Get current consumer offsets. NOTE: This method does not refresh client metadata. It is up to the caller to avoid using stale metadata. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param group: kafka group_id :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method ignores missing topics and missing partitions. It still may fail on the request send. :returns: a dict topic: partition: offset :raises: :py:class:`kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error.
4.058604
3.933102
1.031909
topics = _verify_topics_and_partitions( kafka_client, topics, raise_on_error, ) highmark_offset_reqs = [] lowmark_offset_reqs = [] for topic, partitions in six.iteritems(topics): # Batch watermark requests for partition in partitions: # Request the the latest offset highmark_offset_reqs.append( OffsetRequestPayload( topic, partition, -1, max_offsets=1 ) ) # Request the earliest offset lowmark_offset_reqs.append( OffsetRequestPayload( topic, partition, -2, max_offsets=1 ) ) watermark_offsets = {} if not (len(highmark_offset_reqs) + len(lowmark_offset_reqs)): return watermark_offsets # fail_on_error = False does not prevent network errors highmark_resps = kafka_client.send_offset_request( highmark_offset_reqs, fail_on_error=False, callback=_check_fetch_response_error, ) lowmark_resps = kafka_client.send_offset_request( lowmark_offset_reqs, fail_on_error=False, callback=_check_fetch_response_error, ) # At this point highmark and lowmark should ideally have the same length. assert len(highmark_resps) == len(lowmark_resps) aggregated_offsets = defaultdict(lambda: defaultdict(dict)) for resp in highmark_resps: aggregated_offsets[resp.topic][resp.partition]['highmark'] = \ resp.offsets[0] for resp in lowmark_resps: aggregated_offsets[resp.topic][resp.partition]['lowmark'] = \ resp.offsets[0] for topic, partition_watermarks in six.iteritems(aggregated_offsets): for partition, watermarks in six.iteritems(partition_watermarks): watermark_offsets.setdefault( topic, {}, )[partition] = PartitionOffsets( topic, partition, watermarks['highmark'], watermarks['lowmark'], ) return watermark_offsets
def get_topics_watermarks(kafka_client, topics, raise_on_error=True)
Get current topic watermarks. NOTE: This method does not refresh client metadata. It is up to the caller to avoid using stale metadata. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method ignores missing topics and missing partitions. It still may fail on the request send. :returns: a dict topic: partition: PartitionOffsets :raises: :py:class:`~kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`~kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error.
2.3188
2.372273
0.977459
kafka_client.load_metadata_for_topics()
return _commit_offsets_to_watermark(
    kafka_client, group, topics,
    HIGH_WATERMARK, raise_on_error,
)
def advance_consumer_offsets( kafka_client, group, topics, raise_on_error=True, )
Advance consumer offsets to the latest message in the topic partition (the high watermark). This method shall refresh the client metadata prior to updating the offsets. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param group: kafka group_id :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method does not raise exceptions on missing topics/partitions. It may still fail on the request send. :returns: a list of errors for each partition offset update that failed. :rtype: list [OffsetCommitError] :raises: :py:class:`kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error.
6.217515
8.195325
0.758666
kafka_client.load_metadata_for_topics()
return _commit_offsets_to_watermark(
    kafka_client, group, topics,
    LOW_WATERMARK, raise_on_error,
)
def rewind_consumer_offsets( kafka_client, group, topics, raise_on_error=True, )
Rewind consumer offsets to the earliest message in the topic partition (the low watermark). This method shall refresh the client metadata prior to updating the offsets. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param group: kafka group_id :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method does not raise exceptions on missing topics/partitions. It may still fail on the request send. :returns: a list of errors for each partition offset update that failed. :rtype: list [OffsetCommitError] :raises: :py:class:`kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error.
5.952087
7.559995
0.787314
valid_new_offsets = _verify_commit_offsets_requests( kafka_client, new_offsets, raise_on_error ) group_offset_reqs = [ OffsetCommitRequestPayload( topic, partition, offset, metadata='', ) for topic, new_partition_offsets in six.iteritems(valid_new_offsets) for partition, offset in six.iteritems(new_partition_offsets) ] send_api = kafka_client.send_offset_commit_request_kafka status = [] if group_offset_reqs: status = send_api( group, group_offset_reqs, raise_on_error, callback=_check_commit_response_error ) return [_f for _f in status if _f and _f.error != 0]
def set_consumer_offsets( kafka_client, group, new_offsets, raise_on_error=True, )
Set consumer offsets to the specified offsets. This method does not validate the specified offsets, it is up to the caller to specify valid offsets within a topic partition. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param group: kafka group_id :param topics: dict {<topic>: {<partition>: <offset>}} :param raise_on_error: if False the method does not raise exceptions on errors encountered. It may still fail on the request send. :returns: a list of errors for each partition offset update that failed. :rtype: list [OffsetCommitError] :raises: :py:class:`kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True :py:class:`exceptions.TypeError`: upon badly formatted input new_offsets FailedPayloadsError: upon send request error.
3.865835
3.920346
0.986095
result = {}
for topic, partition_offsets in six.iteritems(offsets):
    result[topic] = _nullify_partition_offsets(partition_offsets)
return result
def nullify_offsets(offsets)
Modify offsets metadata so that the partition offsets have null payloads. :param offsets: dict {<topic>: {<partition>: <offset>}} :returns: a dict topic: partition: offset
3.510843
3.582564
0.979981
assert all(len(row) == len(headers) for row in table)
str_headers = [str(header) for header in headers]
str_table = [[str(cell) for cell in row] for row in table]
column_lengths = [
    max(len(header), *(len(row[i]) for row in str_table))
    for i, header in enumerate(str_headers)
]
print(
    " | ".join(
        str(header).ljust(length)
        for header, length in zip(str_headers, column_lengths)
    )
)
print("-+-".join("-" * length for length in column_lengths))
for row in str_table:
    print(
        " | ".join(
            str(cell).ljust(length)
            for cell, length in zip(row, column_lengths)
        )
    )
def display_table(headers, table)
Print a formatted table. :param headers: A list of header objects that are displayed in the first row of the table. :param table: A list of lists where each sublist is a row of the table. The number of elements in each row should be equal to the number of headers.
1.775557
1.834997
0.967608
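display_table pads every column to the width of its widest cell; a usage sketch with a condensed copy of the implementation from the record above:

def display_table(headers, table):
    # Print a padded, pipe-separated table.
    assert all(len(row) == len(headers) for row in table)
    str_headers = [str(header) for header in headers]
    str_table = [[str(cell) for cell in row] for row in table]
    widths = [
        max(len(header), *(len(row[i]) for row in str_table))
        for i, header in enumerate(str_headers)
    ]
    print(" | ".join(h.ljust(w) for h, w in zip(str_headers, widths)))
    print("-+-".join("-" * w for w in widths))
    for row in str_table:
        print(" | ".join(c.ljust(w) for c, w in zip(row, widths)))

display_table(["id", "host"], [[1, "kafka-broker-1"], [2, "kafka-broker-2"]])
# id | host
# ---+---------------
# 1  | kafka-broker-1
# 2  | kafka-broker-2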
assert cluster_topologies rg_ids = list(next(six.itervalues(cluster_topologies)).rgs.keys()) assert all( set(rg_ids) == set(cluster_topology.rgs.keys()) for cluster_topology in six.itervalues(cluster_topologies) ) rg_imbalances = [ stats.get_replication_group_imbalance_stats( list(cluster_topology.rgs.values()), list(cluster_topology.partitions.values()), ) for cluster_topology in six.itervalues(cluster_topologies) ] _display_table_title_multicolumn( 'Extra Replica Count', 'Replication Group', rg_ids, list(cluster_topologies.keys()), [ [erc[rg_id] for rg_id in rg_ids] for _, erc in rg_imbalances ], ) for name, imbalance in zip( six.iterkeys(cluster_topologies), (imbalance for imbalance, _ in rg_imbalances) ): print( '\n' '{name}' 'Total extra replica count: {imbalance}' .format( name='' if len(cluster_topologies) == 1 else name + '\n', imbalance=imbalance, ) )
def display_replica_imbalance(cluster_topologies)
Display replica replication-group distribution imbalance statistics.

:param cluster_topologies: A dictionary mapping a string name to a
    ClusterTopology object.
3.202173
3.140258
1.019717
broker_ids = list(next(six.itervalues(cluster_topologies)).brokers.keys())
assert all(
    set(broker_ids) == set(cluster_topology.brokers.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)

broker_partition_counts = [
    stats.get_broker_partition_counts(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
broker_weights = [
    stats.get_broker_weights(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]

_display_table_title_multicolumn(
    'Partition Count',
    'Broker',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_partition_counts,
)

print('')

_display_table_title_multicolumn(
    'Partition Weight',
    'Broker',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_weights,
)

for name, bpc, bw in zip(
    list(cluster_topologies.keys()),
    broker_partition_counts,
    broker_weights
):
    print(
        '\n'
        '{name}'
        'Partition count imbalance: {net_imbalance}\n'
        'Broker weight mean: {weight_mean}\n'
        'Broker weight stdev: {weight_stdev}\n'
        'Broker weight cv: {weight_cv}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            net_imbalance=stats.get_net_imbalance(bpc),
            weight_mean=stats.mean(bw),
            weight_stdev=stats.stdevp(bw),
            weight_cv=stats.coefficient_of_variation(bw),
        )
    )
def display_partition_imbalance(cluster_topologies)
Display partition count and weight imbalance statistics.

:param cluster_topologies: A dictionary mapping a string name to a
    ClusterTopology object.
2.332075
2.277802
1.023827
broker_ids = list(next(six.itervalues(cluster_topologies)).brokers.keys())
assert all(
    set(broker_ids) == set(cluster_topology.brokers.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)

broker_leader_counts = [
    stats.get_broker_leader_counts(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
broker_leader_weights = [
    stats.get_broker_leader_weights(
        cluster_topology.brokers[broker_id]
        for broker_id in broker_ids
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]

_display_table_title_multicolumn(
    'Leader Count',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_leader_counts,
)

print('')

_display_table_title_multicolumn(
    'Leader weight',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    broker_leader_weights,
)

for name, blc, blw in zip(
    list(cluster_topologies.keys()),
    broker_leader_counts,
    broker_leader_weights
):
    print(
        '\n'
        '{name}'
        'Leader count imbalance: {net_imbalance}\n'
        'Broker leader weight mean: {weight_mean}\n'
        'Broker leader weight stdev: {weight_stdev}\n'
        'Broker leader weight cv: {weight_cv}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            net_imbalance=stats.get_net_imbalance(blc),
            weight_mean=stats.mean(blw),
            weight_stdev=stats.stdevp(blw),
            weight_cv=stats.coefficient_of_variation(blw),
        )
    )
def display_leader_imbalance(cluster_topologies)
Display leader count and weight imbalance statistics.

:param cluster_topologies: A dictionary mapping a string name to a
    ClusterTopology object.
2.271545
2.241678
1.013323
broker_ids = list(next(six.itervalues(cluster_topologies)).brokers.keys())
assert all(
    set(broker_ids) == set(cluster_topology.brokers.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)

topic_names = list(next(six.itervalues(cluster_topologies)).topics.keys())
assert all(
    set(topic_names) == set(cluster_topology.topics.keys())
    for cluster_topology in six.itervalues(cluster_topologies)
)

imbalances = [
    stats.get_topic_imbalance_stats(
        [cluster_topology.brokers[broker_id] for broker_id in broker_ids],
        [cluster_topology.topics[tname] for tname in topic_names],
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]
weighted_imbalances = [
    stats.get_weighted_topic_imbalance_stats(
        [cluster_topology.brokers[broker_id] for broker_id in broker_ids],
        [cluster_topology.topics[tname] for tname in topic_names],
    )
    for cluster_topology in six.itervalues(cluster_topologies)
]

_display_table_title_multicolumn(
    'Extra-Topic-Partition Count',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    [
        [i[1][broker_id] for broker_id in broker_ids]
        for i in imbalances
    ]
)

print('')

_display_table_title_multicolumn(
    'Weighted Topic Imbalance',
    'Brokers',
    broker_ids,
    list(cluster_topologies.keys()),
    [
        [wi[1][broker_id] for broker_id in broker_ids]
        for wi in weighted_imbalances
    ]
)

for name, topic_imbalance, weighted_topic_imbalance in zip(
    six.iterkeys(cluster_topologies),
    (i[0] for i in imbalances),
    (wi[0] for wi in weighted_imbalances),
):
    print(
        '\n'
        '{name}'
        'Topic partition imbalance count: {topic_imbalance}\n'
        'Weighted topic partition imbalance: {weighted_topic_imbalance}'
        .format(
            name='' if len(cluster_topologies) == 1 else name + '\n',
            topic_imbalance=topic_imbalance,
            weighted_topic_imbalance=weighted_topic_imbalance,
        )
    )
def display_topic_broker_imbalance(cluster_topologies)
Display topic broker imbalance statistics.

:param cluster_topologies: A dictionary mapping a string name to a
    ClusterTopology object.
1.954299
1.982065
0.985991
movement_count, movement_size, leader_changes = \
    stats.get_partition_movement_stats(ct, base_assignment)
print(
    'Total partition movements: {movement_count}\n'
    'Total partition movement size: {movement_size}\n'
    'Total leader changes: {leader_changes}'
    .format(
        movement_count=movement_count,
        movement_size=movement_size,
        leader_changes=leader_changes,
    )
)
def display_movements_stats(ct, base_assignment)
Display the amount of movement between two assignments.

:param ct: The cluster's ClusterTopology.
:param base_assignment: The cluster assignment to compare against.
2.717394
2.971293
0.914549
curr_plan_list, new_plan_list, total_changes = plan_details
action_cnt = '\n[INFO] Total actions required {0}'.format(total_changes)
_log_or_display(to_log, action_cnt)
action_cnt = (
    '[INFO] Total actions that will be executed {0}'
    .format(len(new_plan_list))
)
_log_or_display(to_log, action_cnt)
changes = ('[INFO] Proposed Changes in current cluster-layout:\n')
_log_or_display(to_log, changes)
tp_str = 'Topic - Partition'
curr_repl_str = 'Previous-Assignment'
new_rep_str = 'Proposed-Assignment'
tp_list = [tp_repl[0] for tp_repl in curr_plan_list]

# Display heading
msg = '=' * 80
_log_or_display(to_log, msg)
row = (
    '{tp:^30s}: {curr_rep_str:^20s} ==> {new_rep_str:^20s}'
    .format(
        tp=tp_str,
        curr_rep_str=curr_repl_str,
        new_rep_str=new_rep_str,
    )
)
_log_or_display(to_log, row)
msg = '=' * 80
_log_or_display(to_log, msg)

# Display each topic-partition list with changes
tp_list_sorted = sorted(tp_list, key=lambda tp: (tp[0], tp[1]))
for tp in tp_list_sorted:
    curr_repl = [
        tp_repl[1] for tp_repl in curr_plan_list if tp_repl[0] == tp
    ][0]
    proposed_repl = [
        tp_repl[1] for tp_repl in new_plan_list if tp_repl[0] == tp
    ][0]
    tp_str = '{topic} - {partition:<2d}'.format(topic=tp[0], partition=tp[1])
    row = (
        '{tp:<30s}: {curr_repl:<20s} ==> {proposed_repl:<20s}'.format(
            tp=tp_str,
            curr_repl=curr_repl,
            proposed_repl=proposed_repl,
        )
    )
    _log_or_display(to_log, row)
def display_assignment_changes(plan_details, to_log=True)
Display current and proposed changes in topic-partition to replica layout over brokers.
2.583836
2.431566
1.062622
data_mean = data_mean or mean(data)
return sum((x - data_mean) ** 2 for x in data) / len(data)
def variance(data, data_mean=None)
Return variance of a sequence of numbers.

:param data_mean: Precomputed mean of the sequence.
2.176145
2.732102
0.796509
data_variance = data_variance or variance(data, data_mean)
return sqrt(data_variance)
def stdevp(data, data_mean=None, data_variance=None)
Return standard deviation of a sequence of numbers.

:param data_mean: Precomputed mean of the sequence.
:param data_variance: Precomputed variance of the sequence.
3.231037
5.055966
0.639054
data_mean = data_mean or mean(data)
data_stdev = data_stdev or stdevp(data, data_mean)
if data_mean == 0:
    return float("inf") if data_stdev != 0 else 0
else:
    return data_stdev / data_mean
def coefficient_of_variation(data, data_mean=None, data_stdev=None)
Return the coefficient of variation (CV) of a sequence of numbers.

:param data_mean: Precomputed mean of the sequence.
:param data_stdev: Precomputed stdevp of the sequence.
2.48282
2.443472
1.016103
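A small worked example tying the three helpers together, assuming mean() is the companion helper from the same stats module:

data = [4, 8, 6, 2]
m = mean(data)                                # 5.0
v = variance(data, m)                         # (1 + 9 + 1 + 9) / 4 = 5.0
sd = stdevp(data, m, v)                       # sqrt(5.0) ~= 2.236
cv = coefficient_of_variation(data, m, sd)    # 2.236 / 5.0 ~= 0.447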
net_imbalance = 0
opt_count, extra_allowed = \
    compute_optimum(len(count_per_broker), sum(count_per_broker))
for count in count_per_broker:
    extra_cnt, extra_allowed = \
        get_extra_element_count(count, opt_count, extra_allowed)
    net_imbalance += extra_cnt
return net_imbalance
def get_net_imbalance(count_per_broker)
Calculate and return net imbalance based on the given count of partitions
or leaders per broker.

Net imbalance in the case of partitions is the total number of extra
partitions over the optimal count, summed across all brokers. This is
also the minimum number of partition movements required for overall
balancing.

For leaders, net imbalance is the total number of extra leaders over the
optimal count, summed across all brokers.
4.019559
4.259558
0.943656
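A worked example, assuming compute_optimum(groups, elements) returns the floor quotient and remainder, i.e. compute_optimum(3, 5) == (1, 2):

# 5 partitions over 3 brokers; an optimal layout would be [2, 2, 1]
get_net_imbalance([4, 1, 0])  # returns 2: the first broker holds 2 partitions too many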
if curr_count > opt_count:
    # We still can allow 1 extra count
    if extra_allowed_cnt > 0:
        extra_allowed_cnt -= 1
        extra_cnt = curr_count - opt_count - 1
    else:
        extra_cnt = curr_count - opt_count
else:
    extra_cnt = 0
return extra_cnt, extra_allowed_cnt
def get_extra_element_count(curr_count, opt_count, extra_allowed_cnt)
Evaluate and return the extra element count for a group, given the current
count, the optimal count, and the number of groups still allowed one extra
element.

:key-term:
    group: Any base over which elements are placed, i.e. a
        replication-group when placing replicas (elements), or a broker
        when placing partitions (elements).
    element: Generic term for the units which are optimally placed over
        groups.

:params:
    curr_count: Given count.
    opt_count: Optimal count for each group.
    extra_allowed_cnt: Count of groups which can have 1 extra element each.
2.671592
2.995541
0.891856
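A short trace of get_extra_element_count for groups with an optimal count of 2, where one group is still allowed an extra element:

get_extra_element_count(4, 2, 1)  # -> (1, 0): the slack slot is consumed, 1 extra element remains
get_extra_element_count(3, 2, 0)  # -> (1, 0): no slack left, 1 extra element
get_extra_element_count(2, 2, 0)  # -> (0, 0): at or below the optimum, nothing extra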
tot_rgs = len(rgs)
extra_replica_cnt_per_rg = defaultdict(int)
for partition in partitions:
    # Get optimal replica-count for each partition
    opt_replica_cnt, extra_replicas_allowed = \
        compute_optimum(tot_rgs, partition.replication_factor)

    # Extra replica count for each rg
    for rg in rgs:
        replica_cnt_rg = rg.count_replica(partition)
        extra_replica_cnt, extra_replicas_allowed = \
            get_extra_element_count(
                replica_cnt_rg,
                opt_replica_cnt,
                extra_replicas_allowed,
            )
        extra_replica_cnt_per_rg[rg.id] += extra_replica_cnt

# Evaluate net imbalance across all replication-groups
net_imbalance = sum(extra_replica_cnt_per_rg.values())
return net_imbalance, extra_replica_cnt_per_rg
def get_replication_group_imbalance_stats(rgs, partitions)
Calculate the extra replica count over each replication-group and the net
extra-same-replica count.
3.513666
3.206758
1.095707
extra_partition_cnt_per_broker = defaultdict(int)
tot_brokers = len(brokers)
# Sort the brokers so that the iteration order is deterministic.
sorted_brokers = sorted(brokers, key=lambda b: b.id)
for topic in topics:
    # Optimal partition-count per topic per broker
    total_partition_replicas = \
        len(topic.partitions) * topic.replication_factor
    opt_partition_cnt, extra_partitions_allowed = \
        compute_optimum(tot_brokers, total_partition_replicas)
    # Get extra-partition count per broker for each topic
    for broker in sorted_brokers:
        partition_cnt_broker = broker.count_partitions(topic)
        extra_partitions, extra_partitions_allowed = \
            get_extra_element_count(
                partition_cnt_broker,
                opt_partition_cnt,
                extra_partitions_allowed,
            )
        extra_partition_cnt_per_broker[broker.id] += extra_partitions

# Net extra partitions over all brokers
net_imbalance = sum(six.itervalues(extra_partition_cnt_per_broker))
return net_imbalance, extra_partition_cnt_per_broker
def get_topic_imbalance_stats(brokers, topics)
Return the per-broker count of extra same-topic partitions, i.e. how many
partitions of a topic a broker holds beyond the optimal count.

:rtype: dict(broker_id: same-topic-partition count)

Example:
    Total brokers (b1, b2): 2
    Total partitions of topic t1: 5
        (b1 has 4 partitions), (b2 has 1 partition)
    opt-count: 5 // 2 = 2
    extra-count: 5 % 2 = 1
        i.e. 1 broker can have 2 + 1 = 3 partitions and the rest of the
        brokers can have 2 partitions for the given topic.
    Extra-partition count (imbalance):
        b1: current-partitions - optimal-count - extra-allowed
            = 4 - 2 - 1 = 1
    Net imbalance = 1
3.390456
3.350086
1.012051
total_movements = 0
movements = {}
for prev_partition, prev_replicas in six.iteritems(prev_assignment):
    curr_replicas = curr_assignment[prev_partition]
    diff = len(set(curr_replicas) - set(prev_replicas))
    if diff:
        total_movements += diff
        movements[prev_partition] = (
            (set(prev_replicas) - set(curr_replicas)),
            (set(curr_replicas) - set(prev_replicas)),
        )
return movements, total_movements
def calculate_partition_movement(prev_assignment, curr_assignment)
Calculate the partition movements from the initial to the current
assignment.

Algorithm:
    For each partition in the initial assignment:
        If the replica set differs in the current assignment:
            record the difference between the two replica sets.

:rtype: tuple (dict((partition, (from_broker_set, to_broker_set))),
    total_movements)
2.359195
1.985886
1.187981
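A minimal example with a single, hypothetical topic-partition key, showing the (from_broker_set, to_broker_set) shape of the result:

prev = {('topic1', 0): [1, 2, 3]}
curr = {('topic1', 0): [1, 2, 4]}
movements, total = calculate_partition_movement(prev, curr)
# movements == {('topic1', 0): ({3}, {4})}, total == 1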
try:
    with open(meta_properties_path, 'r') as f:
        broker_id = _parse_meta_properties_file(f)
except IOError:
    raise IOError(
        "Cannot open meta.properties file: {path}"
        .format(path=meta_properties_path),
    )
except ValueError:
    raise ValueError("Broker id not valid")

if broker_id is None:
    raise ValueError("Autogenerated broker id missing from data directory")

return broker_id
def _read_generated_broker_id(meta_properties_path)
Read broker_id from the meta.properties file.

:param string meta_properties_path: path for meta.properties file
:returns int: broker_id from meta_properties_path
3.372839
3.401485
0.991578
# Path to the meta.properties file. This is used to read the automatic broker id
# if the given broker id is -1
META_FILE_PATH = "{data_path}/meta.properties"

if not data_path:
    raise ValueError("You need to specify the data_path if broker_id == -1")

meta_properties_path = META_FILE_PATH.format(data_path=data_path)
return _read_generated_broker_id(meta_properties_path)
def get_broker_id(data_path)
This function will look into the data folder to get the automatically
created broker_id.

:param string data_path: the path to the kafka data folder
:returns int: the real broker_id
5.013473
5.00839
1.001015
# Refresh client metadata. We do not use the topic list, because we
# don't want to accidentally create the topic if it does not exist.
# If Kafka is unavailable, let's retry loading client metadata
try:
    kafka_client.load_metadata_for_topics()
except KafkaUnavailableError:
    kafka_client.load_metadata_for_topics()

group_offsets = get_current_consumer_offsets(
    kafka_client, group, topics, raise_on_error
)

watermarks = get_topics_watermarks(
    kafka_client, topics, raise_on_error
)

result = {}
for topic, partitions in six.iteritems(group_offsets):
    result[topic] = [
        ConsumerPartitionOffsets(
            topic=topic,
            partition=partition,
            current=group_offsets[topic][partition],
            highmark=watermarks[topic][partition].highmark,
            lowmark=watermarks[topic][partition].lowmark,
        )
        for partition in partitions
    ]
return result
def get_consumer_offsets_metadata( kafka_client, group, topics, raise_on_error=True, )
This method:
    * refreshes metadata for the kafka client
    * fetches group offsets
    * fetches watermarks

:param kafka_client: KafkaToolClient instance
:param group: group id
:param topics: list of topics
:param raise_on_error: if False the method ignores missing topics and
    missing partitions. It still may fail on the request send.
:returns: dict <topic>: [ConsumerPartitionOffsets]
3.206435
2.865121
1.119127
# Refresh client metadata. We do not use the topic list, because we
# don't want to accidentally create the topic if it does not exist.
# If Kafka is unavailable, let's retry loading client metadata
try:
    kafka_client.load_metadata_for_topics()
except KafkaUnavailableError:
    kafka_client.load_metadata_for_topics()

topics_to_be_considered = []

for topic in kafka_client.topic_partitions:
    if re.search(topic_regex, topic):
        topics_to_be_considered.append(topic)

watermarks = get_topics_watermarks(
    kafka_client, topics_to_be_considered
)

return watermarks
def get_watermark_for_regex( kafka_client, topic_regex, )
This method:
    * refreshes metadata for the kafka client
    * fetches watermarks

:param kafka_client: KafkaToolClient instance
:param topic_regex: the topic regex
:returns: dict <topic>: [ConsumerPartitionOffsets]
3.791303
3.708359
1.022367
# Refresh client metadata. We do not use the topic list, because we
# don't want to accidentally create the topic if it does not exist.
# If Kafka is unavailable, let's retry loading client metadata
try:
    kafka_client.load_metadata_for_topics()
except KafkaUnavailableError:
    kafka_client.load_metadata_for_topics()

watermarks = get_topics_watermarks(
    kafka_client, [topic]
)

return watermarks
def get_watermark_for_topic( kafka_client, topic, )
This method:
    * refreshes metadata for the kafka client
    * fetches watermarks

:param kafka_client: KafkaToolClient instance
:param topic: the topic
:returns: dict <topic>: [ConsumerPartitionOffsets]
5.662444
5.430851
1.042644
result = dict()
for topic in topics:
    partition_offsets = [
        response[topic]
        for response in offsets_responses
        if topic in response
    ]
    result[topic] = merge_partition_offsets(*partition_offsets)
return result
def merge_offsets_metadata(topics, *offsets_responses)
Merge the offset metadata dictionaries from multiple responses.

:param topics: list of topics
:param offsets_responses: list of dict topic: partition: offset
:returns: dict topic: partition: offset
2.80848
2.822342
0.995088
output = dict()
for partition_offset in partition_offsets:
    for partition, offset in six.iteritems(partition_offset):
        prev_offset = output.get(partition, 0)
        output[partition] = max(prev_offset, offset)
return output
def merge_partition_offsets(*partition_offsets)
Merge the partition offsets of a single topic from multiple responses.

:param partition_offsets: list of dict partition: offset
:returns: dict partition: offset
2.328483
2.700565
0.862221
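A quick illustration: for each partition the highest offset across responses wins, and partitions missing from a response default to 0:

merge_partition_offsets({0: 15, 1: 7}, {0: 12, 1: 9})
# -> {0: 15, 1: 9}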
movement_count = 0
movement_size = 0
for partition in six.itervalues(self.cluster_topology.partitions):
    count, size = self._rebalance_partition_replicas(
        partition,
        None if not max_movement_count
        else max_movement_count - movement_count,
        None if not max_movement_size
        else max_movement_size - movement_size,
    )
    movement_count += count
    movement_size += size

return movement_count, movement_size
def rebalance_replicas( self, max_movement_count=None, max_movement_size=None, )
Balance replicas across replication-groups.

:param max_movement_count: The maximum number of partitions to move.
:param max_movement_size: The maximum total size of the partitions to move.
:returns: A 2-tuple whose first element is the number of partitions moved
    and whose second element is the total size of the partitions moved.
2.231464
2.511375
0.888543
# Separate replication-groups into under and over replicated
total = partition.replication_factor
over_replicated_rgs, under_replicated_rgs = separate_groups(
    list(self.cluster_topology.rgs.values()),
    lambda g: g.count_replica(partition),
    total,
)

# Move replicas from over-replicated to under-replicated groups
movement_count = 0
movement_size = 0
while (
    under_replicated_rgs and over_replicated_rgs
) and (
    max_movement_size is None or
    movement_size + partition.size <= max_movement_size
) and (
    max_movement_count is None or
    movement_count < max_movement_count
):
    # Decide source and destination group
    rg_source = self._elect_source_replication_group(
        over_replicated_rgs,
        partition,
    )
    rg_destination = self._elect_dest_replication_group(
        rg_source.count_replica(partition),
        under_replicated_rgs,
        partition,
    )
    if rg_source and rg_destination:
        # Actual movement of partition
        self.log.debug(
            'Moving partition {p_name} from replication-group '
            '{rg_source} to replication-group {rg_dest}'.format(
                p_name=partition.name,
                rg_source=rg_source.id,
                rg_dest=rg_destination.id,
            ),
        )
        rg_source.move_partition(rg_destination, partition)
        movement_count += 1
        movement_size += partition.size
    else:
        # Groups balanced or cannot be balanced further
        break
    # Re-compute under and over-replicated replication-groups
    over_replicated_rgs, under_replicated_rgs = separate_groups(
        list(self.cluster_topology.rgs.values()),
        lambda g: g.count_replica(partition),
        total,
    )
return movement_count, movement_size
def _rebalance_partition_replicas( self, partition, max_movement_count=None, max_movement_size=None, )
Rebalance replication groups for given partition.
2.525425
2.52126
1.001652
return max(
    over_replicated_rgs,
    key=lambda rg: rg.count_replica(partition),
)
def _elect_source_replication_group( self, over_replicated_rgs, partition, )
Decide the source replication-group as the group with the highest
replica count.
5.229637
3.788797
1.38029
min_replicated_rg = min(
    under_replicated_rgs,
    key=lambda rg: rg.count_replica(partition),
)
# Locate under-replicated replication-group with lesser
# replica count than source replication-group
if min_replicated_rg.count_replica(partition) < replica_count_source - 1:
    return min_replicated_rg
return None
def _elect_dest_replication_group( self, replica_count_source, under_replicated_rgs, partition, )
Decide destination replication-group based on replica-count.
3.289628
2.955546
1.113036
with open(json_file, 'r') as consumer_offsets_json:
    try:
        parsed_offsets = {}
        parsed_offsets_data = json.load(consumer_offsets_json)
        # Create new dict with partition-keys as integers
        parsed_offsets['groupid'] = parsed_offsets_data['groupid']
        parsed_offsets['offsets'] = {}
        for topic, topic_data in six.iteritems(parsed_offsets_data['offsets']):
            parsed_offsets['offsets'][topic] = {}
            for partition, offset in six.iteritems(topic_data):
                parsed_offsets['offsets'][topic][int(partition)] = offset
        return parsed_offsets
    except ValueError:
        print(
            "Error: Given consumer-data json data-file {file} could not be "
            "parsed".format(file=json_file),
            file=sys.stderr,
        )
        raise
def parse_consumer_offsets(cls, json_file)
Parse current offsets from json-file.
2.921399
2.806733
1.040854
new_offsets = defaultdict(dict)
try:
    for topic, partitions in six.iteritems(topic_partitions):
        # Validate current offsets in range of low and highmarks
        # Currently we only validate for positive offsets and warn
        # if out of range of low and highmarks
        valid_partitions = set()
        for topic_partition_offsets in current_offsets[topic]:
            partition = topic_partition_offsets.partition
            valid_partitions.add(partition)
            # Skip the partition not present in list
            if partition not in topic_partitions[topic]:
                continue
            lowmark = topic_partition_offsets.lowmark
            highmark = topic_partition_offsets.highmark
            new_offset = topics_offset_data[topic][partition]
            if new_offset < 0:
                print(
                    "Error: Given offset: {offset} is negative"
                    .format(offset=new_offset),
                    file=sys.stderr,
                )
                sys.exit(1)
            if new_offset < lowmark or new_offset > highmark:
                print(
                    "Warning: Given offset {offset} for topic-partition "
                    "{topic}:{partition} is outside the range of lowmark "
                    "{lowmark} and highmark {highmark}".format(
                        offset=new_offset,
                        topic=topic,
                        partition=partition,
                        lowmark=lowmark,
                        highmark=highmark,
                    )
                )
            new_offsets[topic][partition] = new_offset
        if not set(partitions).issubset(valid_partitions):
            print(
                "Error: Some invalid partitions {partitions} for topic "
                "{topic} found. Valid partition-list {valid_partitions}. "
                "Exiting...".format(
                    partitions=', '.join([str(p) for p in partitions]),
                    valid_partitions=', '.join(
                        [str(p) for p in valid_partitions]
                    ),
                    topic=topic,
                ),
                file=sys.stderr,
            )
            sys.exit(1)
except KeyError as ex:
    print(
        "Error: Possible invalid topic or partition. Error msg: {ex}. "
        "Exiting...".format(ex=ex),
    )
    sys.exit(1)
return new_offsets
def build_new_offsets(cls, client, topics_offset_data, topic_partitions, current_offsets)
Build complete consumer offsets from the parsed consumer offsets and the
lowmarks and highmarks fetched from the current offsets.
2.522419
2.422934
1.04106
# Fetch current offsets
try:
    consumer_group = parsed_consumer_offsets['groupid']
    topics_offset_data = parsed_consumer_offsets['offsets']
    topic_partitions = dict(
        (topic, [partition for partition in offset_data.keys()])
        for topic, offset_data in six.iteritems(topics_offset_data)
    )
except IndexError:
    print(
        "Error: Given parsed consumer-offset data {consumer_offsets} "
        "could not be parsed".format(consumer_offsets=parsed_consumer_offsets),
        file=sys.stderr,
    )
    raise
current_offsets = get_consumer_offsets_metadata(
    client, consumer_group, topic_partitions,
)

# Build new offsets
new_offsets = cls.build_new_offsets(
    client,
    topics_offset_data,
    topic_partitions,
    current_offsets,
)

# Commit offsets
consumer_group = parsed_consumer_offsets['groupid']
set_consumer_offsets(client, consumer_group, new_offsets)
print("Restored to new offsets {offsets}".format(offsets=dict(new_offsets)))
def restore_offsets(cls, client, parsed_consumer_offsets)
Fetch current offsets from kafka, validate them against given
consumer-offsets data and commit the new offsets.

:param client: Kafka-client
:param parsed_consumer_offsets: Parsed consumer offset data from json file
:type parsed_consumer_offsets: dict(group: dict(topic: partition-offsets))
2.982885
2.907596
1.025894
tuple_list = list(tup)
for index, value in pairs:
    tuple_list[index] = value
return tuple(tuple_list)
def tuple_replace(tup, *pairs)
Return a copy of a tuple with some elements replaced.

:param tup: The tuple to be copied.
:param pairs: Any number of (index, value) tuples where index is the index
    of the item to replace and value is the new value of the item.
2.676144
3.287306
0.814084
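An illustrative call:

tuple_replace((1, 2, 3), (0, 'a'), (2, 'c'))  # -> ('a', 2, 'c')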
# timeit says that this is faster than a similar
tuple_list = list(tup)
for i, f in pairs:
    tuple_list[i] = f(tuple_list[i])
return tuple(tuple_list)
def tuple_alter(tup, *pairs)
Return a copy of a tuple with some elements altered.

:param tup: The tuple to be copied.
:param pairs: Any number of (index, func) tuples where index is the index
    of the item to alter and the new value is func(tup[index]).
5.294024
5.300419
0.998793
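An illustrative call:

tuple_alter((1, 2, 3), (1, lambda x: x * 10))  # -> (1, 20, 3)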
tuple_list = list(tup)
for item in items:
    tuple_list.remove(item)
return tuple(tuple_list)
def tuple_remove(tup, *items)
Return a copy of a tuple with some items removed.

:param tup: The tuple to be copied.
:param items: Any number of items. The first instance of each item will be
    removed from the tuple.
2.300543
3.386401
0.679347
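An illustrative call; note that only the first occurrence of each item is removed:

tuple_remove((1, 2, 3, 2), 2)  # -> (1, 3, 2)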
error_msg = 'Positive integer required, {string} given.'.format(string=string)
try:
    value = int(string)
except ValueError:
    raise ArgumentTypeError(error_msg)
if value < 0:
    raise ArgumentTypeError(error_msg)
return value
def positive_int(string)
Convert string to positive integer.
2.292733
2.216361
1.034459
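Given the ArgumentTypeError these converters raise, they are presumably intended as argparse type= callbacks; a sketch of that usage (the flag name is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--max-movements', type=positive_int)

parser.parse_args(['--max-movements', '5'])    # Namespace(max_movements=5)
parser.parse_args(['--max-movements', '-1'])   # argparse reports the error message and exits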
error_msg = 'Positive non-zero integer required, {string} given.'.format(string=string)
try:
    value = int(string)
except ValueError:
    raise ArgumentTypeError(error_msg)
if value <= 0:
    raise ArgumentTypeError(error_msg)
return value
def positive_nonzero_int(string)
Convert string to positive integer greater than zero.
2.427009
2.34596
1.034548
error_msg = 'Positive float required, {string} given.'.format(string=string)
try:
    value = float(string)
except ValueError:
    raise ArgumentTypeError(error_msg)
if value < 0:
    raise ArgumentTypeError(error_msg)
return value
def positive_float(string)
Convert string to positive float.
2.420738
2.331923
1.038086
return dict(list(set1.items()) + list(set2.items()))
def dict_merge(set1, set2)
Joins two dictionaries.
2.749715
2.617035
1.050699
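Because the items of set2 are appended after those of set1 before the dict is rebuilt, values from set2 win on duplicate keys:

dict_merge({'a': 1, 'b': 2}, {'b': 20, 'c': 3})
# -> {'a': 1, 'b': 20, 'c': 3}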