code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
return self.call(
'catalog_product.info', [
product, store_view, attributes, identifierType
]
)
|
def info(self, product, store_view=None, attributes=None,
identifierType=None)
|
Retrieve product data
:param product: ID or SKU of product
:param store_view: ID or Code of store view
:param attributes: List of fields required
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: `dict` of values
| 5.01403 | 6.589958 | 0.760859 |
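A hedged usage sketch for the info() call above; `api` is a hypothetical, already-authenticated client object exposing this method, and the SKU, attribute names and identifierType value are illustrative.

# `api` is a hypothetical authenticated client exposing the info() method above;
# the SKU and attribute names are illustrative.
data = api.info('WS-001', attributes=['name', 'price'], identifierType='sku')
print(data['name'], data['price'])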
return int(self.call(
'catalog_product.create',
[product_type, attribute_set_id, sku, data]
)
)
|
def create(self, product_type, attribute_set_id, sku, data)
|
Create Product and return ID
:param product_type: String type of product
:param attribute_set_id: ID of attribute set
:param sku: SKU of the product
:param data: Dictionary of data
:return: INT id of product created
| 4.639174 | 5.674157 | 0.817597 |
return bool(self.call(
'catalog_product.update',
[product, data, store_view, identifierType]
))
|
def update(self, product, data, store_view=None, identifierType=None)
|
Update product Information
:param product: ID or SKU of product
:param data: Dictionary of attributes to update
:param store_view: ID or Code of store view
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Boolean
| 6.054215 | 6.2582 | 0.967405 |
return bool(self.call(
'catalog_product.setSpecialPrice', [
product, special_price, from_date, to_date, store_view,
identifierType
]
))
|
def setSpecialPrice(self, product, special_price=None,
from_date=None, to_date=None, store_view=None,
identifierType=None)
|
Update product's special price
:param product: ID or SKU of product
:param special_price: Special Price
:param from_date: From date
:param to_date: To Date
:param store_view: ID or Code of Store View
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Boolean
| 3.465627 | 3.820776 | 0.907048 |
return self.call(
'catalog_product.getSpecialPrice', [
product, store_view, identifierType
]
)
|
def getSpecialPrice(self, product, store_view=None, identifierType=None)
|
Get product special price data
:param product: ID or SKU of product
:param store_view: ID or Code of Store view
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Dictionary
| 5.28761 | 7.609836 | 0.694839 |
return self.call('catalog_product_attribute_media.list',
[product, store_view, identifierType])
|
def list(self, product, store_view=None, identifierType=None)
|
Retrieve product image list
:param product: ID or SKU of product
:param store_view: Code or ID of store view
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: `list` of `dict`
| 10.999932 | 13.860537 | 0.793615 |
return self.call('catalog_product_attribute_media.info',
[product, image_file, store_view, identifierType])
|
def info(self, product, image_file, store_view=None, identifierType=None)
|
Retrieve product image data
:param product: ID or SKU of product
:param store_view: ID or Code of store view
:param image_file: The image file name
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: `list` of `dict`
| 7.796736 | 10.452019 | 0.745955 |
return self.call('catalog_product_attribute_media.create',
[product, data, store_view, identifierType])
|
def create(self, product, data, store_view=None, identifierType=None)
|
Upload a new product image.
:param product: ID or SKU of product
:param data: `dict` of image data (label, position, exclude, types)
Example: { 'label': 'description of photo',
'position': '1', 'exclude': '0',
'types': ['image', 'small_image', 'thumbnail']}
:param store_view: Store view ID or Code
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: string - image file name
| 9.33395 | 10.356341 | 0.901279 |
return self.call('catalog_product_attribute_media.update',
[product, img_file_name, data, store_view, identifierType])
|
def update(self, product, img_file_name, data, store_view=None,
identifierType=None)
|
Update a product image.
:param product: ID or SKU of product
:param img_file_name: The image file name
Example: '/m/y/my_image_thumb.jpg'
:param data: `dict` of image data (label, position, exclude, types)
Example: { 'label': 'description of photo',
'position': '1', 'exclude': '0',
'types': ['image', 'small_image', 'thumbnail']}
:param store_view: Store view ID or Code
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: string - image file name
| 6.377679 | 7.252177 | 0.879416 |
return self.call('catalog_product_attribute_media.remove',
[product, img_file_name, identifierType])
|
def remove(self, product, img_file_name, identifierType=None)
|
Remove a product image.
:param product: ID or SKU of product
:param img_file_name: The image file name
Example: '/m/y/my_image_thumb.jpg'
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: boolean
| 9.393627 | 10.609179 | 0.885424 |
return self.call('catalog_product_link.list',
[link_type, product, identifierType])
|
def list(self, link_type, product, identifierType=None)
|
Retrieve list of linked products
:param link_type: type of link, one of 'cross_sell', 'up_sell',
'related' or 'grouped'
:param product: ID or SKU of product
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: `list` of `dict`
| 7.133588 | 10.573378 | 0.674675 |
return bool(self.call('catalog_product_link.assign',
[link_type, product, linked_product, data, identifierType]))
|
def assign(self, link_type, product, linked_product, data=None,
identifierType=None)
|
Assign a product link
:param link_type: type of link, one of 'cross_sell', 'up_sell',
'related' or 'grouped'
:param product: ID or SKU of product
:param linked_product: ID or SKU of linked product
:param data: dictionary of link data, (position, qty, etc.)
Example: { 'position': '0', 'qty': 1}
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: boolean
| 5.608674 | 5.996219 | 0.935368 |
return bool(self.call('catalog_product_link.remove',
[link_type, product, linked_product, identifierType]))
|
def remove(self, link_type, product, linked_product, identifierType=None)
|
Remove a product link
:param link_type: type of link, one of 'cross_sell', 'up_sell',
'related' or 'grouped'
:param product: ID or SKU of product
:param linked_product: ID or SKU of linked product to unlink
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: boolean
| 6.107521 | 6.518836 | 0.936903 |
return bool(self.call('ol_catalog_product_link.assign',
[product, linked_products, attributes]))
|
def update(self, product, linked_products, attributes)
|
Update the linked products of a configurable product
:param product: ID or SKU of product
:param linked_products: list of IDs or SKUs of the products to link
:param attributes: dict of attributes
:return: True/False
| 18.547085 | 19.862375 | 0.93378 |
return self.call('cart.order', [quote_id, store_view, license_id])
|
def order(self, quote_id, store_view=None, license_id=None)
|
Allows you to create an order from a shopping cart (quote).
Before placing the order, you need to add the customer, customer
address, shipping and payment methods.
:param quote_id: Shopping cart ID (quote ID)
:param store_view: Store view ID or code
:param license_id: Website license ID
:return: string, result of creating order
| 5.077804 | 5.104737 | 0.994724 |
return bool(
self.call('cart_coupon.add', [quote_id, coupon_code, store_view])
)
|
def add(self, quote_id, coupon_code, store_view=None)
|
Add a coupon code to a quote.
:param quote_id: Shopping cart ID (quote ID)
:param coupon_code: string, Coupon code
:param store_view: Store view ID or code
:return: boolean, True if the coupon code is added
| 6.15167 | 5.850275 | 1.051518 |
return bool(
self.call('cart_customer.addresses',
[quote_id, address_data, store_view])
)
|
def addresses(self, quote_id, address_data, store_view=None)
|
Add customer address information into a shopping cart
:param quote_id: Shopping cart ID (quote ID)
:param address_data: list of dicts of address details, example
[
{
'mode': 'billing',
'address_id': 'customer_address_id'
},
{
'mode': 'shipping',
'firstname': 'testFirstname',
'lastname': 'testLastname',
'company': 'testCompany',
'street': 'testStreet',
'city': 'testCity',
'region': 'testRegion',
'region_id': 'testRegionId',
'postcode': 'testPostcode',
'country_id': 'id',
'telephone': '0123456789',
'fax': '0123456789',
'is_default_shipping': 0,
'is_default_billing': 0
},
]
:param store_view: Store view ID or code
:return: boolean, True if the address is set
| 10.441124 | 8.326093 | 1.254024 |
return bool(
self.call('cart_customer.set',
[quote_id, customer_data, store_view])
)
|
def set(self, quote_id, customer_data, store_view=None)
|
Add customer information into a shopping cart
:param quote_id: Shopping cart ID (quote ID)
:param customer_data: dict of customer details, example
{
'firstname': 'testFirstname',
'lastname': 'testLastName',
'email': 'testEmail',
'website_id': '0',
'store_id': '0',
'mode': 'guest'
}
:param store_view: Store view ID or code
:return: boolean, True if information added
| 7.378323 | 7.4726 | 0.987384 |
return bool(
self.call('cart_payment.method',
[quote_id, payment_data, store_view])
)
|
def method(self, quote_id, payment_data, store_view=None)
|
Allows you to set a payment method for a shopping cart (quote).
:param quote_id: Shopping cart ID (quote ID)
:param payment_data: dict of payment details, example
{
'po_number': '',
'method': 'checkmo',
'cc_cid': '',
'cc_owner': '',
'cc_number': '',
'cc_type': '',
'cc_exp_year': '',
'cc_exp_month': ''
}
:param store_view: Store view ID or code
:return: boolean, True on success
| 7.357656 | 5.893164 | 1.248507 |
return bool(
self.call('cart_product.add', [quote_id, product_data, store_view])
)
|
def add(self, quote_id, product_data, store_view=None)
|
Allows you to add one or more products to the shopping cart (quote).
:param quote_id: Shopping cart ID (quote ID)
:param product_data: list of dicts of product details, example
[
{
'product_id': 1,
'qty': 2,
'options': {
'option_1': 'value_1',
'option_2': 'value_2',
...
},
'bundle_option': {},
'bundle_option_qty': {},
'links': [],
},
{
'sku': 'S0012345',
'qty': 4,
},
]
:param store_view: Store view ID or code
:return: boolean, True on success (if the product is added to the
shopping cart)
| 6.685844 | 5.538695 | 1.207115 |
return bool(
self.call('cart_product.moveToCustomerQuote',
[quote_id, product_data, store_view])
)
|
def move_to_customer_quote(self, quote_id, product_data, store_view=None)
|
Allows you to move products from the current quote to a customer quote.
:param quote_id: Shopping cart ID (quote ID)
:param product_data: list of dicts of product details, example
[
{
'product_id': 1,
'qty': 2,
'options': {
'option_1': 'value_1',
'option_2': 'value_2',
...
},
'bundle_option': {},
'bundle_option_qty': {},
'links': [],
},
{
'sku': 'S0012345',
'qty': 4,
},
]
:param store_view: Store view ID or code
:return: boolean, True if the product is moved to customer quote
| 7.473075 | 6.770743 | 1.10373 |
return bool(
self.call('cart_product.remove',
[quote_id, product_data, store_view])
)
|
def remove(self, quote_id, product_data, store_view=None)
|
Allows you to remove one or several products from a shopping cart
(quote).
:param quote_id: Shopping cart ID (quote ID)
:param product_data: list of dicts of product details, see add()
:param store_view: Store view ID or code
:return: boolean, True if the product is removed
| 7.596895 | 6.954369 | 1.092392 |
return bool(
self.call('cart_product.update',
[quote_id, product_data, store_view])
)
|
def update(self, quote_id, product_data, store_view=None)
|
Allows you to update one or several products in the shopping cart
(quote).
:param quote_id: Shopping cart ID (quote ID)
:param product_data: list of dicts of product details, see add()
:param store_view: Store view ID or code
:return: boolean, True if the product is updated
| 7.892707 | 6.304112 | 1.251993 |
return bool(
self.call('cart_shipping.method',
[quote_id, shipping_method, store_view])
)
|
def method(self, quote_id, shipping_method, store_view=None)
|
Allows you to set a shipping method for a shopping cart (quote).
:param quote_id: Shopping cart ID (quote ID)
:param shipping_method: string, shipping method code
:param store_view: Store view ID or code
:return: boolean, True if the shipping method is set
| 7.158611 | 6.610461 | 1.082922 |
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
with ROOT.joinpath('Pipfile.lock').open() as f:
lockfile = plette.Lockfile.load(f)
libdir = OUTPUT_DIR.joinpath('lib')
paths = {'purelib': libdir, 'platlib': libdir}
sources = lockfile.meta.sources._data
maker = distlib.scripts.ScriptMaker(None, None)
# Install packages from Pipfile.lock.
for name, package in lockfile.default._data.items():
if name in DONT_PACKAGE:
continue
print(f'[pack] Installing {name}')
package.pop('editable', None) # Don't install things as editable.
package.pop('markers', None) # Always install everything.
r = requirementslib.Requirement.from_pipfile(name, package)
wheel = passa.internals._pip.build_wheel(
r.as_ireq(), sources, r.hashes or None,
)
wheel.install(paths, maker, lib_only=True)
for pattern in IGNORE_LIB_PATTERNS:
for path in libdir.rglob(pattern):
print(f'[pack] Removing {path}')
path.unlink()
# Pack everything into ZIP.
zipname = OUTPUT_DIR.joinpath('passa.zip')
with zipfile.ZipFile(zipname, 'w') as zf:
_recursive_write_to_zip(zf, OUTPUT_DIR)
_recursive_write_to_zip(zf, STUBFILES_DIR)
print(f'[pack] Written archive {zipname}')
if remove_lib and libdir.exists():
print(f'[pack] Removing {libdir}')
shutil.rmtree(str(libdir))
|
def pack(ctx, remove_lib=True)
|
Build an isolated, runnable package.
| 4.308303 | 4.208794 | 1.023643 |
unprebump(ctx)
if bump_release(ctx, type_=type_):
return
this_version = _read_version()
ctx.run('towncrier')
ctx.run(f'git commit -am "Release {this_version}"')
ctx.run(f'git tag -fa {this_version} -m "Version {this_version}"')
if repo:
if upload(ctx, repo=repo):
return
else:
print('[release] Missing --repo, skip uploading')
prebump(ctx, type_=prebump_to)
next_version = _read_version()
ctx.run(f'git commit -am "Prebump to {next_version}"')
|
def release(ctx, type_, repo=None, prebump_to=PREBUMP)
|
Make a new release.
| 4.017614 | 3.845199 | 1.044839 |
multiprocess_on = 'prometheus_multiproc_dir' in os.environ
get_endpoint = endpoint.fn_by_type(endpoint_type, get_endpoint_fn)
memcollect_enabled = mmc_period_sec is not None
@app.listener('before_server_start')
def before_start(app, loop):
metrics.init(
latency_buckets, multiprocess_mode,
memcollect_enabled=memcollect_enabled
)
@app.middleware('request')
async def before_request(request):
if request.path != '/metrics':
metrics.before_request_handler(request)
@app.middleware('response')
async def before_response(request, response):
if request.path != '/metrics':
metrics.after_request_handler(request, response, get_endpoint)
if multiprocess_on:
@app.listener('after_server_stop')
def after_stop(app, loop):
multiprocess.mark_process_dead(os.getpid())
elif memcollect_enabled:
@app.listener('before_server_start')
async def start_memcollect_task(app, loop):
app.memcollect_task = loop.create_task(
metrics.periodic_memcollect_task(
mmc_period_sec,
loop
)
)
@app.listener('after_server_stop')
async def stop_memcollect_task(app, loop):
app.memcollect_task.cancel()
return MonitorSetup(app, multiprocess_on)
|
def monitor(app, endpoint_type='url:1',
get_endpoint_fn=None,
latency_buckets=None,
mmc_period_sec=30,
multiprocess_mode='all')
|
Registers a bunch of metrics for the Sanic server
(request latency, count, etc) and exposes /metrics endpoint
to allow Prometheus to scrape them out.
:param app: an instance of sanic.app
:param endpoint_type: All request related metrics have a label called
'endpoint'. It can be fetched from Sanic `request`
object using different strategies specified by
`endpoint_type`:
url - full relative path of a request URL
(i.e. for http://something/a/b/c?f=x you end up
having `/a/b/c` as the endpoint)
url:n - like URL but with at most `n` path elements
in the endpoint (i.e. with `url:1`
http://something/a/b/c becomes `/a`).
custom - custom endpoint fetching function that
should be specified by `get_endpoint_fn`
:param get_endpoint_fn: a custom endpoint fetching function that is ignored
unless `endpoint_type='custom'`.
get_endpoint_fn = lambda r: ...
where `r` is Sanic request object
:param latency_buckets: an optional list of bucket sizes for latency
histogram (see prometheus `Histogram` metric)
:param mmc_period_sec: set a period (in seconds) of how frequently memory
usage related metrics are collected.
Setting it to None will disable memory metrics
collection.
NOTE: memory usage is not collected when multiprocessing is enabled
| 2.489657 | 2.562647 | 0.971517 |
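A minimal wiring sketch for monitor() above, assuming it is importable from this package; the app name, route and endpoint_type are illustrative.

from sanic import Sanic
from sanic.response import json

app = Sanic("example")

@app.route("/api/v1/users")
async def users(request):
    return json({"users": []})

# Register request metrics and expose /metrics on the same server.
monitor(app, endpoint_type="url:2").expose_endpoint()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)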
@self._app.route('/metrics', methods=['GET'])
async def expose_metrics(request):
return raw(self._get_metrics_data(),
content_type=CONTENT_TYPE_LATEST)
|
def expose_endpoint(self)
|
Expose /metrics endpoint on the same Sanic server.
This may be useful if Sanic is launched from a container
and you do not want to expose more than one port for some
reason.
| 5.511608 | 4.176765 | 1.319588 |
if self._multiprocess_on:
raise SanicPrometheusError(
"start_server can not be used when multiprocessing " +
"is turned on")
start_http_server(addr=addr, port=port)
|
def start_server(self, addr='', port=8000)
|
Expose /metrics endpoint on a new server that will
be launched on `<addr>:<port>`.
This may be useful if you want to restrict access to
metrics data with firewall rules.
NOTE: can not be used in multiprocessing mode
| 8.635295 | 7.224128 | 1.195341 |
config = default_config() if config is None else config
config = remove_node_attributes(config, "converters")
feed = load_feed(inpath, view, config)
return write_feed_dangerously(feed, outpath)
|
def extract_feed(
inpath: str, outpath: str, view: View, config: nx.DiGraph = None
) -> str
|
Extract a subset of a GTFS zip into a new file
| 6.977171 | 6.248333 | 1.116645 |
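A hedged usage sketch for extract_feed(), assuming it is importable from this GTFS package; the paths and view contents are illustrative.

# Keep only the trips of two routes when extracting the new zip.
view = {"trips.txt": {"route_id": ["route_1", "route_2"]}}
new_path = extract_feed("data/gtfs.zip", "data/gtfs_subset.zip", view)
print("wrote", new_path)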
nodes = DEFAULT_NODES if nodes is None else nodes
try:
tmpdir = tempfile.mkdtemp()
def write_node(node):
df = feed.get(node)
if not df.empty:
path = os.path.join(tmpdir, node)
df.to_csv(path, index=False)
pool = ThreadPool(len(nodes))
try:
pool.map(write_node, nodes)
finally:
pool.terminate()
if outpath.endswith(".zip"):
outpath, _ = os.path.splitext(outpath)
outpath = shutil.make_archive(outpath, "zip", tmpdir)
finally:
shutil.rmtree(tmpdir)
return outpath
|
def write_feed_dangerously(
feed: Feed, outpath: str, nodes: Optional[Collection[str]] = None
) -> str
|
Naively write a feed to a zipfile
This function provides no sanity checks. Use it at
your own risk.
| 2.326432 | 2.260763 | 1.029047 |
feed = load_raw_feed(path)
return _busiest_date(feed)
|
def read_busiest_date(path: str) -> Tuple[datetime.date, FrozenSet[str]]
|
Find the earliest date with the most trips
| 9.780842 | 7.658723 | 1.277085 |
feed = load_raw_feed(path)
return _busiest_week(feed)
|
def read_busiest_week(path: str) -> Dict[datetime.date, FrozenSet[str]]
|
Find the earliest week with the most trips
| 8.935615 | 7.520372 | 1.188188 |
feed = load_raw_feed(path)
return _service_ids_by_date(feed)
|
def read_service_ids_by_date(path: str) -> Dict[datetime.date, FrozenSet[str]]
|
Find all service identifiers by date
| 6.740478 | 5.482598 | 1.229431 |
feed = load_raw_feed(path)
return _dates_by_service_ids(feed)
|
def read_dates_by_service_ids(
path: str
) -> Dict[FrozenSet[str], FrozenSet[datetime.date]]
|
Find dates with identical service
| 6.806304 | 6.581982 | 1.034081 |
feed = load_raw_feed(path)
return _trip_counts_by_date(feed)
|
def read_trip_counts_by_date(path: str) -> Dict[datetime.date, int]
|
A useful proxy for busyness
| 6.230083 | 5.327878 | 1.169337 |
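A hedged usage sketch tying the read_* helpers above together, assuming they are importable from this package; the path is illustrative.

# Find the single date with the most scheduled trips.
counts = read_trip_counts_by_date("data/gtfs.zip")
busiest_date = max(counts, key=counts.get)
print(busiest_date, counts[busiest_date])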
config_ = remove_node_attributes(config, ["converters", "transformations"])
feed_ = Feed(path, view={}, config=config_)
for filename, column_filters in view.items():
config_ = reroot_graph(config_, filename)
view_ = {filename: column_filters}
feed_ = Feed(feed_, view=view_, config=config_)
return Feed(feed_, config=config)
|
def _load_feed(path: str, view: View, config: nx.DiGraph) -> Feed
|
Multi-file feed filtering
| 7.533021 | 6.753348 | 1.11545 |
view = self._view.get(filename)
if view is None:
return df
for col, values in view.items():
# If applicable, filter this dataframe by the given set of values
if col in df.columns:
df = df[df[col].isin(setwrap(values))]
return df
|
def _filter(self, filename: str, df: pd.DataFrame) -> pd.DataFrame
|
Apply view filters
| 5.971697 | 4.800539 | 1.243964 |
dependencies = []
for _, depf, data in self._config.out_edges(filename, data=True):
deps = data.get("dependencies")
if deps is None:
msg = f"Edge missing `dependencies` attribute: {filename}->{depf}"
raise ValueError(msg)
dependencies.append((depf, deps))
if not dependencies:
return df
for depfile, column_pairs in dependencies:
# Read the filtered, cached file dependency
depdf = self.get(depfile)
for deps in column_pairs:
col = deps[filename]
depcol = deps[depfile]
# If applicable, prune this dataframe by the other
if col in df.columns and depcol in depdf.columns:
df = df[df[col].isin(depdf[depcol])]
return df
|
def _prune(self, filename: str, df: pd.DataFrame) -> pd.DataFrame
|
Depth-first search through the dependency graph
and prune dependent DataFrames along the way.
| 5.991687 | 5.572057 | 1.07531 |
if df.empty:
return
converters = self._config.nodes.get(filename, {}).get("converters", {})
for col, converter in converters.items():
if col in df.columns:
df[col] = converter(df[col])
|
def _convert_types(self, filename: str, df: pd.DataFrame) -> None
|
Apply type conversions
| 3.571562 | 3.106177 | 1.149826 |
G = G.copy()
for n, successors in list(nx.bfs_successors(G, source=node)):
for s in successors:
G.add_edge(s, n, **G.edges[n, s])
G.remove_edge(n, s)
return G
|
def reroot_graph(G: nx.DiGraph, node: str) -> nx.DiGraph
|
Return a copy of the graph rooted at the given node
| 2.705387 | 2.320364 | 1.165932 |
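A small self-contained check of reroot_graph(), assuming the function above is in scope: the edge along the BFS tree from the given node is reversed in the returned copy while the original graph is left untouched.

import networkx as nx

G = nx.DiGraph()
G.add_edge("a", "b")

H = reroot_graph(G, "a")                  # function shown above
assert list(H.edges()) == [("b", "a")]    # edge reversed in the copy
assert list(G.edges()) == [("a", "b")]    # original graph unchanged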
return set(map(str, set(flatten([value]))))
|
def setwrap(value: Any) -> Set[str]
|
Returns a flattened and stringified set from the given object or iterable.
For use in public functions which accept arguments or kwargs that can be
one object or a list of objects.
| 11.028666 | 12.661938 | 0.871009 |
G = G.copy()
for _, data in G.nodes(data=True):
for attribute in setwrap(attributes):
if attribute in data:
del data[attribute]
return G
|
def remove_node_attributes(G: nx.DiGraph, attributes: Union[str, Iterable[str]])
|
Return a copy of the graph with the given attributes
deleted from all nodes.
| 4.561601 | 3.894457 | 1.171306 |
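A small self-contained check of remove_node_attributes(), assuming the function above is in scope; the node name and attributes are illustrative.

import networkx as nx

G = nx.DiGraph()
G.add_node("trips.txt", converters={"direction_id": int}, required=True)

H = remove_node_attributes(G, ["converters"])   # function shown above
assert "converters" not in H.nodes["trips.txt"]
assert H.nodes["trips.txt"]["required"] is True
assert "converters" in G.nodes["trips.txt"]     # original graph untouched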
if not isinstance(topic, string_types):
raise TypeError('topic={!r} must be text'.format(topic))
if not isinstance(topic, text_type):
topic = topic.decode('ascii')
if len(topic) < 1:
raise ValueError('invalid empty topic name')
if len(topic) > 249:
raise ValueError('topic={!r} name is too long: {} > 249'.format(
topic, len(topic)))
return topic
|
def _coerce_topic(topic)
|
Ensure that the topic name is a text string of a valid length.
:param topic: Kafka topic name. Valid characters are in the set ``[a-zA-Z0-9._-]``.
:raises ValueError: when the topic name exceeds 249 bytes
:raises TypeError: when the topic is not :class:`unicode` or :class:`str`
| 2.620664 | 2.483082 | 1.055408 |
if not isinstance(consumer_group, string_types):
raise TypeError('consumer_group={!r} must be text'.format(consumer_group))
if not isinstance(consumer_group, text_type):
consumer_group = consumer_group.decode('utf-8')
return consumer_group
|
def _coerce_consumer_group(consumer_group)
|
Ensure that the consumer group is a text string.
:param consumer_group: :class:`bytes` or :class:`str` instance
:raises TypeError: when `consumer_group` is not :class:`bytes`
or :class:`str`
| 2.61937 | 2.421584 | 1.081677 |
if isinstance(client_id, type(u'')):
client_id = client_id.encode('utf-8')
if not isinstance(client_id, bytes):
raise TypeError('{!r} is not a valid consumer group (must be'
' str or bytes)'.format(client_id))
return client_id
|
def _coerce_client_id(client_id)
|
Ensure the provided client ID is a byte string. If a text string is
provided, it is encoded as UTF-8 bytes.
:param client_id: :class:`bytes` or :class:`str` instance
| 3.194491 | 3.5425 | 0.901762 |
if s is None:
return _NULL_SHORT_STRING
if not isinstance(s, string_types):
raise TypeError('{!r} is not text'.format(s))
return write_short_bytes(s.encode('ascii'))
|
def write_short_ascii(s)
|
Encode a Kafka short string which represents text.
:param str s:
Text string (`str` on Python 3, `str` or `unicode` on Python 2) or
``None``. The string will be ASCII-encoded.
:returns: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
| 3.930104 | 3.967361 | 0.990609 |
if b is None:
return _NULL_SHORT_STRING
if not isinstance(b, bytes):
raise TypeError('{!r} is not bytes'.format(b))
elif len(b) > 32767:
raise struct.error(len(b))
else:
return struct.pack('>h', len(b)) + b
|
def write_short_bytes(b)
|
Encode a Kafka short string which contains arbitrary bytes. A short string
is limited to 32767 bytes in length by the signed 16-bit length prefix.
A length prefix of -1 indicates ``null``, represented as ``None`` in
Python.
:param bytes b:
No more than 32767 bytes, or ``None`` for the null encoding.
:return: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
| 3.038251 | 2.379396 | 1.2769 |
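A quick self-contained illustration of the wire format described above: a signed big-endian 16-bit length prefix followed by the raw bytes, with a -1 prefix as the null marker.

import struct

payload = b'afkak'
assert struct.pack('>h', len(payload)) + payload == b'\x00\x05afkak'

# The null encoding is just the -1 length prefix with no body.
assert struct.pack('>h', -1) == b'\xff\xff'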
stripped = line.strip()
if stripped and stripped.startswith('#') is False:
rexres = self.rex.search(stripped)
if rexres:
return ' '.join(rexres.group(1).split())
return None
|
def parse_cron_line(self, line)
|
Parses crontab line and returns only starting time string
Args:
line: crontab line
Returns:
Time part of cron line
| 4.6601 | 5.017983 | 0.92868 |
if self.node_id != new.node_id:
raise ValueError("Broker metadata {!r} doesn't match node_id={}".format(new, self.node_id))
self.node_id = new.node_id
self.host = new.host
self.port = new.port
|
def updateMetadata(self, new)
|
Update the metadata stored for this broker.
Future connections made to the broker will use the host and port
defined in the new metadata. Any existing connection is not dropped,
however.
:param new:
:class:`afkak.common.BrokerMetadata` with the same node ID as the
current metadata.
| 3.569987 | 2.81047 | 1.270246 |
if requestId in self.requests:
# Id is duplicate to 'in-flight' request. Reject it, as we
# won't be able to properly deliver the response(s)
# Note that this won't protect against a client calling us
# twice with the same ID, but first with expectResponse=False
# But that's pathological, and the only defense is to track
# all requestIds sent regardless of whether we expect to see
# a response, which is effectively a memory leak...
raise DuplicateRequestError(
'Reuse of requestId:{}'.format(requestId))
# If we've been told to shutdown (close() called) then fail request
if self._dDown:
return fail(ClientError('makeRequest() called after close()'))
# Ok, we are going to save/send it, create a _Request object to track
canceller = partial(
self.cancelRequest, requestId,
CancelledError(message="Request correlationId={} was cancelled".format(requestId)))
tReq = _Request(requestId, request, expectResponse, canceller)
# add it to our requests dict
self.requests[requestId] = tReq
# Add an errback to the tReq.d to remove it from our requests dict
# if something goes wrong...
tReq.d.addErrback(self._handleRequestFailure, requestId)
# Do we have a connection over which to send the request?
if self.proto:
# Send the request
self._sendRequest(tReq)
# Have we not even started trying to connect yet? Do so now
elif not self.connector:
self._connect()
return tReq.d
|
def makeRequest(self, requestId, request, expectResponse=True)
|
Send a request to our broker via our self.proto KafkaProtocol object.
Return a deferred which will fire when the reply matching the requestId
comes back from the server, or, if expectResponse is False, then
return None instead.
If we are not currently connected, then we buffer the request to send
when the connection comes back up.
| 8.27387 | 7.875789 | 1.050545 |
if self.proto:
log.debug('%r Disconnecting from %r', self, self.proto.transport.getPeer())
self.proto.transport.loseConnection()
|
def disconnect(self)
|
Disconnect from the Kafka broker.
This is used to implement disconnection on timeout as a workaround for
Kafka connections occasionally getting stuck on the server side under
load. Requests are not cancelled, so they will be retried.
| 4.199959 | 5.74446 | 0.731132 |
log.debug('%r: close() proto=%r connector=%r', self, self.proto, self.connector)
assert self._dDown is None
self._dDown = Deferred()
if self.proto is not None:
self.proto.transport.loseConnection()
elif self.connector is not None:
def connectingFailed(reason):
log.debug('%r: connection attempt has been cancelled: %r', self, reason)
self._dDown.callback(None)
self.connector.addErrback(connectingFailed)
self.connector.cancel()
else:
# Fake a cleanly closing connection
self._dDown.callback(None)
try:
raise CancelledError(message="Broker client for node_id={} {}:{} was closed".format(
self.node_id, self.host, self.port))
except Exception:
reason = Failure()
# Cancel any requests
for correlation_id in list(self.requests.keys()): # must copy, may del
self.cancelRequest(correlation_id, reason)
return self._dDown
|
def close(self)
|
Permanently dispose of the broker client.
This terminates any outstanding connection and cancels any pending
requests.
| 5.288684 | 4.984903 | 1.06094 |
log.info('%r: Connection closed: %r', self, reason)
# Reset our proto so we don't try to send to a down connection
self.proto = None
# Mark any in-flight requests as unsent.
for tReq in self.requests.values():
tReq.sent = False
if self._dDown:
self._dDown.callback(None)
elif self.requests:
self._connect()
|
def _connectionLost(self, reason)
|
Called when the protocol connection is lost
- Log the disconnection.
- Mark any outstanding requests as unsent so they will be sent when
a new connection is made.
- If closing the broker client, mark completion of that process.
:param reason:
Failure that indicates the reason for disconnection.
| 6.821727 | 6.074126 | 1.12308 |
requestId = KafkaCodec.get_response_correlation_id(response)
# Protect against responses coming back we didn't expect
tReq = self.requests.pop(requestId, None)
if tReq is None:
# This could happen if we've sent it, are waiting on the response
# when it's cancelled, causing us to remove it from self.requests
log.warning('Unexpected response with correlationId=%d: %r',
requestId, reprlib.repr(response))
else:
tReq.d.callback(response)
|
def handleResponse(self, response)
|
Handle the response string received by KafkaProtocol.
Ok, we've received the response from the broker. Find the requestId
in the message, lookup & fire the deferred with the response.
| 8.154657 | 7.422463 | 1.098646 |
try:
tReq.sent = True
self.proto.sendString(tReq.data)
except Exception as e:
log.exception('%r: Failed to send request %r', self, tReq)
del self.requests[tReq.id]
tReq.d.errback(e)
else:
if not tReq.expect:
# Once we've sent a request for which we don't expect a reply,
# we're done, remove it from requests, and fire the deferred
# with 'None', since there is no reply to be expected
del self.requests[tReq.id]
tReq.d.callback(None)
|
def _sendRequest(self, tReq)
|
Send a single request over our protocol to the Kafka broker.
| 3.906689 | 3.83409 | 1.018935 |
for tReq in list(self.requests.values()): # must copy, may del
if not tReq.sent:
self._sendRequest(tReq)
|
def _sendQueued(self)
|
Connection just came up, send the unsent requests.
| 9.075659 | 6.806087 | 1.333462 |
if reason is None:
reason = CancelledError()
tReq = self.requests.pop(requestId)
tReq.d.errback(reason)
|
def cancelRequest(self, requestId, reason=None, _=None)
|
Cancel a request: remove it from requests, & errback the deferred.
NOTE: Attempts to cancel a request which is no longer tracked
(expectResponse == False and already sent, or response already
received) will raise KeyError
| 4.16959 | 4.042596 | 1.031414 |
def tryConnect():
self.connector = d = maybeDeferred(connect)
d.addCallback(cbConnect)
d.addErrback(ebConnect)
def connect():
endpoint = self._endpointFactory(self._reactor, self.host, self.port)
log.debug('%r: connecting with %s', self, endpoint)
return endpoint.connect(self)
def cbConnect(proto):
log.debug('%r: connected to %r', self, proto.transport.getPeer())
self._failures = 0
self.connector = None
self.proto = proto
if self._dDown:
proto.transport.loseConnection()
else:
self._sendQueued()
def ebConnect(fail):
if self._dDown:
log.debug('%r: breaking connect loop due to %r after close()', self, fail)
return fail
self._failures += 1
delay = self._retryPolicy(self._failures)
log.debug('%r: failure %d to connect -> %s; retry in %.2f seconds.',
self, self._failures, fail.value, delay)
self.connector = d = deferLater(self._reactor, delay, lambda: None)
d.addCallback(cbDelayed)
def cbDelayed(result):
tryConnect()
self._failures = 0
tryConnect()
|
def _connect(self)
|
Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds.
| 3.755755 | 3.719113 | 1.009852 |
# Initialize all elements of parsed array to empty strings
parsed = ['', '', '', '', '', '', '']
if self._expression is None or len(self._expression) == 0:
raise MissingFieldException("ExpressionDescriptor.expression")
else:
expression_parts_temp = self._expression.split()
expression_parts_temp_length = len(expression_parts_temp)
if expression_parts_temp_length < 5:
raise FormatException(
"Error: Expression only has {0} parts. At least 5 part are required.".format(
expression_parts_temp_length))
elif expression_parts_temp_length == 5:
# 5 part cron so shift array past seconds element
for i, expression_part_temp in enumerate(expression_parts_temp):
parsed[i + 1] = expression_part_temp
elif expression_parts_temp_length == 6:
# If last element ends with 4 digits, a year element has been
# supplied and no seconds element
year_regex = re.compile(r"\d{4}$")
if year_regex.search(expression_parts_temp[5]) is not None:
for i, expression_part_temp in enumerate(expression_parts_temp):
parsed[i + 1] = expression_part_temp
else:
for i, expression_part_temp in enumerate(expression_parts_temp):
parsed[i] = expression_part_temp
elif expression_parts_temp_length == 7:
parsed = expression_parts_temp
else:
raise FormatException(
"Error: Expression has too many parts ({0}). Expression must not have more than 7 parts.".format(
expression_parts_temp_length))
self.normalize_expression(parsed)
return parsed
|
def parse(self)
|
Parses the cron expression string
Returns:
A 7 part string array, one part for each component of the cron expression (seconds, minutes, etc.)
Raises:
MissingFieldException: if _expression is empty or None
FormatException: if _expression has wrong format
| 3.059285 | 2.666146 | 1.147456 |
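An illustrative mapping from a standard 5-part cron expression to the 7-part array parse() returns: the parts shift right by one so the seconds slot stays empty, and the trailing year slot stays empty too.

five_part = "*/5 9-17 * * MON-FRI".split()
seven_part = [""] * 7
for i, part in enumerate(five_part):   # same shift as the 5-part branch above
    seven_part[i + 1] = part
assert seven_part == ["", "*/5", "9-17", "*", "*", "MON-FRI", ""]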
# convert ? to * only for DOM and DOW
expression_parts[3] = expression_parts[3].replace("?", "*")
expression_parts[5] = expression_parts[5].replace("?", "*")
# convert 0/, 1/ to */
if expression_parts[0].startswith("0/"):
expression_parts[0] = expression_parts[
0].replace("0/", "*/") # seconds
if expression_parts[1].startswith("0/"):
expression_parts[1] = expression_parts[
1].replace("0/", "*/") # minutes
if expression_parts[2].startswith("0/"):
expression_parts[2] = expression_parts[
2].replace("0/", "*/") # hours
if expression_parts[3].startswith("1/"):
expression_parts[3] = expression_parts[3].replace("1/", "*/") # DOM
if expression_parts[4].startswith("1/"):
expression_parts[4] = expression_parts[
4].replace("1/", "*/") # Month
if expression_parts[5].startswith("1/"):
expression_parts[5] = expression_parts[5].replace("1/", "*/") # DOW
if expression_parts[6].startswith("1/"):
expression_parts[6] = expression_parts[6].replace("1/", "*/")
# handle DayOfWeekStartIndexZero option where SUN=1 rather than SUN=0
if self._options.day_of_week_start_index_zero is False:
expression_parts[5] = self.decrease_days_of_week(expression_parts[5])
if expression_parts[3] == "?":
expression_parts[3] = "*"
# convert SUN-SAT format to 0-6 format
for day_number in self._cron_days:
expression_parts[5] = expression_parts[5].upper().replace(self._cron_days[day_number], str(day_number))
# convert JAN-DEC format to 1-12 format
for month_number in self._cron_months:
expression_parts[4] = expression_parts[4].upper().replace(
self._cron_months[month_number], str(month_number))
# convert 0 second to (empty)
if expression_parts[0] == "0":
expression_parts[0] = ''
# Loop through all parts and apply global normalization
length = len(expression_parts)
for i in range(length):
# convert all '*/1' to '*'
if expression_parts[i] == "*/1":
expression_parts[i] = "*"
if "/" in expression_parts[i] and any(exp in expression_parts[i] for exp in ['*', '-', ',']) is False:
choices = {
4: "12",
5: "6",
6: "9999"
}
step_range_through = choices.get(i)
if step_range_through is not None:
parts = expression_parts[i].split('/')
expression_parts[i] = "{0}-{1}/{2}".format(parts[0], step_range_through, parts[1])
|
def normalize_expression(self, expression_parts)
|
Converts cron expression components into consistent, predictable formats.
Args:
expression_parts: A 7 part string array, one part for each component of the cron expression
Returns:
None
| 2.591894 | 2.473533 | 1.047851 |
groups = {}
for env in envlist:
envpy, category = env.split('-')[0:2]
if category == 'lint':
category = 'unit'
try:
groups[envpy, category].append(env)
except KeyError:
groups[envpy, category] = [env]
return sorted((envpy, category, envs) for (envpy, category), envs in groups.items())
|
def group_envs(envlist)
|
Group Tox environments for Travis CI builds
Separate by Python version so that they can go in different Travis jobs:
>>> group_envs('py37-int-snappy', 'py36-int')
[('py36', 'int', ['py36-int']), ('py37', 'int', ['py37-int-snappy'])]
Group unit tests and linting together:
>>> group_envs(['py27-unit', 'py27-lint'])
[('py27', 'unit', ['py27-unit', 'py27-lint'])]
| 3.404204 | 3.193506 | 1.065977 |
assert payload is None or isinstance(payload, bytes), 'payload={!r} should be bytes or None'.format(payload)
assert key is None or isinstance(key, bytes), 'key={!r} should be bytes or None'.format(key)
return Message(0, 0, key, payload)
|
def create_message(payload, key=None)
|
Construct a :class:`Message`
:param payload: The payload to send to Kafka.
:type payload: :class:`bytes` or ``None``
:param key: A key used to route the message when partitioning and to
determine message identity on a compacted topic.
:type key: :class:`bytes` or ``None``
| 2.641251 | 2.614716 | 1.010148 |
encoded_message_set = KafkaCodec._encode_message_set(message_set)
gzipped = gzip_encode(encoded_message_set)
return Message(0, CODEC_GZIP, None, gzipped)
|
def create_gzip_message(message_set)
|
Construct a gzip-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
| 5.856966 | 7.939695 | 0.737681 |
encoded_message_set = KafkaCodec._encode_message_set(message_set)
snapped = snappy_encode(encoded_message_set)
return Message(0, CODEC_SNAPPY, None, snapped)
|
def create_snappy_message(message_set)
|
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
| 5.748192 | 7.731673 | 0.74346 |
msglist = []
for req in requests:
msglist.extend([create_message(m, key=req.key) for m in req.messages])
if codec == CODEC_NONE:
return msglist
elif codec == CODEC_GZIP:
return [create_gzip_message(msglist)]
elif codec == CODEC_SNAPPY:
return [create_snappy_message(msglist)]
else:
raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)
|
def create_message_set(requests, codec=CODEC_NONE)
|
Create a message set from a list of requests.
Each request can have a list of messages and its own key. If codec is
:data:`CODEC_NONE`, return a list of raw Kafka messages. Otherwise, return
a list containing a single codec-encoded message.
:param codec:
The encoding for the message set, one of the constants:
- `afkak.CODEC_NONE`
- `afkak.CODEC_GZIP`
- `afkak.CODEC_SNAPPY`
:raises: :exc:`UnsupportedCodecError` for an unsupported codec
| 2.776436 | 2.511333 | 1.105563 |
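A hedged sketch of calling create_message_set(), assuming it and CODEC_GZIP are in scope as above; the request stand-in only mimics the `.key` and `.messages` attributes the function reads, and afkak's real request type may differ.

from collections import namedtuple

# Minimal stand-in exposing the attributes create_message_set reads; illustrative only.
Req = namedtuple('Req', ['key', 'messages'])
batch = [Req(key=b'user-1', messages=[b'payload-1', b'payload-2'])]

raw = create_message_set(batch)                  # list of uncompressed Message tuples
gzipped = create_message_set(batch, CODEC_GZIP)  # single gzip-wrapped Message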
return (struct.pack('>hhih',
request_key, # ApiKey
api_version, # ApiVersion
correlation_id, # CorrelationId
len(client_id)) + # ClientId size
client_id)
|
def _encode_message_header(cls, client_id, correlation_id, request_key,
api_version=0)
|
Encode the common request envelope
| 4.293044 | 4.468029 | 0.960836 |
message_set = []
incr = 1
if offset is None:
incr = 0
offset = 0
for message in messages:
encoded_message = KafkaCodec._encode_message(message)
message_set.append(struct.pack('>qi', offset, len(encoded_message)))
message_set.append(encoded_message)
offset += incr
return b''.join(message_set)
|
def _encode_message_set(cls, messages, offset=None)
|
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed. Format::
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
| 2.584138 | 2.637895 | 0.979621 |
if message.magic == 0:
msg = struct.pack('>BB', message.magic, message.attributes)
msg += write_int_string(message.key)
msg += write_int_string(message.value)
crc = zlib.crc32(msg) & 0xffffffff # Ensure unsigned
msg = struct.pack('>I', crc) + msg
else:
raise ProtocolError("Unexpected magic number: %d" % message.magic)
return msg
|
def _encode_message(cls, message)
|
Encode a single message.
The magic number of a message is a format version number. The only
supported magic number right now is zero. Format::
Message => Crc MagicByte Attributes Key Value
Crc => int32
MagicByte => int8
Attributes => int8
Key => bytes
Value => bytes
| 3.566777 | 2.890489 | 1.23397 |
if not isinstance(client_id, bytes):
raise TypeError('client_id={!r} should be bytes'.format(client_id))
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaCodec.PRODUCE_KEY)
message += struct.pack('>hii', acks, timeout, len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
msg_set = KafkaCodec._encode_message_set(payload.messages)
message += struct.pack('>ii', partition, len(msg_set))
message += msg_set
return message
|
def encode_produce_request(cls, client_id, correlation_id,
payloads=None, acks=1,
timeout=DEFAULT_REPLICAS_ACK_TIMEOUT_MSECS)
|
Encode some ProduceRequest structs
:param bytes client_id:
:param int correlation_id:
:param list payloads: list of ProduceRequest
:param int acks:
How "acky" you want the request to be:
0: immediate response
1: written to disk by the leader
2+: waits for this many replicas to sync
-1: waits for all replicas to be in sync
:param int timeout:
Maximum time the server will wait for acks from replicas. This is
_not_ a socket timeout.
| 2.569501 | 2.662502 | 0.96507 |
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _i in range(num_topics):
topic, cur = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error, offset), cur) = relative_unpack('>ihq', data, cur)
yield ProduceResponse(topic, partition, error, offset)
|
def decode_produce_response(cls, data)
|
Decode bytes to a ProduceResponse
:param bytes data: bytes to decode
:returns: iterable of `afkak.common.ProduceResponse`
| 4.672907 | 5.769955 | 0.809869 |
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(client_id, correlation_id,
KafkaCodec.FETCH_KEY)
assert isinstance(max_wait_time, int)
# -1 is the replica id
message += struct.pack('>iiii', -1, max_wait_time, min_bytes,
len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>iqi', partition, payload.offset,
payload.max_bytes)
return message
|
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
max_wait_time=100, min_bytes=4096)
|
Encodes some FetchRequest structs
:param bytes client_id:
:param int correlation_id:
:param list payloads: list of :class:`FetchRequest`
:param int max_wait_time: how long to block waiting on min_bytes of data
:param int min_bytes:
the minimum number of bytes to accumulate before returning the
response
| 2.793742 | 3.033584 | 0.920938 |
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _i in range(num_topics):
(topic, cur) = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error, highwater_mark_offset), cur) = \
relative_unpack('>ihq', data, cur)
(message_set, cur) = read_int_string(data, cur)
yield FetchResponse(
topic, partition, error,
highwater_mark_offset,
KafkaCodec._decode_message_set_iter(message_set))
|
def decode_fetch_response(cls, data)
|
Decode bytes to a FetchResponse
:param bytes data: bytes to decode
| 4.35095 | 4.813543 | 0.903898 |
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _i in range(num_topics):
(topic, cur) = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error, num_offsets), cur) = \
relative_unpack('>ihi', data, cur)
offsets = []
for _i in range(num_offsets):
((offset,), cur) = relative_unpack('>q', data, cur)
offsets.append(offset)
yield OffsetResponse(topic, partition, error, tuple(offsets))
|
def decode_offset_response(cls, data)
|
Decode bytes to an :class:`OffsetResponse`
:param bytes data: bytes to decode
| 3.412886 | 3.826957 | 0.891801 |
topics = [] if topics is None else topics
message = [
cls._encode_message_header(client_id, correlation_id,
KafkaCodec.METADATA_KEY),
struct.pack('>i', len(topics)),
]
for topic in topics:
message.append(write_short_ascii(topic))
return b''.join(message)
|
def encode_metadata_request(cls, client_id, correlation_id, topics=None)
|
Encode a MetadataRequest
:param bytes client_id: string
:param int correlation_id: int
:param list topics: list of text
| 3.474553 | 4.326398 | 0.803105 |
((correlation_id, numbrokers), cur) = relative_unpack('>ii', data, 0)
# In testing, I saw this routine swap my machine to death when
# passed bad data. So, some checks are in order...
if numbrokers > MAX_BROKERS:
raise InvalidMessageError(
"Brokers:{} exceeds max:{}".format(numbrokers, MAX_BROKERS))
# Broker info
brokers = {}
for _i in range(numbrokers):
((nodeId, ), cur) = relative_unpack('>i', data, cur)
(host, cur) = read_short_ascii(data, cur)
((port,), cur) = relative_unpack('>i', data, cur)
brokers[nodeId] = BrokerMetadata(nodeId, nativeString(host), port)
# Topic info
((num_topics,), cur) = relative_unpack('>i', data, cur)
topic_metadata = {}
for _i in range(num_topics):
((topic_error,), cur) = relative_unpack('>h', data, cur)
(topic_name, cur) = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
partition_metadata = {}
for _j in range(num_partitions):
((partition_error_code, partition, leader, numReplicas),
cur) = relative_unpack('>hiii', data, cur)
(replicas, cur) = relative_unpack(
'>%di' % numReplicas, data, cur)
((num_isr,), cur) = relative_unpack('>i', data, cur)
(isr, cur) = relative_unpack('>%di' % num_isr, data, cur)
partition_metadata[partition] = \
PartitionMetadata(
topic_name, partition, partition_error_code, leader,
replicas, isr)
topic_metadata[topic_name] = TopicMetadata(
topic_name, topic_error, partition_metadata)
return brokers, topic_metadata
|
def decode_metadata_response(cls, data)
|
Decode bytes to a MetadataResponse
:param bytes data: bytes to decode
| 3.12097 | 3.249887 | 0.960332 |
message = cls._encode_message_header(client_id, correlation_id,
KafkaCodec.CONSUMER_METADATA_KEY)
message += write_short_ascii(consumer_group)
return message
|
def encode_consumermetadata_request(cls, client_id, correlation_id,
consumer_group)
|
Encode a ConsumerMetadataRequest
:param bytes client_id: string
:param int correlation_id: int
:param str consumer_group: string
| 4.773335 | 5.930751 | 0.804845 |
(correlation_id, error_code, node_id), cur = \
relative_unpack('>ihi', data, 0)
host, cur = read_short_ascii(data, cur)
(port,), cur = relative_unpack('>i', data, cur)
return ConsumerMetadataResponse(
error_code, node_id, nativeString(host), port)
|
def decode_consumermetadata_response(cls, data)
|
Decode bytes to a ConsumerMetadataResponse
:param bytes data: bytes to decode
| 8.340233 | 9.361812 | 0.890878 |
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(
client_id, correlation_id, KafkaCodec.OFFSET_COMMIT_KEY,
api_version=1,
)
message += write_short_ascii(group)
message += struct.pack('>i', group_generation_id)
message += write_short_ascii(consumer_id)
message += struct.pack('>i', len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack('>i', len(topic_payloads))
for partition, payload in topic_payloads.items():
message += struct.pack('>iqq', partition, payload.offset,
payload.timestamp)
message += write_short_bytes(payload.metadata)
return message
|
def encode_offset_commit_request(cls, client_id, correlation_id,
group, group_generation_id, consumer_id,
payloads)
|
Encode some OffsetCommitRequest structs (v1)
:param bytes client_id: string
:param int correlation_id: int
:param str group: the consumer group to which you are committing offsets
:param int group_generation_id: int32, generation ID of the group
:param str consumer_id: string, Identifier for the consumer
:param list payloads: list of :class:`OffsetCommitRequest`
| 2.283389 | 2.377011 | 0.960613 |
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_topics):
(topic, cur) = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error), cur) = relative_unpack('>ih', data, cur)
yield OffsetCommitResponse(topic, partition, error)
|
def decode_offset_commit_response(cls, data)
|
Decode bytes to an OffsetCommitResponse
:param bytes data: bytes to decode
| 3.733146 | 3.811596 | 0.979418 |
grouped_payloads = group_by_topic_and_partition(payloads)
message = cls._encode_message_header(
client_id, correlation_id, KafkaCodec.OFFSET_FETCH_KEY,
api_version=1)
message += write_short_ascii(group)
message += struct.pack('>i', len(grouped_payloads))
for topic, topic_payloads in grouped_payloads.items():
message += write_short_ascii(topic)
message += struct.pack('>i', len(topic_payloads))
for partition in topic_payloads:
message += struct.pack('>i', partition)
return message
|
def encode_offset_fetch_request(cls, client_id, correlation_id,
group, payloads)
|
Encode some OffsetFetchRequest structs
:param bytes client_id: string
:param int correlation_id: int
:param bytes group: string, the consumer group you are fetching offsets for
:param list payloads: list of :class:`OffsetFetchRequest`
| 2.613826 | 2.8535 | 0.916007 |
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_topics):
(topic, cur) = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, offset), cur) = relative_unpack('>iq', data, cur)
(metadata, cur) = read_short_bytes(data, cur)
((error,), cur) = relative_unpack('>h', data, cur)
yield OffsetFetchResponse(topic, partition, offset,
metadata, error)
|
def decode_offset_fetch_response(cls, data)
|
Decode bytes to an OffsetFetchResponse
:param bytes data: bytes to decode
| 3.033295 | 3.096428 | 0.979611 |
correlation_id = response[0:4]
try:
d = self._pending.pop(correlation_id)
except KeyError:
self._log.warn((
"Response has unknown correlation ID {correlation_id!r}."
" Dropping connection to {peer}."
), correlation_id=correlation_id, peer=self.transport.getPeer())
self.transport.loseConnection()
else:
d.callback(response)
|
def stringReceived(self, response)
|
Handle a response from the broker.
| 3.91937 | 3.744676 | 1.046651 |
self._failed = reason
pending, self._pending = self._pending, None
for d in pending.values():
d.errback(reason)
|
def connectionLost(self, reason=connectionDone)
|
Mark the protocol as failed and fail all pending operations.
| 4.260079 | 3.438394 | 1.238974 |
if self._failed is not None:
return fail(self._failed)
correlation_id = request[4:8]
assert correlation_id not in self._pending
d = Deferred()
self.sendString(request)
self._pending[correlation_id] = d
return d
|
def request(self, request)
|
Send a request to the Kafka broker.
:param bytes request:
The bytes of a Kafka `RequestMessage`_ structure. It must have
a unique (to this connection) correlation ID.
:returns:
`Deferred` which will:
- Succeed with the bytes of a Kafka `ResponseMessage`_
- Fail when the connection terminates
.. _RequestMessage: https://kafka.apache.org/protocol.html#protocol_messages
| 5.526374 | 5.155136 | 1.072013 |
try:
topic = _coerce_topic(topic)
if key is not None and not isinstance(key, bytes):
raise TypeError('key={!r} must be bytes or None'.format(key))
if not msgs:
raise ValueError("msgs must be a non-empty sequence")
msg_cnt = len(msgs)
byte_cnt = 0
for index, m in enumerate(msgs):
if m is None:
continue
if not isinstance(m, bytes):
raise TypeError('Message {} to topic {} ({!r:.100}) has type {}, but must have type {}'.format(
index, topic, m, type(m).__name__, type(bytes).__name__))
byte_cnt += len(m)
except Exception:
return fail()
d = Deferred(self._cancel_send_messages)
self._batch_reqs.append(SendRequest(topic, key, msgs, d))
self._waitingMsgCount += msg_cnt
self._waitingByteCount += byte_cnt
# Add request to list of outstanding reqs' callback to remove
self._outstanding.append(d)
d.addBoth(self._remove_from_outstanding, d)
# See if we have enough messages in the batch to do a send.
self._check_send_batch()
return d
|
def send_messages(self, topic, key=None, msgs=())
|
Given a topic, and optional key (for partitioning) and a list of
messages, send them to Kafka, either immediately, or when a batch is
ready, depending on the Producer's batch settings.
:param str topic: Kafka topic to send the messages to
:param str key:
Message key used to determine the topic partition to which the
messages will be written. Either `bytes` or `None`.
`None` means that there is no key, but note that:
- Kafka does not permit producing unkeyed messages to a compacted topic.
- The *partitioner_class* may require a non-`None` key
(`HashedPartitioner` does so).
:param list msgs:
A non-empty sequence of message bytestrings to send. `None`
indicates a ``null`` message (i.e. a tombstone on a compacted
topic).
:returns:
A :class:`~twisted.internet.defer.Deferred` that fires when the
messages have been received by the Kafka cluster.
It will fail with `TypeError` when:
- *topic* is not text (`str` on Python 3, `str` or `unicode` on Python 2)
- *key* is not `bytes` or `None`
- *msgs* is not a sequence of `bytes` or `None`
It will fail with `ValueError` when *msgs* is empty.
| 3.775472 | 3.593455 | 1.050652 |
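A hedged usage sketch for send_messages(), assuming `producer` is an instance of the class defining the method above (an afkak-style Producer) and a Twisted reactor is running; the topic, key and payloads are illustrative.

d = producer.send_messages('user.events', key=b'user-42',
                           msgs=[b'signup', b'login'])
d.addCallback(lambda result: print('batch acknowledged:', result))
d.addErrback(lambda failure: print('send failed:', failure.getErrorMessage()))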
self.stopping = True
# Cancel any outstanding request to our client
if self._batch_send_d:
self._batch_send_d.cancel()
# Do we have to worry about our looping call?
if self.batch_every_t is not None:
# Stop our looping call, and wait for the deferred to be called
if self._sendLooper is not None:
self._sendLooper.stop()
# Make sure any requests that weren't cancelled above are cancelled now
self._cancel_outstanding()
return self._sendLooperD or succeed(None)
|
def stop(self)
|
Terminate any outstanding requests.
:returns: :class:`Deferred` which fires when fully stopped.
| 9.628692 | 8.969017 | 1.07355 |
log.warning('_send_timer_failed:%r: %s', fail,
fail.getBriefTraceback())
self._sendLooperD = self._sendLooper.start(
self.batch_every_t, now=False)
|
def _send_timer_failed(self, fail)
|
Our _send_batch() function, called by the LoopingCall, failed. Some
error probably came back from Kafka and _check_error() raised the
exception.
For now, just log the failure and restart the loop.
| 15.795243 | 10.96596 | 1.440389 |
if self._sendLooper is not lCall:
log.warning('commitTimerStopped with wrong timer:%s not:%s',
lCall, self._sendLooper)
else:
self._sendLooper = None
self._sendLooperD = None
|
def _send_timer_stopped(self, lCall)
|
We're shutting down; clean up our looping call.
| 7.921056 | 7.369102 | 1.074901 |
# check if the client has metadata for the topic
while self.client.metadata_error_for_topic(topic):
# client doesn't have good metadata for topic. ask to fetch...
# check if we have request attempts left
if self._req_attempts >= self._max_attempts:
# No, no attempts left, so raise the error
_check_error(self.client.metadata_error_for_topic(topic))
yield self.client.load_metadata_for_topics(topic)
if not self.client.metadata_error_for_topic(topic):
break
self._req_attempts += 1
d = Deferred()
self.client.reactor.callLater(
self._retry_interval, d.callback, True)
self._retry_interval *= self.RETRY_INTERVAL_FACTOR
yield d
# Ok, should be safe to get the partitions now...
partitions = self.client.topic_partitions[topic]
# Do we have a partitioner for this topic already?
if topic not in self.partitioners:
    # No, create a new partitioner for topic, partitions
self.partitioners[topic] = \
self.partitioner_class(topic, partitions)
# Lookup the next partition
partition = self.partitioners[topic].partition(key, partitions)
returnValue(partition)
|
def _next_partition(self, topic, key=None)
|
Get the next partition to which to publish.
Check with our client for the latest partitions for the topic, then
ask our partitioner for the next partition to which we should publish
for the given key. If needed, create a new partitioner for the topic.
| 4.661417 | 4.387506 | 1.06243 |
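The retry path above waits between metadata fetches by firing a Deferred from reactor.callLater and growing the interval by RETRY_INTERVAL_FACTOR. The deferred-sleep piece of that, as a stand-alone sketch (the helper name is ours, not the library's):

from twisted.internet import defer

def deferred_sleep(reactor, seconds):
    # Fires its callback after `seconds` without blocking the reactor;
    # yield it from an inlineCallbacks generator to pause between retries.
    d = defer.Deferred()
    reactor.callLater(seconds, d.callback, None)
    return d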
# We use these dictionaries to be able to combine all the messages
# destined to the same topic/partition into one request
# the messages & deferreds, both by topic+partition
reqsByTopicPart = defaultdict(list)
payloadsByTopicPart = defaultdict(list)
deferredsByTopicPart = defaultdict(list)
# We now have a list of (succeeded/failed, partition/None) tuples
# for the partition lookups we did on each message group, zipped with
# the requests
for (success, part_or_failure), req in zip(parts_results, requests):
if req.deferred.called:
# Submitter cancelled the request while we were waiting for
# the topic/partition, skip it
continue
if not success:
# We failed to get a partition for this request, errback to the
# caller with the failure. Maybe this should retry? However,
# since this failure is likely to affect an entire Topic, there
        # should be no issue with messages within a partition of a
        # topic ending up out of order. Let the caller retry the
        # particular request if they like, or cancel all of their
        # outstanding requests.
req.deferred.errback(part_or_failure)
continue
# Ok, we now have a partition for this request, we can add the
# request for this topic/partition to reqsByTopicPart, and the
# caller's deferred to deferredsByTopicPart
topicPart = TopicAndPartition(req.topic, part_or_failure)
reqsByTopicPart[topicPart].append(req)
deferredsByTopicPart[topicPart].append(req.deferred)
# Build list of payloads grouped by topic/partition
# That is, we bundle all the messages destined for a given
# topic/partition, even if they were submitted by different
# requests into a single 'payload', and then we submit all the
# payloads as a list to the client for sending to the various
# brokers. The finest granularity of success/failure is at the
# payload (topic/partition) level.
payloads = []
for (topic, partition), reqs in reqsByTopicPart.items():
msgSet = create_message_set(reqs, self.codec)
req = ProduceRequest(topic, partition, msgSet)
topicPart = TopicAndPartition(topic, partition)
payloads.append(req)
payloadsByTopicPart[topicPart] = req
# Make sure we have some payloads to send
if not payloads:
return
# send the request
d = self.client.send_produce_request(
payloads, acks=self.req_acks, timeout=self.ack_timeout,
fail_on_error=False)
self._req_attempts += 1
# add our handlers
d.addBoth(self._handle_send_response, payloadsByTopicPart,
deferredsByTopicPart)
return d
|
def _send_requests(self, parts_results, requests)
|
Send the requests.
We've determined the partition for each message group in the batch,
or got errors for them.
| 5.585799 | 5.348899 | 1.044289 |
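A toy illustration of the grouping step above: messages from different send_messages() calls that resolve to the same (topic, partition) are merged into a single produce payload, so success or failure is reported per topic/partition rather than per call:

from collections import defaultdict

msgs_by_topic_part = defaultdict(list)
for topic_part, msg in [(('t', 0), b'a'), (('t', 0), b'b'), (('t', 1), b'c')]:
    msgs_by_topic_part[topic_part].append(msg)
# -> {('t', 0): [b'a', b'b'], ('t', 1): [b'c']}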
self._batch_send_d = None
self._req_attempts = 0
self._retry_interval = self._init_retry_interval
if isinstance(resp, Failure) and not resp.check(tid_CancelledError,
CancelledError):
log.error("Failure detected in _complete_batch_send: %r\n%r",
resp, resp.getTraceback())
return
|
def _complete_batch_send(self, resp)
|
Complete the processing of our batch send operation.
Clear the deferred tracking our current batch processing and reset
our retry count and retry interval.
Return None to eat any errors coming from up the deferred chain.
| 5.995172 | 4.612844 | 1.299669 |
if (
(self.batch_every_n and self.batch_every_n <= self._waitingMsgCount) or
(self.batch_every_b and self.batch_every_b <= self._waitingByteCount)
):
self._send_batch()
return result
|
def _check_send_batch(self, result=None)
|
Check if we have enough messages/bytes to send.
Since this can be called from the callback chain, we pass through
our first (non-self) arg.
| 5.027057 | 4.589657 | 1.095301 |
# We can be triggered by the LoopingCall, and have nothing to send...
# Or, we've got SendRequest(s) to send, but are still processing the
# previous batch...
if (not self._batch_reqs) or self._batch_send_d:
return
# Save a local copy, and clear the global list & metrics
requests, self._batch_reqs = self._batch_reqs, []
self._waitingByteCount = 0
self._waitingMsgCount = 0
# Iterate over them, fetching the partition for each message batch
d_list = []
for req in requests:
# For each request, we get the topic & key and use that to lookup
# the next partition on which we should produce
d_list.append(self._next_partition(req.topic, req.key))
d = self._batch_send_d = Deferred()
# Since DeferredList doesn't propagate cancel() calls to deferreds it
# might be waiting on for a result, we need to use this structure,
# rather than just using the DeferredList directly
d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True))
d.addCallback(self._send_requests, requests)
# Once we finish fully processing the current batch, clear the
# _batch_send_d and check if any more requests piled up when we
# were busy.
d.addBoth(self._complete_batch_send)
d.addBoth(self._check_send_batch)
# Fire off the callback to start processing...
d.callback(None)
|
def _send_batch(self)
|
Send the waiting messages, if there are any and we can.
This is called by our LoopingCall every batch_every_t interval, and
from :py:meth:`send_messages` via :py:meth:`_check_send_batch`
whenever enough messages or bytes have accumulated to require a send.
Note: the send will be delayed (triggered by completion or failure of
the previous one) if we are currently trying to complete the last
batch send.
| 7.48166 | 7.010306 | 1.067237 |
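The comment above about DeferredList not propagating cancel() is why the batch is driven through an outer Deferred. A stripped-down sketch of that wrapping pattern (function and argument names are illustrative):

from twisted.internet import defer

def run_batch(partition_lookups, send_requests):
    # Outer Deferred so the whole chain is started explicitly and can be
    # tracked and chained; DeferredList gathers every lookup, errors included.
    d = defer.Deferred()
    d.addCallback(lambda _: defer.DeferredList(partition_lookups,
                                               consumeErrors=True))
    d.addCallback(send_requests)
    d.callback(None)
    return d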
# Is the request in question in an unsent batch?
for req in self._batch_reqs:
if req.deferred == d:
# Found the request, remove it and return.
msgs = req.messages
self._waitingMsgCount -= len(msgs)
for m in (_m for _m in msgs if _m is not None):
self._waitingByteCount -= len(m)
# This _should_ be safe as we abort the iteration upon removal
self._batch_reqs.remove(req)
d.errback(CancelledError(request_sent=False))
return
# If it wasn't found in the unsent batch. We just rely on the
# downstream processing of the request to check if the deferred
# has been called and skip further processing for this request
# Errback the deferred with whether or not we sent the request
# to Kafka already
d.errback(
CancelledError(request_sent=(self._batch_send_d is not None)))
return
|
def _cancel_send_messages(self, d)
|
Cancel a `send_messages` request.
First check if the request is in a waiting batch; if so, remove it
from the batch. If it's not found, we errback() the deferred and the
downstream processing steps take care of aborting further processing.
We check whether there's a current _batch_send_d to determine where in
the chain we were (getting partitions, or already sent the request to
Kafka) and errback accordingly.
| 8.075363 | 6.66129 | 1.212282 |
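Caller-side view of that cancellation path, assuming `producer` exposes send_messages() as documented above and that CancelledError is the same class the code raises (its import path is not shown here and is an assumption):

def on_cancelled(failure):
    # request_sent tells us whether the batch had already gone to Kafka.
    print('send cancelled; request_sent=%r' % failure.value.request_sent)

d = producer.send_messages(u'my.topic', msgs=[b'm1'])
d.addErrback(on_cancelled)
d.cancel()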
for d in list(self._outstanding):
d.addErrback(lambda _: None) # Eat any uncaught errors
d.cancel()
|
def _cancel_outstanding(self)
|
Cancel all of our outstanding requests
| 6.724239 | 5.560425 | 1.209303 |
if isinstance(hosts, bytes):
hosts = hosts.split(b',')
elif isinstance(hosts, _unicode):
hosts = hosts.split(u',')
result = set()
for host_port in hosts:
# FIXME This won't handle IPv6 addresses
res = nativeString(host_port).split(':')
host = res[0].strip()
port = int(res[1].strip()) if len(res) > 1 else DefaultKafkaPort
result.add((host, port))
return sorted(result)
|
def _normalize_hosts(hosts)
|
Canonicalize the *hosts* parameter.
>>> _normalize_hosts("host,127.0.0.2:2909")
[('127.0.0.2', 2909), ('host', 9092)]
:param hosts:
A list or comma-separated string of hostnames which may also include
port numbers. All of the following are valid::
b'host'
u'host'
b'host:1234'
u'host:1234,host:2345'
b'host:1234 , host:2345 '
[u'host1', b'host2']
[b'host:1234', b'host:2345']
Hostnames must be ASCII (IDN is not supported). The default Kafka port
of 9092 is implied when no port is given.
:returns: A list of unique (host, port) tuples.
:rtype: :class:`list` of (:class:`str`, :class:`int`) tuples
| 3.543647 | 3.366346 | 1.052669 |
groups = tuple(_coerce_consumer_group(g) for g in groups)
for group in groups:
if group in self.consumer_group_to_brokers:
del self.consumer_group_to_brokers[group]
|
def reset_consumer_group_metadata(self, *groups)
|
Reset the cache of which broker manages the offsets for the specified groups.
Remove the cache of which Kafka broker should be contacted when
fetching or updating the committed offsets for a given consumer
group or groups.
NOTE: Does not cancel any outstanding requests for updates to the
consumer group metadata for the specified groups.
| 3.168483 | 3.736048 | 0.848084 |
self.topics_to_brokers.clear()
self.topic_partitions.clear()
self.topic_errors.clear()
self.consumer_group_to_brokers.clear()
|
def reset_all_metadata(self)
|
Clear all cached metadata.
Metadata will be re-fetched as required to satisfy requests.
| 4.64501 | 4.692133 | 0.989957 |
topic = _coerce_topic(topic)
if topic not in self.topic_partitions:
return False
if not self.topic_partitions[topic]:
# Don't consider an empty partition list 'fully replicated'
return False
return all(
self.partition_fully_replicated(TopicAndPartition(topic, p))
for p in self.topic_partitions[topic]
)
|
def topic_fully_replicated(self, topic)
|
Determine if the given topic is fully replicated according to the
currently known cluster metadata.
.. note::
This relies on cached cluster metadata. You may call
:meth:`load_metadata_for_topics()` first to refresh this cache.
:param str topic: Topic name
:returns:
A boolean indicating that:
1. The number of partitions in the topic is non-zero.
2. For each partition, all replicas are in the in-sync replica
(ISR) set.
:rtype: :class:`bool`
| 3.579759 | 4.001128 | 0.894687 |
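A usage sketch, assuming `client` is an instance of the class these methods belong to and that load_metadata_for_topics() is the refresh call named in the note above:

from twisted.internet import defer

@defer.inlineCallbacks
def is_topic_healthy(client, topic):
    # Refresh the cached cluster metadata first, as the note recommends.
    yield client.load_metadata_for_topics(topic)
    defer.returnValue(client.topic_fully_replicated(topic))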