code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64  (= loss_without_docstring / loss_with_docstring)
with open('src/vegas/__init__.py', 'a') as vfile: vfile.write("\n__version__ = '%s'\n" % VEGAS_VERSION) _build_py.run(self)
def run(self)
Append version number to vegas/__init__.py
6.11985
3.347983
1.827921
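For context, this `run` override only takes effect when wired into a custom `build_py` command in setup.py. A minimal sketch, assuming setuptools and a `VEGAS_VERSION` constant defined elsewhere in the script:

from setuptools import setup
from setuptools.command.build_py import build_py as _build_py

VEGAS_VERSION = '3.0'  # placeholder; normally defined near the top of setup.py

class build_py(_build_py):
    def run(self):
        # Append the version number, then hand off to the normal build step.
        with open('src/vegas/__init__.py', 'a') as vfile:
            vfile.write("\n__version__ = '%s'\n" % VEGAS_VERSION)
        _build_py.run(self)

setup(name='vegas', cmdclass={'build_py': build_py})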
ny = 2000 y = numpy.random.uniform(0., 1., (ny,1)) limit = numpy.arctan(limit) m = AdaptiveMap([[-limit, limit]], ninc=100) theta = numpy.empty(y.shape, float) jac = numpy.empty(y.shape[0], float) for itn in range(10): m.map(y, theta, jac) tan_theta = numpy.tan(theta[:, 0]) x = self.scale * tan_theta fx = (tan_theta ** 2 + 1) * numpy.exp(-(x ** 2) / 2.) m.add_training_data(y, (jac * fx) ** 2) m.adapt(alpha=1.5) return numpy.array(m.grid[0])
def _make_map(self, limit)
Make vegas grid that is adapted to the pdf.
5.929698
5.65065
1.049383
def ff(theta, nopdf=nopdf): tan_theta = numpy.tan(theta) x = self.scale * tan_theta jac = self.scale * (tan_theta ** 2 + 1.) if nopdf: pdf = jac * self.pdf.pjac[None, :] else: pdf = jac * numpy.exp(-(x ** 2) / 2.) / numpy.sqrt(2 * numpy.pi) dp = self.pdf.x2dpflat(x) parg = None ans = None fparg_is_dict = False # iterate through the batch for i, (dpi, pdfi) in enumerate(zip(dp, pdf)): p = self.pdf.meanflat + dpi if parg is None: # first time only if self.pdf.shape is None: parg = _gvar.BufferDict(self.pdf.g, buf=p) else: parg = p.reshape(self.pdf.shape) else: if parg.shape is None: parg.buf = p else: parg.flat[:] = p fparg = 1. if f is None else f(parg) if ans is None: # first time only if hasattr(fparg, 'keys'): fparg_is_dict = True if not isinstance(fparg, _gvar.BufferDict): fparg = _gvar.BufferDict(fparg) ans = _gvar.BufferDict() for k in fparg: ans[k] = numpy.empty( (len(pdf),) + fparg.slice_shape(k)[1], float ) else: if numpy.shape(fparg) == (): ans = numpy.empty(len(pdf), float) else: ans = numpy.empty( (len(pdf),) + numpy.shape(fparg), float ) if fparg_is_dict: prod_pdfi = numpy.prod(pdfi) for k in ans: ans[k][i] = fparg[k] ans[k][i] *= prod_pdfi else: if not isinstance(fparg, numpy.ndarray): fparg = numpy.asarray(fparg) ans[i] = fparg * numpy.prod(pdfi) return ans return ff
def _expval(self, f, nopdf)
Return integrand using the tan mapping.
3.317309
3.321979
0.998594
# Ensure we open the file in binary mode try: with open(filename, 'rb') as f: chunk = f.read(length) return chunk except IOError as e: print(e)
def get_starting_chunk(filename, length=1024)
:param filename: File to open and get the first little chunk of. :param length: Number of bytes to read, default 1024. :returns: Starting chunk of bytes.
3.000275
3.195936
0.938778
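A quick usage sketch (the file path is hypothetical); note the function returns None if an IOError was caught:

# Peek at the first bytes of a file, e.g. to sniff for binary content.
chunk = get_starting_chunk('/etc/hosts', length=512)
if chunk is not None:
    print(len(chunk), chunk[:20])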
assert isinstance(function_name, str) self.stdout.write(function_name) self.stdout.write("\n") self.stdout.write("-" * len(function_name)) self.stdout.write("\n\n") byte_code = self.interpreter.compiled_functions[function_name] self.stdout.write(byte_code.dump()) self.stdout.write("\n")
def dump(self, function_name)
Pretty-dump the bytecode for the function with the given name.
2.863655
2.511884
1.140043
template = os.path.dirname(os.path.abspath(__file__)) + "/app_template" name = options.pop("name") call_command("startapp", name, template=template, **options)
def handle(self, **options)
Call "startapp" to generate an app with a custom user model.
5.069888
3.480939
1.456471
if not email: raise ValueError("The given email must be set") email = self.normalize_email(email) user = self.model(email=email, **extra_fields) user.set_password(password) user.save(using=self._db) return user
def _create_user(self, email, password, **extra_fields)
Create and save a User with the given email and password.
1.520572
1.397515
1.088054
iters = itertools.tee(iterable, n) iters = (itertools.islice(it, i, None) for i, it in enumerate(iters)) return itertools.izip(*iters)
def nwise(iterable, n)
Iterate through a sequence with a sliding window of defined length >>> list(nwise(range(8), 3)) [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6), (5, 6, 7)] >>> list(nwise(range(3), 5)) [] Parameters ---------- iterable n : length of each sequence Yields ------ Tuples of length n
2.416039
3.361118
0.71882
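`itertools.izip` exists only on Python 2; a minimal Python 3 sketch of the same sliding-window idea, using nothing beyond the standard library:

import itertools

def nwise_py3(iterable, n):
    # Tee the iterator n times and offset each copy by its position.
    iters = itertools.tee(iterable, n)
    iters = (itertools.islice(it, i, None) for i, it in enumerate(iters))
    return zip(*iters)

assert list(nwise_py3(range(5), 3)) == [(0, 1, 2), (1, 2, 3), (2, 3, 4)]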
c = [g.node[n]['coordinate'] for n in nodes] return c
def coordinates(g, nodes)
Extract (lon, lat) coordinate pairs from nodes in an osmgraph osmgraph nodes have a 'coordinate' property on each node. This is a shortcut for extracting a coordinate list from an iterable of nodes >>> g = osmgraph.parse_file(filename) >>> node_ids = g.nodes()[:3] # Grab 3 nodes [61341696, 61341697, 61341698] >>> coords = coordinates(g, node_ids) [(-71.0684107, 42.3516822), (-71.133251, 42.350308), (-71.170641, 42.352689)] Parameters ---------- g : networkx graph created with osmgraph nodes : iterable of node ids Returns ------- List of (lon, lat) coordinate pairs
4.376397
9.945977
0.440017
forw = g.successors back = g.predecessors if backward: back, forw = forw, back nodes = forw(n2) if inbound: nodes = set(nodes + back(n2)) candidates = [n for n in nodes if n != n1] if len(candidates) == 1: result = candidates[0] elif continue_fn: result = continue_fn(g, n1, n2, backward) else: result = None return result
def step(g, n1, n2, inbound=False, backward=False, continue_fn=None)
Step along a path through a directed graph unless there is an intersection Example graph: Note that edge (1, 2) and (2, 3) are bidirectional, i.e., (2, 1) and (3, 2) are also edges 1 -- 2 -- 3 -->-- 5 -->-- 7 | | ^ v | | 4 6 >>> step(g, 1, 2) 3 >>> step(g, 3, 5) None >>> step(g, 2, 3) 5 >>> step(g, 2, 3, inbound=True) None >>> step(g, 7, 5, backward=True) 3 >>> def f(g, n1, n2, backward): if n2 == 5: return 7 return None >>> step(g, 3, 5, continue_fn=f) 7 Parameters ---------- g : networkx DiGraph n1 : node id in g n2 : node id in g (n1, n2) must be an edge in g inbound : bool (default False) whether incoming edges should be considered backward : bool (default False) whether edges are in reverse order (i.e., point from n2 to n1) continue_fn : callable (optional) if at an intersection, continue_fn is called to indicate how to proceed continue_fn takes the form: f(g, n1, n2, backward) where all arguments are as passed into step. f should return a node id such that f(g, n1, n2, backward) is a successor of n2. f should return None if no way forward. Returns ------- The next node in the path from n1 to n2. Returns None if there are no edges from n2 or multiple edges from n2
3.367884
3.607502
0.933578
prev = n1 curr = n2 _next = step(g, prev, curr, **kwargs) yield prev yield curr visited_nodes = set([prev, curr]) while _next: yield _next if _next in visited_nodes: return visited_nodes.add(_next) prev = curr curr = _next _next = step(g, prev, curr, **kwargs)
def move(g, n1, n2, **kwargs)
Step along a graph until it ends or reaches an intersection Example graph: Note that edge (1, 2) and (2, 3) are bidirectional, i.e., (2, 1) and (3, 2) are also edges 1 -- 2 -- 3 -->-- 5 -->-- 7 | | ^ v | | 4 6 >>> list(move(g, 1, 2)) [1, 2, 3, 5] # Stops at 5 because you can get to both 6 and 7 from 5 >>> list(move(g, 1, 2, inbound=True)) [1, 2, 3] Parameters ---------- Same as step() Yields ------ Node IDs until either there is no path forward or the path reaches an intersection
2.753869
3.160662
0.871295
return len(set(g.predecessors(n) + g.successors(n))) > 2
def is_intersection(g, n)
Determine if a node is an intersection graph: 1 -->-- 2 -->-- 3 >>> is_intersection(g, 2) False graph: 1 -- 2 -- 3 | 4 >>> is_intersection(g, 2) True Parameters ---------- g : networkx DiGraph n : node id Returns ------- bool
4.325426
6.022377
0.718226
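`g.predecessors(n) + g.successors(n)` assumes an older networkx where both return lists; on networkx 2.x they are iterators, so an equivalent check would look like this sketch:

import networkx as nx

def is_intersection_nx2(g, n):
    # A node is an intersection if more than two distinct neighbors touch it.
    return len(set(g.predecessors(n)) | set(g.successors(n))) > 2

# The second graph from the docstring: 1 -- 2 -- 3 with 4 hanging off 2.
g = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 2), (2, 4), (4, 2)])
print(is_intersection_nx2(g, 2))  # True: neighbors of 2 are {1, 3, 4}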
if not self._is_valid: self.validate() from .converters import to_dict return to_dict(self)
def as_dict(self)
Returns the model as a dict
7.803244
6.012196
1.297902
for node_id, lon, lat in data: self.coords[node_id] = (lon, lat)
def coords_callback(self, data)
Callback for nodes that have no tags
4.968142
4.204343
1.181669
for node_id, tags, coords in data: # Discard the coords because they go into add_coords self.nodes[node_id] = tags
def nodes_callback(self, data)
Callback for nodes with tags
15.450578
14.013352
1.102561
for way_id, tags, nodes in data: # Imposm passes all ways through regardless of whether the tags # have been filtered or not. It needs to do this in order to # handle relations, but we don't care about relations at the # moment. if tags: self.ways[way_id] = (tags, nodes)
def ways_callback(self, data)
Callback for all ways
7.745967
7.754332
0.998921
g = nx.DiGraph() for way_id, (tags, nodes) in self.ways.items(): # If oneway is '-1', reverse the way and treat as a normal oneway if tags.get('oneway') == '-1': nodes = reversed(nodes) tags['oneway'] = 'yes' oneway = tags.get('oneway') == 'yes' for n0, n1 in tools.pairwise(nodes): g.add_edge(n0, n1, attr_dict=tags) if parse_direction: g[n0][n1]['_direction'] = 'forward' if not oneway: g.add_edge(n1, n0, attr_dict=tags) if parse_direction: g[n1][n0]['_direction'] = 'backward' g.node[n0].update(self._node_properties(n0)) g.node[n1].update(self._node_properties(n1)) return g
def get_graph(self, parse_direction=False)
Return the networkx directed graph of received data
2.563941
2.605727
0.983964
importer, parser = make_importer_parser(OSMParser, **kwargs) parser.parse(filename) return importer.get_graph(parse_direction=parse_direction)
def parse_file(filename, parse_direction=False, **kwargs)
Return an OSM networkx graph from the input OSM file Only works with OSM xml, xml.bz2 and pbf files. This function cannot take OSM QA tile files. Use parse_qa_tile() for QA tiles. >>> graph = parse_file(filename)
6.901945
8.243294
0.83728
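Putting the pieces together, a typical session might look like this (the file name is hypothetical, and coordinates() is the helper shown earlier):

import osmgraph

g = osmgraph.parse_file('boston.osm.pbf', parse_direction=True)
first_nodes = list(g.nodes())[:3]
print(coordinates(g, first_nodes))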
suffixes = { 'xml': '.osm', 'pbf': '.pbf', } try: suffix = suffixes[type] except KeyError: raise ValueError('Unknown data type "%s"' % type) fd, filename = tempfile.mkstemp(suffix=suffix) try: os.write(fd, data) os.close(fd) return parse_file(filename, **kwargs) finally: os.remove(filename)
def parse_data(data, type, **kwargs)
Return an OSM networkx graph from the input OSM data Parameters ---------- data : string type : string ('xml' or 'pbf') >>> graph = parse_data(data, 'xml')
2.339536
2.201402
1.062748
import osmqa importer, parser = make_importer_parser(osmqa.QATileParser, **kwargs) parser.parse_data(x, y, zoom, data) return importer.get_graph(parse_direction=parse_direction)
def parse_qa_tile(x, y, zoom, data, parse_direction=False, **kwargs)
Return an OSM networkx graph from the input OSM QA tile data Parameters ---------- data : string x : int tile's x coordinate y : int tile's y coordinate zoom : int tile's zoom level >>> graph = parse_qa_tile(1239, 1514, 12, data)
5.438076
7.074016
0.76874
if not isinstance(fname, Path): fname = Path(fname) path, name, ext = fname.parent, fname.stem, fname.suffix return path, name, ext
def _basename(fname)
Split a file name into its path, base name, and extension.
2.838567
2.794134
1.015902
df = pd.read_csv( fname, skiprows=2, parse_dates=[1], index_col=0, names=["bottle_number", "time", "startscan", "endscan"], ) df._metadata = { "time_of_reset": pd.to_datetime( linecache.getline(str(fname), 2)[6:-1] ).to_pydatetime() } return df
def from_bl(fname)
Read Seabird bottle-trip (bl) file Example ------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> df = ctd.from_bl(str(data_path.joinpath('bl', 'bottletest.bl'))) >>> df._metadata["time_of_reset"] datetime.datetime(2018, 6, 25, 20, 8, 55)
4.642642
4.147286
1.119441
f = _read_file(fname) metadata = _parse_seabird(f.readlines(), ftype="btl") f.seek(0) df = pd.read_fwf( f, header=None, index_col=False, names=metadata["names"], parse_dates=False, skiprows=metadata["skiprows"], ) f.close() # At this point the data frame is not correctly lined up (multiple rows # for avg, std, min, max or just avg, std, etc). # Also needs date,time,and bottle number to be converted to one per line. # Get row types, see what you have: avg, std, min, max or just avg, std. rowtypes = df[df.columns[-1]].unique() # Get times and dates which occur on second line of each bottle. dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True) times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True) datetimes = dates + " " + times # Fill the Date column with datetimes. df.loc[:: len(rowtypes), "Date"] = datetimes.values df.loc[1 :: len(rowtypes), "Date"] = datetimes.values # Fill missing rows. df["Bottle"] = df["Bottle"].fillna(method="ffill") df["Date"] = df["Date"].fillna(method="ffill") df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg name = _basename(fname)[1] dtypes = { "bpos": int, "pumps": bool, "flag": bool, "Bottle": int, "Scan": int, "Statistic": str, "Date": str, } for column in df.columns: if column in dtypes: df[column] = df[column].astype(dtypes[column]) else: try: df[column] = df[column].astype(float) except ValueError: warnings.warn("Could not convert %s to float." % column) df["Date"] = pd.to_datetime(df["Date"]) metadata["name"] = str(name) setattr(df, "_metadata", metadata) return df
def from_btl(fname)
DataFrame constructor to open Seabird CTD BTL-ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
3.93871
4.164494
0.945784
f = _read_file(fname) header, names = [], [] for k, line in enumerate(f.readlines()): line = line.strip() if line.startswith("Serial Number"): serial = line.strip().split(":")[1].strip() elif line.startswith("Latitude"): try: hemisphere = line[-1] lat = line.strip(hemisphere).split(":")[1].strip() lat = np.float_(lat.split()) if hemisphere == "S": lat = -(lat[0] + lat[1] / 60.0) elif hemisphere == "N": lat = lat[0] + lat[1] / 60.0 except (IndexError, ValueError): lat = None elif line.startswith("Longitude"): try: hemisphere = line[-1] lon = line.strip(hemisphere).split(":")[1].strip() lon = np.float_(lon.split()) if hemisphere == "W": lon = -(lon[0] + lon[1] / 60.0) elif hemisphere == "E": lon = lon[0] + lon[1] / 60.0 except (IndexError, ValueError): lon = None else: header.append(line) if line.startswith("Field"): col, unit = [l.strip().lower() for l in line.split(":")] names.append(unit.split()[0]) if line == "// Data": skiprows = k + 1 break f.seek(0) df = pd.read_csv( f, header=None, index_col=None, names=names, skiprows=skiprows, delim_whitespace=True, ) f.close() df.set_index("depth", drop=True, inplace=True) df.index.name = "Depth [m]" name = _basename(fname)[1] metadata = { "lon": lon, "lat": lat, "name": str(name), "header": "\n".join(header), "serial": serial, } setattr(df, "_metadata", metadata) return df
def from_edf(fname)
DataFrame constructor to open XBT EDF ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz')) >>> ax = cast['temperature'].plot_cast()
2.177611
2.329546
0.934779
f = _read_file(fname) metadata = _parse_seabird(f.readlines(), ftype="cnv") f.seek(0) df = pd.read_fwf( f, header=None, index_col=None, names=metadata["names"], skiprows=metadata["skiprows"], delim_whitespace=True, widths=[11] * len(metadata["names"]), ) f.close() key_set = False prkeys = ["prDM", "prdM", "pr"] for prkey in prkeys: try: df.set_index(prkey, drop=True, inplace=True) key_set = True except KeyError: continue if not key_set: raise KeyError( f"Could not find pressure field (supported names are {prkeys})." ) df.index.name = "Pressure [dbar]" name = _basename(fname)[1] dtypes = {"bpos": int, "pumps": bool, "flag": bool} for column in df.columns: if column in dtypes: df[column] = df[column].astype(dtypes[column]) else: try: df[column] = df[column].astype(float) except ValueError: warnings.warn("Could not convert %s to float." % column) metadata["name"] = str(name) setattr(df, "_metadata", metadata) return df
def from_cnv(fname)
DataFrame constructor to open Seabird CTD CNV-ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2')) >>> downcast, upcast = cast.split() >>> ax = downcast['t090C'].plot_cast()
3.574754
4.069109
0.87851
f = _read_file(fname) df = pd.read_csv( f, header="infer", index_col=None, skiprows=skiprows, dtype=float, delim_whitespace=True, ) f.close() df.set_index("PRES", drop=True, inplace=True) df.index.name = "Pressure [dbar]" metadata = {"name": str(fname)} setattr(df, "_metadata", metadata) return df
def from_fsi(fname, skiprows=9)
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz')) >>> downcast, upcast = cast.split() >>> ax = downcast['TEMP'].plot_cast()
3.235551
4.535585
0.71337
ros = from_cnv(fname) ros["pressure"] = ros.index.values.astype(float) ros["nbf"] = ros["nbf"].astype(int) ros.set_index("nbf", drop=True, inplace=True, verify_integrity=False) return ros
def rosette_summary(fname)
Make a BTL (bottle) file from a ROS (bottle log) file. This gives more control over the averaging process and over the step at which the averaging is performed, eliminating the need to read the data into SBE Software again after pre-processing. NOTE: Do not run LoopEdit on the upcast! Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> fname = data_path.joinpath('CTD/g01l01s01.ros') >>> ros = ctd.rosette_summary(fname) >>> ros = ros.groupby(ros.index).mean() >>> ros.pressure.values.astype(int) array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1])
5.037399
5.425026
0.928548
xs, ys = interpolator.x, interpolator.y def pointwise(x): if x < xs[0]: return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0]) elif x > xs[-1]: return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / ( xs[-1] - xs[-2] ) else: return interpolator(x) def ufunclike(xs): return np.array(list(map(pointwise, np.array(xs)))) return ufunclike
def _extrap1d(interpolator)
http://stackoverflow.com/questions/2745329/ How to make scipy.interpolate return an extrapolated result beyond the input range.
1.744368
1.706508
1.022186
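A small check of the linear end-extrapolation, assuming scipy is available; with y = x the extrapolated values should stay on the same line:

import numpy as np
from scipy.interpolate import interp1d

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0, 2.0])
f = _extrap1d(interp1d(x, y))
print(f([-1.0, 0.5, 3.0]))  # ~ [-1.0, 0.5, 3.0]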
from scipy.interpolate import interp1d new_data1 = [] for row in data: mask = ~np.isnan(row) if mask.any(): y = row[mask] if y.size == 1: row = np.repeat(y, len(mask)) else: x = dist[mask] f_i = interp1d(x, y) f_x = _extrap1d(f_i) row = f_x(dist) new_data1.append(row) new_data2 = [] for col in data.T: mask = ~np.isnan(col) if mask.any(): y = col[mask] if y.size == 1: col = np.repeat(y, len(mask)) else: z = depth[mask] f_i = interp1d(z, y) f_z = _extrap1d(f_i) col = f_z(depth) new_data2.append(col) new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2 return new_data
def extrap_sec(data, dist, depth, w1=1.0, w2=0)
Extrapolates `data` to zones where the shallow stations are shadowed by the deep stations. The shadow region usually cannot be extrapolated via linear interpolation. The extrapolation is applied using the gradients of the `data` at a certain level. Parameters ---------- data : array_like Data to be extrapolated dist : array_like Stations distance fd : float Decay factor [0-1] Returns ------- Sec_extrap : array_like Extrapolated variable
1.877222
2.049606
0.915894
import gsw from scipy.interpolate import interp1d h, lon, lat = list(map(np.asanyarray, (h, lon, lat))) # Distance in km. x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3)) h = -gsw.z_from_p(h, lat.mean()) Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1]) xm = np.arange(0, x.max() + dx, dx) hm = Ih(xm) return xm, hm
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False)
Generates a topography mask from an oceanographic transect taking the deepest CTD scan as the depth of each station. Inputs ------ h : array Pressure of the deepest CTD scan for each station [dbar]. lon : array Longitude of each station [decimal degrees east]. lat : array Latitude of each station [decimal degrees north]. dx : float Horizontal resolution of the output arrays [km]. kind : string, optional Type of the interpolation to be performed. See scipy.interpolate.interp1d documentation for details. plot : bool Whether to plot mask for visualization. Outputs ------- xm : array Horizontal distances [km]. hm : array Local depth [m]. Author ------ André Palóczy Filho ([email protected]) -- October/2012
3.17381
3.131004
1.013672
alpha = 0.03 # Thermal anomaly amplitude. beta = 1.0 / 7 # Thermal anomaly time constant (1/beta). sample_interval = 1 / 15.0 a = 2 * alpha / (sample_interval * beta + 2) b = 1 - (2 * a / alpha) dCodT = 0.1 * (1 + 0.006 * (temperature - 20)) dT = np.diff(temperature) ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m] return ctm
def cell_thermal_mass(temperature, conductivity)
Sample interval is measured in seconds. Temperature in degrees. CTM is calculated in S/m.
7.758798
6.516246
1.190685
import gsw sigma_theta = gsw.sigma0(SA, CT) mask = mixed_layer_depth(CT) mld = np.where(mask)[0][-1] sig_surface = sigma_theta[0] sig_bottom_mld = gsw.sigma0(SA[0], CT[mld]) d_sig_t = sig_surface - sig_bottom_mld d_sig = sigma_theta - sig_bottom_mld mask = d_sig < d_sig_t # Barrier layer. return Series(mask, index=SA.index, name="BLT")
def barrier_layer_thickness(SA, CT)
Compute the thickness of water separating the mixed surface layer from the thermocline. A more precise definition would be the difference between the mixed layer depth (MLD) calculated from temperature and the mixed layer depth calculated using density.
5.593716
5.134703
1.089394
ax = kwargs.pop("ax", None) fignums = plt.get_fignums() if ax is None and not fignums: ax = plt.axes() fig = ax.get_figure() fig.set_size_inches((5.25, 6.75)) else: ax = plt.gca() fig = plt.gcf() figsize = kwargs.pop("figsize", fig.get_size_inches()) fig.set_size_inches(figsize) y_inverted = False if not getattr(ax, "y_inverted", False): setattr(ax, "y_inverted", True) y_inverted = True if secondary_y: ax = ax.twiny() xlabel = getattr(df, "name", None) ylabel = getattr(df.index, "name", None) if isinstance(df, pd.DataFrame): labels = label if label else df.columns for k, (col, series) in enumerate(df.iteritems()): ax.plot(series, series.index, label=labels[k]) elif isinstance(df, pd.Series): label = label if label else str(df.name) ax.plot(df.values, df.index, label=label, *args, **kwargs) ax.set_ylabel(ylabel) ax.set_xlabel(xlabel) if y_inverted and not secondary_y: ax.invert_yaxis() return ax
def plot_cast(df, secondary_y=False, label=None, *args, **kwargs)
Plot a CTD variable with the index on the y-axis instead of the x-axis.
2.406472
2.337776
1.029385
shape = data.shape[:-1] + (data.shape[-1] - block + 1, block) strides = data.strides + (data.strides[-1],) return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
def _rolling_window(data, block)
http://stackoverflow.com/questions/4936620/ Using strides for an efficient moving average filter.
1.542286
1.483514
1.039617
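The stride trick turns a 1-D array into a view of overlapping windows without copying; for example, a 3-point moving average:

import numpy as np

data = np.arange(6, dtype=float)    # [0, 1, 2, 3, 4, 5]
windows = _rolling_window(data, 3)  # shape (4, 3), views into data
print(windows.mean(axis=1))         # -> [1. 2. 3. 4.]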
idx = df.index.argmax() + 1 down = df.iloc[:idx] # Reverse index to orient it as a CTD cast. up = df.iloc[idx:][::-1] return down, up
def split(df)
Return a tuple with the down- and up-cast.
9.936947
7.339111
1.353971
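For instance, with pressure as the index (a toy series, not real CTD data):

import pandas as pd

cast = pd.Series([10.0, 11.0, 12.0, 11.5, 10.5], index=[0.0, 5.0, 10.0, 5.0, 0.0])
down, up = split(cast)
print(down.index.tolist())  # [0.0, 5.0, 10.0] -- down to the deepest scan
print(up.index.tolist())    # [0.0, 5.0] -- the upcast, reversed to read top-down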
from scipy import signal # Butter is closer to what SBE is doing with their cosine filter. Wn = (1.0 / time_constant) / (sample_rate * 2.0) b, a = signal.butter(2, Wn, "low") new_df = df.copy() new_df.index = signal.filtfilt(b, a, df.index.values) return new_df
def lp_filter(df, sample_rate=24.0, time_constant=0.15)
Filter a series with `time_constant` (use 0.15 s for pressure), and for a signal of `sample_rate` in Hertz (24 Hz for 911+). NOTE: 911+ systems do not require filtering for temperature or salinity. Examples -------- >>> from pathlib import Path >>> import matplotlib.pyplot as plt >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> raw = ctd.from_cnv(data_path.joinpath('CTD-spiked-unfiltered.cnv.bz2')) >>> prc = ctd.from_cnv(data_path.joinpath('CTD-spiked-filtered.cnv.bz2')) >>> kw = {"sample_rate": 24.0, "time_constant": 0.15} >>> original = prc.index.values >>> unfiltered = raw.index.values >>> filtered = raw.lp_filter(**kw).index.values >>> fig, ax = plt.subplots() >>> l1, = ax.plot(original, 'k', label='original') >>> l2, = ax.plot(unfiltered, 'r', label='unfiltered') >>> l3, = ax.plot(filtered, 'g', label='filtered') >>> leg = ax.legend() Notes ----- https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
4.508939
4.812274
0.936966
new_df = df.copy() press = new_df.copy().index.values ref = press[0] inversions = np.diff(np.r_[press, press[-1]]) < 0 mask = np.zeros_like(inversions) for k, p in enumerate(inversions): if p: ref = press[k] cut = press[k + 1 :] < ref mask[k + 1 :][cut] = True new_df[mask] = np.NaN return new_df
def press_check(df)
Remove pressure reversals from the index.
4.046026
3.726924
1.085621
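A toy illustration of how a pressure reversal gets masked (values are arbitrary):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], index=[0.0, 1.0, 2.0, 1.0, 3.0])
print(press_check(s).values)  # -> [1. 2. 3. nan 5.]; the scan at the reversal is NaN'd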
start = np.floor(df.index[0]) stop = np.ceil(df.index[-1]) new_index = np.arange(start, stop, delta) binned = pd.cut(df.index, bins=new_index) if method == "average": new_df = df.groupby(binned).mean() new_df.index = new_index[:-1] elif method == "interpolate": raise NotImplementedError( "Bin-average via interpolation method is not Implemented yet." ) else: raise ValueError( f"Expected method `average` or `interpolate`, but got {method}." ) return new_df
def bindata(df, delta=1.0, method="average")
Bin average the index (usually pressure) to a given interval (default delta = 1).
2.70209
2.554425
1.057807
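A quick sanity check with a toy profile binned to 1 dbar:

import numpy as np
import pandas as pd

profile = pd.Series(np.arange(10.0), index=np.linspace(0.5, 5.0, 10))
print(bindata(profile, delta=1.0))  # one averaged value per pressure bin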
data = series.values.astype(float).copy() roll = _rolling_window(data, block) roll = ma.masked_invalid(roll) std = n1 * roll.std(axis=1) mean = roll.mean(axis=1) # Use the last value to fill-up. std = np.r_[std, np.tile(std[-1], block - 1)] mean = np.r_[mean, np.tile(mean[-1], block - 1)] mask = np.abs(data - mean.filled(fill_value=np.NaN)) > std.filled( fill_value=np.NaN ) data[mask] = np.NaN # Pass two recompute the mean and std without the flagged values from pass # one and removed the flagged data. roll = _rolling_window(data, block) roll = ma.masked_invalid(roll) std = n2 * roll.std(axis=1) mean = roll.mean(axis=1) # Use the last value to fill-up. std = np.r_[std, np.tile(std[-1], block - 1)] mean = np.r_[mean, np.tile(mean[-1], block - 1)] values = series.values.astype(float) mask = np.abs(values - mean.filled(fill_value=np.NaN)) > std.filled( fill_value=np.NaN ) clean = series.astype(float).copy() clean[mask] = np.NaN return clean
def _despike(series, n1, n2, block, keep)
Wild Edit Seabird-like function. Two passes with standard deviation thresholds `n1` and `n2` over a rolling window of size `block`.
2.488353
2.416451
1.029755
if isinstance(df, pd.Series): new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep) else: new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep) return new_df
def despike(df, n1=2, n2=20, block=100, keep=0)
Wild Edit Seabird-like function. Two passes with standard deviation thresholds `n1` and `n2` over a rolling window of size `block`.
1.641966
1.762097
0.931825
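Typical usage on a noisy series, with the parameter defaults from the signature above:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
noisy = pd.Series(rng.randn(500))
noisy.iloc[250] = 50.0            # inject an obvious spike
clean = despike(noisy, n1=2, n2=20, block=100)
print(np.isnan(clean.iloc[250]))  # -> True, the spike was flagged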
windows = { "flat": np.ones, "hanning": np.hanning, "hamming": np.hamming, "bartlett": np.bartlett, "blackman": np.blackman, } data = series.values.copy() if window_len < 3: return pd.Series(data, index=series.index, name=series.name) if window not in list(windows.keys()): raise ValueError( "window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" ) s = np.r_[ 2 * data[0] - data[window_len:1:-1], data, 2 * data[-1] - data[-1:-window_len:-1], ] w = windows[window](window_len) data = np.convolve(w / w.sum(), s, mode="same") data = data[window_len - 1 : -window_len + 1] return pd.Series(data, index=series.index, name=series.name)
def _smooth(series, window_len, window)
Smooth the data using a window with requested size.
1.794884
1.725495
1.040214
if isinstance(df, pd.Series): new_df = _smooth(df, window_len=window_len, window=window) else: new_df = df.apply(_smooth, window_len=window_len, window=window) return new_df
def smooth(df, window_len=11, window="hanning")
Smooth the data using a window with requested size.
1.977433
1.997176
0.990114
if isinstance(body, str): if self.encoding is None: raise TypeError("Unable to encode string with no encoding set") body = body.encode(self.encoding) cmd = b'put %d %d %d %d\r\n%b' % (priority, delay, ttr, len(body), body) return self._int_cmd(cmd, b'INSERTED')
def put(self, body: Body, priority: int = DEFAULT_PRIORITY, delay: int = DEFAULT_DELAY, ttr: int = DEFAULT_TTR) -> int
Inserts a job into the currently used tube and returns the job ID. :param body: The data representing the job. :param priority: An integer between 0 and 4,294,967,295 where 0 is the most urgent. :param delay: The number of seconds to delay the job for. :param ttr: The maximum number of seconds the job can be reserved for before timing out.
3.815668
3.739923
1.020253
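End-to-end, assuming a beanstalkd server on localhost; the address-tuple constructor follows greenstalk 2.x and may differ in older releases:

import greenstalk

client = greenstalk.Client(('127.0.0.1', 11300))
job_id = client.put('process-order-42', priority=1024, delay=0, ttr=60)
print(job_id)
client.close()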
self._send_cmd(b'use %b' % tube.encode('ascii'), b'USING')
def use(self, tube: str) -> None
Changes the currently used tube. :param tube: The tube to use.
10.389589
12.353234
0.841042
if timeout is None: cmd = b'reserve' else: cmd = b'reserve-with-timeout %d' % timeout return self._job_cmd(cmd, b'RESERVED')
def reserve(self, timeout: Optional[int] = None) -> Job
Reserves a job from a tube on the watch list, giving this client exclusive access to it for the TTR. Returns the reserved job. This blocks until a job is reserved unless a ``timeout`` is given, which will raise a :class:`TimedOutError <greenstalk.TimedOutError>` if a job cannot be reserved within that time. :param timeout: The maximum number of seconds to wait.
4.663896
6.042356
0.771867
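A typical worker loop combining reserve with delete (same client assumptions as above):

import greenstalk

with greenstalk.Client(('127.0.0.1', 11300)) as client:
    while True:
        try:
            job = client.reserve(timeout=5)
        except greenstalk.TimedOutError:
            break  # nothing to do right now
        print('processing', job.id, job.body)
        client.delete(job)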
self._send_cmd(b'delete %d' % _to_id(job), b'DELETED')
def delete(self, job: JobOrID) -> None
Deletes a job. :param job: The job or job ID to delete.
12.048194
12.858903
0.936953
self._send_cmd(b'release %d %d %d' % (job.id, priority, delay), b'RELEASED')
def release(self, job: Job, priority: int = DEFAULT_PRIORITY, delay: int = DEFAULT_DELAY) -> None
Releases a reserved job. :param job: The job to release. :param priority: An integer between 0 and 4,294,967,295 where 0 is the most urgent. :param delay: The number of seconds to delay the job for.
7.144092
6.662315
1.072314
self._send_cmd(b'bury %d %d' % (job.id, priority), b'BURIED')
def bury(self, job: Job, priority: int = DEFAULT_PRIORITY) -> None
Buries a reserved job. :param job: The job to bury. :param priority: An integer between 0 and 4,294,967,295 where 0 is the most urgent.
7.545692
7.492363
1.007118
self._send_cmd(b'touch %d' % job.id, b'TOUCHED')
def touch(self, job: Job) -> None
Refreshes the TTR of a reserved job. :param job: The job to touch.
12.248182
12.276585
0.997686
return self._int_cmd(b'watch %b' % tube.encode('ascii'), b'WATCHING')
def watch(self, tube: str) -> int
Adds a tube to the watch list. Returns the number of tubes this client is watching. :param tube: The tube to watch.
9.334963
12.727768
0.733433
return self._int_cmd(b'ignore %b' % tube.encode('ascii'), b'WATCHING')
def ignore(self, tube: str) -> int
Removes a tube from the watch list. Returns the number of tubes this client is watching. :param tube: The tube to ignore.
13.673149
14.878831
0.918967
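watch and ignore together manage the consumer-side tube set; a common pattern is to watch a specific tube and then drop the default one (continuing the hypothetical client from earlier):

client.watch('emails')    # now watching {'default', 'emails'}
client.ignore('default')  # returns 1: only 'emails' remains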
return self._int_cmd(b'kick %d' % bound, b'KICKED')
def kick(self, bound: int) -> int
Moves delayed and buried jobs into the ready queue and returns the number of jobs affected. Only jobs from the currently used tube are moved. A kick will only move jobs in a single state. If there are any buried jobs, only those will be moved. Otherwise delayed jobs will be moved. :param bound: The maximum number of jobs to kick.
12.083825
16.610004
0.727503
self._send_cmd(b'kick-job %d' % _to_id(job), b'KICKED')
def kick_job(self, job: JobOrID) -> None
Moves a delayed or buried job into the ready queue. :param job: The job or job ID to kick.
9.900994
10.328084
0.958648
return self._stats_cmd(b'stats-job %d' % _to_id(job))
def stats_job(self, job: JobOrID) -> Stats
Returns job statistics. :param job: The job or job ID to return statistics for.
10.427576
14.711091
0.708824
return self._stats_cmd(b'stats-tube %b' % tube.encode('ascii'))
def stats_tube(self, tube: str) -> Stats
Returns tube statistics. :param tube: The tube to return statistics for.
8.995075
12.218058
0.736212
self._send_cmd(b'pause-tube %b %d' % (tube.encode('ascii'), delay), b'PAUSED')
def pause_tube(self, tube: str, delay: int) -> None
Prevents jobs from being reserved from a tube for a period of time. :param tube: The tube to pause. :param delay: The number of seconds to pause the tube for.
5.831149
5.750381
1.014046
if conf: if not os.path.isfile(conf): raise click.exceptions.BadParameter("{} is not a file".format(conf)) try: config.conf.load_config(config_path=conf) except exceptions.ConfigurationException as e: raise click.exceptions.BadParameter(str(e)) twisted_observer = legacy_twisted_log.PythonLoggingObserver() twisted_observer.start() config.conf.setup_logging()
def cli(conf)
The fedora-messaging command line interface.
4.167215
4.215594
0.988524
# The configuration validates these are not null and contain all required keys # when it is loaded. bindings = config.conf["bindings"] queues = config.conf["queues"] # The CLI and config.DEFAULTS have different defaults for the queue # settings at the moment. We should select a universal default in the # future and remove this. Unfortunately that will break backwards compatibility. if queues == config.DEFAULTS["queues"]: queues[config._default_queue_name]["durable"] = True queues[config._default_queue_name]["auto_delete"] = False if queue_name: queues = {queue_name: config.conf["queues"][config._default_queue_name]} for binding in bindings: binding["queue"] = queue_name if exchange: for binding in bindings: binding["exchange"] = exchange if routing_key: for binding in bindings: binding["routing_keys"] = routing_key callback_path = callback or config.conf["callback"] if not callback_path: raise click.ClickException( "A Python path to a callable object that accepts the message must be provided" ' with the "--callback" command line option or in the configuration file' ) try: module, cls = callback_path.strip().split(":") except ValueError: raise click.ClickException( "Unable to parse the callback path ({}); the " 'expected format is "my_package.module:' 'callable_object"'.format(callback_path) ) try: module = importlib.import_module(module) except ImportError as e: provider = "--callback argument" if callback else "configuration file" raise click.ClickException( "Failed to import the callback module ({}) provided in the {}".format( str(e), provider ) ) try: callback = getattr(module, cls) except AttributeError as e: raise click.ClickException( "Unable to import {} ({}); is the package installed? The python path should " 'be in the format "my_package.module:callable_object"'.format( callback_path, str(e) ) ) if app_name: config.conf["client_properties"]["app"] = app_name _log.info("Starting consumer with %s callback", callback_path) try: deferred_consumers = api.twisted_consume( callback, bindings=bindings, queues=queues ) deferred_consumers.addCallback(_consume_callback) deferred_consumers.addErrback(_consume_errback) except ValueError as e: click_version = pkg_resources.get_distribution("click").parsed_version if click_version < pkg_resources.parse_version("7.0"): raise click.exceptions.BadOptionUsage(str(e)) else: raise click.exceptions.BadOptionUsage("callback", str(e)) reactor.run() sys.exit(_exit_code)
def consume(exchange, queue_name, routing_key, callback, app_name)
Consume messages from an AMQP queue using a Python callback.
3.727818
3.698035
1.008054
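The callable referenced by --callback simply accepts one message argument; a minimal example module (path and names hypothetical):

# my_package/consumer.py
def printer(message):
    """Print every message this consumer receives."""
    print(message.topic, message.body)

# invoked as: fedora-messaging consume --callback=my_package.consumer:printer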
global _exit_code if failure.check(exceptions.BadDeclaration): _log.error( "Unable to declare the %s object on the AMQP broker. The " "broker responded with %s. Check permissions for your user.", failure.value.obj_type, failure.value.reason, ) _exit_code = 10 elif failure.check(exceptions.PermissionException): _exit_code = 15 _log.error( "The consumer could not proceed because of a permissions problem: %s", str(failure.value), ) elif failure.check(exceptions.ConnectionException): _exit_code = 14 _log.error(failure.value.reason) else: _exit_code = 11 _log.exception( "An unexpected error (%r) occurred while registering the " "consumer, please report this bug.", failure.value, ) try: reactor.stop() except error.ReactorNotRunning: pass
def _consume_errback(failure)
Handle any errors that occur during consumer registration.
3.904442
3.794833
1.028884
for consumer in consumers: def errback(failure): global _exit_code if failure.check(exceptions.HaltConsumer): _exit_code = failure.value.exit_code if _exit_code: _log.error( "Consumer halted with non-zero exit code (%d): %s", _exit_code, str(failure.value.reason), ) elif failure.check(exceptions.ConsumerCanceled): _exit_code = 12 _log.error( "The consumer was canceled server-side, check with system administrators." ) elif failure.check(exceptions.PermissionException): _exit_code = 15 _log.error( "The consumer could not proceed because of a permissions problem: %s", str(failure.value), ) else: _exit_code = 13 _log.error( "Unexpected error occurred in consumer %r: %r", consumer, failure ) try: reactor.stop() except error.ReactorNotRunning: pass def callback(consumer): _log.info("The %r consumer halted.", consumer) if all([c.result.called for c in consumers]): _log.info("All consumers have stopped; shutting down.") try: # Last consumer out shuts off the lights reactor.stop() except error.ReactorNotRunning: pass consumer.result.addCallbacks(callback, errback)
def _consume_callback(consumers)
Callback when consumers are successfully registered. This simply registers callbacks for each consumer.result deferred object, which fires when the consumer stops. Args: consumers (list of fedora_messaging.api.Consumer): The list of consumers that were successfully created.
3.605334
3.501864
1.029547
try: deferred.addTimeout(timeout, reactor) except AttributeError: # Twisted 12.2 (in EL7) does not have the addTimeout API, so make do with # the slightly more annoying approach of scheduling a call to cancel which # is then canceled if the deferred succeeds before the timeout is up. delayed_cancel = reactor.callLater(timeout, deferred.cancel) def cancel_cancel_call(result): if not delayed_cancel.called: delayed_cancel.cancel() return result deferred.addBoth(cancel_cancel_call)
def _add_timeout(deferred, timeout)
Add a timeout to the given deferred. This is designed to work with both old Twisted and versions of Twisted with the addTimeout API. This is exclusively to support EL7. The deferred will errback with a :class:`defer.CancelledError` if the version of Twisted being used doesn't have the ``defer.Deferred.addTimeout`` API, otherwise it will errback with the normal ``error.TimeoutError``.
5.609558
5.302524
1.057903
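A usage sketch, attaching a five-second timeout to a deferred before waiting on it (plain Twisted, no other assumptions):

from twisted.internet import defer, reactor

d = defer.Deferred()
_add_timeout(d, 5)  # errbacks if nothing fires within 5 seconds
d.addErrback(lambda failure: print('gave up waiting:', failure.type))
reactor.callLater(6, reactor.stop)
reactor.run()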
try: channel = yield self.channel() except pika.exceptions.NoFreeChannels: raise NoFreeChannels() _std_log.debug("Created AMQP channel id %d", channel.channel_number) if self._confirms: yield channel.confirm_delivery() defer.returnValue(channel)
def _allocate_channel(self)
Allocate a new AMQP channel. Raises: NoFreeChannels: If this connection has reached its maximum number of channels.
5.519207
4.577753
1.205659
self._channel = yield self._allocate_channel() if _pika_version < pkg_resources.parse_version("1.0.0b1"): extra_args = dict(all_channels=True) else: extra_args = dict(global_qos=True) yield self._channel.basic_qos( prefetch_count=config.conf["qos"]["prefetch_count"], prefetch_size=config.conf["qos"]["prefetch_size"], **extra_args ) if _pika_version < pkg_resources.parse_version("1.0.0b1"): TwistedProtocolConnection.connectionReady(self, res)
def connectionReady(self, res=None)
Callback invoked when the AMQP connection is ready (when self.ready fires). This API is not meant for users. Args: res: This is an unused argument that provides compatibility with Pika versions lower than 1.0.0.
3.475587
3.55155
0.978611
while consumer._running: try: deferred_get = queue_object.get() _add_timeout(deferred_get, 1) channel, delivery_frame, properties, body = yield deferred_get except (defer.TimeoutError, defer.CancelledError): continue _std_log.debug( "Message arrived with delivery tag %s for %r", delivery_frame.delivery_tag, consumer._tag, ) try: message = get_message(delivery_frame.routing_key, properties, body) message.queue = consumer.queue except ValidationError: _std_log.warning( "Message id %s did not pass validation; ignoring message", properties.message_id, ) yield channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=False ) continue try: _std_log.info( "Consuming message from topic %s (message id %s)", message.topic, properties.message_id, ) yield threads.deferToThread(consumer.callback, message) except Nack: _std_log.warning( "Returning message id %s to the queue", properties.message_id ) yield channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=True ) except Drop: _std_log.warning( "Consumer requested message id %s be dropped", properties.message_id ) yield channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=False ) except HaltConsumer as e: _std_log.info( "Consumer indicated it wishes consumption to halt, shutting down" ) if e.requeue: yield channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=True ) else: yield channel.basic_ack(delivery_tag=delivery_frame.delivery_tag) raise e except Exception as e: _std_log.exception( "Received unexpected exception from consumer %r", consumer ) yield channel.basic_nack(delivery_tag=0, multiple=True, requeue=True) raise e else: _std_log.info( "Successfully consumed message from topic %s (message id %s)", message.topic, properties.message_id, ) yield channel.basic_ack(delivery_tag=delivery_frame.delivery_tag)
def _read(self, queue_object, consumer)
The loop that reads from the message queue and calls the consumer callback wrapper. Serialized Processing --------------------- This loop processes messages serially. This is because a second ``queue_object.get()`` operation can only occur after the Deferred from ``self._on_message`` completes. Thus, we can be sure that callbacks never run concurrently in two different threads. This is done rather than saturating the Twisted thread pool as the documentation for callbacks (in fedmsg and here) has never indicated that they are not thread-safe. In the future we can add a flag for users who are confident in their ability to write thread-safe code. Gracefully Halting ------------------ This is a loop that only exits when the consumer._running variable is set to False. The call to cancel will set this to false, as will the call to :meth:`pauseProducing`. These calls will then wait for the Deferred from this function to call back in order to ensure the message finishes processing. The Deferred object only completes when this method returns, so we need to periodically check the status of consumer._running. That's why there's a short timeout on the call to ``queue_object.get``. queue_object (pika.adapters.twisted_connection.ClosableDeferredQueue): The AMQP queue the consumer is bound to. consumer (dict): A dictionary describing the consumer for the given queue_object.
2.434689
2.389339
1.01898
message.validate() try: yield self._channel.basic_publish( exchange=exchange, routing_key=message._encoded_routing_key, body=message._encoded_body, properties=message._properties, ) except (pika.exceptions.NackError, pika.exceptions.UnroutableError) as e: _std_log.error("Message was rejected by the broker (%s)", str(e)) raise PublishReturned(reason=e) except pika.exceptions.ChannelClosed: self._channel = yield self._allocate_channel() yield self.publish(message, exchange) except pika.exceptions.ConnectionClosed as e: raise ConnectionException(reason=e)
def publish(self, message, exchange)
Publish a :class:`fedora_messaging.message.Message` to an `exchange`_ on the message broker. Args: message (message.Message): The message to publish. exchange (str): The name of the AMQP exchange to publish to Raises: NoFreeChannels: If there are no available channels on this connection. If this occurs, you can either reduce the number of consumers on this connection or create an additional connection. PublishReturned: If the broker rejected the message. This can happen if there are resource limits that have been reached (full disk, for example) or if the message will be routed to 0 queues and the exchange is set to reject such messages. .. _exchange: https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchanges
3.348531
3.078358
1.087765
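This is the protocol-level publish; day-to-day code usually goes through the public wrapper instead. A minimal sketch using the documented fedora_messaging API:

from fedora_messaging import api, message

msg = message.Message(topic='my_app.thing.created', body={'id': 42})
api.publish(msg)  # raises PublishReturned or ConnectionException on failure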
channel = yield self._allocate_channel() try: for exchange in exchanges: args = exchange.copy() args.setdefault("passive", config.conf["passive_declares"]) try: yield channel.exchange_declare(**args) except pika.exceptions.ChannelClosed as e: raise BadDeclaration("exchange", args, e) finally: try: channel.close() except pika.exceptions.AMQPError: pass
def declare_exchanges(self, exchanges)
Declare a number of exchanges at once. This simply wraps the :meth:`pika.channel.Channel.exchange_declare` method and deals with error handling and channel allocation. Args: exchanges (list of dict): A list of dictionaries, where each dictionary represents an exchange. Each dictionary can have the following keys: * exchange (str): The exchange's name * exchange_type (str): The type of the exchange ("direct", "topic", etc) * passive (bool): If true, this will just assert that the exchange exists, but won't create it if it doesn't. Defaults to the configuration value :ref:`conf-passive-declares` * durable (bool): Whether or not the exchange is durable * arguments (dict): Extra arguments for the exchange's creation. Raises: NoFreeChannels: If there are no available channels on this connection. If this occurs, you can either reduce the number of consumers on this connection or create an additional connection. BadDeclaration: If an exchange could not be declared. This can occur if the exchange already exists, but its type does not match (e.g. it is declared as a "topic" exchange, but exists as a "direct" exchange). It can also occur if it does not exist, but the current user does not have permissions to create the object.
4.539551
3.325902
1.364908
channel = yield self._allocate_channel() try: for queue in queues: args = queue.copy() args.setdefault("passive", config.conf["passive_declares"]) try: yield channel.queue_declare(**args) except pika.exceptions.ChannelClosed as e: raise BadDeclaration("queue", args, e) finally: try: channel.close() except pika.exceptions.AMQPError: pass
def declare_queues(self, queues)
Declare a list of queues. Args: queues (list of dict): A list of dictionaries, where each dictionary represents an exchange. Each dictionary can have the following keys: * queue (str): The name of the queue * passive (bool): If true, this will just assert that the queue exists, but won't create it if it doesn't. Defaults to the configuration value :ref:`conf-passive-declares` * durable (bool): Whether or not the queue is durable * exclusive (bool): Whether or not the queue is exclusive to this connection. * auto_delete (bool): Whether or not the queue should be automatically deleted once this connection ends. * arguments (dict): Additional arguments for the creation of the queue. Raises: NoFreeChannels: If there are no available channels on this connection. If this occurs, you can either reduce the number of consumers on this connection or create an additional connection. BadDeclaration: If a queue could not be declared. This can occur if the queue already exists, but its type does not match (e.g. it is declared as a durable queue, but exists as a non-durable queue). It can also occur if it does not exist, but the current user does not have permissions to create the object.
4.767859
3.577562
1.332712
channel = yield self._allocate_channel() try: for binding in bindings: try: yield channel.queue_bind(**binding) except pika.exceptions.ChannelClosed as e: raise BadDeclaration("binding", binding, e) finally: try: channel.close() except pika.exceptions.AMQPError: pass
def bind_queues(self, bindings)
Declare a set of bindings between queues and exchanges. Args: bindings (list of dict): A list of binding definitions. Each dictionary must contain the "queue" key whose value is the name of the queue to create the binding on, as well as the "exchange" key whose value should be the name of the exchange to bind to. Additional acceptable keys are any keyword arguments accepted by :meth:`pika.channel.Channel.queue_bind`. Raises: NoFreeChannels: If there are no available channels on this connection. If this occurs, you can either reduce the number of consumers on this connection or create an additional connection. BadDeclaration: If a binding could not be declared. This can occur if the queue or exchange don't exist, or if they do, but the current user does not have permissions to create bindings.
3.963278
3.479452
1.139052
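The declare/bind helpers all take plain dictionaries mirroring the underlying pika keyword arguments; for example (queue and routing key names hypothetical):

bindings = [
    {
        'queue': 'my_queue',
        'exchange': 'amq.topic',
        'routing_key': 'org.fedoraproject.#',
    },
]
# Inside an inlineCallbacks function on an established protocol:
#     yield protocol.bind_queues(bindings)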
if self.is_closed: # We were asked to stop because the connection is already gone. # There's no graceful way to stop because we can't acknowledge # messages in the middle of being processed. _std_log.info("Disconnect requested, but AMQP connection already gone") self._channel = None return _std_log.info( "Waiting for %d consumer(s) to finish processing before halting", len(self._consumers), ) pending_cancels = [] for c in list(self._consumers.values()): pending_cancels.append(c.cancel()) yield defer.gatherResults(pending_cancels) _std_log.info("Finished canceling %d consumers", len(self._consumers)) try: yield self.close() except pika.exceptions.ConnectionWrongStateError: pass # Already closing, not a problem since that's what we want. self._consumers = {} self._channel = None
def halt(self)
Signal to consumers they should stop after finishing any messages currently being processed, then close the connection. Returns: defer.Deferred: fired when all consumers have successfully stopped and the connection is closed.
4.869051
4.43021
1.099056
while self._running: try: _channel, delivery_frame, properties, body = yield queue_object.get() except (error.ConnectionDone, ChannelClosedByClient): # This is deliberate. _legacy_twisted_log.msg( "Stopping the AMQP consumer with tag {tag}", tag=consumer.tag ) break except pika.exceptions.ChannelClosed as e: _legacy_twisted_log.msg( "Stopping AMQP consumer {tag} for queue {q}: {e}", tag=consumer.tag, q=consumer.queue, e=str(e), logLevel=logging.ERROR, ) break except pika.exceptions.ConsumerCancelled as e: _legacy_twisted_log.msg( "The AMQP broker canceled consumer {tag} on queue {q}: {e}", tag=consumer.tag, q=consumer.queue, e=str(e), logLevel=logging.ERROR, ) break except Exception: _legacy_twisted_log.msg( "An unexpected error occurred consuming from queue {q}", q=consumer.queue, logLevel=logging.ERROR, ) break if body: yield self._on_message(delivery_frame, properties, body, consumer)
def _read(self, queue_object, consumer)
The loop that reads from the message queue and calls the consumer callback wrapper. queue_object (pika.adapters.twisted_connection.ClosableDeferredQueue): The AMQP queue the consumer is bound to. consumer (dict): A dictionary describing the consumer for the given queue_object.
3.021487
2.918944
1.03513
_legacy_twisted_log.msg( "Message arrived with delivery tag {tag} for {consumer}", tag=delivery_frame.delivery_tag, consumer=consumer, logLevel=logging.DEBUG, ) try: message = get_message(delivery_frame.routing_key, properties, body) message.queue = consumer.queue except ValidationError: _legacy_twisted_log.msg( "Message id {msgid} did not pass validation; ignoring message", msgid=properties.message_id, logLevel=logging.WARNING, ) yield consumer.channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=False ) return try: _legacy_twisted_log.msg( "Consuming message from topic {topic!r} (id {msgid})", topic=message.topic, msgid=properties.message_id, ) yield defer.maybeDeferred(consumer.callback, message) except Nack: _legacy_twisted_log.msg( "Returning message id {msgid} to the queue", msgid=properties.message_id, logLevel=logging.WARNING, ) yield consumer.channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=True ) except Drop: _legacy_twisted_log.msg( "Consumer requested message id {msgid} be dropped", msgid=properties.message_id, logLevel=logging.WARNING, ) yield consumer.channel.basic_nack( delivery_tag=delivery_frame.delivery_tag, requeue=False ) except HaltConsumer: _legacy_twisted_log.msg( "Consumer indicated it wishes consumption to halt, shutting down" ) yield consumer.channel.basic_ack(delivery_tag=delivery_frame.delivery_tag) yield self.cancel(consumer.queue) except Exception: _legacy_twisted_log.msg( "Received unexpected exception from consumer {c}", c=consumer, logLevel=logging.ERROR, ) yield consumer.channel.basic_nack( delivery_tag=0, multiple=True, requeue=True ) yield self.cancel(consumer.queue) else: yield consumer.channel.basic_ack(delivery_tag=delivery_frame.delivery_tag)
def _on_message(self, delivery_frame, properties, body, consumer)
Callback when a message is received from the server. This method wraps a user-registered callback for message delivery. It decodes the message body, determines the message schema to validate the message with, and validates the message before passing it on to the user callback. This also handles acking, nacking, and rejecting messages based on exceptions raised by the consumer callback. For detailed documentation on the user-provided callback, see the user guide on consuming. Args: delivery_frame (pika.spec.Deliver): The delivery frame which includes details about the message like content encoding and its delivery tag. properties (pika.spec.BasicProperties): The message properties like the message headers. body (bytes): The message payload. consumer (dict): A dictionary describing the consumer of the message. Returns: Deferred: fired when the message has been handled.
2.200636
2.272509
0.968373
if queue in self._consumers and self._consumers[queue].channel.is_open: consumer = Consumer( tag=self._consumers[queue].tag, queue=queue, callback=callback, channel=self._consumers[queue].channel, ) self._consumers[queue] = consumer defer.returnValue(consumer) channel = yield self._allocate_channel() consumer = Consumer( tag=str(uuid.uuid4()), queue=queue, callback=callback, channel=channel ) self._consumers[queue] = consumer if not self._running: yield self.resumeProducing() defer.returnValue(consumer) queue_object, _ = yield consumer.channel.basic_consume( queue=consumer.queue, consumer_tag=consumer.tag ) deferred = self._read(queue_object, consumer) deferred.addErrback( lambda f: _legacy_twisted_log.msg( "_read failed on consumer {c}", c=consumer, logLevel=logging.ERROR ) ) _legacy_twisted_log.msg("Successfully registered AMQP consumer {c}", c=consumer) defer.returnValue(consumer)
def consume(self, callback, queue)
Register a message consumer that executes the provided callback when messages are received. The queue must exist prior to calling this method. If a consumer already exists for the given queue, the callback is simply updated and any new messages for that consumer use the new callback. If :meth:`resumeProducing` has not been called when this method is called, it will be called for you. Args: callback (callable): The callback to invoke when a message is received. queue (str): The name of the queue to consume from. Returns: fedora_messaging.twisted.protocol.Consumer: A namedtuple that identifies this consumer. Raises: NoFreeChannels: If there are no available channels on this connection. If this occurs, you can either reduce the number of consumers on this connection or create an additional connection.
3.411941
3.058083
1.115713
try: consumer = self._consumers[queue] yield consumer.channel.basic_cancel(consumer_tag=consumer.tag) except pika.exceptions.AMQPChannelError: # Consumers are tied to channels, so if this channel is dead the # consumer should already be canceled (and we can't get to it anyway) pass except KeyError: defer.returnValue(None) try: yield consumer.channel.close() except pika.exceptions.AMQPChannelError: pass del self._consumers[queue]
def cancel(self, queue)
Cancel the consumer for a queue. Args: queue (str): The name of the queue the consumer is subscribed to. Returns: defer.Deferred: A Deferred that fires when the consumer is canceled, or None if the consumer was already canceled. Wrap the call in :func:`.defer.maybeDeferred` to always receive a Deferred.
4.050325
3.975084
1.018928
# Start consuming self._running = True for consumer in self._consumers.values(): queue_object, _ = yield consumer.channel.basic_consume( queue=consumer.queue, consumer_tag=consumer.tag ) deferred = self._read(queue_object, consumer) deferred.addErrback( lambda f, c=consumer: _legacy_twisted_log.msg( "_read failed on consumer {c}", c=c, logLevel=logging.ERROR ) ) _legacy_twisted_log.msg("AMQP connection successfully established")
def resumeProducing(self)
Starts or resumes the retrieval of messages from the server queue.

This method starts receiving messages from the server; they will be
passed to the consumer callback.

.. note:: This is called automatically when :meth:`.consume` is called,
    so users should not need to call this unless :meth:`.pauseProducing`
    has been called.

Returns:
    defer.Deferred: fired when the production is ready to start
6.310614
6.670784
0.946008
if not self._running:
    return
# Exit the read loop and cancel the consumer on the server.
self._running = False
for consumer in self._consumers.values():
    yield consumer.channel.basic_cancel(consumer_tag=consumer.tag)
_legacy_twisted_log.msg("Paused retrieval of messages for the server queue")
def pauseProducing(self)
Pause the reception of messages by canceling all existing consumers.
This does not disconnect from the server.

Message reception can be resumed with :meth:`resumeProducing`.

Returns:
    Deferred: fired when the production is paused.
10.459252
9.892774
1.057262
_legacy_twisted_log.msg("Disconnecting from the AMQP broker") yield self.pauseProducing() yield self.close() self._consumers = {} self._channel = None
def stopProducing(self)
Stop producing messages and disconnect from the server.

Returns:
    Deferred: fired when the production is stopped.
10.251801
10.595655
0.967548
global _registry_loaded
if not _registry_loaded:
    load_message_classes()

try:
    return _schema_name_to_class[schema_name]
except KeyError:
    _log.warning(
        'The schema "%s" is not in the schema registry! Either install '
        "the package with its schema definition or define a schema. "
        "Falling back to the default schema...",
        schema_name,
    )
    return Message
def get_class(schema_name)
Retrieve the message class associated with the schema name.

If no match is found, the default schema is returned and a warning is
logged.

Args:
    schema_name (six.text_type): The name of the :class:`Message`
        sub-class; this is typically the Python path.

Returns:
    Message: A sub-class of :class:`Message` to create the message from.
5.493711
5.27237
1.041981
global _registry_loaded
if not _registry_loaded:
    load_message_classes()

try:
    return _class_to_schema_name[cls]
except KeyError:
    raise TypeError(
        "The class {} is not in the message registry, which indicates it is"
        ' not in the current list of entry points for "fedora_messaging".'
        " Please check that the class has been added to your package's"
        " entry points.".format(repr(cls))
    )
def get_name(cls)
Retrieve the schema name associated with a message class.

Returns:
    str: The schema name.

Raises:
    TypeError: If the message class isn't registered. Check your entry
        point for correctness.
6.961821
5.789557
1.202479
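Taken together, get_class and get_name are inverse lookups over the same registry. A minimal sketch of the expected round trip, assuming fedora_messaging's own entry points are installed so the base schema name "base.message" is registered:

from fedora_messaging.message import Message, get_class, get_name

# Known names resolve to their registered class, and back again.
assert get_class("base.message") is Message
assert get_name(Message) == "base.message"

# Unknown names fall back to Message (with a logged warning) instead of raising.
assert get_class("no.such.schema") is Message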
for message in pkg_resources.iter_entry_points("fedora.messages"):
    cls = message.load()
    _log.info(
        "Registering the '%s' key as the '%r' class in the Message "
        "class registry",
        message.name,
        cls,
    )
    _schema_name_to_class[message.name] = cls
    _class_to_schema_name[cls] = message.name
global _registry_loaded
_registry_loaded = True
def load_message_classes()
Load the 'fedora.messages' entry points and register the message classes.
4.392367
3.559053
1.234139
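For context, a schema package makes itself visible to load_message_classes by declaring an entry point in the "fedora.messages" group. A minimal sketch of such a setup.py, with hypothetical package and class names:

from setuptools import setup

setup(
    name="mypackage-messages",  # hypothetical schema package
    packages=["mypackage_messages"],
    entry_points={
        "fedora.messages": [
            # registry key = Python path to the Message sub-class
            "mypackage.greeting.v1=mypackage_messages.schema:GreetingV1",
        ]
    },
)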
if properties.headers is None:
    _log.error(
        "Message (body=%r) arrived without headers. "
        "A publisher is misbehaving!",
        body,
    )
    properties.headers = {}
try:
    MessageClass = get_class(properties.headers["fedora_messaging_schema"])
except KeyError:
    _log.error(
        "Message (headers=%r, body=%r) arrived without a schema header."
        " A publisher is misbehaving!",
        properties.headers,
        body,
    )
    MessageClass = Message

try:
    severity = properties.headers["fedora_messaging_severity"]
except KeyError:
    _log.error(
        "Message (headers=%r, body=%r) arrived without a severity."
        " A publisher is misbehaving! Defaulting to INFO.",
        properties.headers,
        body,
    )
    severity = INFO

if properties.content_encoding is None:
    _log.error("Message arrived without a content encoding")
    properties.content_encoding = "utf-8"
try:
    body = body.decode(properties.content_encoding)
except UnicodeDecodeError as e:
    _log.error(
        "Unable to decode message body %r with %s content encoding",
        body,
        properties.content_encoding,
    )
    raise ValidationError(e)

try:
    body = json.loads(body)
except ValueError as e:
    _log.error("Failed to load message body %r, %r", body, e)
    raise ValidationError(e)

message = MessageClass(
    body=body, topic=routing_key, properties=properties, severity=severity
)
try:
    message.validate()
    _log.debug("Successfully validated message %r", message)
except jsonschema.exceptions.ValidationError as e:
    _log.error("Message validation of %r failed: %r", message, e)
    raise ValidationError(e)
return message
def get_message(routing_key, properties, body)
Construct a Message instance given the routing key, the properties and
the body received from the AMQP broker.

Args:
    routing_key (str): The AMQP routing key (will become the message topic)
    properties (pika.BasicProperties): the AMQP properties
    body (bytes): The encoded message body

Raises:
    ValidationError: If message validation failed or the message body
        could not be decoded or loaded.
2.409034
2.413628
0.998097
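A sketch of feeding get_message a hand-built delivery, for example in a unit test; the topic and header values are illustrative and assume the default "base.message" schema is registered:

import pika
from fedora_messaging.message import get_message

properties = pika.BasicProperties(
    content_encoding="utf-8",
    message_id="abc123",
    headers={
        "fedora_messaging_schema": "base.message",
        "fedora_messaging_severity": 20,  # INFO
        "sent-at": "2019-01-01T00:00:00+00:00",
    },
)
msg = get_message("example.topic", properties, b'{"hello": "world"}')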
serialized_messages = []
try:
    for message in messages:
        message_dict = message._dump()
        serialized_messages.append(message_dict)
except AttributeError:
    _log.error("Improper object for message serialization.")
    raise TypeError("Messages must be instances of the Message class or a subclass.")
return json.dumps(serialized_messages, sort_keys=True)
def dumps(messages)
Serialize messages to a JSON formatted str.

Args:
    messages (list): The list of messages to serialize. Each message in
        the list must be an instance of :class:`Message` or a subclass.

Returns:
    str: Serialized messages.

Raises:
    TypeError: If at least one message is not an instance of the Message
        class or a subclass.
4.213396
4.410065
0.955404
try:
    messages_dicts = json.loads(serialized_messages)
except ValueError:
    _log.error("Loading serialized messages failed.")
    raise

messages = []
for message_dict in messages_dicts:
    try:
        headers = message_dict["headers"]
    except KeyError:
        _log.error("Message saved without headers.")
        raise
    try:
        MessageClass = get_class(headers["fedora_messaging_schema"])
    except KeyError:
        _log.error("Message (headers=%r) saved without a schema header.", headers)
        raise
    try:
        body = message_dict["body"]
    except KeyError:
        _log.error("Message saved without body.")
        raise
    try:
        id = message_dict["id"]
    except KeyError:
        _log.error("Message saved without id.")
        raise
    try:
        queue = message_dict["queue"]
    except KeyError:
        _log.warning("Message saved without queue.")
        queue = None
    try:
        topic = message_dict["topic"]
    except KeyError:
        _log.error("Message saved without topic.")
        raise
    try:
        severity = headers["fedora_messaging_severity"]
    except KeyError:
        _log.error("Message saved without a severity.")
        raise

    message = MessageClass(
        body=body, topic=topic, headers=headers, severity=severity
    )
    try:
        message.validate()
        _log.debug("Successfully validated message %r", message)
    except jsonschema.exceptions.ValidationError as e:
        _log.error("Message validation of %r failed: %r", message, e)
        raise ValidationError(e)
    message.queue = queue
    message.id = id
    messages.append(message)

return messages
def loads(serialized_messages)
Deserialize messages from a JSON formatted str.

Args:
    serialized_messages (str): The JSON string of serialized messages.

Returns:
    list: Deserialized message objects.

Raises:
    ValidationError: If deserialized message validation failed.
    KeyError: If serialized_messages aren't properly serialized.
    ValueError: If serialized_messages is not valid JSON.
2.265333
2.303822
0.983294
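dumps and loads are designed to round-trip: loads rebuilds each message from the dictionary produced by Message._dump. A minimal sketch, with an illustrative topic and body:

from fedora_messaging import api, message

original = [api.Message(topic="example.topic", body={"hello": "world"})]
restored = message.loads(message.dumps(original))
assert restored[0].topic == "example.topic"
assert restored[0].body == {"hello": "world"}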
headers = {}
for user in self.usernames:
    headers["fedora_messaging_user_{}".format(user)] = True
for package in self.packages:
    headers["fedora_messaging_rpm_{}".format(package)] = True
for container in self.containers:
    headers["fedora_messaging_container_{}".format(container)] = True
for module in self.modules:
    headers["fedora_messaging_module_{}".format(module)] = True
for flatpak in self.flatpaks:
    headers["fedora_messaging_flatpak_{}".format(flatpak)] = True
return headers
def _filter_headers(self)
Add headers designed for filtering messages based on objects.

Returns:
    dict: Filter-related headers to be combined with the existing headers
2.599826
2.636818
0.985971
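An illustrative input/output pair for _filter_headers: a schema whose usernames and packages properties report ["alice"] and ["nethack"] would produce flat boolean headers that AMQP headers exchanges can match on.

# Hypothetical result for usernames == ["alice"], packages == ["nethack"],
# and empty containers/modules/flatpaks:
expected_headers = {
    "fedora_messaging_user_alice": True,
    "fedora_messaging_rpm_nethack": True,
}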
topic = self.topic
if config.conf["topic_prefix"]:
    topic = ".".join((config.conf["topic_prefix"].rstrip("."), topic))
return topic.encode("utf-8")
def _encoded_routing_key(self)
The encoded routing key used to publish the message on the broker.
4.980049
4.057231
1.22745
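The property above is small enough to mirror in a standalone sketch, which makes the prefix handling concrete (the prefix value is illustrative):

def encoded_routing_key(topic, prefix):
    # Mirror of the property: join a non-empty prefix (sans trailing dot)
    # with the topic, then encode for the wire.
    if prefix:
        topic = ".".join((prefix.rstrip("."), topic))
    return topic.encode("utf-8")

assert (
    encoded_routing_key("copr.build.end", "org.fedoraproject.")
    == b"org.fedoraproject.copr.build.end"
)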
for schema in (self.headers_schema, Message.headers_schema):
    _log.debug(
        'Validating message headers "%r" with schema "%r"',
        self._headers,
        schema,
    )
    jsonschema.validate(self._headers, schema)
for schema in (self.body_schema, Message.body_schema):
    _log.debug(
        'Validating message body "%r" with schema "%r"', self.body, schema
    )
    jsonschema.validate(self.body, schema)
def validate(self)
Validate the headers and body with the message schema, if any.

In addition to the user-provided schema, all messages are checked
against the base schema which requires certain message headers and that
the body be a JSON object.

.. warning:: This method should not be overridden by sub-classes.

Raises:
    jsonschema.ValidationError: If either the message headers or the
        message body are invalid.
    jsonschema.SchemaError: If either the message header schema or the
        message body schema are invalid.
2.74684
2.446898
1.122581
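Because validate always checks the base schemas in addition to the sub-class schemas, a sub-class only needs to supply its own body_schema. A minimal sketch with hypothetical class and field names:

from fedora_messaging import message

class GreetingV1(message.Message):
    # Hypothetical schema; the base Message schemas are still applied.
    body_schema = {
        "id": "http://example.com/schemas/greeting.v1",
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "object",
        "required": ["greeting"],
        "properties": {"greeting": {"type": "string"}},
    }

GreetingV1(body={"greeting": "hello"}).validate()  # passes both schema layers
# GreetingV1(body={}).validate() would raise jsonschema.ValidationError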
return {
    "topic": self.topic,
    "headers": self._headers,
    "id": self.id,
    "body": self.body,
    "queue": self.queue,
}
def _dump(self)
Dump message attributes.

Returns:
    dict: A dictionary of message attributes.
4.103252
3.984909
1.029698
# If the callback is a class, create an instance of it first
if inspect.isclass(callback):
    callback_object = callback()
    if not callable(callback_object):
        raise ValueError(
            "Callback must be a class that implements __call__ or a function."
        )
elif callable(callback):
    callback_object = callback
else:
    raise ValueError(
        "Callback must be a class that implements __call__ or a function."
    )
return callback_object
def _check_callback(callback)
Turns a callback that is potentially a class into a callable object.

Args:
    callback (object): An object that might be a class, method, or
        function. If the object is a class, this creates an instance of
        it.

Raises:
    ValueError: If an instance can't be created or it isn't a callable
        object.
    TypeError: If the class requires arguments to be instantiated.

Returns:
    callable: A callable object suitable for use as the consumer callback.
2.477363
2.535154
0.977204
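To make the two accepted shapes concrete, here is a sketch of both callback styles passing through the helper above (names are illustrative):

class SaveToDatabase:
    # A class-based callback: _check_callback instantiates it once, then
    # the instance is invoked for every message.
    def __call__(self, message):
        print("storing", message.id)

def log_message(message):
    print(message)

consumer_cb = _check_callback(SaveToDatabase)  # an instance of SaveToDatabase
plain_cb = _check_callback(log_message)        # returned unchanged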
if isinstance(bindings, dict):
    bindings = [bindings]
callback = _check_callback(callback)

global _twisted_service
if _twisted_service is None:
    _twisted_service = service.FedoraMessagingServiceV2(config.conf["amqp_url"])
    reactor.callWhenRunning(_twisted_service.startService)
    # Twisted is killing the underlying connection before stopService gets
    # called, so we need to add it as a pre-shutdown event to gracefully
    # finish up messages in progress.
    reactor.addSystemEventTrigger(
        "before", "shutdown", _twisted_service.stopService
    )
return _twisted_service._service.factory.consume(callback, bindings, queues)
def twisted_consume(callback, bindings=None, queues=None)
Start a consumer using the provided callback and run it using the Twisted
event loop (reactor).

.. note:: Callbacks run in a Twisted-managed thread pool using the
    :func:`twisted.internet.threads.deferToThread` API to avoid them
    blocking the event loop. If you wish to use Twisted APIs in your
    callback you must use the
    :func:`twisted.internet.threads.blockingCallFromThread` or
    :class:`twisted.internet.interfaces.IReactorFromThreads` APIs.

This API expects the caller to start the reactor.

Args:
    callback (callable): A callable object that accepts one positional
        argument, a :class:`.Message` or a class object that implements
        the ``__call__`` method. The class will be instantiated before
        use.
    bindings (dict or list of dict): Bindings to declare before
        consuming. This should be the same format as the
        :ref:`conf-bindings` configuration.
    queues (dict): The queue to declare and consume from. Each key in
        this dictionary should be a queue name to declare, and each value
        should be a dictionary with the "durable", "auto_delete",
        "exclusive", and "arguments" keys.

Returns:
    twisted.internet.defer.Deferred: A deferred that fires with the list
        of one or more :class:`.Consumer` objects. Each consumer object
        has a :attr:`.Consumer.result` instance variable that is a
        Deferred that fires or errors when the consumer halts. Note that
        this API is meant to survive network problems, so consuming will
        continue until :meth:`.Consumer.cancel` is called or a fatal
        server error occurs.

        The deferred returned by this function may error back with a
        :class:`fedora_messaging.exceptions.BadDeclaration` if queues or
        bindings cannot be declared on the broker, a
        :class:`fedora_messaging.exceptions.PermissionException` if the
        user doesn't have access to the queue, or
        :class:`fedora_messaging.exceptions.ConnectionException` if the
        TLS or AMQP handshake fails.
5.36974
5.366867
1.000535
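A minimal end-to-end sketch of the intended usage, where the caller starts the reactor themselves; the queue name and binding are illustrative:

from twisted.internet import reactor
from fedora_messaging import api

def callback(message):
    print(message)

bindings = [{"exchange": "amq.topic", "queue": "demo", "routing_keys": ["#"]}]
queues = {
    "demo": {"durable": False, "auto_delete": True, "exclusive": False, "arguments": {}}
}

# Fires with a list of Consumer objects once the consumers are registered.
api.twisted_consume(callback, bindings=bindings, queues=queues)
reactor.run()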
if isinstance(bindings, dict):
    bindings = [bindings]

if bindings is None:
    bindings = config.conf["bindings"]
else:
    try:
        config.validate_bindings(bindings)
    except exceptions.ConfigurationException as e:
        raise ValueError(e.message)

if queues is None:
    queues = config.conf["queues"]
else:
    try:
        config.validate_queues(queues)
    except exceptions.ConfigurationException as e:
        raise ValueError(e.message)

session = _session.ConsumerSession()
session.consume(callback, bindings=bindings, queues=queues)
def consume(callback, bindings=None, queues=None)
Start a message consumer that executes the provided callback when messages
are received.

This API is blocking and will not return until the process receives a
signal from the operating system.

.. warning:: This API runs the callback in the IO loop thread. This means
    if your callback could run for a length of time near the heartbeat
    interval, which is likely on the order of 60 seconds, the broker will
    kill the TCP connection and the message will be re-delivered on
    start-up.

    For now, use the :func:`twisted_consume` API which runs the callback
    in a thread and continues to handle AMQP events while the callback
    runs if you have a long-running callback.

The callback receives a single positional argument, the message:

>>> from fedora_messaging import api
>>> def my_callback(message):
...     print(message)
>>> bindings = [{'exchange': 'amq.topic', 'queue': 'demo', 'routing_keys': ['#']}]
>>> queues = {
...     "demo": {"durable": False, "auto_delete": True, "exclusive": True, "arguments": {}}
... }
>>> api.consume(my_callback, bindings=bindings, queues=queues)

If the bindings and queue arguments are not provided, they will be loaded
from the configuration.

For complete documentation on writing consumers, see the :ref:`consumers`
documentation.

Args:
    callback (callable): A callable object that accepts one positional
        argument, a :class:`Message` or a class object that implements
        the ``__call__`` method. The class will be instantiated before
        use.
    bindings (dict or list of dict): Bindings to declare before
        consuming. This should be the same format as the
        :ref:`conf-bindings` configuration.
    queues (dict): The queue or queues to declare and consume from. This
        should be in the same format as the :ref:`conf-queues`
        configuration dictionary where each key is a queue name and each
        value is a dictionary of settings for that queue.

Raises:
    fedora_messaging.exceptions.HaltConsumer: If the consumer requests
        that it be stopped.
    ValueError: If the provided callback is not a function or a class
        that implements ``__call__``, if the bindings argument is not a
        dict or list of dicts with the proper keys, or if the queues
        argument isn't a dict with the proper keys.
2.51424
2.392047
1.051083
pre_publish_signal.send(publish, message=message)

if exchange is None:
    exchange = config.conf["publish_exchange"]

global _session_cache
if not hasattr(_session_cache, "session"):
    _session_cache.session = _session.PublisherSession()

try:
    _session_cache.session.publish(message, exchange=exchange)
    publish_signal.send(publish, message=message)
except exceptions.PublishException as e:
    publish_failed_signal.send(publish, message=message, reason=e)
    raise
def publish(message, exchange=None)
Publish a message to an exchange.

This is a synchronous call, meaning that when this function returns, an
acknowledgment has been received from the message broker and you can be
certain the message was published successfully.

There are some cases where an error occurs despite your message being
successfully published. For example, if a network partition occurs after
the message is received by the broker. Therefore, you may publish
duplicate messages. For complete details, see the :ref:`publishing`
documentation.

>>> from fedora_messaging import api
>>> message = api.Message(body={'Hello': 'world'}, topic='Hi')
>>> api.publish(message)

If an attempt to publish fails because the broker rejects the message, it
is not retried. Connection attempts to the broker can be configured using
the "connection_attempts" and "retry_delay" options in the broker URL.
See :class:`pika.connection.URLParameters` for details.

Args:
    message (message.Message): The message to publish.
    exchange (str): The name of the AMQP exchange to publish to; defaults
        to :ref:`conf-publish-exchange`

Raises:
    fedora_messaging.exceptions.PublishReturned: Raised if the broker
        rejects the message.
    fedora_messaging.exceptions.ConnectionException: Raised if a
        connection error occurred before the publish confirmation arrived.
    fedora_messaging.exceptions.ValidationError: Raised if the message
        fails validation with its JSON schema. This only depends on the
        message you are trying to send, the AMQP server is not involved.
3.446228
3.972722
0.867473
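Since a ConnectionException means the confirmation never arrived (not that the message was rejected), a caller may choose to retry such failures. A sketch of that pattern, with an illustrative topic:

from fedora_messaging import api, exceptions

msg = api.Message(topic="example.topic", body={"hello": "world"})
for attempt in range(3):
    try:
        api.publish(msg)
        break
    except exceptions.ConnectionException:
        # The broker may or may not have gotten the message; retrying can
        # produce duplicates, which consumers should tolerate.
        continue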
self.resetDelay()
self.client = self.protocol(self._parameters)
self.client.factory = self
self.client.ready.addCallback(lambda _: self._on_client_ready())
return self.client
def buildProtocol(self, addr)
Create the Protocol instance. See the documentation of `twisted.internet.protocol.ReconnectingClientFactory` for details.
5.601241
5.038573
1.111672
_legacy_twisted_log.msg("Successfully connected to the AMQP broker.") yield self.client.resumeProducing() yield self.client.declare_exchanges(self.exchanges) yield self.client.declare_queues(self.queues) yield self.client.bind_queues(self.bindings) for queue, callback in self.consumers.items(): yield self.client.consume(callback, queue) _legacy_twisted_log.msg("Successfully declared all AMQP objects.") self._client_ready.callback(None)
def _on_client_ready(self)
Called when the client is ready to send and receive messages.
4.365515
4.264399
1.023712
if not isinstance(reason.value, error.ConnectionDone):
    _legacy_twisted_log.msg(
        "Lost connection to the AMQP broker ({reason})",
        reason=reason.value,
        logLevel=logging.WARNING,
    )
if self._client_ready.called:
    # Renew the ready deferred, it will callback when the
    # next connection is ready.
    self._client_ready = defer.Deferred()
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionLost(self, connector, reason)
Called when the connection to the broker has been lost. See the documentation of `twisted.internet.protocol.ReconnectingClientFactory` for details.
5.444028
5.380101
1.011882
_legacy_twisted_log.msg(
    "Connection to the AMQP broker failed ({reason})",
    reason=reason.value,
    logLevel=logging.WARNING,
)
protocol.ReconnectingClientFactory.clientConnectionFailed(
    self, connector, reason
)
def clientConnectionFailed(self, connector, reason)
Called when the client has failed to connect to the broker. See the documentation of `twisted.internet.protocol.ReconnectingClientFactory` for details.
6.408596
6.413316
0.999264
protocol.ReconnectingClientFactory.stopTrying(self)
if not self._client_ready.called:
    self._client_ready.errback(
        pika.exceptions.AMQPConnectionError(
            u"Could not connect, reconnection cancelled."
        )
    )
def stopTrying(self)
Stop trying to reconnect to the broker. See the documentation of `twisted.internet.protocol.ReconnectingClientFactory` for details.
5.415424
5.364187
1.009552
if self.client:
    yield self.client.stopProducing()
protocol.ReconnectingClientFactory.stopFactory(self)
def stopFactory(self)
Stop the factory. See the documentation of `twisted.internet.protocol.ReconnectingClientFactory` for details.
7.339528
5.325611
1.378157
self.consumers[queue] = callback
if self._client_ready.called:
    return self.client.consume(callback, queue)
def consume(self, callback, queue)
Register a new consumer.

This consumer will be configured for every protocol this factory
produces so it will be reconfigured on network failures. If a connection
is already active, the consumer will be added to it.

Args:
    callback (callable): The callback to invoke when a message arrives.
    queue (str): The name of the queue to consume from.
7.695776
9.911072
0.776483
try:
    del self.consumers[queue]
except KeyError:
    pass
if self.client:
    return self.client.cancel(queue)
def cancel(self, queue)
Cancel the consumer for a queue.

This removes the consumer from the list of consumers to be configured
for every connection.

Args:
    queue (str): The name of the queue the consumer is subscribed to.

Returns:
    defer.Deferred or None: Either a Deferred that fires when the
        consumer is canceled, or None if the consumer was already
        canceled. Wrap the call in :func:`defer.maybeDeferred` to always
        receive a Deferred.
4.004139
3.94019
1.01623
exchange = exchange or config.conf["publish_exchange"]
while True:
    client = yield self.whenConnected()
    try:
        yield client.publish(message, exchange)
        break
    except ConnectionException:
        continue
def publish(self, message, exchange=None)
Publish a :class:`fedora_messaging.message.Message` to an `exchange`_ on
the message broker. This call will survive connection failures and try
until it succeeds or is canceled.

Args:
    message (message.Message): The message to publish.
    exchange (str): The name of the AMQP exchange to publish to; defaults
        to :ref:`conf-publish-exchange`

Returns:
    defer.Deferred: A deferred that fires when the message is published.

Raises:
    PublishReturned: If the published message is rejected by the broker.
    ConnectionException: If a connection error occurs while publishing.
        Calling this method again will wait for the next connection and
        publish when it is available.

.. _exchange: https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchanges
7.292766
5.960085
1.223601
self._client = self.protocol(self._parameters, confirms=self.confirms)
self._client.factory = self

@defer.inlineCallbacks
def on_ready(unused_param=None):
    _std_log.debug("AMQP handshake completed; connection ready for use")
    self.resetDelay()
    self._client_deferred.callback(self._client)
    self._client_deferred = defer.Deferred()

    # Restart any consumer from previous connections that wasn't canceled,
    # including queues and bindings, as the queue might not have been durable
    for consumer, queue, bindings in self._consumers.values():
        _std_log.info("Re-registering the %r consumer", consumer)
        yield self._client.declare_queues([queue])
        yield self._client.bind_queues(bindings)
        yield self._client.consume(consumer.callback, consumer.queue, consumer)

def on_ready_errback(failure):
    if failure.check(pika.exceptions.AMQPConnectionError, error.ConnectionDone):
        # In this case the connection failed to open. This will be called
        # if the TLS handshake goes wrong (likely) and it may be called if
        # the AMQP handshake fails. It's *probably* a problem with the
        # credentials.
        msg = (
            "The TCP connection appears to have started, but the TLS or AMQP handshake "
            "with the broker failed; check your connection and authentication "
            "parameters and ensure your user has permission to access the vhost"
        )
        wrapped_failure = Failure(
            exc_value=ConnectionException(reason=msg, original=failure),
            exc_type=ConnectionException,
        )
        self._client_deferred.errback(wrapped_failure)
    else:
        # Some unexpected exception happened
        _std_log.exception(
            "The connection failed with an unexpected exception; please report this bug."
        )

self._client.ready.addCallback(on_ready)
self._client.ready.addErrback(on_ready_errback)
return self._client
def buildProtocol(self, addr)
Create the Protocol instance. See the documentation of `twisted.internet.protocol.ReconnectingClientFactory` for details.
6.130347
5.991855
1.023113