index | package | name | docstring | code | signature
---|---|---|---|---|---
51,766 | dictknife.accessing | maybe_remove
signature: (self, d, path)
docstring: null

def maybe_remove(self, d, path):
    container = self.maybe_access_container(d, path)
    if container is not None:
        container.pop(path[-1])

51,767 | dictknife.operators | And
signature: (args)
docstring: null

class And(object):
    __repr__ = repr

    def __init__(self, args):
        self.args = args

    def __call__(self, v, *args):
        for e in self.args:
            if not apply(e, v, *args):
                return False
        return True

51,768 | dictknife.operators | __call__
signature: (self, v, *args)
docstring: null

def __call__(self, v, *args):
    for e in self.args:
        if not apply(e, v, *args):
            return False
    return True

51,769 | dictknife.operators | __init__
signature: (self, args)
docstring: null

def __init__(self, args):
    self.args = args

51,770 | dictknife.operators | repr
signature: (self)
docstring: null

def repr(self):
    return "<{self.__class__.__name__} args={args!r}>".format(self=self, args=self.args)

51,771 | dictknife.operators | Any
signature: ()
docstring: null

class Any(object):
    def __repr__(self):
        return "<{self.__class__.__name__}>".format(self=self)

    def __call__(self, v, *args):
        return True

51,772 | dictknife.operators | __call__
signature: (self, v, *args)
docstring: null

def __call__(self, v, *args):
    return True

51,773 | dictknife.operators | __repr__
signature: (self)
docstring: null

def __repr__(self):
    return "<{self.__class__.__name__}>".format(self=self)

51,774 | dictknife.walkers | DictWalker
signature: (qs, handler=None, context_factory=None)
docstring: null

class DictWalker(object):
    context_factory = PathContext
    handler_factory = ContainerHandler

    def __init__(self, qs, handler=None, context_factory=None):
        self.qs = qs
        self.context_factory = context_factory or self.__class__.context_factory
        self.handler = handler or self.__class__.handler_factory()

    def on_found(self, ctx, d, k):
        yield self.handler(self, ctx, d, k)

    def create_context(self, ctx=None):
        return ctx or self.context_factory()

    def walk(self, d, qs=None, depth=-1, ctx=None):
        qs = qs or self.qs
        ctx = self.create_context(ctx)
        # use the effective query list (the qs argument, falling back to self.qs)
        return self._walk(ctx, deque(qs), d, depth=depth)

    def _walk(self, ctx, qs, d, depth):
        if depth == 0:
            return
        if not qs:
            return
        if hasattr(d, "keys"):
            for k, v in list(d.items()):
                ctx.push(k)
                if apply(qs[0], k, v):
                    q = qs.popleft()
                    yield from self._walk(ctx, qs, d[k], depth - 1)
                    if len(qs) == 0:
                        yield from self.on_found(ctx, d, k)
                    qs.appendleft(q)
                else:
                    yield from self._walk(ctx, qs, d[k], depth)
                ctx.pop()
            return
        elif isinstance(d, (list, tuple)):
            for i, e in enumerate(d):
                ctx.push(i)
                yield from self._walk(ctx, qs, e, depth)
                ctx.pop()
            return
        else:
            return

    iterate = walk  # for backward compatibility

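A minimal usage sketch for the walker above. It assumes `DictWalker` is importable from the `dictknife` top level and that the default `ContainerHandler` yields `(path, container)` pairs; both assumptions are read off the code shown here rather than any pinned release.

```python
from dictknife import DictWalker  # assumed export

data = {"servers": [{"name": "a", "port": 80}, {"name": "b", "port": 443}]}

# each element of the query list must match one nesting level in turn
walker = DictWalker(["servers", "port"])
for path, d in walker.walk(data):
    print(path, d["port"])  # d is the container that holds the matched key
```
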
51,775 | dictknife.walkers | __init__
signature: (self, qs, handler=None, context_factory=None)
docstring: null

def __init__(self, qs, handler=None, context_factory=None):
    self.qs = qs
    self.context_factory = context_factory or self.__class__.context_factory
    self.handler = handler or self.__class__.handler_factory()

51,776 | dictknife.walkers | _walk
signature: (self, ctx, qs, d, depth)
docstring: null

def _walk(self, ctx, qs, d, depth):
    if depth == 0:
        return
    if not qs:
        return
    if hasattr(d, "keys"):
        for k, v in list(d.items()):
            ctx.push(k)
            if apply(qs[0], k, v):
                q = qs.popleft()
                yield from self._walk(ctx, qs, d[k], depth - 1)
                if len(qs) == 0:
                    yield from self.on_found(ctx, d, k)
                qs.appendleft(q)
            else:
                yield from self._walk(ctx, qs, d[k], depth)
            ctx.pop()
        return
    elif isinstance(d, (list, tuple)):
        for i, e in enumerate(d):
            ctx.push(i)
            yield from self._walk(ctx, qs, e, depth)
            ctx.pop()
        return
    else:
        return

51,777 | dictknife.walkers | create_context
signature: (self, ctx=None)
docstring: null

def create_context(self, ctx=None):
    return ctx or self.context_factory()

51,778 | dictknife.walkers | walk
signature: (self, d, qs=None, depth=-1, ctx=None)
docstring: null

def walk(self, d, qs=None, depth=-1, ctx=None):
    qs = qs or self.qs
    ctx = self.create_context(ctx)
    # use the effective query list (the qs argument, falling back to self.qs)
    return self._walk(ctx, deque(qs), d, depth=depth)

51,779 | dictknife.walkers | on_found
signature: (self, ctx, d, k)
docstring: null

def on_found(self, ctx, d, k):
    yield self.handler(self, ctx, d, k)

51,788 | dictknife.operators | Not
signature: (value)
docstring: null

class Not(object):
    __repr__ = repr

    def __init__(self, value):
        self.args = value

    def __call__(self, v, *args):
        return not apply(self.args, v, *args)

51,789 | dictknife.operators | __call__
signature: (self, v, *args)
docstring: null

def __call__(self, v, *args):
    return not apply(self.args, v, *args)

51,790 | dictknife.operators | __init__
signature: (self, value)
docstring: null

def __init__(self, value):
    self.args = value

51,792 | dictknife.operators | Or
signature: (args)
docstring: null

class Or(object):
    __repr__ = repr

    def __init__(self, args):
        self.args = args

    def __call__(self, v, *args):
        for e in self.args:
            if apply(e, v, *args):
                return True
        return False

51,793 | dictknife.operators | __call__
signature: (self, v, *args)
docstring: null

def __call__(self, v, *args):
    for e in self.args:
        if apply(e, v, *args):
            return True
    return False

51,796 | dictknife.operators | Regexp
signature: (rx)
docstring: null

class Regexp(object):
    __repr__ = repr

    def __init__(self, rx):
        if isinstance(rx, (str, bytes)):
            rx = re.compile(rx)
        self.args = rx

    def __call__(self, v, *args):
        return self.args.search(v)

51,797 | dictknife.operators | __call__
signature: (self, v, *args)
docstring: null

def __call__(self, v, *args):
    return self.args.search(v)

51,798 | dictknife.operators | __init__
signature: (self, rx)
docstring: null

def __init__(self, rx):
    if isinstance(rx, (str, bytes)):
        rx = re.compile(rx)
    self.args = rx

51,801 | dictknife.operators | apply
signature: (q, v, *args)
docstring: null

def apply(q, v, *args):
    if callable(q):
        return q(v, *args)
    else:
        return q == v

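The operators above compose into predicates that `apply` evaluates; a short sketch using only the classes defined in this module:

```python
from dictknife.operators import And, Not, Regexp, apply

# match strings that look like URLs but are not marked internal
q = And([Regexp(r"^https?://"), Not(Regexp(r"internal"))])

apply(q, "https://example.com/api")   # True: both sub-predicates pass
apply(q, "https://internal.example")  # False: Not(...) rejects it
apply("name", "name")                 # non-callable queries fall back to ==, so True
```
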
51,803 | dictknife.deepequal | deepequal
signature: (d0, d1, normalize=False)
docstring: null

def deepequal(d0, d1, normalize=False):
    if normalize:
        d0 = sort_flexibly(d0)
        d1 = sort_flexibly(d1)
    return halfequal(d0, d1) and halfequal(d1, d0)

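A quick illustration of the symmetric check (the import path matches the package column above):

```python
from dictknife.deepequal import deepequal

deepequal({"a": [1, 2]}, {"a": [1, 2]})     # True
deepequal({"a": [1, 2]}, {"a": [1, 2, 3]})  # False: halfequal fails in one direction
```
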
51,804 | dictknife.deepmerge | deepmerge
signature: (*ds, override=False, method='addtoset')
docstring: null

def deepmerge(*ds, override=False, method="addtoset"):
    """deepmerge: methods in {METHODS!r}""".format(METHODS=METHODS)
    if len(ds) == 0:
        return make_dict()
    if override:
        warnings.warn(
            "override option is deprecated, will be removed, near future",
            category=DeprecationWarning,
        )
        merge = _deepmerge_replace
    elif method == "addtoset":
        merge = partial(_deepmerge_extend, dedup=True)
    elif method == "append":
        merge = partial(_deepmerge_extend, dedup=False)
    elif method == "merge":
        merge = _deepmerge_merge
    elif method == "replace":
        merge = _deepmerge_replace
    else:
        raise ValueError(
            "unavailable method not in {METHODS!r}".format(METHODS=METHODS)
        )
    left = ds[0].__class__()
    for right in ds:
        if not right:
            continue
        left = merge(left, right)
    return left

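A usage sketch of the merge strategies; the exact `addtoset` de-duplication behaviour is inferred from the `_deepmerge_extend(dedup=True)` branch above and the import is an assumed top-level re-export:

```python
from dictknife import deepmerge  # assumed re-export

a = {"db": {"host": "localhost", "tags": ["x"]}}
b = {"db": {"port": 5432, "tags": ["x", "y"]}}

deepmerge(a, b)                    # addtoset: tags expected to merge to ["x", "y"]
deepmerge(a, b, method="append")   # append: tags expected to become ["x", "x", "y"]
deepmerge(a, b, method="replace")  # replace: b's values win wholesale
```
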
51,805 | dictknife.accessing | dictmap
signature: (fn, x, *, mutable=False, with_key=False)
docstring: null

def dictmap(fn, x, *, mutable=False, with_key=False):
    modifier = get_modifier(mutable=mutable)
    if with_key:
        modify_dict = modifier.modify_dict_with_keys
    else:
        modify_dict = modifier.modify_dict

    def _map(d):
        if isinstance(d, (list, tuple)):
            return modifier.modify_list(_map, d)
        elif hasattr(d, "keys"):
            return modify_dict(_map, d)
        else:
            return fn(d)

    return _map(x)

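A sketch of the recursive mapping, following directly from the `_map` dispatch above (the import is an assumed top-level re-export):

```python
from dictknife import dictmap  # assumed re-export

data = {"a": 1, "b": [2, 3], "c": {"d": 4}}
dictmap(lambda v: v * 10, data)
# expected: {"a": 10, "b": [20, 30], "c": {"d": 40}}
```
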
51,806 | dictknife.diff.diff | diff
signature: (d0, d1, tostring=None, fromfile='left', tofile='right', n=3, terminator='\n', normalize=False, sort_keys=False)
docstring: fancy diff

def diff(
    d0,
    d1,
    tostring=None,
    fromfile="left",
    tofile="right",
    n=3,
    terminator="\n",
    normalize=False,
    sort_keys=False,
):
    """fancy diff"""
    if normalize:
        d0 = sort_flexibly(d0)
        d1 = sort_flexibly(d1)
        str_dict(d0)
        str_dict(d1)
    # iterator?
    if hasattr(d0, "__next__"):
        d0 = list(d0)
    if hasattr(d1, "__next__"):
        d1 = list(d1)
    tostring = tostring or _default_tostring
    s0 = tostring(d0, sort_keys=sort_keys).split(terminator)
    s1 = tostring(d1, sort_keys=sort_keys).split(terminator)
    return difflib.unified_diff(
        s0, s1, fromfile=fromfile, tofile=tofile, lineterm="", n=n
    )

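Since `diff` returns the generator produced by `difflib.unified_diff`, the usual pattern is to iterate and print; a minimal sketch (import path per the package column above):

```python
from dictknife.diff.diff import diff

for line in diff({"a": 1, "b": 2}, {"a": 1, "b": 3}, fromfile="old", tofile="new"):
    print(line)
# prints a unified diff of the two dicts in serialized text form
```
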
51,813 | dictknife.pp | pp
signature: (d, out=None)
docstring: null

def pp(d, out=None):
    import json

    out = out or sys.stdout
    try:
        json.dump(d, out, sort_keys=True, indent=2, ensure_ascii=False, default=str)
    except TypeError:
        # xxx: such as `unorderable types: NoneType() < str()`
        json.dump(d, out, sort_keys=False, indent=2, ensure_ascii=False, default=str)

51,814 | dictknife.shape | shape
signature: (d, traverse=<bound method Traverser.traverse of <dictknife.shape.Traverser object at 0x7f4745067d90>>, aggregate=<function _build_pathlist_from_state at 0x7f47450abbe0>, *, squash=False, skiplist=False, separator='/', transform=<function as_jsonpointer at 0x7f47450a9900>)
docstring: null

def shape(
    d,
    traverse=Traverser().traverse,
    aggregate=_build_pathlist_from_state,
    *,
    squash=False,
    skiplist=False,
    separator="/",
    transform=as_jsonpointer
):
    return aggregate(
        traverse(d),
        squash=squash,
        skiplist=skiplist,
        separator=separator,
        transform=transform,
    )

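A rough sketch of calling `shape`; the exact path syntax in the output depends on the `separator`/`transform` defaults and is not verified here:

```python
from dictknife import shape  # assumed re-export

paths = shape({"user": {"name": "a", "tags": ["x", "y"]}})
print(paths)  # a list of path strings, e.g. something like "user/name"
```
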
51,819 | tokamesh.mesh | TriangularMesh
signature: (R: numpy.ndarray, z: numpy.ndarray, triangles: numpy.ndarray)
docstring: Class for performing operations with a triangular mesh, such as
interpolation and plotting.
:param R: The major radius of each mesh vertex as a 1D numpy array.
:param z: The z-height of each mesh vertex as a 1D numpy array.
:param triangles: A 2D numpy array of integers specifying the indices of the
vertices which form each of the triangles in the mesh. The array must have
shape ``(N, 3)`` where ``N`` is the total number of triangles.

class TriangularMesh:
    """
    Class for performing operations with a triangular mesh, such as
    interpolation and plotting.

    :param R: \
        The major radius of each mesh vertex as a 1D numpy array.

    :param z: \
        The z-height of each mesh vertex as a 1D numpy array.

    :param triangles: \
        A 2D numpy array of integers specifying the indices of the vertices which form
        each of the triangles in the mesh. The array must have shape ``(N, 3)`` where
        ``N`` is the total number of triangles.
    """

    def __init__(self, R: ndarray, z: ndarray, triangles: ndarray):
        for name, obj in [("R", R), ("z", z), ("triangles", triangles)]:
            if not isinstance(obj, ndarray):
                raise TypeError(
                    f"""\n
                    [ TriangularMesh error ]
                    >> The '{name}' argument of TriangularMesh should have type:
                    >> {ndarray}
                    >> but instead has type:
                    >> {type(obj)}
                    """
                )
        for name, obj in [("R", R), ("z", z)]:
            if obj.squeeze().ndim > 1:
                raise ValueError(
                    f"""\n
                    [ TriangularMesh error ]
                    >> The '{name}' argument of TriangularMesh should be
                    >> a 1D array, but given array has shape {obj.shape}.
                    """
                )
        if R.size != z.size:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The 'R' and 'z' arguments of TriangularMesh should be
                >> of equal size, but given arrays have sizes {R.size} and {z.size}.
                """
            )
        if triangles.squeeze().ndim != 2 or triangles.squeeze().shape[1] != 3:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The 'triangles' argument must have shape (num_triangles, 3)
                >> but given array has shape {triangles.shape}.
                """
            )

        self.R = R.squeeze()
        self.z = z.squeeze()
        self.triangle_vertices = triangles.squeeze()
        self.n_vertices = self.R.size
        self.n_triangles = self.triangle_vertices.shape[0]

        # pre-calculate barycentric coordinate coefficients for each triangle
        R1, R2, R3 = [self.R[self.triangle_vertices[:, k]] for k in range(3)]
        z1, z2, z3 = [self.z[self.triangle_vertices[:, k]] for k in range(3)]
        self.area = 0.5 * ((z2 - z3) * (R1 - R3) + (R3 - R2) * (z1 - z3))
        self.lam1_coeffs = (
            0.5
            * stack([z2 - z3, R3 - R2, R2 * z3 - R3 * z2], axis=1)
            / self.area[:, None]
        )
        self.lam2_coeffs = (
            0.5
            * stack([z3 - z1, R1 - R3, R3 * z1 - R1 * z3], axis=1)
            / self.area[:, None]
        )

        # Construct a mapping from triangles to edges, and edges to vertices
        self.triangle_edges, self.edge_vertices, _ = build_edge_map(
            self.triangle_vertices
        )
        self.R_edges = self.R[self.edge_vertices]
        self.z_edges = self.z[self.edge_vertices]
        self.n_edges = self.edge_vertices.shape[0]

        # store info about the bounds of the mesh
        self.R_limits = [self.R.min(), self.R.max()]
        self.z_limits = [self.z.min(), self.z.max()]

        self.build_binary_trees()

    def build_binary_trees(self):
        # we now divide the bounding rectangle of the mesh into
        # a rectangular grid, and create a mapping between each
        # grid cell and all triangles which intersect it.

        # find an appropriate depth for each tree
        R_extent = self.R[self.triangle_vertices].ptp(axis=1).mean()
        z_extent = self.z[self.triangle_vertices].ptp(axis=1).mean()
        R_depth = max(
            int(floor(log2((self.R_limits[1] - self.R_limits[0]) / R_extent))), 2
        )
        z_depth = max(
            int(floor(log2((self.z_limits[1] - self.z_limits[0]) / z_extent))), 2
        )
        # build binary trees for each axis
        self.R_tree = BinaryTree(R_depth, self.R_limits)
        self.z_tree = BinaryTree(z_depth, self.z_limits)

        # now build a map between rectangle centres and a list of
        # all triangles which intersect that rectangle
        self.tree_map = {}
        for i, j in product(range(self.R_tree.nodes), range(self.z_tree.nodes)):
            # limits of the rectangle
            R_lims = self.R_tree.edges[i : i + 2]
            z_lims = self.z_tree.edges[j : j + 2]
            # find all edges which intersect the rectangle
            edge_inds = edge_rectangle_intersection(
                R_lims, z_lims, self.R_edges, self.z_edges
            )
            edge_bools = zeros(self.n_edges, dtype=int64)
            edge_bools[edge_inds] = 1
            # use this to find which triangles intersect the rectangle
            triangle_bools = edge_bools[self.triangle_edges].any(axis=1)
            # add the indices of these triangles to the dict
            if triangle_bools.any():
                self.tree_map[(i, j)] = triangle_bools.nonzero()[0]

    def interpolate(self, R: ndarray, z: ndarray, vertex_values: ndarray) -> ndarray:
        """
        Given the values of a function at each vertex of the mesh, use barycentric
        interpolation to approximate the function at a chosen set of points. Any
        points which lie outside the mesh will be assigned a value of zero.

        :param R: \
            The major-radius of each interpolation point as a numpy array.

        :param z: \
            The z-height of each interpolation point as a numpy array.

        :param vertex_values: \
            The function value at each mesh vertex as a 1D numpy array.

        :return: \
            The interpolated function values as a numpy array.
        """
        if type(vertex_values) is not ndarray or vertex_values.ndim != 1:
            raise TypeError(
                """\n
                [ TriangularMesh error ]
                >> The 'vertex_values' argument of the TriangularMesh.interpolate
                >> method must have type numpy.ndarray, and have only one dimension.
                """
            )
        if vertex_values.size != self.n_vertices:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The size of the 'vertex_values' argument of TriangularMesh.interpolate
                >> must be equal to the number of mesh vertices.
                >> The mesh has {self.n_vertices} vertices but given array is of size {vertex_values.size}.
                """
            )
        R_vals = atleast_1d(R)
        z_vals = atleast_1d(z)
        if R_vals.shape != z_vals.shape:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The 'R' and 'z' arguments of TriangularMesh.interpolate
                >> have inconsistent shapes:
                >> {R_vals.shape} != {z_vals.shape}
                """
            )
        input_shape = R_vals.shape
        if len(input_shape) > 1:
            R_vals = R_vals.flatten()
            z_vals = z_vals.flatten()

        # look up which set of points falls in each grid cell
        unique_coords, slices, indices = self.grid_lookup(R_vals, z_vals)
        # loop over each unique grid coordinate
        interpolated_values = zeros(R_vals.size)
        for v, slc in zip(unique_coords, slices):
            # only need to proceed if the current coordinate contains triangles
            key = (v[0], v[1])
            if key in self.tree_map:
                # get triangles intersecting this cell
                search_triangles = self.tree_map[key]
                cell_indices = indices[slc]  # the indices of points inside this cell
                # get the barycentric coord values of each point, and the
                # index of the triangle which contains them
                coords, container_triangles = self.bary_coords(
                    R_vals[cell_indices], z_vals[cell_indices], search_triangles
                )
                # get the values of the vertices for the triangles which contain the points
                vals = vertex_values[self.triangle_vertices[container_triangles, :]]
                # take the dot-product of the coordinates and the vertex
                # values to get the interpolated value
                interpolated_values[cell_indices] = (coords * vals).sum(axis=1)
        if len(input_shape) > 1:
            interpolated_values.resize(input_shape)
        return interpolated_values

    def find_triangle(self, R: ndarray, z: ndarray) -> ndarray:
        """
        Find the indices of the triangles which contain a given set of points.

        :param R: \
            The major-radius of each point as a numpy array.

        :param z: \
            The z-height of each point as a numpy array.

        :return: \
            The indices of the triangles which contain each point as a numpy array.
            Any points which are not inside a triangle are given an index of -1.
        """
        R_vals = atleast_1d(R)
        z_vals = atleast_1d(z)
        if R_vals.shape != z_vals.shape:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The 'R' and 'z' arguments of TriangularMesh.find_triangle
                >> have inconsistent shapes:
                >> {R_vals.shape} != {z_vals.shape}
                """
            )
        input_shape = R_vals.shape
        if len(input_shape) > 1:
            R_vals = R_vals.flatten()
            z_vals = z_vals.flatten()

        # look up which set of points falls in each grid cell
        unique_coords, slices, indices = self.grid_lookup(R_vals, z_vals)
        # loop over each unique grid coordinate
        triangle_indices = full(R_vals.size, fill_value=-1, dtype=int)
        for v, slc in zip(unique_coords, slices):
            # only need to proceed if the current coordinate contains triangles
            key = (v[0], v[1])
            if key in self.tree_map:
                # get triangles intersecting this cell
                search_triangles = self.tree_map[key]
                cell_indices = indices[slc]  # the indices of points inside this cell
                # get the barycentric coord values of each point, and the
                # index of the triangle which contains them
                _, container_triangles = self.bary_coords(
                    R_vals[cell_indices], z_vals[cell_indices], search_triangles
                )
                triangle_indices[cell_indices] = container_triangles
        if len(input_shape) > 1:
            triangle_indices.resize(input_shape)
        return triangle_indices

    def grid_lookup(self, R, z):
        # first determine in which cell each point lies using the binary trees
        grid_coords = zeros([R.size, 2], dtype=int64)
        grid_coords[:, 0] = self.R_tree.lookup_index(R)
        grid_coords[:, 1] = self.z_tree.lookup_index(z)
        # find the set of unique grid coordinates
        unique_coords, inverse, counts = unique(
            grid_coords, axis=0, return_inverse=True, return_counts=True
        )
        # now create an array of indices which are ordered according
        # to which of the unique values they match
        indices = inverse.argsort()
        # build a list of slice objects which addresses those indices
        # which match each unique coordinate
        ranges = counts.cumsum()
        slices = [slice(0, ranges[0])]
        slices.extend([slice(*ranges[i : i + 2]) for i in range(ranges.size - 1)])
        return unique_coords, slices, indices

    def bary_coords(self, R, z, search_triangles):
        Q = stack([atleast_1d(R), atleast_1d(z), full(R.size, fill_value=1.0)], axis=0)
        lam1 = self.lam1_coeffs[search_triangles, :].dot(Q)
        lam2 = self.lam2_coeffs[search_triangles, :].dot(Q)
        lam3 = 1 - lam1 - lam2
        bools = (lam1 >= 0.0) & (lam2 >= 0.0) & (lam3 >= 0.0)
        i1, i2 = bools.nonzero()
        coords = zeros([R.size, 3])
        coords[i2, 0] = lam1[i1, i2]
        coords[i2, 1] = lam2[i1, i2]
        coords[i2, 2] = lam3[i1, i2]
        container_triangles = full(R.size, fill_value=-1)
        container_triangles[i2] = search_triangles[i1]
        return coords, container_triangles

    def draw(self, ax, **kwargs):
        """
        Draw the mesh using a given ``matplotlib.pyplot`` axis object.

        :param ax: \
            A ``matplotlib.pyplot`` axis object on which the mesh will be drawn by
            calling the 'plot' method of the object.

        :param kwargs: \
            Any valid keyword argument of ``matplotlib.pyplot.plot`` may be given in
            order to change the properties of the plot.
        """
        if ("color" not in kwargs) and ("c" not in kwargs):
            kwargs["color"] = "black"
        ax.plot(self.R_edges[0, :].T, self.z_edges[0, :].T, **kwargs)
        if "label" in kwargs:
            kwargs["label"] = None
        ax.plot(self.R_edges[1:, :].T, self.z_edges[1:, :].T, **kwargs)

    def get_field_image(self, vertex_values, shape=(150, 150), pad_fraction=0.01):
        """
        Given the value of a field at each mesh vertex, use interpolation to generate
        an image of the field across the whole mesh.

        :param vertex_values: \
            The value of the field being plotted at each vertex of the mesh as a 1D numpy array.

        :param shape: \
            A tuple of two integers specifying the dimensions of the image.

        :param pad_fraction: \
            The fraction of the mesh width/height used as padding to create a gap between
            the edge of the mesh and the edge of the plot.

        :return R_axis, z_axis, field_image: \
            ``R_axis`` is a 1D array of the major-radius value of each row of the image array.
            ``z_axis`` is a 1D array of the z-height value of each column of the image array.
            ``field_image`` is a 2D array of the interpolated field values. Any points outside
            the mesh are assigned a value of zero.
        """
        R_pad = (self.R_limits[1] - self.R_limits[0]) * pad_fraction
        z_pad = (self.z_limits[1] - self.z_limits[0]) * pad_fraction
        R_axis = linspace(self.R_limits[0] - R_pad, self.R_limits[1] + R_pad, shape[0])
        z_axis = linspace(self.z_limits[0] - z_pad, self.z_limits[1] + z_pad, shape[1])
        R_grid, z_grid = meshgrid(R_axis, z_axis)
        image = self.interpolate(
            R_grid.flatten(), z_grid.flatten(), vertex_values=vertex_values
        )
        image.resize((shape[1], shape[0]))
        return R_axis, z_axis, image.T

    def build_interpolator_matrix(self, R: ndarray, z: ndarray) -> ndarray:
        """
        For a given set of points, construct an 'interpolator' matrix, such
        that its product with a vector of field values at each mesh vertex
        yields the interpolated values of the field at the given set of points.

        :param R: \
            The major-radius of each interpolation point as a 1D ``numpy.ndarray``.

        :param z: \
            The z-height of each interpolation point as a 1D ``numpy.ndarray``.

        :return: \
            The interpolator matrix as a 2D ``numpy.ndarray`` with a shape of
            the number of interpolation points by the number of mesh vertices.
        """
        R_vals = atleast_1d(R)
        z_vals = atleast_1d(z)
        if R_vals.ndim != 1 or z_vals.ndim != 1 or R_vals.size != z_vals.size:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The 'R' and 'z' arguments of build_interpolator_matrix
                >> must be 1D arrays of equal size, however their shapes are
                >> {R_vals.shape}, {z_vals.shape}
                >> respectively.
                """
            )
        interpolator_matrix = zeros([R_vals.size, self.n_vertices])

        # look up which set of points falls in each grid cell
        unique_coords, slices, indices = self.grid_lookup(R_vals, z_vals)
        # loop over each unique grid coordinate
        for v, slc in zip(unique_coords, slices):
            # only need to proceed if the current coordinate contains triangles
            key = (v[0], v[1])
            if key in self.tree_map:
                # get triangles intersecting this cell
                search_triangles = self.tree_map[key]
                cell_indices = indices[slc]  # the indices of points inside this cell
                # get the barycentric coord values of each point, and the
                # index of the triangle which contains them
                coords, container_triangles = self.bary_coords(
                    R_vals[cell_indices], z_vals[cell_indices], search_triangles
                )
                # get corresponding cell indices for the vertex indices
                vertex_inds = self.triangle_vertices[container_triangles, :]
                # insert the coordinate values into the matrix
                interpolator_matrix[cell_indices[:, None], vertex_inds] = coords
        return interpolator_matrix

    def save(self, filepath: str):
        """
        Save the mesh using the numpy 'npz' format.

        :param str filepath:
            File path to which the mesh will be saved.
        """
        savez(filepath, R=self.R, z=self.z, triangles=self.triangle_vertices)

    @classmethod
    def load(cls, filepath: str):
        """
        Load and return a previously saved instance of ``TriangularMesh``.

        :param str filepath:
            File path of the saved mesh.

        :return:
            The loaded mesh as an instance of ``TriangularMesh``.
        """
        D = load(filepath)
        return cls(R=D["R"], z=D["z"], triangles=D["triangles"])

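A small end-to-end sketch of the class above, using a hypothetical two-triangle mesh over the unit square; it assumes `TriangularMesh` is importable from `tokamesh` and exercises only behaviour visible in the code shown here:

```python
import numpy as np
from tokamesh import TriangularMesh  # assumed export

# two triangles covering the unit square
R = np.array([0.0, 1.0, 1.0, 0.0])
z = np.array([0.0, 0.0, 1.0, 1.0])
triangles = np.array([[0, 1, 2], [0, 2, 3]])

mesh = TriangularMesh(R=R, z=z, triangles=triangles)

# barycentric interpolation reproduces linear functions exactly, so
# interpolating the vertex R-values at (0.5, 0.25) should return ~0.5
vals = mesh.interpolate(np.array([0.5]), np.array([0.25]), vertex_values=R)
```
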
51,820 | tokamesh.mesh | __init__
signature: (self, R: numpy.ndarray, z: numpy.ndarray, triangles: numpy.ndarray)
docstring: null

def __init__(self, R: ndarray, z: ndarray, triangles: ndarray):
    for name, obj in [("R", R), ("z", z), ("triangles", triangles)]:
        if not isinstance(obj, ndarray):
            raise TypeError(
                f"""\n
                [ TriangularMesh error ]
                >> The '{name}' argument of TriangularMesh should have type:
                >> {ndarray}
                >> but instead has type:
                >> {type(obj)}
                """
            )
    for name, obj in [("R", R), ("z", z)]:
        if obj.squeeze().ndim > 1:
            raise ValueError(
                f"""\n
                [ TriangularMesh error ]
                >> The '{name}' argument of TriangularMesh should be
                >> a 1D array, but given array has shape {obj.shape}.
                """
            )
    if R.size != z.size:
        raise ValueError(
            f"""\n
            [ TriangularMesh error ]
            >> The 'R' and 'z' arguments of TriangularMesh should be
            >> of equal size, but given arrays have sizes {R.size} and {z.size}.
            """
        )
    if triangles.squeeze().ndim != 2 or triangles.squeeze().shape[1] != 3:
        raise ValueError(
            f"""\n
            [ TriangularMesh error ]
            >> The 'triangles' argument must have shape (num_triangles, 3)
            >> but given array has shape {triangles.shape}.
            """
        )

    self.R = R.squeeze()
    self.z = z.squeeze()
    self.triangle_vertices = triangles.squeeze()
    self.n_vertices = self.R.size
    self.n_triangles = self.triangle_vertices.shape[0]

    # pre-calculate barycentric coordinate coefficients for each triangle
    R1, R2, R3 = [self.R[self.triangle_vertices[:, k]] for k in range(3)]
    z1, z2, z3 = [self.z[self.triangle_vertices[:, k]] for k in range(3)]
    self.area = 0.5 * ((z2 - z3) * (R1 - R3) + (R3 - R2) * (z1 - z3))
    self.lam1_coeffs = (
        0.5
        * stack([z2 - z3, R3 - R2, R2 * z3 - R3 * z2], axis=1)
        / self.area[:, None]
    )
    self.lam2_coeffs = (
        0.5
        * stack([z3 - z1, R1 - R3, R3 * z1 - R1 * z3], axis=1)
        / self.area[:, None]
    )

    # Construct a mapping from triangles to edges, and edges to vertices
    self.triangle_edges, self.edge_vertices, _ = build_edge_map(
        self.triangle_vertices
    )
    self.R_edges = self.R[self.edge_vertices]
    self.z_edges = self.z[self.edge_vertices]
    self.n_edges = self.edge_vertices.shape[0]

    # store info about the bounds of the mesh
    self.R_limits = [self.R.min(), self.R.max()]
    self.z_limits = [self.z.min(), self.z.max()]

    self.build_binary_trees()

51,821 | tokamesh.mesh | bary_coords
signature: (self, R, z, search_triangles)
docstring: null

def bary_coords(self, R, z, search_triangles):
    Q = stack([atleast_1d(R), atleast_1d(z), full(R.size, fill_value=1.0)], axis=0)
    lam1 = self.lam1_coeffs[search_triangles, :].dot(Q)
    lam2 = self.lam2_coeffs[search_triangles, :].dot(Q)
    lam3 = 1 - lam1 - lam2
    bools = (lam1 >= 0.0) & (lam2 >= 0.0) & (lam3 >= 0.0)
    i1, i2 = bools.nonzero()
    coords = zeros([R.size, 3])
    coords[i2, 0] = lam1[i1, i2]
    coords[i2, 1] = lam2[i1, i2]
    coords[i2, 2] = lam3[i1, i2]
    container_triangles = full(R.size, fill_value=-1)
    container_triangles[i2] = search_triangles[i1]
    return coords, container_triangles

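Written out, the pre-computed coefficient rows implement the standard barycentric formulas (notation matching `area`, `lam1_coeffs` and `lam2_coeffs` from `__init__`):

```latex
A = \tfrac{1}{2}\bigl[(z_2 - z_3)(R_1 - R_3) + (R_3 - R_2)(z_1 - z_3)\bigr]

\lambda_1(R, z) = \frac{(z_2 - z_3)R + (R_3 - R_2)z + (R_2 z_3 - R_3 z_2)}{2A}

\lambda_2(R, z) = \frac{(z_3 - z_1)R + (R_1 - R_3)z + (R_3 z_1 - R_1 z_3)}{2A}

\lambda_3 = 1 - \lambda_1 - \lambda_2
```

A point lies inside a triangle exactly when all three coordinates are non-negative, which is the `bools` mask computed above.
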
51,822 | tokamesh.mesh | build_binary_trees
signature: (self)
docstring: null

def build_binary_trees(self):
    # we now divide the bounding rectangle of the mesh into
    # a rectangular grid, and create a mapping between each
    # grid cell and all triangles which intersect it.

    # find an appropriate depth for each tree
    R_extent = self.R[self.triangle_vertices].ptp(axis=1).mean()
    z_extent = self.z[self.triangle_vertices].ptp(axis=1).mean()
    R_depth = max(
        int(floor(log2((self.R_limits[1] - self.R_limits[0]) / R_extent))), 2
    )
    z_depth = max(
        int(floor(log2((self.z_limits[1] - self.z_limits[0]) / z_extent))), 2
    )
    # build binary trees for each axis
    self.R_tree = BinaryTree(R_depth, self.R_limits)
    self.z_tree = BinaryTree(z_depth, self.z_limits)

    # now build a map between rectangle centres and a list of
    # all triangles which intersect that rectangle
    self.tree_map = {}
    for i, j in product(range(self.R_tree.nodes), range(self.z_tree.nodes)):
        # limits of the rectangle
        R_lims = self.R_tree.edges[i : i + 2]
        z_lims = self.z_tree.edges[j : j + 2]
        # find all edges which intersect the rectangle
        edge_inds = edge_rectangle_intersection(
            R_lims, z_lims, self.R_edges, self.z_edges
        )
        edge_bools = zeros(self.n_edges, dtype=int64)
        edge_bools[edge_inds] = 1
        # use this to find which triangles intersect the rectangle
        triangle_bools = edge_bools[self.triangle_edges].any(axis=1)
        # add the indices of these triangles to the dict
        if triangle_bools.any():
            self.tree_map[(i, j)] = triangle_bools.nonzero()[0]

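The tree depths are chosen so that a grid cell is comparable in size to the mean triangle extent; restated as a formula (with the mean per-triangle R-extent computed via `ptp`):

```latex
\text{depth}_R = \max\left(\left\lfloor \log_2 \frac{R_{\max} - R_{\min}}{\langle \Delta R \rangle} \right\rfloor,\; 2\right)
```

and analogously for z, giving on the order of $2^{\text{depth}}$ cells per axis (assuming `BinaryTree(depth, limits)` subdivides its interval `depth` times, which is implied but not shown here).
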
51,823 | tokamesh.mesh | build_interpolator_matrix
signature: (self, R: numpy.ndarray, z: numpy.ndarray) -> numpy.ndarray
docstring: For a given set of points, construct an 'interpolator' matrix, such
that its product with a vector of field values at each mesh vertex
yields the interpolated values of the field at the given set of points.
:param R: The major-radius of each interpolation point as a 1D ``numpy.ndarray``.
:param z: The z-height of each interpolation point as a 1D ``numpy.ndarray``.
:return: The interpolator matrix as a 2D ``numpy.ndarray`` with a shape of
the number of interpolation points by the number of mesh vertices.

def build_interpolator_matrix(self, R: ndarray, z: ndarray) -> ndarray:
    """
    For a given set of points, construct an 'interpolator' matrix, such
    that its product with a vector of field values at each mesh vertex
    yields the interpolated values of the field at the given set of points.

    :param R: \
        The major-radius of each interpolation point as a 1D ``numpy.ndarray``.

    :param z: \
        The z-height of each interpolation point as a 1D ``numpy.ndarray``.

    :return: \
        The interpolator matrix as a 2D ``numpy.ndarray`` with a shape of
        the number of interpolation points by the number of mesh vertices.
    """
    R_vals = atleast_1d(R)
    z_vals = atleast_1d(z)
    if R_vals.ndim != 1 or z_vals.ndim != 1 or R_vals.size != z_vals.size:
        raise ValueError(
            f"""\n
            [ TriangularMesh error ]
            >> The 'R' and 'z' arguments of build_interpolator_matrix
            >> must be 1D arrays of equal size, however their shapes are
            >> {R_vals.shape}, {z_vals.shape}
            >> respectively.
            """
        )
    interpolator_matrix = zeros([R_vals.size, self.n_vertices])

    # look up which set of points falls in each grid cell
    unique_coords, slices, indices = self.grid_lookup(R_vals, z_vals)
    # loop over each unique grid coordinate
    for v, slc in zip(unique_coords, slices):
        # only need to proceed if the current coordinate contains triangles
        key = (v[0], v[1])
        if key in self.tree_map:
            # get triangles intersecting this cell
            search_triangles = self.tree_map[key]
            cell_indices = indices[slc]  # the indices of points inside this cell
            # get the barycentric coord values of each point, and the
            # index of the triangle which contains them
            coords, container_triangles = self.bary_coords(
                R_vals[cell_indices], z_vals[cell_indices], search_triangles
            )
            # get corresponding cell indices for the vertex indices
            vertex_inds = self.triangle_vertices[container_triangles, :]
            # insert the coordinate values into the matrix
            interpolator_matrix[cell_indices[:, None], vertex_inds] = coords
    return interpolator_matrix

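A consistency check that follows directly from the definition: applying the matrix to a vector of vertex values should agree with `interpolate`. This sketch reuses the hypothetical `mesh` from the earlier example:

```python
import numpy as np

Rp = np.array([0.25, 0.75])
zp = np.array([0.25, 0.50])

G = mesh.build_interpolator_matrix(Rp, zp)  # shape (2, mesh.n_vertices)
field = np.linspace(1.0, 2.0, mesh.n_vertices)

np.allclose(G @ field, mesh.interpolate(Rp, zp, field))  # expected: True
```
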
51,824 | tokamesh.mesh | draw
signature: (self, ax, **kwargs)
docstring: Draw the mesh using a given ``matplotlib.pyplot`` axis object.
:param ax: A ``matplotlib.pyplot`` axis object on which the mesh will be drawn by
calling the 'plot' method of the object.
:param kwargs: Any valid keyword argument of ``matplotlib.pyplot.plot`` may be given in
order to change the properties of the plot.

def draw(self, ax, **kwargs):
    """
    Draw the mesh using a given ``matplotlib.pyplot`` axis object.

    :param ax: \
        A ``matplotlib.pyplot`` axis object on which the mesh will be drawn by
        calling the 'plot' method of the object.

    :param kwargs: \
        Any valid keyword argument of ``matplotlib.pyplot.plot`` may be given in
        order to change the properties of the plot.
    """
    if ("color" not in kwargs) and ("c" not in kwargs):
        kwargs["color"] = "black"
    ax.plot(self.R_edges[0, :].T, self.z_edges[0, :].T, **kwargs)
    if "label" in kwargs:
        kwargs["label"] = None
    ax.plot(self.R_edges[1:, :].T, self.z_edges[1:, :].T, **kwargs)

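A plotting sketch for `draw`, using the standard matplotlib API (`mesh` is the hypothetical instance from the earlier example):

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
mesh.draw(ax, lw=0.7)  # kwargs are forwarded to ax.plot
ax.set_xlabel("R")
ax.set_ylabel("z")
ax.set_aspect("equal")
plt.show()
```
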
51,825 | tokamesh.mesh | find_triangle
signature: (self, R: numpy.ndarray, z: numpy.ndarray) -> numpy.ndarray
docstring: Find the indices of the triangles which contain a given set of points.
:param R: The major-radius of each point as a numpy array.
:param z: The z-height of each point as a numpy array.
:return: The indices of the triangles which contain each point as a numpy array.
Any points which are not inside a triangle are given an index of -1.

def find_triangle(self, R: ndarray, z: ndarray) -> ndarray:
    """
    Find the indices of the triangles which contain a given set of points.

    :param R: \
        The major-radius of each point as a numpy array.

    :param z: \
        The z-height of each point as a numpy array.

    :return: \
        The indices of the triangles which contain each point as a numpy array.
        Any points which are not inside a triangle are given an index of -1.
    """
    R_vals = atleast_1d(R)
    z_vals = atleast_1d(z)
    if R_vals.shape != z_vals.shape:
        raise ValueError(
            f"""\n
            [ TriangularMesh error ]
            >> The 'R' and 'z' arguments of TriangularMesh.find_triangle
            >> have inconsistent shapes:
            >> {R_vals.shape} != {z_vals.shape}
            """
        )
    input_shape = R_vals.shape
    if len(input_shape) > 1:
        R_vals = R_vals.flatten()
        z_vals = z_vals.flatten()

    # look up which set of points falls in each grid cell
    unique_coords, slices, indices = self.grid_lookup(R_vals, z_vals)
    # loop over each unique grid coordinate
    triangle_indices = full(R_vals.size, fill_value=-1, dtype=int)
    for v, slc in zip(unique_coords, slices):
        # only need to proceed if the current coordinate contains triangles
        key = (v[0], v[1])
        if key in self.tree_map:
            # get triangles intersecting this cell
            search_triangles = self.tree_map[key]
            cell_indices = indices[slc]  # the indices of points inside this cell
            # get the barycentric coord values of each point, and the
            # index of the triangle which contains them
            _, container_triangles = self.bary_coords(
                R_vals[cell_indices], z_vals[cell_indices], search_triangles
            )
            triangle_indices[cell_indices] = container_triangles
    if len(input_shape) > 1:
        triangle_indices.resize(input_shape)
    return triangle_indices

51,826 | tokamesh.mesh | get_field_image
signature: (self, vertex_values, shape=(150, 150), pad_fraction=0.01)
docstring: Given the value of a field at each mesh vertex, use interpolation to generate
an image of the field across the whole mesh.
:param vertex_values: The value of the field being plotted at each vertex of the mesh as a 1D numpy array.
:param shape: A tuple of two integers specifying the dimensions of the image.
:param pad_fraction: The fraction of the mesh width/height used as padding to create a gap between
the edge of the mesh and the edge of the plot.
:return R_axis, z_axis, field_image: ``R_axis`` is a 1D array of the major-radius value of each row of the image array.
``z_axis`` is a 1D array of the z-height value of each column of the image array.
``field_image`` is a 2D array of the interpolated field values. Any points outside
the mesh are assigned a value of zero.

def get_field_image(self, vertex_values, shape=(150, 150), pad_fraction=0.01):
    """
    Given the value of a field at each mesh vertex, use interpolation to generate
    an image of the field across the whole mesh.

    :param vertex_values: \
        The value of the field being plotted at each vertex of the mesh as a 1D numpy array.

    :param shape: \
        A tuple of two integers specifying the dimensions of the image.

    :param pad_fraction: \
        The fraction of the mesh width/height used as padding to create a gap between
        the edge of the mesh and the edge of the plot.

    :return R_axis, z_axis, field_image: \
        ``R_axis`` is a 1D array of the major-radius value of each row of the image array.
        ``z_axis`` is a 1D array of the z-height value of each column of the image array.
        ``field_image`` is a 2D array of the interpolated field values. Any points outside
        the mesh are assigned a value of zero.
    """
    R_pad = (self.R_limits[1] - self.R_limits[0]) * pad_fraction
    z_pad = (self.z_limits[1] - self.z_limits[0]) * pad_fraction
    R_axis = linspace(self.R_limits[0] - R_pad, self.R_limits[1] + R_pad, shape[0])
    z_axis = linspace(self.z_limits[0] - z_pad, self.z_limits[1] + z_pad, shape[1])
    R_grid, z_grid = meshgrid(R_axis, z_axis)
    image = self.interpolate(
        R_grid.flatten(), z_grid.flatten(), vertex_values=vertex_values
    )
    image.resize((shape[1], shape[0]))
    return R_axis, z_axis, image.T

51,827 | tokamesh.mesh | grid_lookup
signature: (self, R, z)
docstring: null

def grid_lookup(self, R, z):
    # first determine in which cell each point lies using the binary trees
    grid_coords = zeros([R.size, 2], dtype=int64)
    grid_coords[:, 0] = self.R_tree.lookup_index(R)
    grid_coords[:, 1] = self.z_tree.lookup_index(z)
    # find the set of unique grid coordinates
    unique_coords, inverse, counts = unique(
        grid_coords, axis=0, return_inverse=True, return_counts=True
    )
    # now create an array of indices which are ordered according
    # to which of the unique values they match
    indices = inverse.argsort()
    # build a list of slice objects which addresses those indices
    # which match each unique coordinate
    ranges = counts.cumsum()
    slices = [slice(0, ranges[0])]
    slices.extend([slice(*ranges[i : i + 2]) for i in range(ranges.size - 1)])
    return unique_coords, slices, indices

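The unique/argsort/cumsum trick above is worth seeing in isolation; a self-contained numpy sketch of the same grouping logic:

```python
import numpy as np

coords = np.array([[0, 1], [2, 0], [0, 1], [2, 0], [0, 1]])
uniq, inverse, counts = np.unique(
    coords, axis=0, return_inverse=True, return_counts=True
)
indices = inverse.argsort()  # point indices, grouped by unique coordinate
ranges = counts.cumsum()
slices = [slice(0, ranges[0])]
slices.extend(slice(*ranges[i : i + 2]) for i in range(ranges.size - 1))

for u, s in zip(uniq, slices):
    print(u, indices[s])  # each unique cell with the indices of its points
```
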
51,828 | tokamesh.mesh | interpolate
signature: (self, R: numpy.ndarray, z: numpy.ndarray, vertex_values: numpy.ndarray) -> numpy.ndarray
docstring: Given the values of a function at each vertex of the mesh, use barycentric
interpolation to approximate the function at a chosen set of points. Any
points which lie outside the mesh will be assigned a value of zero.
:param R: The major-radius of each interpolation point as a numpy array.
:param z: The z-height of each interpolation point as a numpy array.
:param vertex_values: The function value at each mesh vertex as a 1D numpy array.
:return: The interpolated function values as a numpy array.

def interpolate(self, R: ndarray, z: ndarray, vertex_values: ndarray) -> ndarray:
    """
    Given the values of a function at each vertex of the mesh, use barycentric
    interpolation to approximate the function at a chosen set of points. Any
    points which lie outside the mesh will be assigned a value of zero.

    :param R: \
        The major-radius of each interpolation point as a numpy array.

    :param z: \
        The z-height of each interpolation point as a numpy array.

    :param vertex_values: \
        The function value at each mesh vertex as a 1D numpy array.

    :return: \
        The interpolated function values as a numpy array.
    """
    if type(vertex_values) is not ndarray or vertex_values.ndim != 1:
        raise TypeError(
            """\n
            [ TriangularMesh error ]
            >> The 'vertex_values' argument of the TriangularMesh.interpolate
            >> method must have type numpy.ndarray, and have only one dimension.
            """
        )
    if vertex_values.size != self.n_vertices:
        raise ValueError(
            f"""\n
            [ TriangularMesh error ]
            >> The size of the 'vertex_values' argument of TriangularMesh.interpolate
            >> must be equal to the number of mesh vertices.
            >> The mesh has {self.n_vertices} vertices but given array is of size {vertex_values.size}.
            """
        )
    R_vals = atleast_1d(R)
    z_vals = atleast_1d(z)
    if R_vals.shape != z_vals.shape:
        raise ValueError(
            f"""\n
            [ TriangularMesh error ]
            >> The 'R' and 'z' arguments of TriangularMesh.interpolate
            >> have inconsistent shapes:
            >> {R_vals.shape} != {z_vals.shape}
            """
        )
    input_shape = R_vals.shape
    if len(input_shape) > 1:
        R_vals = R_vals.flatten()
        z_vals = z_vals.flatten()

    # look up which set of points falls in each grid cell
    unique_coords, slices, indices = self.grid_lookup(R_vals, z_vals)
    # loop over each unique grid coordinate
    interpolated_values = zeros(R_vals.size)
    for v, slc in zip(unique_coords, slices):
        # only need to proceed if the current coordinate contains triangles
        key = (v[0], v[1])
        if key in self.tree_map:
            # get triangles intersecting this cell
            search_triangles = self.tree_map[key]
            cell_indices = indices[slc]  # the indices of points inside this cell
            # get the barycentric coord values of each point, and the
            # index of the triangle which contains them
            coords, container_triangles = self.bary_coords(
                R_vals[cell_indices], z_vals[cell_indices], search_triangles
            )
            # get the values of the vertices for the triangles which contain the points
            vals = vertex_values[self.triangle_vertices[container_triangles, :]]
            # take the dot-product of the coordinates and the vertex
            # values to get the interpolated value
            interpolated_values[cell_indices] = (coords * vals).sum(axis=1)
    if len(input_shape) > 1:
        interpolated_values.resize(input_shape)
    return interpolated_values

51,829 | tokamesh.mesh | save
signature: (self, filepath: str)
docstring: Save the mesh using the numpy 'npz' format.
:param str filepath: File path to which the mesh will be saved.

def save(self, filepath: str):
    """
    Save the mesh using the numpy 'npz' format.

    :param str filepath:
        File path to which the mesh will be saved.
    """
    savez(filepath, R=self.R, z=self.z, triangles=self.triangle_vertices)

51,834 | toml_sort.tomlsort | TomlSort
signature: (input_toml: 'str', comment_config: 'Optional[CommentConfiguration]' = None, sort_config: 'Optional[SortConfiguration]' = None, format_config: 'Optional[FormattingConfiguration]' = None, sort_config_overrides: 'Optional[Dict[str, SortOverrideConfiguration]]' = None) -> 'None'
docstring: API to manage sorting toml files.

class TomlSort:
    """API to manage sorting toml files."""

    def __init__(  # pylint: disable=too-many-arguments
        self,
        input_toml: str,
        comment_config: Optional[CommentConfiguration] = None,
        sort_config: Optional[SortConfiguration] = None,
        format_config: Optional[FormattingConfiguration] = None,
        sort_config_overrides: Optional[
            Dict[str, SortOverrideConfiguration]
        ] = None,
    ) -> None:
        """Initializer."""
        self.input_toml = input_toml
        if comment_config is None:
            comment_config = CommentConfiguration()
        self.comment_config = comment_config
        if sort_config is None:
            sort_config = SortConfiguration()
        self._sort_config = sort_config
        if format_config is None:
            format_config = FormattingConfiguration()
        self.format_config = format_config
        if sort_config_overrides is None:
            sort_config_overrides = {}
        self.sort_config_overrides = sort_config_overrides

    def _find_config_override(
        self, keys: Optional[TomlSortKeys]
    ) -> Optional[SortOverrideConfiguration]:
        """Returns a SortOverrideConfiguration for a particular TomlSortKeys
        object, if one exists. If none exists, returns None.

        Override patterns are evaluated as glob patterns by the Python
        fnmatch function. If there are multiple matches, an exact match
        takes precedence; otherwise the first match is returned.
        """
        if keys is None:
            return None
        if keys.as_string() in self.sort_config_overrides:
            return self.sort_config_overrides.get(keys.as_string())
        matches = [
            config
            for pattern, config in self.sort_config_overrides.items()
            if fnmatch.fnmatch(keys.as_string(), pattern)
        ]
        if len(matches) > 0:
            return matches[0]
        return None

    def sort_config(
        self, keys: Optional[TomlSortKeys] = None
    ) -> SortConfiguration:
        """Returns the SortConfiguration to use for a particular TomlSortKeys.

        This merges the global SortConfiguration with any matching
        SortOverrideConfiguration to give the full SortConfiguration
        that applies to this key.
        """
        override = self._find_config_override(keys)
        if override is None:
            return self._sort_config
        main_config = asdict(self._sort_config)
        override_config = asdict(override)
        merged_config = {}
        for key, value in main_config.items():
            if key in override_config and override_config[key] is not None:
                merged_config[key] = override_config[key]
            else:
                merged_config[key] = value
        return SortConfiguration(**merged_config)

    def sort_array(
        self, keys: TomlSortKeys, array: Array, indent_depth: int = 0
    ) -> Array:
        """Sort and format an inline array item while preserving comments."""
        multiline = "\n" in array.as_string()
        indent_size = self.format_config.spaces_indent_inline_array
        indent = (
            "\n" + " " * indent_size * (indent_depth + 1) if multiline else ""
        )
        comma = "," if multiline else ", "
        comments: List[_ArrayItemGroup] = []
        new_array_items = []
        for array_item in array._value:  # pylint: disable=protected-access
            if isinstance(array_item.value, Null) and isinstance(
                array_item.comment, Comment
            ):
                # Previous comments are orphaned if there is whitespace
                if (
                    array_item.indent is not None
                    and "\n\n" in array_item.indent.as_string()
                ):
                    comments = []
                # Comment on its own line within the array
                array_item.indent = Whitespace(indent)
                array_item.comma = Whitespace("")
                array_item.comment.trivia.comment = format_comment(
                    array_item.comment.trivia.comment
                )
                comments.append(array_item)
            elif array_item.value is not None and not isinstance(
                array_item.value, Null
            ):
                # Actual array item
                array_item.indent = Whitespace(indent)
                array_item.comma = Whitespace(comma)
                if array_item.comment is not None:
                    if self.comment_config.inline:
                        array_item.comment.trivia.comment = format_comment(
                            array_item.comment.trivia.comment
                        )
                        array_item.comment.trivia.indent = (
                            " "
                            * self.format_config.spaces_before_inline_comment
                        )
                    else:
                        array_item.comment = None
                new_array_items.append((array_item, comments))
                comments = []
                array_item.value = self.sort_item(
                    keys,
                    array_item.value,
                    indent_depth=indent_depth + 1
                    if multiline
                    else indent_depth,
                )
        if self.sort_config(keys).inline_arrays:
            new_array_items = sorted(new_array_items, key=self.array_sort_func)
        new_array_value = []
        for array_item, comments in new_array_items:
            if comments and self.comment_config.block:
                new_array_value.extend(comments)
            new_array_value.append(array_item)
        if len(new_array_value) != 0 and not (
            multiline and self.format_config.trailing_comma_inline_array
        ):
            new_array_value[-1].comma = Whitespace("")
        if multiline:
            array_item = _ArrayItemGroup()
            array_item.value = Whitespace(
                "\n" + " " * indent_size * indent_depth
            )
            new_array_value.append(array_item)
        array._value = new_array_value  # pylint: disable=protected-access
        array._reindex()  # pylint: disable=protected-access
        array = normalize_trivia(
            array,
            include_comments=self.comment_config.inline,
            comment_spaces=self.format_config.spaces_before_inline_comment,
        )
        return array

    def sort_item(
        self, keys: TomlSortKeys, item: Item, indent_depth: int = 0
    ) -> Item:
        """Sort an item, recursing down if the item is an inline table or
        array."""
        if isinstance(item, Array):
            return self.sort_array(keys, item, indent_depth=indent_depth)
        if isinstance(item, InlineTable):
            return self.sort_inline_table(
                keys, item, indent_depth=indent_depth
            )
        return item

    def sort_keys(
        self, items: Iterable[TomlSortItem], sort_config: SortConfiguration
    ) -> List[TomlSortItem]:
        """Sorts an iterable of TomlSortItem based on keys.

        The sort respects the sort_config.first setting, which allows
        overriding the sorted order of keys.
        """

        def sort_first(item):
            if item.keys.base in sort_config.first:
                return sort_config.first.index(item.keys.base)
            return len(sort_config.first)

        items = sorted(items, key=self.key_sort_func)
        items = sorted(items, key=sort_first)
        return items

    def sort_inline_table(
        self, keys: TomlSortKeys, item: Item, indent_depth: int = 0
    ) -> InlineTable:
        """Sort an inline table, recursing into its items."""
        tomlsort_items = [
            TomlSortItem(
                keys=keys + k,
                value=self.sort_item(keys + k, v, indent_depth=indent_depth),
            )
            for k, v in item.value.body
            if not isinstance(v, Whitespace) and k is not None
        ]
        sort_config = self.sort_config(keys)
        if sort_config.inline_tables:
            tomlsort_items = self.sort_keys(tomlsort_items, sort_config)
        new_table = InlineTable(
            Container(parsed=True), trivia=item.trivia, new=True
        )
        for tomlsort_item in tomlsort_items:
            normalize_trivia(tomlsort_item.value, include_comments=False)
            new_table.append(
                self.format_key(tomlsort_item.keys.base), tomlsort_item.value
            )
        new_table = normalize_trivia(
            new_table,
            include_comments=self.comment_config.inline,
            comment_spaces=self.format_config.spaces_before_inline_comment,
        )
        return new_table

    @staticmethod
    def format_key(key: Key) -> Key:
        """
        Format a key, removing any extra whitespace, and making sure that it
        will be formatted like: key = value with one space on either side of
        the equal sign.
        """
        key.sep = " = "
        key._original = (  # pylint: disable=protected-access
            key.as_string().strip()
        )
        return key

    def sort_items(
        self, items: Iterable[TomlSortItem]
    ) -> Iterable[TomlSortItem]:
        """Sort an iterable of TomlSortItem, making sure each key is
        correctly formatted and recursing into any sub-items."""
        for item in items:
            item.keys.base = self.format_key(item.keys.base)
            item.value = self.sort_item(item.keys, item.value)
        return items

    def key_sort_func(self, value: TomlSortItem) -> str:
        """Sort function that looks at a TomlSortItem's keys, respecting the
        configured value for ignore_case."""
        key = value.keys.base.key
        if self.sort_config().ignore_case:
            key = key.lower()
        return key

    def array_sort_func(self, value: Tuple[_ArrayItemGroup, Any]) -> str:
        """Sort function that operates on the .value member of an
        ArrayItemGroup and respects the class setting for ignore_case."""
        if value[0].value is None:
            return ""
        ret = value[0].value.as_string()
        if self.sort_config().ignore_case:
            ret = ret.lower()
        return ret

    def sorted_children_table(
        self, parent_keys: Optional[TomlSortKeys], parent: List[TomlSortItem]
    ) -> Iterable[TomlSortItem]:
        """Get the sorted children of a table."""
        sort_config = self.sort_config(parent_keys)
        tables = coalesce_tables(
            item for item in parent if isinstance(item.value, (Table, AoT))
        )
        non_tables = self.sort_items(
            [
                item
                for item in parent
                if not isinstance(item.value, (Table, AoT))
            ]
        )
        non_tables_final = (
            self.sort_keys(non_tables, sort_config)
            if sort_config.table_keys
            else non_tables
        )
        tables_final = (
            self.sort_keys(tables, sort_config)
            if self.sort_config(parent_keys).tables
            else tables
        )
        return itertools.chain(non_tables_final, tables_final)

    def write_header_comment(
        self,
        from_doc_body: List[Tuple[Optional[Key], Item]],
        to_doc: TOMLDocument,
    ) -> List[Tuple[Optional[Key], Item]]:
        """Write the header comment from the FROM doc to the TO doc.

        Only writes comments / whitespace from the beginning of a TOML
        document.
        """
        # Discard leading whitespace
        while len(from_doc_body) > 0 and isinstance(
            from_doc_body[0][1], Whitespace
        ):
            from_doc_body.pop(0)
        # Remove the header comment from the input document, adding it to
        # the output document, followed by a newline.
        spaces = self.format_config.spaces_before_inline_comment
        while len(from_doc_body) > 0 and isinstance(
            from_doc_body[0][1], Comment
        ):
            _, value = from_doc_body.pop(0)
            value = normalize_trivia(
                value,
                comment_spaces=spaces,
            )
            to_doc.add(value)
        to_doc.add(ws("\n"))
        return from_doc_body

    def toml_elements_sorted(
        self, original: TomlSortItem, parent: Table | TOMLDocument
    ) -> Item:
        """Returns a sorted item, recursing collections to their base."""
        if original.is_table:
            new_table = original.table
            for item in self.sorted_children_table(
                original.keys, original.children
            ):
                previous_item = self.table_previous_item(new_table, parent)
                attach_comments(item, previous_item)
                new_table.add(
                    item.keys.base,
                    self.toml_elements_sorted(item, previous_item),
                )
            return new_table
        if original.is_aot:
            new_aot = normalize_trivia(
                original.aot,
                self.comment_config.inline,
                self.format_config.spaces_before_inline_comment,
            )
            for table in original.children:
                previous_item = next(iter(new_aot), parent)
                attach_comments(table, previous_item)
                new_aot.append(
                    self.toml_elements_sorted(
                        table, next(iter(new_aot), previous_item)
                    )
                )
            return new_aot
        return original.value

    @staticmethod
    def table_previous_item(parent_table, grandparent):
        """Finds the previous item that we should attach a comment to, in the
        case where the previous item is a table.

        This takes into account that a table may be a super table.
        """
        if parent_table.is_super_table():
            if len(parent_table) == 0:
                return grandparent
            last_item = parent_table.value.last_item()
            if isinstance(last_item, Table):
                return last_item
        return parent_table

    def body_to_tomlsortitems(
        self,
        parent: List[Tuple[Optional[Key], Item]],
        parent_key: Optional[TomlSortKeys] = None,
    ) -> Tuple[List[TomlSortItem], List[Comment]]:
        """Iterate over Container.body, recursing down into sub-containers,
        attaching the comments that are found to the correct TomlSortItem. We
        need to do this iteration because TomlKit puts comments at the end of
        the collection they appear in, instead of the start of the next
        collection. For example:

        ```toml
        [xyz]
        # Comment
        [abc]
        ```

        TomlKit would place the comment from the example into the [xyz]
        collection, when we would like it to be attached to the [abc]
        collection.

        So before sorting we have to iterate over the container, correctly
        attaching the comments, then undo this process once everything is
        sorted.
        """
        items: List[TomlSortItem] = []
        comments: List[Comment] = []
        for key, value in parent:
            if key is None:
                if isinstance(value, Whitespace):
                    comments = []
                elif isinstance(value, Comment) and self.comment_config.block:
                    comment_spaces = (
                        self.format_config.spaces_before_inline_comment
                    )
                    value = normalize_trivia(
                        value,
                        comment_spaces=comment_spaces,
                    )
                    comments.append(value)
                continue
            value = convert_tomlkit_buggy_types(value, parent, key.key)
            value = normalize_trivia(
                value,
                self.comment_config.inline,
                comment_spaces=self.format_config.spaces_before_inline_comment,
            )
            full_key = parent_key + key if parent_key else TomlSortKeys(key)
            if isinstance(value, Table):
                comments, item = self.table_to_tomlsortitem(
                    comments, full_key, value
                )
            elif isinstance(value, AoT):
                comments, item = self.aot_to_tomlsortitem(
                    comments, full_key, value
                )
            elif isinstance(value, Item):
                item = TomlSortItem(full_key, value, comments)
                comments = []
            else:
                raise TypeError(
                    "Invalid TOML; " + str(type(value)) + " is not an Item."
                )
            items.append(item)
        return items, comments

    def aot_to_tomlsortitem(
        self, comments: List[Comment], keys: TomlSortKeys, value: AoT
    ) -> Tuple[List[Comment], TomlSortItem]:
        """Turn an AoT into a TomlSortItem, recursing down through its
        collections and attaching all the comments to the correct items."""
        new_aot = AoT([], parsed=True)
        children = []
        for table in value.body:
            [first_child], trailing_comments = self.body_to_tomlsortitems(
                [(keys.base, table)]
            )
            first_child.attached_comments = comments
            comments = trailing_comments
            children.append(first_child)
        item = TomlSortItem(keys, new_aot, children=children)
        return comments, item

    def table_to_tomlsortitem(
        self, comments: List[Comment], keys: TomlSortKeys, value: Table
    ) -> Tuple[List[Comment], TomlSortItem]:
        """Turn a table into a TomlSortItem, recursing down through its
        collections and attaching all the comments to the correct items."""
        children, trailing_comments = self.body_to_tomlsortitems(
            value.value.body, parent_key=keys
        )
        new_table = Table(
            Container(parsed=True),
            trivia=value.trivia,
            is_aot_element=value.is_aot_element(),
            is_super_table=value.is_super_table(),
        )
        if not value.is_super_table():
            new_table.trivia.indent = "\n"
        first_child = next(iter(children), None)
        # If the first child of this item is an AoT, we want the
        # comment to be attached to the first table within the AoT,
        # rather than the parent AoT object
        if first_child and first_child.is_aot:
            first_child.children[0].attached_comments = comments
            comments = []
        # If this item is a super table we want to walk down
        # the tree and attach the comment to the first non-super table.
        if value.is_super_table():
            child_table = children[0]
            while child_table.is_super_table:
                child_table = child_table.children[0]
            child_table.attached_comments = comments
            comments = []
        item = TomlSortItem(keys, new_table, comments, children)
        comments = trailing_comments
        return comments, item

    def toml_doc_sorted(self, original: TOMLDocument) -> TOMLDocument:
        """Sort a TOMLDocument."""
        sorted_document = TOMLDocument(parsed=True)
        original_body = original.body
        if self.comment_config.header:
            original_body = self.write_header_comment(
                original_body, sorted_document
            )
        items, footer_comment = self.body_to_tomlsortitems(original_body)
        for item in self.sorted_children_table(None, items):
            attach_comments(item, sorted_document)
            sorted_document.add(
                item.keys.base,
                self.toml_elements_sorted(item, sorted_document),
            )
        if self.comment_config.footer and footer_comment:
            sorted_document.add(Whitespace("\n"))
            for comment in footer_comment:
                sorted_document.add(comment)
        return sorted_document

    def sorted(self) -> str:
        """Sort a TOML string."""
        clean_toml = clean_toml_text(self.input_toml)
        toml_doc = tomlkit.parse(clean_toml)
        sorted_toml = self.toml_doc_sorted(toml_doc)
        return clean_toml_text(tomlkit.dumps(sorted_toml)).strip() + "\n"

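A minimal end-to-end sketch; it assumes the default `SortConfiguration` sorts tables, which matches toml-sort's documented behaviour but may differ between releases:

```python
from toml_sort import TomlSort  # assumed top-level export

text = """\
[b]
x = 1

[a]
y = 2
"""
print(TomlSort(text).sorted())  # tables expected to be re-ordered: [a] before [b]
```
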
51,835 | toml_sort.tomlsort | __init__
signature: (self, input_toml: str, comment_config: Optional[toml_sort.tomlsort.CommentConfiguration] = None, sort_config: Optional[toml_sort.tomlsort.SortConfiguration] = None, format_config: Optional[toml_sort.tomlsort.FormattingConfiguration] = None, sort_config_overrides: Optional[Dict[str, toml_sort.tomlsort.SortOverrideConfiguration]] = None) -> NoneType
docstring: Initializer.

def __init__(  # pylint: disable=too-many-arguments
    self,
    input_toml: str,
    comment_config: Optional[CommentConfiguration] = None,
    sort_config: Optional[SortConfiguration] = None,
    format_config: Optional[FormattingConfiguration] = None,
    sort_config_overrides: Optional[
        Dict[str, SortOverrideConfiguration]
    ] = None,
) -> None:
    """Initializer."""
    self.input_toml = input_toml
    if comment_config is None:
        comment_config = CommentConfiguration()
    self.comment_config = comment_config
    if sort_config is None:
        sort_config = SortConfiguration()
    self._sort_config = sort_config
    if format_config is None:
        format_config = FormattingConfiguration()
    self.format_config = format_config
    if sort_config_overrides is None:
        sort_config_overrides = {}
    self.sort_config_overrides = sort_config_overrides

51,836 |
toml_sort.tomlsort
|
_find_config_override
|
Returns the SortOverrideConfiguration for a particular TomlSortKeys
object, if one exists; otherwise returns None.
Override matches are evaluated as glob patterns by Python's
fnmatch function. If there are multiple matches, an exact match
takes precedence; otherwise the first glob match is returned.
|
def _find_config_override(
self, keys: Optional[TomlSortKeys]
) -> Optional[SortOverrideConfiguration]:
"""Returns a SortOverrideConfiguration for a particular TomlSortKeys
object, if one exists. If none exists returns None.
Override matches are evaluated as glob patterns by the python
fnmatch function. If there are multiple matches, return the
exact match first otherwise return the first match.
"""
if keys is None:
return None
if keys.as_string() in self.sort_config_overrides:
return self.sort_config_overrides.get(keys.as_string())
matches = [
config
for pattern, config in self.sort_config_overrides.items()
if fnmatch.fnmatch(keys.as_string(), pattern)
]
if len(matches) > 0:
return matches[0]
return None
|
(self, keys: Optional[toml_sort.tomlsort.TomlSortKeys]) -> Optional[toml_sort.tomlsort.SortOverrideConfiguration]
|
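The lookup above checks for an exact key first and only then falls back to fnmatch globbing. A standalone sketch of the same precedence rule, using a plain dict of hypothetical patterns instead of SortOverrideConfiguration objects:
```python
import fnmatch

overrides = {
    "tool.poetry.*": "glob-config",         # hypothetical glob override
    "tool.poetry.scripts": "exact-config",  # hypothetical exact override
}

def find_override(dotted_key):
    # Exact matches take precedence over glob matches.
    if dotted_key in overrides:
        return overrides[dotted_key]
    matches = [
        config
        for pattern, config in overrides.items()
        if fnmatch.fnmatch(dotted_key, pattern)
    ]
    return matches[0] if matches else None

print(find_override("tool.poetry.scripts"))       # exact-config
print(find_override("tool.poetry.dependencies"))  # glob-config
```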
51,837 |
toml_sort.tomlsort
|
aot_to_tomlsortitem
|
Turn an AoT into a TomlSortItem, recursing down through its
collections and attaching all the comments to the correct items.
|
def aot_to_tomlsortitem(
self, comments: List[Comment], keys: TomlSortKeys, value: AoT
) -> Tuple[List[Comment], TomlSortItem]:
"""Turn an AoT into a TomlSortItem, recursing down through its
collections and attaching all the comments to the correct items."""
new_aot = AoT([], parsed=True)
children = []
for table in value.body:
[first_child], trailing_comments = self.body_to_tomlsortitems(
[(keys.base, table)]
)
first_child.attached_comments = comments
comments = trailing_comments
children.append(first_child)
item = TomlSortItem(keys, new_aot, children=children)
return comments, item
|
(self, comments: List[tomlkit.items.Comment], keys: toml_sort.tomlsort.TomlSortKeys, value: tomlkit.items.AoT) -> Tuple[List[tomlkit.items.Comment], toml_sort.tomlsort.TomlSortItem]
|
51,838 |
toml_sort.tomlsort
|
array_sort_func
|
Sort function that operates on the .value member of an
ArrayItemGroup and respects the class setting for ignore_case.
|
def array_sort_func(self, value: Tuple[_ArrayItemGroup, Any]) -> str:
"""Sort function that operates on the .value member of an
ArrayItemGroup respects the class setting for ignore_case."""
if value[0].value is None:
return ""
ret = value[0].value.as_string()
if self.sort_config().ignore_case:
ret = ret.lower()
return ret
|
(self, value: Tuple[tomlkit.items._ArrayItemGroup, Any]) -> str
|
51,839 |
toml_sort.tomlsort
|
body_to_tomlsortitems
|
Iterate over Container.body, recursing down into sub-containers and
attaching the comments that are found to the correct TomlSortItem. We
need this iteration because TomlKit puts comments at the end of the
collection they appear in, instead of at the start of the next
collection. For example:
```toml
[xyz]
# Comment
[abc]
```
TomlKit would place the comment from the example into the [xyz]
collection, whereas we would like it to be attached to the [abc]
collection.
So before sorting we have to iterate over the container, correctly
attaching the comments, then undo this process once everything is
sorted.
|
def body_to_tomlsortitems(
self,
parent: List[Tuple[Optional[Key], Item]],
parent_key: Optional[TomlSortKeys] = None,
) -> Tuple[List[TomlSortItem], List[Comment]]:
"""Iterate over Container.body, recursing down into sub-containers
attaching the comments that are found to the correct TomlSortItem. We
need to do this iteration because TomlKit puts comments into end of the
collection they appear in, instead of the start of the next collection.
For example
```toml
[xyz]
# Comment
[abc]
```
    TomlKit would place the comment from the example into the [xyz]
    collection, whereas we would like it to be attached to the [abc]
    collection.
So before sorting we have to iterate over the container, correctly
attaching the comments, then undo this process once everything is
sorted.
"""
items: List[TomlSortItem] = []
comments: List[Comment] = []
for key, value in parent:
if key is None:
if isinstance(value, Whitespace):
comments = []
elif isinstance(value, Comment) and self.comment_config.block:
comment_spaces = (
self.format_config.spaces_before_inline_comment
)
value = normalize_trivia(
value,
comment_spaces=comment_spaces,
)
comments.append(value)
continue
value = convert_tomlkit_buggy_types(value, parent, key.key)
value = normalize_trivia(
value,
self.comment_config.inline,
comment_spaces=self.format_config.spaces_before_inline_comment,
)
full_key = parent_key + key if parent_key else TomlSortKeys(key)
if isinstance(value, Table):
comments, item = self.table_to_tomlsortitem(
comments, full_key, value
)
elif isinstance(value, AoT):
comments, item = self.aot_to_tomlsortitem(
comments, full_key, value
)
elif isinstance(value, Item):
item = TomlSortItem(full_key, value, comments)
comments = []
else:
raise TypeError(
"Invalid TOML; " + str(type(value)) + " is not an Item."
)
items.append(item)
return items, comments
|
(self, parent: List[Tuple[Optional[tomlkit.items.Key], tomlkit.items.Item]], parent_key: Optional[toml_sort.tomlsort.TomlSortKeys] = None) -> Tuple[List[toml_sort.tomlsort.TomlSortItem], List[tomlkit.items.Comment]]
|
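The TomlKit behavior the docstring describes can be checked directly; a small sketch, assuming tomlkit is installed, showing that the comment ends up in the body of the preceding table:
```python
import tomlkit

doc = tomlkit.parse("[xyz]\n# Comment\n[abc]\n")
# The comment is stored at the end of [xyz]'s body rather than ahead of
# [abc], which is exactly what body_to_tomlsortitems compensates for.
print(repr(doc["xyz"].as_string()))  # expected to include '# Comment'
```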
51,840 |
toml_sort.tomlsort
|
format_key
|
Format a key, removing any extra whitespace and making sure that it
will be serialized as key = value, with one space on either side of
the equals sign.
|
@staticmethod
def format_key(key: Key) -> Key:
"""
    Format a key, removing any extra whitespace and making sure that it
    will be serialized as key = value, with one space on either side of
    the equals sign.
"""
key.sep = " = "
key._original = ( # pylint: disable=protected-access
key.as_string().strip()
)
return key
|
(key: tomlkit.items.Key) -> tomlkit.items.Key
|
51,841 |
toml_sort.tomlsort
|
key_sort_func
|
Sort function that looks at a TomlSortItem's keys, respecting the
configured value for ignore_case.
|
def key_sort_func(self, value: TomlSortItem) -> str:
"""Sort function that looks at TomlSortItems keys, respecting the
configured value for ignore_case."""
key = value.keys.base.key
if self.sort_config().ignore_case:
key = key.lower()
return key
|
(self, value: toml_sort.tomlsort.TomlSortItem) -> str
|
51,842 |
toml_sort.tomlsort
|
sort_array
|
Sort and format an inline array item while preserving comments.
|
def sort_array(
self, keys: TomlSortKeys, array: Array, indent_depth: int = 0
) -> Array:
"""Sort and format an inline array item while preserving comments."""
multiline = "\n" in array.as_string()
indent_size = self.format_config.spaces_indent_inline_array
indent = (
"\n" + " " * indent_size * (indent_depth + 1) if multiline else ""
)
comma = "," if multiline else ", "
comments: List[_ArrayItemGroup] = []
new_array_items = []
for array_item in array._value: # pylint: disable=protected-access
if isinstance(array_item.value, Null) and isinstance(
array_item.comment, Comment
):
# Previous comments are orphaned if there is whitespace
if (
array_item.indent is not None
and "\n\n" in array_item.indent.as_string()
):
comments = []
# Comment on its own line within the array
array_item.indent = Whitespace(indent)
array_item.comma = Whitespace("")
array_item.comment.trivia.comment = format_comment(
array_item.comment.trivia.comment
)
comments.append(array_item)
elif array_item.value is not None and not isinstance(
array_item.value, Null
):
# Actual array item
array_item.indent = Whitespace(indent)
array_item.comma = Whitespace(comma)
if array_item.comment is not None:
if self.comment_config.inline:
array_item.comment.trivia.comment = format_comment(
array_item.comment.trivia.comment
)
array_item.comment.trivia.indent = (
" "
* self.format_config.spaces_before_inline_comment
)
else:
array_item.comment = None
new_array_items.append((array_item, comments))
comments = []
array_item.value = self.sort_item(
keys,
array_item.value,
indent_depth=indent_depth + 1
if multiline
else indent_depth,
)
if self.sort_config(keys).inline_arrays:
new_array_items = sorted(new_array_items, key=self.array_sort_func)
new_array_value = []
for array_item, comments in new_array_items:
if comments and self.comment_config.block:
new_array_value.extend(comments)
new_array_value.append(array_item)
if len(new_array_value) != 0 and not (
multiline and self.format_config.trailing_comma_inline_array
):
new_array_value[-1].comma = Whitespace("")
if multiline:
array_item = _ArrayItemGroup()
array_item.value = Whitespace(
"\n" + " " * indent_size * indent_depth
)
new_array_value.append(array_item)
array._value = new_array_value # pylint: disable=protected-access
array._reindex() # pylint: disable=protected-access
array = normalize_trivia(
array,
include_comments=self.comment_config.inline,
comment_spaces=self.format_config.spaces_before_inline_comment,
)
return array
|
(self, keys: toml_sort.tomlsort.TomlSortKeys, array: tomlkit.items.Array, indent_depth: int = 0) -> tomlkit.items.Array
|
51,843 |
toml_sort.tomlsort
|
sort_config
|
Returns the SortConfiguration to use for a particular TomlSortKeys.
This merges the global SortConfiguration with any matching
SortOverrideConfiguration to give the full SortConfiguration
that applies to this key.
|
def sort_config(
self, keys: Optional[TomlSortKeys] = None
) -> SortConfiguration:
"""Returns the SortConfiguration to use for particular TomlSortKeys.
This merges the global SortConfiguration with any matching
SortOverrideConfiguration to give the full SortConfiguration
that applies to this Key.
"""
override = self._find_config_override(keys)
if override is None:
return self._sort_config
main_config = asdict(self._sort_config)
override_config = asdict(override)
merged_config = {}
for key, value in main_config.items():
if key in override_config and override_config[key] is not None:
merged_config[key] = override_config[key]
else:
merged_config[key] = value
return SortConfiguration(**merged_config)
|
(self, keys: Optional[toml_sort.tomlsort.TomlSortKeys] = None) -> toml_sort.tomlsort.SortConfiguration
|
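The merge above is a generic "override wins when not None" pass over two dataclasses via asdict. A self-contained sketch with hypothetical stand-ins for the two configuration classes:
```python
from dataclasses import asdict, dataclass
from typing import Optional

@dataclass
class Config:    # stand-in for SortConfiguration
    tables: bool = True
    ignore_case: bool = False

@dataclass
class Override:  # stand-in for SortOverrideConfiguration
    tables: Optional[bool] = None
    ignore_case: Optional[bool] = None

def merge(base: Config, override: Override) -> Config:
    base_fields = asdict(base)
    override_fields = asdict(override)
    # Any override field that is not None replaces the global field.
    merged = {
        key: override_fields[key]
        if override_fields.get(key) is not None
        else value
        for key, value in base_fields.items()
    }
    return Config(**merged)

print(merge(Config(), Override(ignore_case=True)))
# Config(tables=True, ignore_case=True)
```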
51,844 |
toml_sort.tomlsort
|
sort_inline_table
|
Sort an inline table, recursing into its items.
|
def sort_inline_table(
self, keys: TomlSortKeys, item: Item, indent_depth: int = 0
) -> InlineTable:
"""Sort an inline table, recursing into its items."""
tomlsort_items = [
TomlSortItem(
keys=keys + k,
value=self.sort_item(keys + k, v, indent_depth=indent_depth),
)
for k, v in item.value.body
if not isinstance(v, Whitespace) and k is not None
]
sort_config = self.sort_config(keys)
if sort_config.inline_tables:
tomlsort_items = self.sort_keys(tomlsort_items, sort_config)
new_table = InlineTable(
Container(parsed=True), trivia=item.trivia, new=True
)
for tomlsort_item in tomlsort_items:
normalize_trivia(tomlsort_item.value, include_comments=False)
new_table.append(
self.format_key(tomlsort_item.keys.base), tomlsort_item.value
)
new_table = normalize_trivia(
new_table,
include_comments=self.comment_config.inline,
comment_spaces=self.format_config.spaces_before_inline_comment,
)
return new_table
|
(self, keys: toml_sort.tomlsort.TomlSortKeys, item: tomlkit.items.Item, indent_depth: int = 0) -> tomlkit.items.InlineTable
|
51,845 |
toml_sort.tomlsort
|
sort_item
|
Sort an item, recursing down if the item is an inline table or
array.
|
def sort_item(
self, keys: TomlSortKeys, item: Item, indent_depth: int = 0
) -> Item:
"""Sort an item, recursing down if the item is an inline table or
array."""
if isinstance(item, Array):
return self.sort_array(keys, item, indent_depth=indent_depth)
if isinstance(item, InlineTable):
return self.sort_inline_table(
keys, item, indent_depth=indent_depth
)
return item
|
(self, keys: toml_sort.tomlsort.TomlSortKeys, item: tomlkit.items.Item, indent_depth: int = 0) -> tomlkit.items.Item
|
51,846 |
toml_sort.tomlsort
|
sort_items
|
Sort an iterable full of TomlSortItem, making sure the key is
correctly formatted and recursing into any sub-items.
|
def sort_items(
self, items: Iterable[TomlSortItem]
) -> Iterable[TomlSortItem]:
"""Sort an iterable full of TomlSortItem, making sure the key is
correctly formatted and recursing into any sub-items."""
for item in items:
item.keys.base = self.format_key(item.keys.base)
item.value = self.sort_item(item.keys, item.value)
return items
|
(self, items: Iterable[toml_sort.tomlsort.TomlSortItem]) -> Iterable[toml_sort.tomlsort.TomlSortItem]
|
51,847 |
toml_sort.tomlsort
|
sort_keys
|
Sorts an Iterable of TomlSortItem based on keys.
The sort respects the sort_config.first setting, which allows
overriding the sorted order of keys.
|
def sort_keys(
self, items: Iterable[TomlSortItem], sort_config: SortConfiguration
) -> List[TomlSortItem]:
"""Sorts Iterable of Tomlsort item based on keys.
The sort respects the sort_config.first setting which allows
overriding the sorted order of keys.
"""
def sort_first(item):
if item.keys.base in sort_config.first:
return sort_config.first.index(item.keys.base)
return len(sort_config.first)
items = sorted(items, key=self.key_sort_func)
items = sorted(items, key=sort_first)
return items
|
(self, items: Iterable[toml_sort.tomlsort.TomlSortItem], sort_config: toml_sort.tomlsort.SortConfiguration) -> List[toml_sort.tomlsort.TomlSortItem]
|
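The method relies on Python's sort being stable: an alphabetical pass followed by a pass on position in sort_config.first pins listed keys to the front while the remainder stays alphabetical. Illustrated with plain strings:
```python
first = ["name", "version"]  # illustrative sort_config.first

def sort_first(key):
    return first.index(key) if key in first else len(first)

keys = ["dependencies", "version", "authors", "name"]
keys = sorted(keys)                  # alphabetical pass
keys = sorted(keys, key=sort_first)  # stable pass pinning "first" keys
print(keys)  # ['name', 'version', 'authors', 'dependencies']
```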
51,848 |
toml_sort.tomlsort
|
sorted
|
Sort a TOML string.
|
def sorted(self) -> str:
"""Sort a TOML string."""
clean_toml = clean_toml_text(self.input_toml)
toml_doc = tomlkit.parse(clean_toml)
sorted_toml = self.toml_doc_sorted(toml_doc)
return clean_toml_text(tomlkit.dumps(sorted_toml)).strip() + "\n"
|
(self) -> str
|
51,849 |
toml_sort.tomlsort
|
sorted_children_table
|
Get the sorted children of a table.
|
def sorted_children_table(
self, parent_keys: Optional[TomlSortKeys], parent: List[TomlSortItem]
) -> Iterable[TomlSortItem]:
"""Get the sorted children of a table."""
sort_config = self.sort_config(parent_keys)
tables = coalesce_tables(
item for item in parent if isinstance(item.value, (Table, AoT))
)
non_tables = self.sort_items(
[
item
for item in parent
if not isinstance(item.value, (Table, AoT))
]
)
non_tables_final = (
self.sort_keys(non_tables, sort_config)
if sort_config.table_keys
else non_tables
)
tables_final = (
self.sort_keys(tables, sort_config)
if self.sort_config(parent_keys).tables
else tables
)
return itertools.chain(non_tables_final, tables_final)
|
(self, parent_keys: Optional[toml_sort.tomlsort.TomlSortKeys], parent: List[toml_sort.tomlsort.TomlSortItem]) -> Iterable[toml_sort.tomlsort.TomlSortItem]
|
51,850 |
toml_sort.tomlsort
|
table_previous_item
|
Finds the previous item that we should attach a comment to, in the
case where the previous item is a table.
This takes into account that a table may be a super table.
|
@staticmethod
def table_previous_item(parent_table, grandparent):
"""Finds the previous item that we should attach a comment to, in the
case where the previous item is a table.
    This takes into account that a table may be a super table.
"""
if parent_table.is_super_table():
if len(parent_table) == 0:
return grandparent
last_item = parent_table.value.last_item()
if isinstance(last_item, Table):
return last_item
return parent_table
|
(parent_table, grandparent)
|
51,851 |
toml_sort.tomlsort
|
table_to_tomlsortitem
|
Turn a table into a TomlSortItem, recursing down through its
collections and attaching all the comments to the correct items.
|
def table_to_tomlsortitem(
self, comments: List[Comment], keys: TomlSortKeys, value: Table
) -> Tuple[List[Comment], TomlSortItem]:
"""Turn a table into a TomlSortItem, recursing down through its
collections and attaching all the comments to the correct items."""
children, trailing_comments = self.body_to_tomlsortitems(
value.value.body, parent_key=keys
)
new_table = Table(
Container(parsed=True),
trivia=value.trivia,
is_aot_element=value.is_aot_element(),
is_super_table=value.is_super_table(),
)
if not value.is_super_table():
new_table.trivia.indent = "\n"
first_child = next(iter(children), None)
# If the first child of this item is an AoT, we want the
# comment to be attached to the first table within the AoT,
# rather than the parent AoT object
if first_child and first_child.is_aot:
first_child.children[0].attached_comments = comments
comments = []
# If this item is a super table we want to walk down
# the tree and attach the comment to the first non-super table.
if value.is_super_table():
child_table = children[0]
while child_table.is_super_table:
child_table = child_table.children[0]
child_table.attached_comments = comments
comments = []
item = TomlSortItem(keys, new_table, comments, children)
comments = trailing_comments
return comments, item
|
(self, comments: List[tomlkit.items.Comment], keys: toml_sort.tomlsort.TomlSortKeys, value: tomlkit.items.Table) -> Tuple[List[tomlkit.items.Comment], toml_sort.tomlsort.TomlSortItem]
|
51,852 |
toml_sort.tomlsort
|
toml_doc_sorted
|
Sort a TOMLDocument.
|
def toml_doc_sorted(self, original: TOMLDocument) -> TOMLDocument:
"""Sort a TOMLDocument."""
sorted_document = TOMLDocument(parsed=True)
original_body = original.body
if self.comment_config.header:
original_body = self.write_header_comment(
original_body, sorted_document
)
items, footer_comment = self.body_to_tomlsortitems(original_body)
for item in self.sorted_children_table(None, items):
attach_comments(item, sorted_document)
sorted_document.add(
item.keys.base,
self.toml_elements_sorted(item, sorted_document),
)
if self.comment_config.footer and footer_comment:
sorted_document.add(Whitespace("\n"))
for comment in footer_comment:
sorted_document.add(comment)
return sorted_document
|
(self, original: tomlkit.toml_document.TOMLDocument) -> tomlkit.toml_document.TOMLDocument
|
51,853 |
toml_sort.tomlsort
|
toml_elements_sorted
|
Returns a sorted item, recursing collections to their base.
|
def toml_elements_sorted(
self, original: TomlSortItem, parent: Table | TOMLDocument
) -> Item:
"""Returns a sorted item, recursing collections to their base."""
if original.is_table:
new_table = original.table
for item in self.sorted_children_table(
original.keys, original.children
):
previous_item = self.table_previous_item(new_table, parent)
attach_comments(item, previous_item)
new_table.add(
item.keys.base,
self.toml_elements_sorted(item, previous_item),
)
return new_table
if original.is_aot:
new_aot = normalize_trivia(
original.aot,
self.comment_config.inline,
self.format_config.spaces_before_inline_comment,
)
for table in original.children:
previous_item = next(iter(new_aot), parent)
attach_comments(table, previous_item)
new_aot.append(
self.toml_elements_sorted(
table, next(iter(new_aot), previous_item)
)
)
return new_aot
return original.value
|
(self, original: toml_sort.tomlsort.TomlSortItem, parent: tomlkit.items.Table | tomlkit.toml_document.TOMLDocument) -> tomlkit.items.Item
|
51,854 |
toml_sort.tomlsort
|
write_header_comment
|
Write header comment from the FROM doc to the TO doc.
Only writes comments / whitespace from the beginning of a TOML
document.
|
def write_header_comment(
self,
from_doc_body: List[Tuple[Optional[Key], Item]],
to_doc: TOMLDocument,
) -> List[Tuple[Optional[Key], Item]]:
"""Write header comment from the FROM doc to the TO doc.
Only writes comments / whitespace from the beginning of a TOML
document.
"""
# Discard leading whitespace
while len(from_doc_body) > 0 and isinstance(
from_doc_body[0][1], Whitespace
):
from_doc_body.pop(0)
# Remove the header comment from the input document, adding it to
# the output document, followed by a newline.
spaces = self.format_config.spaces_before_inline_comment
while len(from_doc_body) > 0 and isinstance(
from_doc_body[0][1], Comment
):
_, value = from_doc_body.pop(0)
value = normalize_trivia(
value,
comment_spaces=spaces,
)
to_doc.add(value)
to_doc.add(ws("\n"))
return from_doc_body
|
(self, from_doc_body: List[Tuple[Optional[tomlkit.items.Key], tomlkit.items.Item]], to_doc: tomlkit.toml_document.TOMLDocument) -> List[Tuple[Optional[tomlkit.items.Key], tomlkit.items.Item]]
|
51,858 |
pycountry
|
Currencies
|
Provides access to an ISO 4217 database (Currencies).
|
class Currencies(pycountry.db.Database):
"""Provides access to an ISO 4217 database (Currencies)."""
data_class = "Currency"
root_key = "4217"
|
(filename: str) -> None
|
51,859 |
pycountry.db
|
__init__
| null |
def __init__(self, filename: str) -> None:
self.filename = filename
self._is_loaded = False
self._load_lock = threading.Lock()
if isinstance(self.data_class, str):
self.factory = type(self.data_class, (Data,), {})
else:
self.factory = self.data_class
|
(self, filename: str) -> NoneType
|
51,860 |
pycountry.db
|
load_if_needed
| null |
def lazy_load(f):
def load_if_needed(self, *args, **kw):
if not self._is_loaded:
with self._load_lock:
self._load()
return f(self, *args, **kw)
return load_if_needed
|
(self, *args, **kw)
|
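A self-contained sketch of how this decorator is meant to be used: the wrapped method triggers _load() once, under the lock, on first access. The Database class here is a minimal illustrative stand-in, not pycountry's full implementation.
```python
import threading

def lazy_load(f):
    def load_if_needed(self, *args, **kw):
        if not self._is_loaded:
            with self._load_lock:
                self._load()
        return f(self, *args, **kw)
    return load_if_needed

class Database:
    def __init__(self):
        self._is_loaded = False
        self._load_lock = threading.Lock()

    def _load(self):
        if self._is_loaded:  # re-checked under the lock, as in the real code
            return
        self.objects = ["loaded"]
        self._is_loaded = True

    @lazy_load
    def lookup(self):
        return self.objects

print(Database().lookup())  # first call triggers _load(): ['loaded']
```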
51,862 |
pycountry.db
|
_clear
| null |
def _clear(self):
self._is_loaded = False
self.objects = []
self.index_names = set()
self.indices = {}
|
(self)
|
51,863 |
pycountry.db
|
_load
| null |
def _load(self) -> None:
if self._is_loaded:
        # Helps keep the load_if_needed wrapper easier to read.
return
self._clear()
with open(self.filename, encoding="utf-8") as f:
tree = json.load(f)
for entry in tree[self.root_key]:
obj = self.factory(**entry)
self.objects.append(obj)
# Inject into index.
for key, value in entry.items():
if key in self.no_index:
continue
# Lookups and searches are case insensitive. Normalize
# here.
index = self.indices.setdefault(key, {})
value = value.lower()
if value in index:
                logger.debug(
                    "%s %r already taken in index %r and will be "
                    "ignored. This is an error in the databases.",
                    self.factory.__name__,
                    value,
                    key,
                )
index[value] = obj
self._is_loaded = True
|
(self) -> NoneType
|
51,868 |
pycountry
|
ExistingCountries
|
Provides access to an ISO 3166 database (Countries).
|
class ExistingCountries(pycountry.db.Database):
"""Provides access to an ISO 3166 database (Countries)."""
data_class = pycountry.db.Country
root_key = "3166-1"
def search_fuzzy(self, query: str) -> List[Type["ExistingCountries"]]:
query = remove_accents(query.strip().lower())
# A country-code to points mapping for later sorting countries
# based on the query's matching incidence.
results: dict[str, int] = {}
def add_result(country: "pycountry.db.Country", points: int) -> None:
results.setdefault(country.alpha_2, 0)
results[country.alpha_2] += points
# Prio 1: exact matches on country names
try:
add_result(self.lookup(query), 50)
except LookupError:
pass
# Prio 2: exact matches on subdivision names
        match_subdivisions = pycountry.Subdivisions.match(
            self=subdivisions, query=query
        )
        for candidate in match_subdivisions:
            add_result(candidate.country, 49)
# Prio 3: partial matches on country names
for candidate in self:
# Higher priority for a match on the common name
for v in [
candidate._fields.get("name"),
candidate._fields.get("official_name"),
candidate._fields.get("comment"),
]:
if v is not None:
v = remove_accents(v.lower())
if query in v:
# This prefers countries with a match early in their name
# and also balances against countries with a number of
# partial matches and their name containing 'new' in the
# middle
add_result(
candidate, max([5, 30 - (2 * v.find(query))])
)
break
# Prio 4: partial matches on subdivision names
partial_match_subdivisions = pycountry.Subdivisions.partial_match(
self=subdivisions, query=query
)
for candidate in partial_match_subdivisions:
v = candidate._fields.get("name")
v = remove_accents(v.lower())
if query in v:
add_result(candidate.country, max([1, 5 - v.find(query)]))
if not results:
raise LookupError(query)
sorted_results = [
self.get(alpha_2=x[0])
            # Sort by points (descending) first and by alpha-2 code
            # (ascending) second; negating the points lets a single
            # ascending sort stay stable across both criteria.
for x in sorted(results.items(), key=lambda x: (-x[1], x[0]))
]
return sorted_results
|
(filename: str) -> None
|
51,878 |
pycountry
|
search_fuzzy
| null |
def search_fuzzy(self, query: str) -> List[Type["ExistingCountries"]]:
query = remove_accents(query.strip().lower())
# A country-code to points mapping for later sorting countries
# based on the query's matching incidence.
results: dict[str, int] = {}
def add_result(country: "pycountry.db.Country", points: int) -> None:
results.setdefault(country.alpha_2, 0)
results[country.alpha_2] += points
# Prio 1: exact matches on country names
try:
add_result(self.lookup(query), 50)
except LookupError:
pass
# Prio 2: exact matches on subdivision names
    match_subdivisions = pycountry.Subdivisions.match(
        self=subdivisions, query=query
    )
    for candidate in match_subdivisions:
        add_result(candidate.country, 49)
# Prio 3: partial matches on country names
for candidate in self:
# Higher priority for a match on the common name
for v in [
candidate._fields.get("name"),
candidate._fields.get("official_name"),
candidate._fields.get("comment"),
]:
if v is not None:
v = remove_accents(v.lower())
if query in v:
# This prefers countries with a match early in their name
# and also balances against countries with a number of
# partial matches and their name containing 'new' in the
# middle
add_result(
candidate, max([5, 30 - (2 * v.find(query))])
)
break
# Prio 4: partial matches on subdivision names
partial_match_subdivisions = pycountry.Subdivisions.partial_match(
self=subdivisions, query=query
)
for candidate in partial_match_subdivisions:
v = candidate._fields.get("name")
v = remove_accents(v.lower())
if query in v:
add_result(candidate.country, max([1, 5 - v.find(query)]))
if not results:
raise LookupError(query)
sorted_results = [
self.get(alpha_2=x[0])
        # Sort by points (descending) first and by alpha-2 code
        # (ascending) second; negating the points lets a single
        # ascending sort stay stable across both criteria.
for x in sorted(results.items(), key=lambda x: (-x[1], x[0]))
]
return sorted_results
|
(self, query: str) -> List[Type[pycountry.ExistingCountries]]
|
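Typical use goes through the module-level countries database; a short sketch, assuming pycountry and its bundled ISO data are installed (the exact ranking can vary between data releases):
```python
import pycountry

results = pycountry.countries.search_fuzzy("England")
# Exact country names score 50, subdivision matches 49, and partial
# matches less, so the United Kingdom is expected to rank first here.
print(results[0].name)
```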
51,879 |
pycountry
|
HistoricCountries
|
Provides access to an ISO 3166-3 database
(Countries that have been removed from the standard).
|
class HistoricCountries(ExistingCountries):
"""Provides access to an ISO 3166-3 database
(Countries that have been removed from the standard)."""
data_class = pycountry.db.Country
root_key = "3166-3"
|
(filename: str) -> None
|
51,890 |
pycountry
|
LanguageFamilies
|
Provides access to an ISO 639-5 database
(Language Families and Groups).
|
class LanguageFamilies(pycountry.db.Database):
"""Provides access to an ISO 639-5 database
(Language Families and Groups)."""
data_class = "LanguageFamily"
root_key = "639-5"
|
(filename: str) -> None
|
51,900 |
pycountry
|
Languages
|
Provides access to an ISO 639-1/2T/3 database (Languages).
|
class Languages(pycountry.db.Database):
"""Provides access to an ISO 639-1/2T/3 database (Languages)."""
no_index = ["status", "scope", "type", "inverted_name", "common_name"]
data_class = "Language"
root_key = "639-3"
|
(filename: str) -> None
|
51,910 |
pycountry
|
Scripts
|
Provides access to an ISO 15924 database (Scripts).
|
class Scripts(pycountry.db.Database):
"""Provides access to an ISO 15924 database (Scripts)."""
data_class = "Script"
root_key = "15924"
|
(filename: str) -> None
|
51,920 |
pycountry
|
SubdivisionHierarchy
| null |
class SubdivisionHierarchy(pycountry.db.Data):
def __init__(self, **kw):
if "parent" in kw:
kw["parent_code"] = kw["parent"]
else:
kw["parent_code"] = None
super().__init__(**kw)
self.country_code = self.code.split("-")[0]
if self.parent_code is not None:
self.parent_code = f"{self.country_code}-{self.parent_code}"
@property
def country(self):
return countries.get(alpha_2=self.country_code)
@property
def parent(self):
if not self.parent_code:
return None
return subdivisions.get(code=self.parent_code)
|
(**kw)
|
51,921 |
pycountry.db
|
__dir__
| null |
def __dir__(self) -> List[str]:
return dir(self.__class__) + list(self._fields)
|
(self) -> List[str]
|
51,922 |
pycountry.db
|
__getattr__
| null |
def __getattr__(self, key):
if key in self._fields:
return self._fields[key]
raise AttributeError()
|
(self, key)
|
51,923 |
pycountry
|
__init__
| null |
def __init__(self, **kw):
if "parent" in kw:
kw["parent_code"] = kw["parent"]
else:
kw["parent_code"] = None
super().__init__(**kw)
self.country_code = self.code.split("-")[0]
if self.parent_code is not None:
self.parent_code = f"{self.country_code}-{self.parent_code}"
|
(self, **kw)
|
51,924 |
pycountry.db
|
__iter__
| null |
def __iter__(self):
# allow casting into a dict
for field in self._fields:
yield field, getattr(self, field)
|
(self)
|
51,925 |
pycountry.db
|
__repr__
| null |
def __repr__(self) -> str:
cls_name = self.__class__.__name__
fields = ", ".join("%s=%r" % i for i in sorted(self._fields.items()))
return f"{cls_name}({fields})"
|
(self) -> str
|
51,926 |
pycountry.db
|
__setattr__
| null |
def __setattr__(self, key: str, value: str) -> None:
if key != "_fields":
self._fields[key] = value
super().__setattr__(key, value)
|
(self, key: str, value: str) -> NoneType
|
51,927 |
pycountry
|
Subdivisions
| null |
class Subdivisions(pycountry.db.Database):
# Note: subdivisions can be hierarchical to other subdivisions. The
# parent_code attribute is related to other subdivisions, *not*
# the country!
data_class = SubdivisionHierarchy
no_index = ["name", "parent_code", "parent", "type"]
root_key = "3166-2"
def _load(self, *args, **kw):
super()._load(*args, **kw)
# Add index for the country code.
self.indices["country_code"] = {}
for subdivision in self:
divs = self.indices["country_code"].setdefault(
subdivision.country_code.lower(), set()
)
divs.add(subdivision)
def get(self, **kw):
default = kw.setdefault("default", None)
subdivisions = super().get(**kw)
if subdivisions is default and "country_code" in kw:
# This handles the case where we know about a country but there
# are no subdivisions: we return an empty list in this case
# (sticking to the expected type here) instead of None.
if countries.get(alpha_2=kw["country_code"]) is not None:
return []
return subdivisions
def match(self, query):
query = remove_accents(query.strip().lower())
matching_candidates = []
for candidate in subdivisions:
for v in candidate._fields.values():
if v is not None:
v = remove_accents(v.lower())
# Some names include alternative versions which we want to
# match exactly.
for w in v.split(";"):
if w == query:
matching_candidates.append(candidate)
break
return matching_candidates
def partial_match(self, query):
query = remove_accents(query.strip().lower())
matching_candidates = []
for candidate in subdivisions:
v = candidate._fields.get("name")
v = remove_accents(v.lower())
if query in v:
matching_candidates.append(candidate)
return matching_candidates
def search_fuzzy(self, query: str) -> List[Type["Subdivisions"]]:
query = remove_accents(query.strip().lower())
# A Subdivision's code to points mapping for later sorting subdivisions
# based on the query's matching incidence.
results: dict[str, int] = {}
def add_result(
subdivision: "pycountry.db.Subdivision", points: int
) -> None:
results.setdefault(subdivision.code, 0)
results[subdivision.code] += points
# Prio 1: exact matches on subdivision names
match_subdivisions = self.match(query)
for candidate in match_subdivisions:
add_result(candidate, 50)
# Prio 2: partial matches on subdivision names
partial_match_subdivisions = self.partial_match(query)
for candidate in partial_match_subdivisions:
v = candidate._fields.get("name")
v = remove_accents(v.lower())
if query in v:
add_result(candidate, max([1, 5 - v.find(query)]))
if not results:
raise LookupError(query)
sorted_results = [
self.get(code=x[0])
            # Sort by points (descending) first and by subdivision code
            # (ascending) second; negating the points lets a single
            # ascending sort stay stable across both criteria.
for x in sorted(results.items(), key=lambda x: (-x[1], x[0]))
]
return sorted_results
|
(filename: str) -> None
|
51,932 |
pycountry
|
_load
| null |
def _load(self, *args, **kw):
super()._load(*args, **kw)
# Add index for the country code.
self.indices["country_code"] = {}
for subdivision in self:
divs = self.indices["country_code"].setdefault(
subdivision.country_code.lower(), set()
)
divs.add(subdivision)
|
(self, *args, **kw)
|
51,934 |
pycountry
|
get
| null |
def get(self, **kw):
default = kw.setdefault("default", None)
subdivisions = super().get(**kw)
if subdivisions is default and "country_code" in kw:
# This handles the case where we know about a country but there
# are no subdivisions: we return an empty list in this case
# (sticking to the expected type here) instead of None.
if countries.get(alpha_2=kw["country_code"]) is not None:
return []
return subdivisions
|
(self, **kw)
|
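A usage sketch of the special-cased lookup, assuming pycountry is installed: a valid country with no subdivision entries yields [], while an unknown country code falls through to the default of None.
```python
import pycountry

us = pycountry.subdivisions.get(country_code="US")
print(len(us))                                        # e.g. 57 entries
print(pycountry.subdivisions.get(country_code="XX"))  # None: unknown code
```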
51,936 |
pycountry
|
match
| null |
def match(self, query):
query = remove_accents(query.strip().lower())
matching_candidates = []
for candidate in subdivisions:
for v in candidate._fields.values():
if v is not None:
v = remove_accents(v.lower())
# Some names include alternative versions which we want to
# match exactly.
for w in v.split(";"):
if w == query:
matching_candidates.append(candidate)
break
return matching_candidates
|
(self, query)
|
51,937 |
pycountry
|
partial_match
| null |
def partial_match(self, query):
query = remove_accents(query.strip().lower())
matching_candidates = []
for candidate in subdivisions:
v = candidate._fields.get("name")
v = remove_accents(v.lower())
if query in v:
matching_candidates.append(candidate)
return matching_candidates
|
(self, query)
|
51,939 |
pycountry
|
search_fuzzy
| null |
def search_fuzzy(self, query: str) -> List[Type["Subdivisions"]]:
query = remove_accents(query.strip().lower())
# A Subdivision's code to points mapping for later sorting subdivisions
# based on the query's matching incidence.
results: dict[str, int] = {}
def add_result(
subdivision: "pycountry.db.Subdivision", points: int
) -> None:
results.setdefault(subdivision.code, 0)
results[subdivision.code] += points
# Prio 1: exact matches on subdivision names
match_subdivisions = self.match(query)
for candidate in match_subdivisions:
add_result(candidate, 50)
# Prio 2: partial matches on subdivision names
partial_match_subdivisions = self.partial_match(query)
for candidate in partial_match_subdivisions:
v = candidate._fields.get("name")
v = remove_accents(v.lower())
if query in v:
add_result(candidate, max([1, 5 - v.find(query)]))
if not results:
raise LookupError(query)
sorted_results = [
self.get(code=x[0])
        # Sort by points (descending) first and by subdivision code
        # (ascending) second; negating the points lets a single
        # ascending sort stay stable across both criteria.
for x in sorted(results.items(), key=lambda x: (-x[1], x[0]))
]
return sorted_results
|
(self, query: str) -> List[Type[pycountry.Subdivisions]]
|
51,941 |
pycountry
|
get_version
| null |
def get_version(distribution_name: str) -> Optional[str]:
try:
return importlib_metadata.version(distribution_name)
except importlib_metadata.PackageNotFoundError:
return "n/a"
|
(distribution_name: str) -> Optional[str]
|
51,946 |
pycountry
|
remove_accents
| null |
def remove_accents(input_str: str) -> str:
output_str = input_str
if not input_str.isascii():
# Borrowed from https://stackoverflow.com/a/517974/1509718
nfkd_form = unicodedata.normalize("NFKD", input_str)
output_str = "".join(
[c for c in nfkd_form if not unicodedata.combining(c)]
)
return output_str
|
(input_str: str) -> str
|
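A couple of quick checks, assuming remove_accents is importable from pycountry's top-level module as shown above:
```python
from pycountry import remove_accents

assert remove_accents("Åland") == "Aland"      # NFKD strips combining marks
assert remove_accents("Curaçao") == "Curacao"
assert remove_accents("plain") == "plain"      # ASCII input passes through
```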
51,947 |
pycountry
|
resource_filename
| null |
def resource_filename(package_or_requirement: str, resource_name: str) -> str:
return str(
importlib_resources.files(package_or_requirement) / resource_name
)
|
(package_or_requirement: str, resource_name: str) -> str
|
51,949 |
_queue
|
Empty
|
Exception raised by Queue.get(block=0)/get_nowait().
|
from _queue import Empty
| null |
51,950 |
multiprocessing_logging
|
MultiProcessingHandler
| null |
class MultiProcessingHandler(logging.Handler):
def __init__(self, name, sub_handler=None):
super(MultiProcessingHandler, self).__init__()
if sub_handler is None:
sub_handler = logging.StreamHandler()
self.sub_handler = sub_handler
self.setLevel(self.sub_handler.level)
self.setFormatter(self.sub_handler.formatter)
self.filters = self.sub_handler.filters
self.queue = multiprocessing.Queue(-1)
self._is_closed = False
# The thread handles receiving records asynchronously.
self._receive_thread = threading.Thread(target=self._receive, name=name)
self._receive_thread.daemon = True
self._receive_thread.start()
def setFormatter(self, fmt):
super(MultiProcessingHandler, self).setFormatter(fmt)
self.sub_handler.setFormatter(fmt)
def _receive(self):
while True:
try:
if self._is_closed and self.queue.empty():
break
record = self.queue.get(timeout=0.2)
self.sub_handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except (EOFError, OSError):
break # The queue was closed by child?
except Empty:
pass # This periodically checks if the logger is closed.
except:
from sys import stderr
from traceback import print_exc
print_exc(file=stderr)
raise
self.queue.close()
self.queue.join_thread()
def _send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe.
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self._send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
if not self._is_closed:
self._is_closed = True
self._receive_thread.join(5.0) # Waits for receive queue to empty.
self.sub_handler.close()
super(MultiProcessingHandler, self).close()
|
(name, sub_handler=None)
|
51,951 |
multiprocessing_logging
|
__init__
| null |
def __init__(self, name, sub_handler=None):
super(MultiProcessingHandler, self).__init__()
if sub_handler is None:
sub_handler = logging.StreamHandler()
self.sub_handler = sub_handler
self.setLevel(self.sub_handler.level)
self.setFormatter(self.sub_handler.formatter)
self.filters = self.sub_handler.filters
self.queue = multiprocessing.Queue(-1)
self._is_closed = False
# The thread handles receiving records asynchronously.
self._receive_thread = threading.Thread(target=self._receive, name=name)
self._receive_thread.daemon = True
self._receive_thread.start()
|
(self, name, sub_handler=None)
|
51,954 |
multiprocessing_logging
|
_format_record
| null |
def _format_record(self, record):
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe.
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
self.format(record)
record.exc_info = None
return record
|
(self, record)
|
51,955 |
multiprocessing_logging
|
_receive
| null |
def _receive(self):
while True:
try:
if self._is_closed and self.queue.empty():
break
record = self.queue.get(timeout=0.2)
self.sub_handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except (EOFError, OSError):
break # The queue was closed by child?
except Empty:
pass # This periodically checks if the logger is closed.
except:
from sys import stderr
from traceback import print_exc
print_exc(file=stderr)
raise
self.queue.close()
self.queue.join_thread()
|
(self)
|
51,956 |
multiprocessing_logging
|
_send
| null |
def _send(self, s):
self.queue.put_nowait(s)
|
(self, s)
|
51,959 |
multiprocessing_logging
|
close
| null |
def close(self):
if not self._is_closed:
self._is_closed = True
self._receive_thread.join(5.0) # Waits for receive queue to empty.
self.sub_handler.close()
super(MultiProcessingHandler, self).close()
|
(self)
|
51,961 |
multiprocessing_logging
|
emit
| null |
def emit(self, record):
try:
s = self._format_record(record)
self._send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
|
(self, record)
|
51,970 |
multiprocessing_logging
|
setFormatter
| null |
def setFormatter(self, fmt):
super(MultiProcessingHandler, self).setFormatter(fmt)
self.sub_handler.setFormatter(fmt)
|
(self, fmt)
|
51,973 |
multiprocessing_logging
|
install_mp_handler
|
Wraps the handlers in the given Logger with a MultiProcessingHandler.
:param logger: whose handlers to wrap. By default, the root logger.
|
def install_mp_handler(logger=None):
"""Wraps the handlers in the given Logger with an MultiProcessingHandler.
:param logger: whose handlers to wrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for i, orig_handler in enumerate(list(logger.handlers)):
handler = MultiProcessingHandler("mp-handler-{0}".format(i), sub_handler=orig_handler)
logger.removeHandler(orig_handler)
logger.addHandler(handler)
|
(logger=None)
|
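A minimal end-to-end sketch, assuming the multiprocessing-logging package is installed and a fork-capable start method; install_mp_handler() must run before the pool is created so workers inherit the wrapped handlers.
```python
import logging
import multiprocessing

from multiprocessing_logging import install_mp_handler

def work(n):
    logging.getLogger(__name__).info("hello from task %d", n)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    install_mp_handler()  # wrap the root logger's handlers first
    with multiprocessing.Pool(2) as pool:
        pool.map(work, range(4))
```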
51,977 |
multiprocessing_logging
|
uninstall_mp_handler
|
Unwraps the handlers in the given Logger from a MultiProcessingHandler wrapper.
:param logger: whose handlers to unwrap. By default, the root logger.
|
def uninstall_mp_handler(logger=None):
"""Unwraps the handlers in the given Logger from a MultiProcessingHandler wrapper
:param logger: whose handlers to unwrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for handler in list(logger.handlers):
if isinstance(handler, MultiProcessingHandler):
orig_handler = handler.sub_handler
logger.removeHandler(handler)
logger.addHandler(orig_handler)
|
(logger=None)
|
51,980 |
sqlalchemy_cockroachdb.transaction
|
run_transaction
|
Run a transaction with retries.
``callback()`` will be called with one argument to execute the
transaction. ``callback`` may be called more than once; it should have
no side effects other than writes to the database on the given
connection. ``callback`` should not call ``commit()`` or ``rollback()``;
these will be called automatically.
The ``transactor`` argument may be one of the following types:
* `sqlalchemy.engine.Connection`: the same connection is passed to the callback.
* `sqlalchemy.engine.Engine`: a connection is created and passed to the callback.
* `sqlalchemy.orm.sessionmaker`: a session is created and passed to the callback.
``max_retries`` is an optional integer that specifies how many times the
transaction should be retried before giving up.
``max_backoff`` is an optional integer that specifies the capped number of seconds
for the exponential back-off.
|
def run_transaction(transactor, callback, max_retries=None, max_backoff=0):
"""Run a transaction with retries.
``callback()`` will be called with one argument to execute the
transaction. ``callback`` may be called more than once; it should have
no side effects other than writes to the database on the given
    connection. ``callback`` should not call ``commit()`` or ``rollback()``;
these will be called automatically.
The ``transactor`` argument may be one of the following types:
* `sqlalchemy.engine.Connection`: the same connection is passed to the callback.
* `sqlalchemy.engine.Engine`: a connection is created and passed to the callback.
* `sqlalchemy.orm.sessionmaker`: a session is created and passed to the callback.
``max_retries`` is an optional integer that specifies how many times the
transaction should be retried before giving up.
``max_backoff`` is an optional integer that specifies the capped number of seconds
for the exponential back-off.
"""
if isinstance(transactor, (sqlalchemy.engine.Connection, sqlalchemy.orm.Session)):
return _txn_retry_loop(transactor, callback, max_retries, max_backoff)
elif isinstance(transactor, sqlalchemy.engine.Engine):
with transactor.connect() as connection:
return _txn_retry_loop(connection, callback, max_retries, max_backoff)
elif isinstance(transactor, sqlalchemy.orm.sessionmaker):
session = transactor()
return _txn_retry_loop(session, callback, max_retries, max_backoff)
else:
        raise TypeError(
            "don't know how to run a transaction on %s" % type(transactor)
        )
|
(transactor, callback, max_retries=None, max_backoff=0)
|
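A usage sketch with a sessionmaker transactor, assuming sqlalchemy-cockroachdb re-exports run_transaction at the package top level (as its documentation shows); the connection URL and accounts table are placeholders.
```python
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
from sqlalchemy_cockroachdb import run_transaction

engine = create_engine("cockroachdb://root@localhost:26257/defaultdb")

def transfer(session):
    # May run more than once on retry; commit/rollback are handled by
    # run_transaction, so the callback only issues statements.
    session.execute(text("UPDATE accounts SET balance = balance - 10 WHERE id = 1"))
    session.execute(text("UPDATE accounts SET balance = balance + 10 WHERE id = 2"))

run_transaction(sessionmaker(bind=engine), transfer)
```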