code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
# begin arc support block ########################
try:
p.large_arc
return p.point(t)
except:
pass
# end arc support block ##########################
deg = len(p) - 1
if deg == 3:
return p[0] + t*(
3*(p[1] - p[0]) + t*(
3*(p[0] + p[2]) - 6*p[1] + t*(
-p[0] + 3*(p[1] - p[2]) + p[3])))
elif deg == 2:
return p[0] + t*(
2*(p[1] - p[0]) + t*(
p[0] - 2*p[1] + p[2]))
elif deg == 1:
return p[0] + t*(p[1] - p[0])
elif deg == 0:
return p[0]
else:
bern = bernstein(deg, t)
return sum(bern[k]*p[k] for k in range(deg+1))
|
def bezier_point(p, t)
|
Evaluates the Bezier curve given by its control points, p, at t.
Note: Uses Horner's rule for cubic and lower order Bezier curves.
Warning: Be concerned about numerical stability when using this function
with high order curves.
| 2.635218 | 2.559951 | 1.029402 |
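A minimal usage sketch, assuming `bezier_point` as defined above is in scope and that control points are complex numbers encoding x + 1j*y (as elsewhere in this collection):

```python
# Evaluate a cubic Bezier at the endpoints and at its midpoint.
p = (0 + 0j, 1 + 2j, 3 + 2j, 4 + 0j)   # cubic control points
print(bezier_point(p, 0.0))            # 0j        (start point)
print(bezier_point(p, 0.5))            # (2+1.5j)  (curve midpoint)
print(bezier_point(p, 1.0))            # (4+0j)    (end point)
```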
if len(p) == 4:
coeffs = (-p[0] + 3*(p[1] - p[2]) + p[3],
3*(p[0] - 2*p[1] + p[2]),
3*(p[1]-p[0]),
p[0])
elif len(p) == 3:
coeffs = (p[0] - 2*p[1] + p[2],
2*(p[1] - p[0]),
p[0])
elif len(p) == 2:
coeffs = (p[1]-p[0],
p[0])
elif len(p) == 1:
coeffs = p
else:
# https://en.wikipedia.org/wiki/Bezier_curve#Polynomial_form
n = len(p) - 1
coeffs = [fac(n)//fac(n-j) * sum(
(-1)**(i+j) * p[i] / (fac(i) * fac(j-i)) for i in range(j+1))
for j in range(n+1)]
coeffs.reverse()
if not numpy_ordering:
coeffs = coeffs[::-1] # can't use .reverse() as might be tuple
if return_poly1d:
return poly1d(coeffs)
return coeffs
|
def bezier2polynomial(p, numpy_ordering=True, return_poly1d=False)
|
Converts a tuple of Bezier control points to a tuple of coefficients
of the expanded polynomial.
return_poly1d : returns a numpy.poly1d object. This makes computations
of derivatives/anti-derivatives and many other operations quite quick.
numpy_ordering : By default (to accommodate numpy) the coefficients will
be output in reverse standard order.
| 2.310863 | 2.373061 | 0.97379 |
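A sketch of the coefficient ordering, assuming `bezier2polynomial` above is in scope. The quadratic with control points 0, 1j, 2 expands to B(t) = (2-2j)t^2 + 2j*t:

```python
# Default numpy_ordering=True puts the highest power first.
print(bezier2polynomial((0, 1j, 2)))                        # ((2-2j), 2j, 0)
print(bezier2polynomial((0, 1j, 2), numpy_ordering=False))  # (0, 2j, (2-2j))
```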
if isinstance(poly, poly1d):
c = poly.coeffs
else:
c = poly
order = len(c)-1
if order == 3:
bpoints = (c[3], c[2]/3 + c[3], (c[1] + 2*c[2])/3 + c[3],
c[0] + c[1] + c[2] + c[3])
elif order == 2:
bpoints = (c[2], c[1]/2 + c[2], c[0] + c[1] + c[2])
elif order == 1:
bpoints = (c[1], c[0] + c[1])
else:
raise AssertionError("This function is only implemented for linear, "
"quadratic, and cubic polynomials.")
return bpoints
|
def polynomial2bezier(poly)
|
Converts a cubic or lower order Polynomial object (or a sequence of
coefficients) to a CubicBezier, QuadraticBezier, or Line object as
appropriate.
| 2.195612 | 2.188767 | 1.003127 |
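A round-trip sketch, assuming `polynomial2bezier` (and its counterpart above) are in scope:

```python
# p(t) = t**3 + 2*t**2 + 3*t + 4, coefficients highest power first.
bpoints = polynomial2bezier((1, 2, 3, 4))
print(bpoints)   # (4, 5.0, 6.666..., 10) -- the cubic control points
# bezier2polynomial(bpoints) recovers (1.0, 2.0, 3.0, 4.0) up to rounding.
```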
def split_bezier_recursion(bpoints_left_, bpoints_right_, bpoints_, t_):
if len(bpoints_) == 1:
bpoints_left_.append(bpoints_[0])
bpoints_right_.append(bpoints_[0])
else:
new_points = [None]*(len(bpoints_) - 1)
bpoints_left_.append(bpoints_[0])
bpoints_right_.append(bpoints_[-1])
for i in range(len(bpoints_) - 1):
new_points[i] = (1 - t_)*bpoints_[i] + t_*bpoints_[i + 1]
bpoints_left_, bpoints_right_ = split_bezier_recursion(
bpoints_left_, bpoints_right_, new_points, t_)
return bpoints_left_, bpoints_right_
bpoints_left = []
bpoints_right = []
bpoints_left, bpoints_right = \
split_bezier_recursion(bpoints_left, bpoints_right, bpoints, t)
bpoints_right.reverse()
return bpoints_left, bpoints_right
|
def split_bezier(bpoints, t)
|
Uses deCasteljau's recursion to split the Bezier curve at t into two
Bezier curves of the same order.
| 1.762626 | 1.714891 | 1.027836 |
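A minimal sketch, assuming `split_bezier` above is in scope; both halves share the split point B(0.5):

```python
# Split the quadratic with control points 0, 1j, 2 at t = 0.5.
left, right = split_bezier((0, 1j, 2), 0.5)
print(left)    # [0, 0.5j, (0.5+0.5j)]      curve restricted to [0, 0.5]
print(right)   # [(0.5+0.5j), (1+0.5j), 2]  curve restricted to [0.5, 1]
```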
local_extremizers = [0, 1]
a = [z.real for z in p]  # real parts, defined up front so the general-degree fallthrough below can use them
if len(p) == 4:  # cubic case
denom = a[0] - 3*a[1] + 3*a[2] - a[3]
if denom != 0:
delta = a[1]**2 - (a[0] + a[1])*a[2] + a[2]**2 + (a[0] - a[1])*a[3]
if delta >= 0: # otherwise no local extrema
sqdelta = sqrt(delta)
tau = a[0] - 2*a[1] + a[2]
r1 = (tau + sqdelta)/denom
r2 = (tau - sqdelta)/denom
if 0 < r1 < 1:
local_extremizers.append(r1)
if 0 < r2 < 1:
local_extremizers.append(r2)
local_extrema = [bezier_point(a, t) for t in local_extremizers]
return min(local_extrema), max(local_extrema)
# find reverse standard coefficients of the derivative
dcoeffs = bezier2polynomial(a, return_poly1d=True).deriv().coeffs
# find real roots, r, such that 0 <= r <= 1
local_extremizers += polyroots01(dcoeffs)
local_extrema = [bezier_point(a, t) for t in local_extremizers]
return min(local_extrema), max(local_extrema)
|
def bezier_real_minmax(p)
|
returns the minimum and maximum for any real cubic bezier
| 3.166041 | 3.161221 | 1.001525 |
# begin arc support block ########################
try:
bla = bez.large_arc
return bez.bbox() # added to support Arc objects
except:
pass
# end arc support block ##########################
if len(bez) == 4:
xmin, xmax = bezier_real_minmax([p.real for p in bez])
ymin, ymax = bezier_real_minmax([p.imag for p in bez])
return xmin, xmax, ymin, ymax
poly = bezier2polynomial(bez, return_poly1d=True)
x = real(poly)
y = imag(poly)
dx = x.deriv()
dy = y.deriv()
x_extremizers = [0, 1] + polyroots(dx, realroots=True,
condition=lambda r: 0 < r < 1)
y_extremizers = [0, 1] + polyroots(dy, realroots=True,
condition=lambda r: 0 < r < 1)
x_extrema = [x(t) for t in x_extremizers]
y_extrema = [y(t) for t in y_extremizers]
return min(x_extrema), max(x_extrema), min(y_extrema), max(y_extrema)
|
def bezier_bounding_box(bez)
|
returns the bounding box for the segment in the form
(xmin, xmax, ymin, ymax).
Warning: For the non-cubic case this is not particularly efficient.
| 3.319636 | 3.272073 | 1.014536 |
return max(0, min(b, d) - max(a, c))
|
def interval_intersection_width(a, b, c, d)
|
returns the width of the intersection of intervals [a,b] and [c,d]
(thinking of these as intervals on the real number line)
| 3.432924 | 3.204555 | 1.071264 |
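Two quick worked cases, assuming `interval_intersection_width` above is in scope:

```python
print(interval_intersection_width(0, 5, 3, 8))   # 2   overlap is [3, 5]
print(interval_intersection_width(0, 1, 2, 3))   # 0   disjoint intervals
```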
xmin1, xmax1, ymin1, ymax1 = box1
xmin2, xmax2, ymin2, ymax2 = box2
if interval_intersection_width(xmin1, xmax1, xmin2, xmax2) and \
interval_intersection_width(ymin1, ymax1, ymin2, ymax2):
return True
else:
return False
|
def boxes_intersect(box1, box2)
|
Determines if two rectangles, each input as a tuple
(xmin, xmax, ymin, ymax), intersect.
| 2.087337 | 2.105848 | 0.99121 |
maxits = int(ceil(1-log(tol_deC/longer_length)/log(2)))
pair_list = [BPair(bez1, bez2, 0.5, 0.5)]
intersection_list = []
k = 0
approx_point_set = ApproxSolutionSet(tol)
while pair_list and k < maxits:
new_pairs = []
delta = 0.5**(k + 2)
for pair in pair_list:
bbox1 = bezier_bounding_box(pair.bez1)
bbox2 = bezier_bounding_box(pair.bez2)
if boxes_intersect(bbox1, bbox2):
if box_area(*bbox1) < tol_deC and box_area(*bbox2) < tol_deC:
point = bezier_point(bez1, pair.t1)
if point not in approx_point_set:
approx_point_set.append(point)
# this is the point in the middle of the pair
intersection_list.append((pair.t1, pair.t2))
# this prevents the output of redundant intersection points
for otherPair in pair_list:
if pair.bez1 == otherPair.bez1 or \
pair.bez2 == otherPair.bez2 or \
pair.bez1 == otherPair.bez2 or \
pair.bez2 == otherPair.bez1:
pair_list.remove(otherPair)
else:
(c11, c12) = halve_bezier(pair.bez1)
(t11, t12) = (pair.t1 - delta, pair.t1 + delta)
(c21, c22) = halve_bezier(pair.bez2)
(t21, t22) = (pair.t2 - delta, pair.t2 + delta)
new_pairs += [BPair(c11, c21, t11, t21),
BPair(c11, c22, t11, t22),
BPair(c12, c21, t12, t21),
BPair(c12, c22, t12, t22)]
pair_list = new_pairs
k += 1
if k >= maxits:
raise Exception("bezier_intersections has reached maximum "
"iterations without terminating... "
"either there's a problem/bug or you can fix by "
"raising the max iterations or lowering tol_deC")
return intersection_list
|
def bezier_intersections(bez1, bez2, longer_length, tol=1e-8, tol_deC=1e-8)
|
INPUT:
bez1, bez2 = [P0,P1,P2,...,PN], [Q0,Q1,Q2,...,QN] defining the two
Bezier curves to check for intersections between.
longer_length - the length (or an upper bound) on the longer of the two
Bezier curves. Determines the maximum iterations needed together with tol.
tol - is the smallest distance that two solutions can differ by and still
be considered distinct solutions.
OUTPUT: a list of tuples (t,s) in [0,1]x[0,1] such that
abs(bezier_point(bez1,t) - bezier_point(bez2,s)) < tol_deC
Note: This will return exactly one such tuple for each intersection
(assuming tol_deC is small enough).
| 2.585391 | 2.595648 | 0.996049 |
# The method here is to translate (shift) then rotate the complex plane so
# that line starts at the origin and proceeds along the positive real axis.
# After this transformation, the intersection points are the real roots of
# the imaginary component of the bezier for which the real component is
# between 0 and abs(line[1]-line[0]).
assert len(line[:]) == 2
assert line[0] != line[1]
if not any(p != bezier[0] for p in bezier):
raise ValueError("bezier is nodal, use "
"bezier_by_line_intersection(bezier[0], line) "
"instead for a bool to be returned.")
# First let's shift the complex plane so that line starts at the origin
shifted_bezier = [z - line[0] for z in bezier]
shifted_line_end = line[1] - line[0]
line_length = abs(shifted_line_end)
# Now let's rotate the complex plane so that line falls on the x-axis
rotation_matrix = line_length/shifted_line_end
transformed_bezier = [rotation_matrix*z for z in shifted_bezier]
# Now all intersections should be roots of the imaginary component of
# the transformed bezier
transformed_bezier_imag = [p.imag for p in transformed_bezier]
coeffs_y = bezier2polynomial(transformed_bezier_imag)
roots_y = list(polyroots01(coeffs_y)) # returns real roots 0 <= r <= 1
transformed_bezier_real = [p.real for p in transformed_bezier]
intersection_list = []
for bez_t in set(roots_y):
xval = bezier_point(transformed_bezier_real, bez_t)
if 0 <= xval <= line_length:
line_t = xval/line_length
intersection_list.append((bez_t, line_t))
return intersection_list
|
def bezier_by_line_intersections(bezier, line)
|
Returns tuples (t1,t2) such that bezier.point(t1) ~= line.point(t2).
| 4.478591 | 4.444547 | 1.00766 |
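A hedged usage sketch, assuming `bezier_by_line_intersections` above is in scope (together with the `bezier2polynomial` and `polyroots01` helpers it calls); `line` is only indexed, so a pair of complex endpoints stands in for a Line object:

```python
bez = (0 + 0j, 2 + 2j, 4 + 0j)   # quadratic arch peaking at (2, 1)
line = (2 + 0j, 2 + 2j)          # vertical segment x = 2, y in [0, 2]
print(bezier_by_line_intersections(bez, line))   # [(0.5, 0.5)]
```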
if not isinstance(group, Element):
raise TypeError('Must provide an xml.etree.Element object. '
'Instead you provided {0}'.format(type(group)))
# Stop right away if the group_selector rejects this group
if not group_filter(group):
return []
# To handle the transforms efficiently, we'll traverse the tree of
# groups depth-first using a stack of tuples.
# The first entry in the tuple is a group element and the second
# entry is its transform. As we pop each entry in the stack, we
# will add all its child group elements to the stack.
StackElement = collections.namedtuple('StackElement',
['group', 'transform'])
def new_stack_element(element, last_tf):
return StackElement(element, last_tf.dot(
parse_transform(element.get('transform'))))
def get_relevant_children(parent, last_tf):
children = []
for elem in filter(group_filter,
parent.iterfind(group_search_xpath, SVG_NAMESPACE)):
children.append(new_stack_element(elem, last_tf))
return children
stack = [new_stack_element(group, np.identity(3))]
FlattenedPath = collections.namedtuple('FlattenedPath',
['path', 'element', 'transform'])
paths = []
while stack:
top = stack.pop()
# For each element type that we know how to convert into path
# data, parse the element after confirming that the path_filter
# accepts it.
for key, converter in path_conversions.items():
for path_elem in filter(path_filter, top.group.iterfind(
'svg:'+key, SVG_NAMESPACE)):
path_tf = top.transform.dot(
parse_transform(path_elem.get('transform')))
path = transform(parse_path(converter(path_elem)), path_tf)
paths.append(FlattenedPath(path, path_elem, path_tf))
stack.extend(get_relevant_children(top.group, top.transform))
return paths
|
def flatten_all_paths(group, group_filter=lambda x: True,
path_filter=lambda x: True, path_conversions=CONVERSIONS,
group_search_xpath=SVG_GROUP_TAG)
|
Returns the paths inside a group (recursively), expressing the
paths in the base coordinates.
Note that if the group being passed in is nested inside some parent
group(s), we cannot take the parent group(s) into account, because
xml.etree.Element has no pointer to its parent. You should use
Document.flatten_group(group) to flatten a specific nested group into
the root coordinates.
Args:
group is an Element
path_conversions (dict):
A dictionary to convert from an SVG element to a path data
string. Any element tags that are not included in this
dictionary will be ignored (including the `path` tag). To
only convert explicit path elements, pass in
`path_conversions=CONVERT_ONLY_PATHS`.
| 3.533838 | 3.44628 | 1.025406 |
if not any(group_to_flatten is descendant for descendant in root.iter()):
warnings.warn('The requested group_to_flatten is not a '
'descendant of root')
# We will shortcut here, because it is impossible for any paths
# to be returned anyhow.
return []
# We create a set of the unique IDs of each element that we wish to
# flatten, if those elements are groups. Any groups outside of this
# set will be skipped while we flatten the paths.
desired_groups = set()
if recursive:
for group in group_to_flatten.iter():
desired_groups.add(id(group))
else:
desired_groups.add(id(group_to_flatten))
def desired_group_filter(x):
return (id(x) in desired_groups) and group_filter(x)
return flatten_all_paths(root, desired_group_filter, path_filter,
path_conversions, group_search_xpath)
|
def flatten_group(group_to_flatten, root, recursive=True,
group_filter=lambda x: True, path_filter=lambda x: True,
path_conversions=CONVERSIONS,
group_search_xpath=SVG_GROUP_TAG)
|
Flatten all the paths in a specific group.
The paths will be flattened into the 'root' frame. Note that root
needs to be an ancestor of the group that is being flattened.
Otherwise, no paths will be returned.
| 4.018612 | 3.956022 | 1.015822 |
return flatten_all_paths(self.tree.getroot(), group_filter,
path_filter, path_conversions)
|
def flatten_all_paths(self, group_filter=lambda x: True,
path_filter=lambda x: True,
path_conversions=CONVERSIONS)
|
Forward the tree of this document into the more general
flatten_all_paths function and return the result.
| 3.732287 | 3.554447 | 1.050033 |
# If not given a parent, assume that the path does not have a group
if group is None:
group = self.tree.getroot()
# If given a list of strings (one or more), assume it represents
# a sequence of nested group names
elif all(isinstance(elem, str) for elem in group):
group = self.get_or_add_group(group)
elif not isinstance(group, Element):
raise TypeError(
'Must provide a list of strings or an xml.etree.Element '
'object. Instead you provided {0}'.format(group))
else:
# Make sure that the group belongs to this Document object
if not self.contains_group(group):
warnings.warn('The requested group does not belong to '
'this Document')
# TODO: It might be better to use duck-typing here with a try-except
if isinstance(path, Path):
path_svg = path.d()
elif is_path_segment(path):
path_svg = Path(path).d()
elif isinstance(path, str):
# Assume this is a valid d-string.
# TODO: Should we sanity check the input string?
path_svg = path
else:
raise TypeError(
'Must provide a Path, a path segment type, or a valid '
'SVG path d-string. Instead you provided {0}'.format(path))
if attribs is None:
attribs = {}
else:
attribs = attribs.copy()
attribs['d'] = path_svg
return SubElement(group, 'path', attribs)
|
def add_path(self, path, attribs=None, group=None)
|
Add a new path to the SVG.
| 3.840186 | 3.771315 | 1.018262 |
group = self.tree.getroot()
# Drill down through the names until we find the desired group
while len(nested_names):
prev_group = group
next_name = nested_names.pop(0)
for elem in group.iterfind(SVG_GROUP_TAG, SVG_NAMESPACE):
if elem.get(name_attr) == next_name:
group = elem
break
if prev_group is group:
# The group we're looking for does not exist, so let's
# create the group structure
nested_names.insert(0, next_name)
while nested_names:
next_name = nested_names.pop(0)
group = self.add_group({'id': next_name}, group)
# Now nested_names will be empty, so the topmost
# while-loop will end
return group
|
def get_or_add_group(self, nested_names, name_attr='id')
|
Get a group from the tree, or add a new one with the given
name structure.
`nested_names` is a list of strings which represent group names.
Each group name will be nested inside of the previous group name.
`name_attr` is the group attribute that is being used to
represent the group's name. Default is 'id', but some SVGs may
contain custom name labels, like 'inkscape:label'.
Returns the requested group. If the requested group did not
exist, this function will create it, as well as all parent
groups that it requires. All created groups will be left with
blank attributes.
| 3.714827 | 3.791585 | 0.979756 |
if parent is None:
parent = self.tree.getroot()
elif not self.contains_group(parent):
warnings.warn('The requested group {0} does not belong to '
'this Document'.format(parent))
if group_attribs is None:
group_attribs = {}
else:
group_attribs = group_attribs.copy()
return SubElement(parent, '{{{0}}}g'.format(
SVG_NAMESPACE['svg']), group_attribs)
|
def add_group(self, group_attribs=None, parent=None)
|
Add an empty group element to the SVG.
| 3.689941 | 3.228958 | 1.142765 |
if filename is None:
filename = self.original_filename
# write to a (by default temporary) file
with open(filename, 'w') as output_svg:
output_svg.write(etree.tostring(self.tree.getroot()))
open_in_browser(filename)
|
def display(self, filename=None)
|
Displays/opens the doc using the OS's default application.
| 5.12195 | 5.064934 | 1.011257 |
A = np.array([[-1, 3, -3, 1], # transforms cubic bez to standard poly
[ 3, -6, 3, 0],
[-3, 3, 0, 0],
[ 1, 0, 0, 0]])
B = [seg.bpoints() for seg in path]
return np.dot(B, np.dot(A, np.power(tvals, [[3],[2],[1],[0]])))
|
def points_in_each_seg(path, tvals)
|
Compute seg.point(t) for each seg in path and each t in tvals.
| 4.257467 | 4.071295 | 1.045728 |
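The matrix form above maps each row of control points through the cubic Bezier basis matrix A to monomial coefficients, then evaluates them against a (4, len(tvals)) stack of powers of t. A standalone sketch with a raw (n_segs, 4) array in place of `path` (only seg.bpoints() is used above, so this carries the same information):

```python
import numpy as np

A = np.array([[-1, 3, -3, 1],
              [ 3, -6, 3, 0],
              [-3, 3, 0, 0],
              [ 1, 0, 0, 0]])
B = np.array([[0 + 0j, 1 + 2j, 3 + 2j, 4 + 0j]])   # one cubic segment
tvals = np.array([0.0, 0.5, 1.0])
T = np.power(tvals, [[3], [2], [1], [0]])           # shape (4, len(tvals))
print(B.dot(A).dot(T))   # [[0.+0.j  2.+1.5j  4.+0.j]]
```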
if not transform_str:
return np.identity(3)
elif not isinstance(transform_str, str):
raise TypeError('Must provide a string to parse')
total_transform = np.identity(3)
transform_substrs = transform_str.split(')')[:-1] # Skip the last element, because it should be empty
for substr in transform_substrs:
total_transform = total_transform.dot(_parse_transform_substr(substr))
return total_transform
|
def parse_transform(transform_str)
|
Converts a valid SVG transformation string into a 3x3 matrix.
If the string is empty or null, this returns a 3x3 identity matrix
| 3.462421 | 3.28178 | 1.055044 |
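A hedged usage sketch; `_parse_transform_substr` is not shown here and is assumed to turn one "name(args" chunk into a 3x3 matrix:

```python
m = parse_transform('translate(10, 20) scale(2)')
# expected: [[2, 0, 10], [0, 2, 20], [0, 0, 1]]
```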
kink_list = []
for idx in range(len(path)):
if idx == 0 and not path.isclosed():
continue
try:
u = path[(idx - 1) % len(path)].unit_tangent(1)
v = path[idx].unit_tangent(0)
u_dot_v = u.real*v.real + u.imag*v.imag
flag = False
except ValueError:
flag = True
if flag or abs(u_dot_v - 1) > tol:
kink_list.append(idx)
return kink_list
|
def kinks(path, tol=1e-8)
|
returns indices of segments that start on a non-differentiable joint.
| 2.913264 | 2.839588 | 1.025946 |
if len(path) == 1:
return path
assert path.iscontinuous()
sharp_kinks = []
new_path = [path[0]]
for idx in range(len(path)):
if idx == len(path)-1:
if not path.isclosed():
continue
else:
seg1 = new_path[0]
else:
seg1 = path[idx + 1]
seg0 = new_path[-1]
try:
unit_tangent0 = seg0.unit_tangent(1)
unit_tangent1 = seg1.unit_tangent(0)
flag = False
except ValueError:
flag = True # unit tangent not well-defined
if not flag and isclose(unit_tangent0, unit_tangent1): # joint is already smooth
if idx != len(path)-1:
new_path.append(seg1)
continue
else:
kink_idx = (idx + 1) % len(path) # kink at start of this seg
if not flag and isclose(-unit_tangent0, unit_tangent1):
# joint is sharp 180 deg (must be fixed manually)
new_path.append(seg1)
sharp_kinks.append(kink_idx)
else: # joint is not smooth, let's smooth it.
args = (seg0, seg1, maxjointsize, tightness)
new_seg0, elbow_segs, new_seg1 = smoothed_joint(*args)
new_path[-1] = new_seg0
new_path += elbow_segs
if idx == len(path) - 1:
new_path[0] = new_seg1
else:
new_path.append(new_seg1)
# If unfixable kinks were found, let the user know
if sharp_kinks and not ignore_unfixable_kinks:
_report_unfixable_kinks(path, sharp_kinks)
return Path(*new_path)
|
def smoothed_path(path, maxjointsize=3, tightness=1.99, ignore_unfixable_kinks=False)
|
returns a path with no non-differentiable joints.
| 3.070412 | 3.047277 | 1.007592 |
if filename is None:
filename = 'display_temp.svg'
self.save(filename)
open_in_browser(filename)
|
def display(self, filename=None)
|
Displays/opens the doc using the OS's default application.
| 5.831529 | 5.475214 | 1.065078 |
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))
|
def hex2rgb(value)
|
Converts a hexadecimal color string to an RGB 3-tuple
EXAMPLE
-------
>>> hex2rgb('#0000FF')
(0, 0, 255)
| 1.460831 | 2.232524 | 0.654341 |
return abs(a - b) < (atol + rtol * abs(b))
|
def isclose(a, b, rtol=1e-5, atol=1e-8)
|
This is essentially np.isclose, but slightly faster.
| 2.697083 | 2.530086 | 1.066005 |
# If just the name of the file was given, check if it's in the Current
# Working Directory.
if not os.path.isfile(file_location):
file_location = os.path.join(os.getcwd(), file_location)
if not os.path.isfile(file_location):
raise IOError("\n\nFile not found.")
# For some reason OSX requires this adjustment (tested on 10.10.4)
if sys.platform == "darwin":
file_location = "file:///"+file_location
new = 2 # open in a new tab, if possible
webbrowser.get().open(file_location, new=new)
|
def open_in_browser(file_location)
|
Attempt to open file located at file_location in the default web
browser.
| 4.104213 | 3.962272 | 1.035823 |
cx = ellipse.get('cx', 0)
cy = ellipse.get('cy', 0)
rx = ellipse.get('rx', None)
ry = ellipse.get('ry', None)
r = ellipse.get('r', None)
if r is not None:
rx = ry = float(r)
else:
rx = float(rx)
ry = float(ry)
cx = float(cx)
cy = float(cy)
d = ''
d += 'M' + str(cx - rx) + ',' + str(cy)
d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'
d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'
return d
|
def ellipse2pathd(ellipse)
|
converts the parameters from an ellipse or a circle to a string for a
Path object d-attribute
| 1.508705 | 1.564664 | 0.964236 |
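A minimal sketch, assuming `ellipse2pathd` above is in scope; only `ellipse.get(...)` is called, so a plain dict with the SVG attribute names can stand in for the XML element:

```python
print(ellipse2pathd({'cx': 10, 'cy': 10, 'r': 5}))
# M5.0,10.0a5.0,5.0 0 1,0 10.0,0a5.0,5.0 0 1,0 -10.0,0
```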
points = COORD_PAIR_TMPLT.findall(polyline_d)
closed = (float(points[0][0]) == float(points[-1][0]) and
float(points[0][1]) == float(points[-1][1]))
# The `parse_path` call ignores redundant 'z' (closure) commands
# e.g. `parse_path('M0 0L100 100Z') == parse_path('M0 0L100 100L0 0Z')`
# This check ensures that an n-point polygon is converted to an n-Line path.
if is_polygon and closed:
points.append(points[0])
d = 'M' + 'L'.join('{0} {1}'.format(x,y) for x,y in points)
if is_polygon or closed:
d += 'z'
return d
|
def polyline2pathd(polyline_d, is_polygon=False)
|
converts the string from a polyline points-attribute to a string for a
Path object d-attribute
| 3.665249 | 3.796693 | 0.965379 |
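A hedged sketch; `COORD_PAIR_TMPLT` is a module-level regex (not shown) assumed to extract "x,y" pairs from the points attribute:

```python
print(polyline2pathd('0,0 10,0 10,10'))                    # M0 0L10 0L10 10
print(polyline2pathd('0,0 10,0 10,10', is_polygon=True))   # M0 0L10 0L10 10z
```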
x0, y0 = float(rect.get('x', 0)), float(rect.get('y', 0))
w, h = float(rect.get('width', 0)), float(rect.get('height', 0))
x1, y1 = x0 + w, y0
x2, y2 = x0 + w, y0 + h
x3, y3 = x0, y0 + h
d = ("M{} {} L {} {} L {} {} L {} {} z"
"".format(x0, y0, x1, y1, x2, y2, x3, y3))
return d
|
def rect2pathd(rect)
|
Converts an SVG-rect element to a Path d-string.
The rectangle will start at the (x,y) coordinate specified by the
rectangle object and proceed counter-clockwise.
| 1.686509 | 1.685979 | 1.000315 |
return svg2paths(svg_file_location=svg_file_location,
return_svg_attributes=return_svg_attributes,
convert_circles_to_paths=convert_circles_to_paths,
convert_ellipses_to_paths=convert_ellipses_to_paths,
convert_lines_to_paths=convert_lines_to_paths,
convert_polylines_to_paths=convert_polylines_to_paths,
convert_polygons_to_paths=convert_polygons_to_paths,
convert_rectangles_to_paths=convert_rectangles_to_paths)
|
def svg2paths2(svg_file_location,
return_svg_attributes=True,
convert_circles_to_paths=True,
convert_ellipses_to_paths=True,
convert_lines_to_paths=True,
convert_polylines_to_paths=True,
convert_polygons_to_paths=True,
convert_rectangles_to_paths=True)
|
Convenience function; identical to svg2paths() except that
return_svg_attributes=True by default. See svg2paths() docstring for more
info.
| 1.164548 | 1.24485 | 0.935493 |
if type(instance) is ModbusDevice:
post_save.send_robust(sender=Device, instance=instance.modbus_device)
elif type(instance) is ModbusVariable:
post_save.send_robust(sender=Variable, instance=instance.modbus_variable)
elif type(instance) is ExtendedModbusVariable:
post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk))
elif type(instance) is ExtendedModbusDevice:
post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk))
|
def _reinit_daq_daemons(sender, instance, **kwargs)
|
update the daq daemon configuration when changes are applied to the models
| 2.140405 | 2.088731 | 1.024739 |
if self.inst is None:
return
if variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE':
return self.parse_value(self.inst.query('?U6P0'))
elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_MANUAL_C_FREQ':
freq = VariableProperty.objects.get_property(variable=variable_instance, name='VISA:FREQ')
if freq is None:
freq = 500
return self.parse_value(self.inst.query('?MAM1SR9HT3ST2SM%dE'%freq))
return None
|
def read_data(self, variable_instance)
|
read values from the device
| 8.683577 | 7.965531 | 1.090144 |
if dtype.str in ['<f8']:
return 'double'
elif dtype.str in ['<f4']:
return 'single'
elif dtype.str in ['<i8']:
return 'int64'
elif dtype.str in ['<u8']:
return 'uint64'
elif dtype.str in ['<i4']:
return 'int32'
elif dtype.str in ['<u4']:
return 'uint32'
elif dtype.str in ['<i2']:
return 'int16'
elif dtype.str in ['<u2']:
return 'uint16'
elif dtype.str in ['|i1']:
return 'int8'
elif dtype.str in ['|u1']:
return 'uint8'
|
def dtype_to_matlab_class(dtype)
|
convert dtype to matlab class string
| 1.421919 | 1.404332 | 1.012523 |
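A minimal sketch, assuming `dtype_to_matlab_class` above is in scope; the '<'-prefixed `dtype.str` values assume a little-endian platform:

```python
import numpy as np

print(dtype_to_matlab_class(np.dtype('float64')))   # double
print(dtype_to_matlab_class(np.dtype('int16')))     # int16
# Unrecognized dtypes fall through every branch and return None.
```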
if not driver_ok:
logger.error("Visa driver NOT ok")
return False
visa_backend = '@py' # use PyVISA-py as backend
if hasattr(settings, 'VISA_BACKEND'):
visa_backend = settings.VISA_BACKEND
try:
self.rm = visa.ResourceManager(visa_backend)
except:
logger.error("Visa ResourceManager cannot load resources : %s" %self)
return False
try:
resource_prefix = self._device.visadevice.resource_name.split('::')[0]
extras = {}
if hasattr(settings, 'VISA_DEVICE_SETTINGS'):
if resource_prefix in settings.VISA_DEVICE_SETTINGS:
extras = settings.VISA_DEVICE_SETTINGS[resource_prefix]
logger.debug('VISA_DEVICE_SETTINGS for %s: %r'%(resource_prefix,extras))
self.inst = self.rm.open_resource(self._device.visadevice.resource_name, **extras)
except:
logger.error("Visa ResourceManager cannot open resource : %s" %self._device.visadevice.resource_name)
return False
logger.debug('connected visa device')
return True
|
def connect(self)
|
establish a connection to the Instrument
| 3.170192 | 3.185118 | 0.995314 |
bin_str_out = ''
if isinstance(values, integer_types):
bin_str_out = bin(values)[2:].zfill(16)
bin_str_out = bin_str_out[::-1]
else:
for value in values:
bin_str = bin(value)[2:].zfill(16)
bin_str = bin_str[::-1]
bin_str_out = bin_str + bin_str_out
dec_num = 0
for i in range(len(bin_str_out) // 4):  # integer division so range() gets an int (Python 3)
bcd_num = int(bin_str_out[(i * 4):(i + 1) * 4][::-1], 2)
if bcd_num > 9:
dec_num = -dec_num
else:
dec_num = dec_num + (bcd_num * pow(10, i))
return dec_num
|
def decode_bcd(values)
|
decode a BCD-encoded int (or list of ints) to its decimal value
| 2.063571 | 2.03626 | 1.013412 |
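A worked example, assuming `decode_bcd` above is in scope (with the integer-division fix applied): each 4-bit nibble of a register holds one decimal digit, so the raw value 0x1234 decodes to the number 1234:

```python
print(decode_bcd(0x1234))     # 1234
print(decode_bcd([0x0042]))   # 42
```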
for index in range(len(l)):
if l[index] == value:
return None
if l[index] > value:
return index
|
def find_gap(l, value)
|
try to find an address gap in the list of modbus registers
| 2.72663 | 2.405129 | 1.133673 |
framer = None
if self._framer == 0: # Socket Framer
framer = ModbusSocketFramer
elif self._framer == 1: # RTU Framer
framer = ModbusRtuFramer
elif self._framer == 2: # ASCII Framer
framer = ModbusAsciiFramer
elif self._framer == 3: # Binary Framer
framer = ModbusBinaryFramer
if self._protocol == 0: # TCP
if self._framer is None: # No Framer
self.slave = ModbusTcpClient(self._address, int(self._port))
else:
self.slave = ModbusTcpClient(self._address, int(self._port), framer=framer)
elif self._protocol == 1: # UDP
if self._framer is None: # No Framer
self.slave = ModbusUdpClient(self._address, int(self._port))
else:
self.slave = ModbusUdpClient(self._address, int(self._port), framer=framer)
elif self._protocol in (2, 3, 4): # serial
method_list = {2: 'ascii', 3: 'rtu', 4: 'binary'}
self.slave = ModbusSerialClient(
method=method_list[self._protocol],
port=self._port,
stopbits=self._stopbits,
bytesize=self._bytesize,
parity=self._parity,
baudrate=self._baudrate,
timeout=self._timeout)
else:
raise NotImplementedError("Protocol not supported")
status = self.slave.connect()
return status
|
def _connect(self)
|
connect to the modbus slave (server)
| 1.779059 | 1.709861 | 1.04047 |
if variable_id not in self.variables:
return False
if not self.variables[variable_id].writeable:
return False
if self.variables[variable_id].modbusvariable.function_code_read == 3:
# write register
if 0 <= self.variables[variable_id].modbusvariable.address <= 65535:
if self._connect():
if self.variables[variable_id].get_bits_by_class() / 16 == 1:
# just write the value to one register
self.slave.write_register(self.variables[variable_id].modbusvariable.address, int(value),
unit=self._unit_id)
else:
# encode it first
self.slave.write_registers(self.variables[variable_id].modbusvariable.address,
list(self.variables[variable_id].encode_value(value)),
unit=self._unit_id)
self._disconnect()
return True
else:
logger.info("device with id: %d is now accessible" % self.device.pk)
return False
else:
logger.error('Modbus Address %d out of range' % self.variables[variable_id].modbusvariable.address)
return False
elif self.variables[variable_id].modbusvariable.function_code_read == 1:
# write coil
if 0 <= self.variables[variable_id].modbusvariable.address <= 65535:
if self._connect():
self.slave.write_coil(self.variables[variable_id].modbusvariable.address, bool(value),
unit=self._unit_id)
self._disconnect()
return True
else:
logger.info("device with id: %d is now accessible" % self.device.pk)
return False
else:
logger.error('Modbus Address %d out of range' % self.variables[variable_id].modbusvariable.address)
else:
logger.error('wrong type of function code %d' %
self.variables[variable_id].modbusvariable.function_code_read)
return False
|
def write_data(self, variable_id, value, task)
|
write value to single modbus register or coil
| 2.253287 | 2.18623 | 1.030672 |
for process in queryset:
process.stop(signum=signal.SIGKILL)
|
def kill_process(modeladmin, request, queryset)
|
kills the selected processes (SIGKILL)
:return:
| 5.237147 | 8.549936 | 0.612536 |
qs = model_admin.get_queryset(request)
qs.filter(id__range=(1, 99))
for item in qs:
dp = DeviceProtocol.objects.filter(pk=item.id).first()
if dp:
yield (dp.pk, dp.app_name)
|
def lookups(self, request, model_admin)
|
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
| 5.289716 | 5.153407 | 1.02645 |
if self.value() is not None:
if self.value() > 0:
return queryset.filter(parent_process_id=self.value())
|
def queryset(self, request, queryset)
|
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
| 3.551549 | 4.642987 | 0.764928 |
if type(instance) is OneWireDevice:
post_save.send_robust(sender=Device, instance=instance.onewire_device)
elif type(instance) is OneWireVariable:
post_save.send_robust(sender=Variable, instance=instance.onewire_variable)
elif type(instance) is ExtendedOneWireVariable:
post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk))
elif type(instance) is ExtendedOneWireDevice:
post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk))
|
def _reinit_daq_daemons(sender, instance, **kwargs)
|
update the daq daemon configuration when changes are applied to the models
| 2.334333 | 2.299163 | 1.015297 |
'''
write value to the instrument/device
'''
output = []
if not driver_visa_ok:
logger.info("Visa-device-write data-visa NOT ok")
return output
for item in self.variables.values():
if not (item.visavariable.variable_type == 0 and item.id == variable_id):
# skip all config values
continue
start=time()
# read_value = self._h.write_data(item.visavariable.device_property, value)
read_value = self._h.write_data(variable_id, value, task)
end=time()
duration=float(end - start)
logger.info(("%s - %s - %s - %s - %s - %s") %(item.device.__str__(), item.__str__(), item.visavariable.device_property, value, read_value, duration))
if read_value is not None and item.update_value(read_value, time()):
output.append(item.create_recorded_data_element())
else:
logger.info("Visa-Output not ok : %s" % output)
return output
|
def write_data(self,variable_id, value, task)
|
write value to the instrument/device
| 5.743341 | 5.411112 | 1.061397 |
output = []
if not driver_visa_ok:
logger.info('Request Data Visa Driver Not Ok')
return output
for item in self.variables.values():
if not item.visavariable.variable_type == 1:
# skip all config values
continue
value = self._h.read_data(item)
if value is not None and item.update_value(value, time()):
output.append(item.create_recorded_data_element())
return output
|
def request_data(self)
|
request data from the instrument/device
| 10.848678 | 10.219593 | 1.061557 |
'''
from: https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
'''
# mysql is lazily connected to in django.
# connection.connection is None means
# you have not connected to mysql before
if connection.connection and not connection.is_usable():
# destroy the default mysql connection
# after this line, when you use ORM methods
# django will reconnect to the default mysql
logger.debug('deleted default connection')
del connections._connections.default
|
def check_db_connection()
|
from: https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
| 10.099551 | 6.112514 | 1.652274 |
# check if a process is already running
if access(self.pid_file_name, F_OK):
# read the pid file
pid = self.read_pid()
try:
kill(pid, 0) # check if process is running
self.stderr.write("process is already running\n")
return False
except OSError as e:
if e.errno == errno.ESRCH:
# process is dead
self.delete_pid(force_del=True)
else:
self.stderr.write("demonize failed, something went wrong: %d (%s)\n" % (e.errno, e.strerror))
return False
try:
pid = fork()
if pid > 0:
# Exit from the first parent
timeout = time() + 60
while self.read_pid() is None:
self.stderr.write("waiting for pid..\n")
sleep(0.5)
if time() > timeout:
break
self.stderr.write("pid is %d\n" % self.read_pid())
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 1. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
# os.chdir("/")
setsid()
umask(0)
# Do the Second fork
try:
pid = fork()
if pid > 0:
# Exit from the second parent
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 2. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Redirect standard file descriptors
# sys.stdout.flush()
# sys.stderr.flush()
# si = file(self.stdin, 'r')
# so = file(self.stdout, 'a+')
# se = file(self.stderr, 'a+',
# os.dup2(si.fileno(), sys.stdin.fileno())
# os.dup2(so.fileno(), sys.stdout.fileno())
# os.dup2(se.fileno(), sys.stderr.fileno())
# Write the PID file
#atexit.register(self.delete_pid)
self.write_pid()
return True
|
def demonize(self)
|
do the double fork magic
| 2.21988 | 2.190408 | 1.013455 |
pid = self.read_pid()
if pid != getpid() or force_del:
logger.debug('process %d tried to delete pid' % getpid())
return False
if access(self.pid_file_name, F_OK):
try:
remove(self.pid_file_name) # remove the old pid file
logger.debug('delete pid (%d)' % getpid())
except:
logger.debug("can't delete pid file")
|
def delete_pid(self, force_del=False)
|
delete the pid file
| 4.039641 | 3.851564 | 1.048831 |
# demonize
if self.run_as_daemon:
if not self.demonize():
self.delete_pid()
sys.exit(0)
# recreate the DB connection
if connection.connection is not None:
connection.connection.close()
connection.connection = None
master_process = BackgroundProcess.objects.filter(parent_process__isnull=True,
label=self.label,
enabled=True).first()
self.pid = getpid()
if not master_process:
self.delete_pid(force_del=True)
logger.debug('no such process in BackgroundProcesses\n')
sys.exit(0)
self.process_id = master_process.pk
master_process.pid = self.pid
master_process.last_update = now()
master_process.running_since = now()
master_process.done = False
master_process.failed = False
master_process.message = 'init master process'
master_process.save()
BackgroundProcess.objects.filter(parent_process__pk=self.process_id, done=False).update(message='stopped')
for parent_process in BackgroundProcess.objects.filter(parent_process__pk=self.process_id, done=False):
for process in BackgroundProcess.objects.filter(parent_process__pk=parent_process.pk, done=False):
try:
kill(process.pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
process.delete()
continue
logger.debug('process %d is alive' % process.pk)
process.stop()
# clean up
BackgroundProcess.objects.filter(parent_process__pk=parent_process.pk, done=False).delete()
# register signals
[signal.signal(s, self.signal) for s in self.SIGNALS]
#signal.signal(signal.SIGCHLD, self.handle_chld)
# start the main loop
self.run()
self.delete_pid()
sys.exit(0)
|
def start(self)
|
start the scheduler
| 3.32837 | 3.324967 | 1.001023 |
try:
master_process = BackgroundProcess.objects.filter(pk=self.process_id).first()
if master_process:
master_process.last_update = now()
master_process.message = 'init child processes'
master_process.save()
else:
self.delete_pid(force_del=True)
self.stderr.write("no such process in BackgroundProcesses")
sys.exit(0)
self.manage_processes()
while True:
# handle signals
sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
# check the DB connection
check_db_connection()
# update the BackgroundProcess entry
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='running..')
if sig is None:
self.manage_processes()
elif sig not in self.SIGNALS:
logger.error('%s, unhandled signal %d' % (self.label, sig))
continue
elif sig == signal.SIGTERM:
logger.debug('%s, termination signal' % self.label)
raise StopIteration
elif sig == signal.SIGHUP:
# todo handle sighup
pass
elif sig == signal.SIGUSR1:
# restart all child processes
logger.debug('PID %d, processed SIGUSR1 (%d) signal' % (self.pid, sig))
self.restart()
elif sig == signal.SIGUSR2:
# write the process status to stdout
self.status()
pass
sleep(5)
except StopIteration:
self.stop()
self.delete_pid()
sys.exit(0)
except SystemExit:
raise
except:
logger.error('%s(%d), unhandled exception\n%s' % (self.label, getpid(), traceback.format_exc()))
|
def run(self)
|
the main loop
| 3.956845 | 3.90022 | 1.014518 |
try:
while True:
wpid, status = waitpid(-1, WNOHANG)
if not wpid:
break
# self.stdout.write('%d,%d\n' % (wpid, status))
except:
pass
|
def handle_chld(self, sig, frame)
|
SIGCHLD handling
:param sig:
:param frame:
:return:
| 4.432962 | 4.204303 | 1.054387 |
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='restarting..')
timeout = time() + 60 # wait max 60 seconds
self.kill_processes(signal.SIGTERM)
while self.PROCESSES and time() < timeout:
sleep(0.1)
self.kill_processes(signal.SIGKILL)
self.manage_processes()
logger.debug('BD %d: restarted'%self.process_id)
|
def restart(self)
|
restart all child processes
| 5.753253 | 5.42583 | 1.060345 |
if self.pid is None:
self.pid = self.read_pid()
if self.pid is None:
sp = BackgroundProcess.objects.filter(pk=1).first()
if sp:
self.pid = sp.pid
if self.pid is None or self.pid == 0:
logger.error("can't determine process id exiting.")
return False
if self.pid != getpid():
# calling from outside the daemon instance
logger.debug('send sigterm to daemon')
try:
kill(self.pid, sig)
return True
except OSError as e:
if e.errno == errno.ESRCH:
return False
else:
return False
logger.debug('start termination of the daemon')
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='stopping..')
timeout = time() + 60 # wait max 60 seconds
self.kill_processes(signal.SIGTERM)
while self.PROCESSES and time() < timeout:
self.kill_processes(signal.SIGTERM)
sleep(1)
self.kill_processes(signal.SIGKILL)
BackgroundProcess.objects.filter(pk=self.process_id).update(
last_update=now(),
message='stopped')
logger.debug('termination of the daemon done')
return True
|
def stop(self, sig=signal.SIGTERM)
|
stop the scheduler and stop all processes
| 3.515559 | 3.403827 | 1.032825 |
if process is None:
return False
# start new child process
pid = fork()
if pid != 0:
# parent process
process.pid = pid
self.PROCESSES[process.process_id] = process
connections.close_all()
return True
# child process
process.pid = getpid()
# connection.connection.close()
# connection.connection = None
process.pre_init_process()
process.init_process()
process.run()
sys.exit(0)
|
def spawn_process(self, process=None)
|
spawn a new process
| 4.599548 | 4.591013 | 1.001859 |
if self.pid is None:
self.pid = self.read_pid()
if self.pid is None:
sp = BackgroundProcess.objects.filter(pk=1).first()
if sp:
self.pid = sp.pid
if self.pid is None or self.pid == 0:
self.stderr.write("%s: can't determine process id exiting.\n" % datetime.now().isoformat(' '))
return False
if self.pid != getpid():
# calling from outside the daemon instance
try:
kill(self.pid, signal.SIGUSR2)
return True
except OSError as e:
if e.errno == errno.ESRCH:
return False
else:
return False
process_list = []
for process in BackgroundProcess.objects.filter(parent_process__pk=self.process_id):
process_list.append(process)
process_list += list(process.backgroundprocess_set.filter())
for process in process_list:
logger.debug('%s, parent process_id %d' % (self.label, process.parent_process.pk))
logger.debug('%s, process_id %d' % (self.label, self.process_id))
|
def status(self)
|
write the current daemon status to stdout
| 3.917259 | 3.668472 | 1.067818 |
logger.debug('PID %d, received signal: %d' % (self.pid, signum))
self.SIG_QUEUE.append(signum)
|
def signal(self, signum=None, frame=None)
|
handle signals
| 5.567117 | 5.590875 | 0.995751 |
db.connections.close_all()
# update process info
BackgroundProcess.objects.filter(pk=self.process_id).update(
pid=self.pid,
last_update=now(),
running_since=now(),
done=False,
failed=False,
message='init process..',
)
[signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS] # reset
[signal.signal(s, self.signal) for s in self.SIGNALS]
|
def pre_init_process(self)
|
will be executed after process fork
| 5.483538 | 5.115386 | 1.071969 |
BackgroundProcess.objects.filter(pk=self.process_id
).update(pid=0, last_update=now(), message='stopping..')
# run the cleanup
self.cleanup()
BackgroundProcess.objects.filter(pk=self.process_id).update(pid=0,
last_update=now(),
message='stopped')
|
def stop(self, signum=None, frame=None)
|
handles a termination signal
| 5.416118 | 5.475087 | 0.98923 |
self.device = Device.objects.filter(protocol__daq_daemon=1, active=1, id=self.device_id).first()
if not self.device:
logger.error("Error init_process for %s" % self.device_id)
return False
self.dt_set = min(self.dt_set, self.device.polling_interval)
self.dt_query_data = self.device.polling_interval
try:
self.device = self.device.get_device_instance()
except:
var = traceback.format_exc()
logger.error("exception while initialisation of DAQ Process for Device %d %s %s" % (
self.device_id, linesep, var))
return True
|
def init_process(self)
|
init a standard daq process for a single device
| 5.186388 | 4.693934 | 1.104913 |
for item in Device.objects.filter(protocol__daq_daemon=1, active=1, id__in=self.device_ids):
try:
tmp_device = item.get_device_instance()
if tmp_device is not None:
self.devices[item.pk] = tmp_device
self.dt_set = min(self.dt_set, item.polling_interval)
self.dt_query_data = min(self.dt_query_data, item.polling_interval)
except:
var = traceback.format_exc()
logger.error("exception while initialisation of DAQ Process for Device %d %s %s" % (
item.pk, linesep, var))
return True
|
def init_process(self)
|
init a standard daq process for multiple devices
| 5.342032 | 4.975561 | 1.073654 |
if type(instance) is SMbusDevice:
post_save.send_robust(sender=Device, instance=instance.smbus_device)
elif type(instance) is SMbusVariable:
post_save.send_robust(sender=Variable, instance=instance.smbus_variable)
elif type(instance) is ExtendedSMbusVariable:
post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk))
elif type(instance) is ExtendedSMBusDevice:
post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk))
|
def _reinit_daq_daemons(sender, instance, **kwargs)
|
update the daq daemon configuration when changes are applied to the models
| 2.589634 | 2.560154 | 1.011515 |
if self.inst is None:
return
if variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE':
return self.parse_value(self.inst.query('?U6P0'))
elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_DCV':
return self.parse_value(self.inst.query('?U6P0F1T3'))
elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_ACV':
return self.parse_value(self.inst.query('?U6P0F2T3'))
elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_DCV+ACV':
return self.parse_value(self.inst.query('?U6P0F3T3'))
elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_2W_OHM':
return self.parse_value(self.inst.query('?U6P0F4T3'))
elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_4W_OHM':
return self.parse_value(self.inst.query('?U6P0F5T3'))
return None
|
def read_data(self,variable_instance)
|
read values from the device
| 2.07907 | 1.992593 | 1.043399 |
variable = self._variables[variable_id]
if task.property_name != '':
# write the freq property to VariableProperty use that for later read
vp = VariableProperty.objects.update_or_create_property(variable=variable, name=task.property_name.upper(),
value=value, value_class='FLOAT64')
return True
if variable.visavariable.variable_type == 0: # configuration
# only write to configuration variables
pass
else:
return False
|
def write_data(self,variable_id, value, task)
|
write values to the device
| 12.504788 | 12.517835 | 0.998958 |
if type(instance) is SystemStatVariable:
post_save.send_robust(sender=Variable, instance=instance.system_stat_variable)
elif type(instance) is ExtendedSystemStatVariable:
post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk))
elif type(instance) is ExtendedSystemStatDevice:
post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk))
|
def _reinit_daq_daemons(sender, instance, **kwargs)
|
update the daq daemon configuration when changes are applied to the models
| 2.973961 | 2.932687 | 1.014074 |
if self.inst is None:
return
if variable_instance.visavariable.device_property.upper() == 'VRMS_CHAN1':  # uppercase literal; .upper() on the left made a lowercase match unreachable
return self.parse_value(self.inst.query(':MEAS:ITEM? VRMS,CHAN1'))
return None
|
def read_data(self, variable_instance)
|
read values from the device
| 14.023211 | 11.642852 | 1.204448 |
if not issubclass(sender, WidgetContentModel):
return
# delete WidgetContent Entry
wcs = WidgetContent.objects.filter(
content_pk=instance.pk,
content_model=('%s' % instance.__class__).replace("<class '", '').replace("'>", ''))
for wc in wcs:
logger.debug('delete wc %r'%wc)
wc.delete()
|
def _delete_widget_content(sender, instance, **kwargs)
|
delete the widget content instance when a WidgetContentModel is deleted
| 5.200956 | 4.670565 | 1.11356 |
if not issubclass(sender, WidgetContentModel):
return
# create a WidgetContent Entry
if created:
instance.create_widget_content_entry()
return
|
def _create_widget_content(sender, instance, created=False, **kwargs)
|
create a widget content instance when a WidgetContentModel is created
| 7.154974 | 5.021478 | 1.424874 |
if type(instance) is Device:
try:
bp = BackgroundProcess.objects.get(pk=instance.protocol_id)
except:
return False
bp.restart()
elif type(instance) is Variable:
try:
bp = BackgroundProcess.objects.get(pk=instance.device.protocol_id)
except:
return False
bp.restart()
elif type(instance) is Scaling:
for bp_pk in list(instance.variable_set.all().values_list('device__protocol_id').distinct()):
try:
bp = BackgroundProcess.objects.get(pk=bp_pk)
except:
return False
bp.restart()
else:
logger.debug('post_save from %s' % type(instance))
|
def _reinit_daq_daemons(sender, instance, **kwargs)
|
update the daq daemon configuration when changes are applied to the models
| 2.845756 | 2.787354 | 1.020952 |
#if not access(path.dirname(self.log_file_name), W_OK):
# self.stderr.write("logfile path is not writeable\n")
# sys.exit(0)
#if access(self.log_file_name, F_OK) and not access(self.log_file_name, W_OK):
# self.stderr.write("logfile is not writeable\n")
# sys.exit(0)
if message_short is None:
message_len = len(message)
if message_len > 35:
message_short = message[0:31] + '...'
else:
message_short = message
#log_ob = Log(message=message, level=level, message_short=message_short, timestamp=time())
#if user:
# log_ob.user = user
#log_ob.save()
stdout = open(log_file_name, "a+")
stdout.write("%s (%s,%d):%s\n" % (datetime.now().isoformat(' '),'none',level,message))
stdout.flush()
|
def add(message, level=0, user=None, message_short=None,log_file_name='%s/pyscada_daemon.log' % settings.BASE_DIR,)
|
add a new message/error notice to the log
<0 - Debug
1 - Emergency
2 - Critical
3 - Errors
4 - Alerts
5 - Warnings
6 - Notification (webnotice)
7 - Information (webinfo)
8 - Notification (notice)
9 - Information (info)
| 2.559479 | 2.706424 | 0.945705 |
if type(instance) is VISADevice:
post_save.send_robust(sender=Device, instance=instance.visa_device)
elif type(instance) is VISAVariable:
post_save.send_robust(sender=Variable, instance=instance.visa_variable)
elif type(instance) is VISADeviceHandler:
# todo
pass
elif type(instance) is ExtendedVISAVariable:
post_save.send_robust(sender=Variable, instance=Variable.objects.get(pk=instance.pk))
elif type(instance) is ExtendedVISADevice:
post_save.send_robust(sender=Device, instance=Device.objects.get(pk=instance.pk))
|
def _reinit_daq_daemons(sender, instance, **kwargs)
|
update the daq daemon configuration when changes are applied to the models
| 2.587212 | 2.554612 | 1.012761 |
import subprocess
output = {}
try:
apc_status = subprocess.check_output("/sbin/apcaccess").decode()  # check_output returns bytes on Python 3
except:
return None
output['timestamp'] = time()
for line in apc_status.split('\n'):
(key, spl, val) = line.partition(': ')
key = key.rstrip().upper()
val = val.strip()
val = val.split(' ')[0]
if key == 'STATUS':
output[key] = True if val.upper() == 'ONLINE' else False
elif key in ['LINEV', 'BATTV', 'BCHARGE', 'TIMELEFT', 'LOADPCT']:
output[key] = float(val)
return output
|
def query_apsupsd_status()
|
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), # %
| 4.165565 | 2.937591 | 1.418021 |
if self.inst is None:
logger.error("Visa-MDO3014-read data-Self.inst : None")
return None
if device_property == 'present_value':
return self.parse_value(self.inst.query(':READ?'))
else:
value = self.inst.query(device_property)
logger.info("Visa-MDO3014-read data-property : %s - value : %s" %(device_property, value))
return self.parse_value(value)
return None
|
def read_data(self, device_property)
|
read values from the device
| 4.873218 | 4.728721 | 1.030557 |
if 'init' in request.POST:
init = bool(float(request.POST['init']))
else:
init = False
active_variables = []
if 'variables[]' in request.POST:
active_variables = request.POST.getlist('variables[]')
active_variable_properties = []
if 'variable_properties[]' in request.POST:
active_variable_properties = request.POST.getlist('variable_properties[]')
timestamp_from = time.time()
if 'timestamp_from' in request.POST:
timestamp_from = float(request.POST['timestamp_from']) / 1000.0
timestamp_to = time.time()
if 'timestamp_to' in request.POST:
timestamp_to = min(timestamp_to, float(request.POST['timestamp_to']) / 1000.0)
if timestamp_to == 0:
timestamp_to = time.time()
if timestamp_from == 0:
timestamp_from = time.time() - 60  # was '==', a no-op comparison
if timestamp_to - timestamp_from > 120 * 60:
timestamp_from = timestamp_to - 120 * 60
#if not init:
#timestamp_to = min(timestamp_from + 30, timestamp_to)
if len(active_variables) > 0:
data = RecordedData.objects.db_data(
variable_ids=active_variables,
time_min=timestamp_from,
time_max=timestamp_to,
time_in_ms=True,
query_first_value=init)
else:
data = None
if data is None:
data = {}
data['variable_properties'] = {}
for item in VariableProperty.objects.filter(pk__in=active_variable_properties):
data['variable_properties'][item.pk] = item.value()
data["server_time"] = time.time() * 1000
return HttpResponse(json.dumps(data), content_type='application/json')
|
def get_cache_data(request)
|
else:
active_variables = list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'charts__variables', flat=True))
active_variables += list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'xy_charts__variables', flat=True))
active_variables += list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'control_items__variable', flat=True))
active_variables += list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'custom_html_panels__variables', flat=True))
active_variables = list(set(active_variables))
| 2.489292 | 2.500976 | 0.995328 |
if self.inst is None:
return
vp_func = variable_instance.variableproperty_set.filter(name=':FUNC').first()
measure_function = ''
if vp_func:
if vp_func.value():
measure_function = ':FUNC "%s";'%vp_func.value()
trig_delay = 0.1
if variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE':
return self.parse_value(self.inst.query(':FETCH?'))
m = re.search('(PRESENT_VALUE_CH)([0-9]*)', variable_instance.visavariable.device_property.upper())
if m:
return self.parse_value(
self.inst.query(':route:close (@%s);%s:TRIG:DEL %1.3f;:fetch?'%(m.group(2),measure_function,trig_delay)))
return self.parse_value(self.inst.query(variable_instance.visavariable.device_property.upper()))
|
def read_data(self,variable_instance)
|
read values from the device
| 5.987526 | 5.725579 | 1.04575 |
variable = self._variables[variable_id]
if task.property_name != '':
# write the property to VariableProperty use that for later read
vp = VariableProperty.objects.update_or_create_property(variable=variable,
name='VISA:%s' % task.property_name.upper(),
value=value, value_class='FLOAT64')
return True
return False
i = 0
j = 0
while i < 10:
try:
self.inst.read_termination = '\n'
self.inst.query('*IDN?')
i = 12
j = 1
except:
self.connect()
time.sleep(1)
i += 1
logger.error("Keithley connect error i : %s" %i)
if j == 0:
logger.error("Keithley-Instrument not connected")
return False
# if variable_id == 'present_value':
if task.variable.visavariable.device_property.upper() == 'PRESENT_VALUE':
i = 0
while i < 10:
Vseff = ""
try:
Vseff = self.parse_value(self.inst.query(':READ?'))
except:
Vseff = ""
if Vseff is None or Vseff == "":  # '==' not 'is' for string comparison
i += 1
logger.error("Keithley - Error Read - i : %s" %i)
self.inst.write('*CLS')
else:
i = 12
# Call Phase Osc
# cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Find_Phase_Osc').id, value=Vseff, start=time.time())
# cwt.save()
logger.info("Variable %s - task.property_name : %s - value %s" %(variable, task.property_name.upper(), value))
vp = VariableProperty.objects.update_or_create_property(variable=variable,
name='VISA:%s' % task.property_name.upper(),
value=value, value_class='FLOAT64')
#vp = VariableProperty.objects.update_or_create_property(variable=variable,
# name='VISA:%s' % task.property_name.upper())
return Vseff
if task.variable.visavariable.device_property.upper() == 'SET_AC_RANGE_RES':  # variable_instance is undefined here; task.variable is the variable being written
# if variable_id == 'set_ac_range_res':
CMD = str('*RST;:FUNC "VOLTage:AC";:VOLTage:AC:RANGe:AUTO 1;:VOLTage:AC:RESolution MIN;:TRIG:DEL MIN')
self.inst.write(CMD)
return True
else:
logger.error("Keithley - variable_id : %s" %variable_id)
return self.parse_value(self.inst.query(str(variable_id)+' '+str(value)))
|
def write_data(self, variable_id, value, task)
|
write values to the device
| 4.549851 | 4.483078 | 1.014894 |
# The word registers are read as BCD: formatting the raw word as hex
# yields the decimal digits, e.g. 0x0375 -> "375" -> 3.75 after scaling.
if info == 'ad1':
    data = smbus_device.read_word_data(0x69, 0x05)
    data = format(data, "02x")
    return (float(data) / 100) # °C
if info == 'ad2':
data = smbus_device.read_word_data(0x69, 0x07)
data = format(data,"02x")
return (float(data) / 100) # °C
if info == 'rpi_level':
data = smbus_device.read_word_data(0x69, 0x03)
data = format(data,"02x")
return (float(data) / 100) # Volt
if info == 'bat_level':
data = smbus_device.read_word_data(0x69, 0x01)
data = format(data,"02x")
return (float(data) / 100) # Volt
if info == 'pwr_mode':
data = smbus_device.read_word_data(0x69, 0x00)
return data & ~(1 << 7) # 1 RPi, 2 Bat
if info == 'sot23_temp':
data = smbus_device.read_byte_data(0x69, 0x0C)
data = format(data,"02x")
return data
if info == 'to92_temp':
data = smbus_device.read_byte_data(0x69, 0x0D)
data = format(data,"02x")
return data
return None
|
def ups_pico(smbus_device,info)
|
query data via smbus (I2C) from a UPS Pico device
| 1.963204 | 1.972468 | 0.995304 |
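A minimal usage sketch for the function above, assuming a Raspberry Pi with the smbus package installed and a UPS PIco answering at I2C address 0x69 on bus 1 (the bus number and wiring are assumptions):
import smbus

bus = smbus.SMBus(1)               # I2C bus 1 on most Raspberry Pi models (assumption)
print(ups_pico(bus, 'bat_level'))  # battery voltage in volts
print(ups_pico(bus, 'pwr_mode'))   # 1 = powered from the RPi, 2 = from battery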
qs = super(VISADeviceAdmin, self).get_queryset(request)
return qs.filter(protocol_id=PROTOCOL_ID)
|
def get_queryset(self, request)
|
Limit the queryset to devices that use the VISA protocol.
| 7.331535 | 7.406212 | 0.989917 |
qs = super(VISAVariableAdmin, self).get_queryset(request)
return qs.filter(device__protocol_id=PROTOCOL_ID)
|
def get_queryset(self, request)
|
Limit the queryset to variables whose device uses the VISA protocol.
| 8.793903 | 10.106796 | 0.870098 |
if _type.upper() == 'FLOAT64':
return float64(value)
elif _type.upper() == 'FLOAT32':
return float32(value)
elif _type.upper() == 'INT32':
return int32(value)
elif _type.upper() == 'UINT16':
return uint16(value)
elif _type.upper() == 'INT16':
return int16(value)
elif _type.upper() == 'BOOLEAN':
return uint8(value)
else:
return float64(value)
|
def _cast_value(value, _type)
|
cast value to _type
| 1.719066 | 1.704544 | 1.00852 |
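A quick sanity check of the casts above; this sketch assumes the numpy scalar types are in scope as in the original module:
from numpy import float32, float64, int16, int32, uint8, uint16

print(_cast_value('3.14', 'float32'))  # numpy.float32(3.14); _type is upper-cased internally
print(_cast_value(1, 'BOOLEAN'))       # booleans are stored as uint8(1)
print(_cast_value(7, 'SOMETHING'))     # unknown types fall back to float64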
for mail in Mail.objects.filter(done=False, send_fail_count__lt=3):
    # send all emails that have not yet been sent or that failed to send
    # fewer than three times
mail.send_mail()
for mail in Mail.objects.filter(done=True, timestamp__lt=time() - 60 * 60 * 24 * 7):
    # delete all done emails older than one week
mail.delete()
return 1, None
|
def loop(self)
|
check for mails and send them
| 5.505316 | 4.989681 | 1.10334 |
s = float(sum(l))
if s == 0:
raise ValueError("Cannot normalize list with sum 0")
return [x / s for x in l]
|
def normalize(l)
|
Normalizes input list.
Parameters
----------
l: list
The list to be normalized
Returns
-------
The normalized list
Raises
------
ValueError, if the list sums to zero
| 3.977653 | 3.950273 | 1.006931 |
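A short usage example of normalize (plain Python, no extra dependencies):
print(normalize([1, 2, 5]))   # [0.125, 0.25, 0.625]
try:
    normalize([0, 0, 0])
except ValueError as err:
    print(err)                # Cannot normalize list with sum 0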
start = 0
if not boundary:
start = 1
for i in range(start, scale + (1 - start)):
for j in range(start, scale + (1 - start) - i):
k = scale - i - j
yield (i, j, k)
|
def simplex_iterator(scale, boundary=True)
|
Systematically iterates through a lattice of points on the 2-simplex.
Parameters
----------
scale: Int
    The normalized scale of the simplex, i.e. N such that points (x,y,z)
    satisfy x + y + z == N
boundary: bool, True
Include the boundary points (tuples where at least one
coordinate is zero)
Yields
------
3-tuples. There are binom(scale+2, 2) points (the triangular
number for scale + 1), minus the 3*scale boundary points when boundary=False
| 2.835701 | 3.394461 | 0.835391 |
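A usage sketch illustrating both boundary settings:
print(list(simplex_iterator(2)))
# [(0, 0, 2), (0, 1, 1), (0, 2, 0), (1, 0, 1), (1, 1, 0), (2, 0, 0)]
print(list(simplex_iterator(3, boundary=False)))
# [(1, 1, 1)] -- only the single interior point remains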
if not permutation:
return p
return [p[int(permutation[i])] for i in range(len(p))]
|
def permute_point(p, permutation=None)
|
Permutes the point according to the permutation keyword argument. The
default permutation is "012" which does not change the order of the
coordinates. To rotate counterclockwise, use "120" and to rotate clockwise
use "201".
| 4.140982 | 4.053269 | 1.02164 |
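A usage sketch; note that a permuted result comes back as a list, while the default path returns the input unchanged:
p = (10, 20, 30)
print(permute_point(p))                     # (10, 20, 30) -- identity, input returned as-is
print(permute_point(p, permutation="120"))  # [20, 30, 10] -- counterclockwise rotation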
permuted = permute_point(p, permutation=permutation)
a = permuted[0]
b = permuted[1]
x = a + b/2.
y = SQRT3OVER2 * b
return numpy.array([x, y])
|
def project_point(p, permutation=None)
|
Maps (x,y,z) coordinates to planar simplex.
Parameters
----------
p: 3-tuple
The point to be projected p = (x, y, z)
permutation: string, None, equivalent to "012"
The order of the coordinates, counterclockwise from the origin
| 4.055794 | 6.226259 | 0.651401 |
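A sketch of the projection at the three corners, assuming the module-level constant SQRT3OVER2 = sqrt(3)/2:
import numpy
SQRT3OVER2 = numpy.sqrt(3) / 2   # module-level constant (assumption)

print(project_point((1, 0, 0)))  # [1. 0.]
print(project_point((0, 1, 0)))  # [0.5  0.866...], i.e. (1/2, sqrt(3)/2)
print(project_point((0, 0, 1)))  # [0. 0.]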
xs, ys = unzip([project_point(p, permutation=permutation) for p in s])
return xs, ys
|
def project_sequence(s, permutation=None)
|
Projects a point or sequence of points using `project_point` to lists xs, ys
for plotting with Matplotlib.
Parameters
----------
s, Sequence-like
The sequence of points (3-tuples) to be projected.
Returns
-------
xs, ys: The sequence of projected points in coordinates as two lists
| 7.397406 | 6.001121 | 1.232671 |
p = []
for k in range(3):
p.append(conversion[axisorder[k]](q[k]))
return tuple(p)
|
def convert_coordinates(q, conversion, axisorder)
|
Convert a 3-tuple in data coordinates into simplex data
coordinates for plotting.
Parameters
----------
q: 3-tuple
the point to be plotted in data coordinates
conversion: dict
keys = ['b','l','r']
values = lambda function giving the conversion
axisorder: String giving the order of the axes for the coordinate tuple
e.g. 'blr' for bottom, left, right coordinates.
Returns
-------
p: 3-tuple
The point converted to simplex coordinates.
| 3.716253 | 5.16106 | 0.720056 |
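A small worked example with a hypothetical conversion dict:
conversion = {'b': lambda x: x - 10,    # shift the bottom axis
              'l': lambda x: x / 2.0,   # rescale the left axis
              'r': lambda x: x}         # leave the right axis alone
print(convert_coordinates((15, 40, 5), conversion, 'blr'))   # (5, 20.0, 5)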
fb = float(scale) / float(limits['b'][1] - limits['b'][0])
fl = float(scale) / float(limits['l'][1] - limits['l'][0])
fr = float(scale) / float(limits['r'][1] - limits['r'][0])
conversion = {"b": lambda x: (x - limits['b'][0]) * fb,
"l": lambda x: (x - limits['l'][0]) * fl,
"r": lambda x: (x - limits['r'][0]) * fr}
return conversion
|
def get_conversion(scale, limits)
|
Get the conversion equations for each axis.
limits: dict of min and max values for the axes in the order blr.
| 1.906882 | 1.84289 | 1.034724 |
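For example, mapping data in [0, 10] on every axis onto a simplex of scale 100 (values are hypothetical):
limits = {'b': (0, 10), 'l': (0, 10), 'r': (0, 10)}
conversion = get_conversion(scale=100, limits=limits)
print(conversion['b'](2.5))   # 25.0 -- data value 2.5 maps to 25 of 100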
conversion = get_conversion(scale, limits)
return [convert_coordinates(q, conversion, axisorder) for q in qs]
|
def convert_coordinates_sequence(qs, scale, limits, axisorder)
|
Take a sequence of 3-tuples in data coordinates and convert them
to simplex coordinates for plotting. This is needed for custom
plots where the scale of the simplex axes is set within limits rather
than being defined by the scale parameter.
Parameters
----------
qs, sequence of 3-tuples
The points to be plotted in data coordinates.
scale: int
The scale parameter for the plot.
limits: dict
keys = ['b','l','r']
values = min,max data values for this axis.
axisorder: String giving the order of the axes for the coordinate tuple
e.g. 'blr' for bottom, left, right coordinates.
Returns
-------
s, list of 3-tuples
the points converted to simplex coordinates
| 3.964312 | 10.572949 | 0.374949 |
ax.set_ylim((-0.10 * scale, .90 * scale))
ax.set_xlim((-0.05 * scale, 1.05 * scale))
|
def resize_drawing_canvas(ax, scale=1.)
|
Makes sure the drawing surface is large enough to display projected
content.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
scale: float, 1.0
Simplex scale size.
| 3.346872 | 3.979013 | 0.841131 |
if not ax:
return
if axis.lower() in ["both", "x", "horizontal"]:
ax.set_xticks([], [])
if axis.lower() in ["both", "y", "vertical"]:
ax.set_yticks([], [])
|
def clear_matplotlib_ticks(ax=None, axis="both")
|
Clears the default matplotlib axes, or the one specified by the axis
argument.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
axis: string, "both"
The axis to clear: "x" or "horizontal", "y" or "vertical", or "both"
| 2.533383 | 2.883756 | 0.878501 |
if not ax:
fig, ax = pyplot.subplots()
xs, ys = project_sequence(points, permutation=permutation)
ax.plot(xs, ys, **kwargs)
return ax
|
def plot(points, ax=None, permutation=None, **kwargs)
|
Analogous to matplotlib.plot. Plots trajectory points where each point is a
tuple (x,y,z) satisfying x + y + z = scale (not checked). The tuples are
projected and plotted as a curve.
Parameters
----------
points: List of 3-tuples
The list of tuples to be plotted as a connected curve.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
kwargs:
Any kwargs to pass through to matplotlib.
| 3.581579 | 4.847217 | 0.738894 |
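A usage sketch through the library's TernaryAxesSubplot wrapper, mirroring the style of conc_err_plot further down (the scale and points are hypothetical):
import ternary

figure, tax = ternary.figure(scale=1.0)
tax.boundary(linewidth=1.0)
points = [(0.8, 0.1, 0.1), (0.4, 0.4, 0.2), (0.1, 0.2, 0.7)]   # each sums to scale
tax.plot(points, linewidth=2.0)
tax.show()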
if not ax:
fig, ax = pyplot.subplots()
cmap = get_cmap(cmap)
xs, ys = project_sequence(points, permutation=permutation)
# We want to color each segment independently...which is annoying.
segments = []
for i in range(len(xs) - 1):
cur_line = []
x_before = xs[i]
y_before = ys[i]
x_after = xs[i+1]
y_after = ys[i+1]
cur_line.append([x_before, y_before])
cur_line.append([x_after, y_after])
segments.append(cur_line)
segments = np.array(segments)
line_segments = matplotlib.collections.LineCollection(segments, cmap=cmap, **kwargs)
line_segments.set_array(np.arange(len(segments)))
ax.add_collection(line_segments)
return ax
|
def plot_colored_trajectory(points, cmap=None, ax=None, permutation=None,
**kwargs)
|
Plots trajectories with changing color, similar to `plot`. Trajectory points
are tuples (x,y,z) satisfying x + y + z = scale (not checked). The tuples are
projected and plotted as a curve.
Parameters
----------
points: List of 3-tuples
The list of tuples to be plotted as a connected curve.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
cmap: String or matplotlib.colors.Colormap, None
The name of the Matplotlib colormap to use.
kwargs:
Any kwargs to pass through to matplotlib.
| 2.329405 | 2.537149 | 0.918119 |
if not ax:
fig, ax = pyplot.subplots()
xs, ys = project_sequence(points, permutation=permutation)
ax.scatter(xs, ys, vmin=vmin, vmax=vmax, **kwargs)
if colorbar and (colormap is not None):
    if cb_kwargs is not None:
colorbar_hack(ax, vmin, vmax, colormap, scientific=scientific,
cbarlabel=cbarlabel, **cb_kwargs)
else:
colorbar_hack(ax, vmin, vmax, colormap, scientific=scientific,
cbarlabel=cbarlabel)
return ax
|
def scatter(points, ax=None, permutation=None, colorbar=False, colormap=None,
vmin=0, vmax=1, scientific=False, cbarlabel=None, cb_kwargs=None,
**kwargs)
|
Plots trajectory points where each point satisfies x + y + z = scale.
First argument is a list or numpy array of tuples of length 3.
Parameters
----------
points: List of 3-tuples
The list of tuples to be scatter-plotted.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
colorbar: bool, False
Show colorbar.
colormap: String or matplotlib.colors.Colormap, None
The name of the Matplotlib colormap to use.
vmin: int, 0
Minimum value for colorbar.
vmax: int, 1
Maximum value for colorbar.
cb_kwargs: dict
Any additional kwargs to pass to colorbar
kwargs:
Any kwargs to pass through to matplotlib.
| 2.391427 | 2.768475 | 0.863807 |
enth = abs(energy - concs[0]*A - concs[1]*B - concs[2]*C)
return enth
|
def _en_to_enth(energy,concs,A,B,C)
|
Converts an energy to an enthalpy.
Converts energy to enthalpy using the following formula:
Enthalpy = energy - (energy contribution from A) - (energy contribution from B) -
(energy contribution from C)
An absolute value is taken afterward for convenience.
Parameters
----------
energy : float
The energy of the structure
concs : list of floats
The concentrations of each element
A : float
The energy of pure A
B : float
The energy of pure B
C : float
The energy of pure C
Returns
-------
enth : float
The enthalpy of formation.
| 3.76261 | 4.563703 | 0.824464 |
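A worked example with hypothetical pure-element energies:
A, B, C = -3.0, -2.0, -1.0            # energies of pure A, B, C (hypothetical)
concs = [0.5, 0.25, 0.25]
print(_en_to_enth(-2.5, concs, A, B, C))
# |-2.5 - 0.5*(-3.0) - 0.25*(-2.0) - 0.25*(-1.0)| = |-0.25| = 0.25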
pureA = [energy[0][0],energy[0][1]]
pureB = [energy[1][0],energy[1][1]]
pureC = [energy[2][0],energy[2][1]]
enthalpy = []
for en in energy:
c = en[2]
conc = [float(i)/sum(c) for i in c]
CE = _en_to_enth(en[0],conc,pureA[0],pureB[0],pureC[0])
VASP = _en_to_enth(en[1],conc,pureA[1],pureB[1],pureC[1])
enthalpy.append([CE,VASP,c])
return enthalpy
|
def _energy_to_enthalpy(energy)
|
Converts energy to enthalpy.
This function takes the energies stored in the energy array and
converts them to formation enthalpy.
Parameters
---------
energy : list of lists of floats
Returns
-------
enthalpy : list of lists containing the enthalpies.
| 2.999569 | 3.006963 | 0.997541 |
err_vals = []
for en in vals:
c = en[2]
conc = [float(i)/sum(c) for i in c]
err = abs(en[0]-en[1])
err_vals.append([conc,err])
return err_vals
|
def _find_error(vals)
|
Find the errors in the energy values.
This function finds the errors in the enthalpies.
Parameters
----------
vals : list of lists of floats
Returns
-------
err_vals : list of lists containing the errors.
| 5.272326 | 4.281453 | 1.231434 |
energy = []
with open(fname,'r') as f:
for line in f:
CE = abs(float(line.strip().split()[0]))
VASP = abs(float(line.strip().split()[1]))
conc = [i for i in line.strip().split()[2:]]
conc_f = []
for c in conc:
if '[' in c and ']' in c:
conc_f.append(int(c[1:-1]))
elif '[' in c:
conc_f.append(int(c[1:-1]))
elif ']' in c or ',' in c:
conc_f.append(int(c[:-1]))
else:
conc_f.append(int(c))
energy.append([CE,VASP,conc_f])
return energy
|
def _read_data(fname)
|
Reads data from file.
Reads the data in 'fname' into a list where each list entry contains
[energy predicted, energy calculated, list of concentrations].
Parameters
----------
fname : str
The name and path to the data file.
Returns
-------
energy : list of lists of floats
A list of the energies and the concentrations.
| 2.534659 | 2.417693 | 1.048379 |
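A sketch of the expected file layout; the file name is hypothetical. Each line carries the predicted energy, the calculated energy, and the concentration list:
# A data line is expected to look like:
#   -2.5 -2.4 [1, 2, 1]
energies = _read_data("enthalpies.txt")   # hypothetical file name
print(energies[0])  # e.g. [2.5, 2.4, [1, 2, 1]] (energies are stored as absolute values)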
energies = _read_data(fname)
enthalpy = _energy_to_enthalpy(energies)
this_errors = _find_error(enthalpy)
points = []
colors = []
for er in this_errors:
concs = er[0]
points.append((concs[0]*100,concs[1]*100,concs[2]*100))
colors.append(er[1])
scale = 100
figure, tax = ternary.figure(scale=scale)
tax.boundary(linewidth = 1.0)
tax.set_title("Errors in Convex Hull Predictions.",fontsize=20)
tax.gridlines(multiple=10,color="blue")
tax.scatter(points,vmax=max(colors),colormap=plt.cm.viridis,colorbar=True,c=colors,cmap=plt.cm.viridis)
tax.show()
|
def conc_err_plot(fname)
|
Plots the error in the CE data.
This plots the error in the CE predictions within a ternary concentration diagram.
Parameters
----------
fname : string containing the input file name.
| 4.153135 | 3.920087 | 1.05945 |
ternary_ax = TernaryAxesSubplot(ax=ax, scale=scale, permutation=permutation)
return ternary_ax.get_figure(), ternary_ax
|
def figure(ax=None, scale=None, permutation=None)
|
Wraps a Matplotlib AxesSubplot or generates a new one. Emulates matplotlib's
> figure, ax = pyplot.subplots()
Parameters
----------
ax: AxesSubplot, None
The matplotlib AxesSubplot to wrap
scale: float, None
The scale factor of the ternary plot
| 3.849623 | 4.35164 | 0.884637 |
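A sketch of the wrapping behavior, assuming the library exposes this function as ternary.figure:
from matplotlib import pyplot
import ternary

fig, ax = pyplot.subplots()
figure, tax = ternary.figure(ax=ax, scale=10)   # wrap an existing AxesSubplot
tax.boundary()
tax.show()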
figure = self.get_figure()
callback = partial(mpl_redraw_callback, tax=self)
event_names = ('resize_event', 'draw_event')
for event_name in event_names:
figure.canvas.mpl_connect(event_name, callback)
|
def _connect_callbacks(self)
|
Connect matplotlib resize and draw callbacks.
| 4.904956 | 4.048504 | 1.211548 |
ax = self.get_axes()
ax.set_title(title, **kwargs)
|
def set_title(self, title, **kwargs)
|
Sets the title on the underlying matplotlib AxesSubplot.
| 3.757476 | 2.922765 | 1.285589 |
if not position:
position = (-offset, 3./5, 2./5)
self._labels["left"] = (label, position, rotation, kwargs)
|
def left_axis_label(self, label, position=None, rotation=60, offset=0.08,
**kwargs)
|
Sets the label on the left axis.
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 60
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
| 6.021843 | 7.822513 | 0.769809 |